Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 84 additions & 0 deletions src/content/__tests__/resolveAudioClip.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
import { describe, it, expect, vi, beforeEach } from "vitest";
import { resolveAudioClip } from "../resolveAudioClip";

// vi.mock factories are hoisted by Vitest above all imports, so the module
// under test receives these stubs instead of the real implementations.
vi.mock("../../sandboxes/logStep", () => ({
  logStep: vi.fn(),
}));

vi.mock("../selectAudioClip", () => ({
  selectAudioClip: vi.fn(),
}));

vi.mock("../selectAttachedAudioClip", () => ({
  selectAttachedAudioClip: vi.fn(),
}));

// Dynamic imports resolve to the mocked modules registered above, giving the
// tests direct handles for configuring return values and inspecting calls.
const { selectAudioClip } = await import("../selectAudioClip");
const { selectAttachedAudioClip } = await import("../selectAttachedAudioClip");

// Shared fixture returned by both mocked selectors; field set presumably
// mirrors the SelectedAudioClip shape — verify against selectAudioClip.ts.
const mockClip = {
  songFilename: "song.mp3",
  songTitle: "song",
  songBuffer: Buffer.from("audio"),
  startSeconds: 0,
  durationSeconds: 15,
  lyrics: { title: "song", fullLyrics: "", segments: [] },
  clipLyrics: "",
  clipReason: "best",
  clipMood: "happy",
};

describe("resolveAudioClip", () => {
  // Wipe recorded mock calls between cases.
  beforeEach(() => vi.clearAllMocks());

  it("uses selectAttachedAudioClip when songs contain a URL", async () => {
    vi.mocked(selectAttachedAudioClip).mockResolvedValue(mockClip);

    const input = {
      songs: ["hiccups", "https://example.com/track.mp3"],
      lipsync: false,
      githubRepo: "https://github.com/test/repo",
      artistSlug: "artist",
    };
    const clip = await resolveAudioClip(input);

    expect(selectAttachedAudioClip).toHaveBeenCalledWith({
      audioUrl: "https://example.com/track.mp3",
      lipsync: false,
    });
    expect(selectAudioClip).not.toHaveBeenCalled();
    expect(clip).toBe(mockClip);
  });

  it("uses selectAudioClip when songs are all slugs", async () => {
    vi.mocked(selectAudioClip).mockResolvedValue(mockClip);

    const input = {
      songs: ["hiccups", "adhd"],
      lipsync: true,
      githubRepo: "https://github.com/test/repo",
      artistSlug: "artist",
    };
    const clip = await resolveAudioClip(input);

    // The full payload is forwarded untouched to the repo-based selector.
    expect(selectAudioClip).toHaveBeenCalledWith(input);
    expect(selectAttachedAudioClip).not.toHaveBeenCalled();
    expect(clip).toBe(mockClip);
  });

  it("uses selectAudioClip when songs is undefined", async () => {
    vi.mocked(selectAudioClip).mockResolvedValue(mockClip);

    const input = {
      songs: undefined,
      lipsync: false,
      githubRepo: "https://github.com/test/repo",
      artistSlug: "artist",
    };
    const clip = await resolveAudioClip(input);

    expect(selectAudioClip).toHaveBeenCalledWith(input);
    expect(clip).toBe(mockClip);
  });
});
86 changes: 86 additions & 0 deletions src/content/__tests__/resolveFaceGuide.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { resolveFaceGuide } from "../resolveFaceGuide";

// vi.mock factories are hoisted by Vitest above all imports, so
// resolveFaceGuide sees these stubs instead of the real modules.
vi.mock("../../sandboxes/logStep", () => ({
  logStep: vi.fn(),
}));

vi.mock("../fetchImageFromUrl", () => ({
  fetchImageFromUrl: vi.fn(),
}));

vi.mock("../fetchGithubFile", () => ({
  fetchGithubFile: vi.fn(),
}));

// Stub only the storage.upload surface of the fal client; that is the only
// part exercised by the GitHub-fallback path in these tests.
vi.mock("@fal-ai/client", () => ({
  fal: { storage: { upload: vi.fn() } },
}));

// Dynamic imports resolve to the mocked modules registered above.
const { fetchImageFromUrl } = await import("../fetchImageFromUrl");
const { fetchGithubFile } = await import("../fetchGithubFile");
const { fal } = await import("@fal-ai/client");

describe("resolveFaceGuide", () => {
  // Wipe recorded mock calls between cases.
  beforeEach(() => vi.clearAllMocks());

  it("returns null when template does not use face guide", async () => {
    const guide = await resolveFaceGuide({
      usesFaceGuide: false,
      images: ["https://example.com/face.png"],
      githubRepo: "https://github.com/test/repo",
      artistSlug: "artist",
    });

    expect(guide).toBeNull();
    expect(fetchImageFromUrl).not.toHaveBeenCalled();
  });

  it("uses fetchImageFromUrl when images array has entries", async () => {
    vi.mocked(fetchImageFromUrl).mockResolvedValue("https://fal.ai/uploaded.png");

    const guide = await resolveFaceGuide({
      usesFaceGuide: true,
      images: ["https://example.com/face.png"],
      githubRepo: "https://github.com/test/repo",
      artistSlug: "artist",
    });

    expect(fetchImageFromUrl).toHaveBeenCalledWith("https://example.com/face.png");
    expect(guide).toBe("https://fal.ai/uploaded.png");
  });

  it("fetches from GitHub when no images provided", async () => {
    vi.mocked(fetchGithubFile).mockResolvedValue(Buffer.from("image-data"));
    vi.mocked(fal.storage.upload).mockResolvedValue("https://fal.ai/github.png");

    const guide = await resolveFaceGuide({
      usesFaceGuide: true,
      images: undefined,
      githubRepo: "https://github.com/test/repo",
      artistSlug: "artist",
    });

    expect(fetchGithubFile).toHaveBeenCalledWith(
      "https://github.com/test/repo",
      "artists/artist/context/images/face-guide.png",
    );
    expect(guide).toBe("https://fal.ai/github.png");
  });

  it("throws when GitHub face-guide is not found", async () => {
    vi.mocked(fetchGithubFile).mockResolvedValue(null);

    const pending = resolveFaceGuide({
      usesFaceGuide: true,
      images: undefined,
      githubRepo: "https://github.com/test/repo",
      artistSlug: "artist",
    });

    await expect(pending).rejects.toThrow("face-guide.png not found");
  });
});
153 changes: 153 additions & 0 deletions src/content/__tests__/selectAttachedAudioClip.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { selectAttachedAudioClip } from "../selectAttachedAudioClip";

// vi.mock factories are hoisted by Vitest above all imports, so the module
// under test receives these stubs instead of the real implementations.
vi.mock("../../sandboxes/logStep", () => ({
  logStep: vi.fn(),
}));

vi.mock("../transcribeSong", () => ({
  transcribeSong: vi.fn(),
}));

vi.mock("../analyzeClips", () => ({
  analyzeClips: vi.fn(),
}));

// Pin the configured clip duration so assertions on durationSeconds stay
// stable regardless of the real default pipeline config.
vi.mock("../defaultPipelineConfig", () => ({
  DEFAULT_PIPELINE_CONFIG: { clipDuration: 15 },
}));

// Dynamic imports resolve to the mocked modules registered above.
const { transcribeSong } = await import("../transcribeSong");
const { analyzeClips } = await import("../analyzeClips");

describe("selectAttachedAudioClip", () => {
  beforeEach(() => {
    vi.clearAllMocks();
    // Default global fetch stub: a successful download of 100 bytes.
    const okResponse = {
      ok: true,
      arrayBuffer: () => Promise.resolve(new ArrayBuffer(100)),
    };
    vi.stubGlobal("fetch", vi.fn().mockResolvedValue(okResponse));
  });

  afterEach(() => {
    vi.unstubAllGlobals();
  });

  it("downloads audio from the provided URL", async () => {
    vi.mocked(analyzeClips).mockResolvedValue([
      { startSeconds: 0, lyrics: "hello", reason: "good", mood: "happy", hasLyrics: true },
    ]);
    vi.mocked(transcribeSong).mockResolvedValue({
      title: "my-song",
      fullLyrics: "hello world",
      segments: [{ start: 0, end: 5, text: "hello world" }],
    });

    await selectAttachedAudioClip({
      audioUrl: "https://blob.vercel-storage.com/song.mp3",
      lipsync: false,
    });

    expect(fetch).toHaveBeenCalledWith("https://blob.vercel-storage.com/song.mp3");
  });

  it("throws when download fails", async () => {
    // Replace the default stub with a failing response for this case only;
    // afterEach's unstubAllGlobals cleans it up.
    const failingFetch = vi.fn().mockResolvedValue({ ok: false, status: 404, statusText: "Not Found" });
    vi.stubGlobal("fetch", failingFetch);

    const pending = selectAttachedAudioClip({
      audioUrl: "https://example.com/missing.mp3",
      lipsync: false,
    });

    await expect(pending).rejects.toThrow("Failed to download attached audio: 404 Not Found");
  });

  it("derives filename from URL path", async () => {
    vi.mocked(transcribeSong).mockResolvedValue({ title: "my-track", fullLyrics: "", segments: [] });
    vi.mocked(analyzeClips).mockResolvedValue([]);

    const clip = await selectAttachedAudioClip({
      audioUrl: "https://blob.vercel-storage.com/content-attachments/audio/my-track.mp3",
      lipsync: false,
    });

    expect(clip.songFilename).toBe("my-track.mp3");
    expect(clip.songTitle).toBe("my-track");
  });

  it("transcribes the downloaded audio", async () => {
    vi.mocked(transcribeSong).mockResolvedValue({
      title: "song",
      fullLyrics: "lyrics here",
      segments: [{ start: 0, end: 10, text: "lyrics here" }],
    });
    vi.mocked(analyzeClips).mockResolvedValue([
      { startSeconds: 0, lyrics: "lyrics", reason: "best", mood: "chill", hasLyrics: true },
    ]);

    const clip = await selectAttachedAudioClip({
      audioUrl: "https://blob.vercel-storage.com/song.mp3",
      lipsync: false,
    });

    expect(transcribeSong).toHaveBeenCalledWith(expect.any(Buffer), "song.mp3");
    expect(clip.lyrics.fullLyrics).toBe("lyrics here");
  });

  it("prefers clips with lyrics when lipsync is true", async () => {
    vi.mocked(transcribeSong).mockResolvedValue({ title: "song", fullLyrics: "", segments: [] });
    vi.mocked(analyzeClips).mockResolvedValue([
      { startSeconds: 0, lyrics: "", reason: "instrumental", mood: "chill", hasLyrics: false },
      { startSeconds: 30, lyrics: "words", reason: "vocal", mood: "happy", hasLyrics: true },
    ]);

    const clip = await selectAttachedAudioClip({
      audioUrl: "https://blob.vercel-storage.com/song.mp3",
      lipsync: true,
    });

    expect(clip.startSeconds).toBe(30);
  });

  it("returns SelectedAudioClip interface shape", async () => {
    vi.mocked(transcribeSong).mockResolvedValue({
      title: "song",
      fullLyrics: "full lyrics",
      segments: [{ start: 0, end: 10, text: "clip text" }],
    });
    vi.mocked(analyzeClips).mockResolvedValue([
      { startSeconds: 0, lyrics: "clip", reason: "best clip", mood: "energetic", hasLyrics: true },
    ]);

    const clip = await selectAttachedAudioClip({
      audioUrl: "https://blob.vercel-storage.com/song.mp3",
      lipsync: false,
    });

    expect(clip).toMatchObject({
      songFilename: expect.any(String),
      songTitle: expect.any(String),
      songBuffer: expect.any(Buffer),
      startSeconds: expect.any(Number),
      durationSeconds: 15,
      lyrics: expect.any(Object),
      clipLyrics: expect.any(String),
      clipReason: expect.any(String),
      clipMood: expect.any(String),
    });
  });
});
28 changes: 28 additions & 0 deletions src/content/fetchImageFromUrl.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import { fal } from "@fal-ai/client";
import { logStep } from "../sandboxes/logStep";

/**
 * Downloads an image from a public URL and uploads it to fal.ai storage.
 *
 * @param imageUrl - Public URL of the image
 * @returns fal.ai storage URL for the uploaded image
 * @throws Error when the download responds with a non-OK HTTP status
 */
export async function fetchImageFromUrl(imageUrl: string): Promise<string> {
  logStep("Downloading image from URL");
  const response = await fetch(imageUrl);
  if (!response.ok) {
    // Include statusText for consistency with the download error thrown by
    // selectAttachedAudioClip ("Failed to download attached audio: 404 Not Found").
    throw new Error(`Failed to download image: ${response.status} ${response.statusText}`);
  }
  const imageBuffer = Buffer.from(await response.arrayBuffer());

  logStep("Uploading image to fal.ai storage", true, {
    sizeBytes: imageBuffer.byteLength,
  });
  // Preserve the source's content type and filename when available so the
  // uploaded object keeps a sensible name and MIME type.
  const contentType = response.headers.get("content-type") || "image/png";
  const originalName = new URL(imageUrl).pathname.split("/").pop() || "image.png";
  // Renamed from faceGuideFile: this helper is generic, not face-guide specific.
  const imageFile = new File([new Uint8Array(imageBuffer)], originalName, { type: contentType });
  const falUrl = await fal.storage.upload(imageFile);

  logStep("Image uploaded", false, { falUrl });
  return falUrl;
}
23 changes: 23 additions & 0 deletions src/content/resolveAudioClip.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import { logStep } from "../sandboxes/logStep";
import { selectAudioClip, type SelectedAudioClip } from "./selectAudioClip";
import { selectAttachedAudioClip } from "./selectAttachedAudioClip";
import type { CreateContentPayload } from "../schemas/contentCreationSchema";

/**
 * Resolves the audio clip for the content pipeline.
 * If any entry in songs is an http(s) URL, downloads and processes it directly.
 * Otherwise delegates to selectAudioClip to pick from the artist's repo.
 */
export async function resolveAudioClip(
  payload: Pick<CreateContentPayload, "songs" | "lipsync" | "githubRepo" | "artistSlug">,
): Promise<SelectedAudioClip> {
  // Match only real http(s) URLs; the previous bare startsWith("http") would
  // also misclassify a song slug that merely begins with "http" (e.g. "httpdance").
  const songUrl = payload.songs?.find(s => /^https?:\/\//i.test(s));

  if (songUrl) {
    logStep("Using song URL from songs array");
    return selectAttachedAudioClip({ audioUrl: songUrl, lipsync: payload.lipsync });
  }

  logStep("Selecting audio clip from repo");
  return selectAudioClip(payload);
}
Loading
Loading