import { HF_TOKEN } from "$env/static/private";
import { error } from "@sveltejs/kit";
import { InferenceClient } from "@huggingface/inference";

if (!HF_TOKEN) {
  console.error("HF_TOKEN environment variable not set.");
  throw error(500, "Server configuration error: Missing API Token");
}

const client = new InferenceClient(HF_TOKEN);
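
// Expected request body (inferred from the validation below, not from a separate spec):
//   { "loraModelId": "<LoRA model repo id>", "prompt": "<text prompt>" }
// On success the handler responds with the raw image Blob; on failure it throws a
// SvelteKit HTTP error with an explanatory message.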
/** @type {import('./$types').RequestHandler} */
export async function POST({ request }) {
  const { loraModelId, prompt } = await request.json();

  if (!loraModelId) {
    throw error(400, "Missing loraModelId in request body");
  }
  if (!prompt) {
    throw error(400, "Missing prompt in request body");
  }

  console.log(
    `API: Generating image with Fal.ai - LoRA: "${loraModelId}", Prompt: "${prompt}"`
  );
  try {
    // Call Fal AI provider using textToImage
    const imageBlob = await client.textToImage({
      provider: "fal-ai", // Specify Fal AI as the provider
      model: loraModelId, // Use the provided loraModelId as the model
      inputs: prompt, // Use the provided prompt as the input
      // Optional parameters can be added here if needed
    });

    console.log("Fal AI image generation successful. Returning Blob.");

    // Return the Blob directly in the response
    return new Response(imageBlob, {
      headers: {
        "Content-Type": imageBlob.type, // Set correct content type (e.g., image/png)
      },
    });
  } catch (err) {
    console.error("Error during Fal AI textToImage call:", err);
    // Check if the error object has specific properties for better reporting
    const message = err.message || "Unknown error during image generation";
    const status = err.status || 500;
    throw error(status, `Error generating image via Fal AI: ${message}`);
  }
}
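
// Example client-side call (a minimal sketch, not part of this endpoint): it assumes
// the route is mounted at /api/generate-image; adjust the path to wherever this
// +server.js file actually lives.
//
// async function generateImage(loraModelId, prompt) {
//   const res = await fetch("/api/generate-image", {
//     method: "POST",
//     headers: { "Content-Type": "application/json" },
//     body: JSON.stringify({ loraModelId, prompt }),
//   });
//   if (!res.ok) {
//     // Errors thrown with SvelteKit's error() helper arrive as an error response body
//     throw new Error(`Image generation failed: ${await res.text()}`);
//   }
//   // The endpoint returns raw image bytes, so build an object URL for display
//   const blob = await res.blob();
//   return URL.createObjectURL(blob);
// }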