import { HF_TOKEN } from "$env/static/private";
import { error } from "@sveltejs/kit";
import { InferenceClient } from "@huggingface/inference";

if (!HF_TOKEN) {
  // Fail fast at module load: a plain Error is used here because SvelteKit's
  // error() helper produces HttpErrors intended for request-time handling.
  console.error("HF_TOKEN environment variable not set.");
  throw new Error("Server configuration error: missing HF_TOKEN");
}

const client = new InferenceClient(HF_TOKEN);

/** @type {import('./$types').RequestHandler} */
export async function POST({ request }) {
  const { loraModelId, prompt } = await request.json();

  if (!loraModelId) {
    throw error(400, "Missing loraModelId in request body");
  }
  if (!prompt) {
    throw error(400, "Missing prompt in request body");
  }

  console.log(
    `API: Generating image with Fal.ai - LoRA: "${loraModelId}", Prompt: "${prompt}"`
  );

  try {
    // Call Fal AI provider using textToImage
    const imageBlob = await client.textToImage({
      provider: "fal-ai", // Specify Fal AI as the provider
      model: loraModelId, // Use the provided loraModelId as the model
      inputs: prompt, // Use the provided prompt as the input
      // Optional parameters can be added here if needed
    });

    console.log("Fal AI image generation successful. Returning Blob.");

    // Return the Blob directly in the response
    return new Response(imageBlob, {
      headers: {
        "Content-Type": imageBlob.type, // Set correct content type (e.g., image/png)
      },
    });
  } catch (err) {
    console.error("Error during Fal AI textToImage call:", err);
    // Surface the upstream message and status when available, otherwise fall back to a generic 500
    const message =
      err instanceof Error ? err.message : "Unknown error during image generation";
    const status = typeof err.status === "number" ? err.status : 500;
    throw error(status, `Error generating image via Fal AI: ${message}`);
  }
}
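
/*
 * Example client-side usage (a sketch, not part of this file): assumes this
 * handler lives at src/routes/api/generate-image/+server.js — adjust the URL
 * to match the actual route, and note the LoRA repo id below is hypothetical.
 *
 *   const res = await fetch("/api/generate-image", {
 *     method: "POST",
 *     headers: { "Content-Type": "application/json" },
 *     body: JSON.stringify({
 *       loraModelId: "some-user/some-flux-lora", // hypothetical Hub LoRA repo id
 *       prompt: "a watercolor painting of a lighthouse at dusk",
 *     }),
 *   });
 *   if (!res.ok) throw new Error(await res.text());
 *   const blob = await res.blob();              // image bytes returned by this endpoint
 *   const imageUrl = URL.createObjectURL(blob); // usable as <img src={imageUrl}>
 */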