from openai import OpenAI

from app.config import DEEPSEEK_API_KEY
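
# app.config is not shown in this file; a minimal sketch of what it is
# assumed to provide (the key read from an environment variable) might be:
#
#     # app/config.py (hypothetical)
#     import os
#
#     DEEPSEEK_API_KEY = os.environ.get("DEEPSEEK_API_KEY", "")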


def get_chat_completion(
    user_prompt: str,
    system_prompt: str = "You are a helpful AI assistant.",
    model: str = "deepseek/deepseek-chat-v3-0324:free",
    api_key: str = DEEPSEEK_API_KEY,
) -> str:
""" | |
Get chat completion from DeepSeek model via OpenRouter API. | |
Args: | |
user_prompt: The user's input prompt | |
system_prompt: The system role prompt (default is generic assistant) | |
model: The model to use (default is DeepSeek-V2.5) | |
api_key: API key for OpenRouter (default from config) | |
Returns: | |
The generated response from the model | |
""" | |
    # OpenRouter exposes an OpenAI-compatible API, so the standard OpenAI
    # client works once pointed at OpenRouter's base URL.
    client = OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=api_key,
    )
    # Optional headers OpenRouter uses to attribute traffic to an app.
    headers = {
        "HTTP-Referer": "https://huggingface.co./spaces/Nekoko/NekoAI-Lab",
        "X-Title": "Meme-Search",
    }
    completion = client.chat.completions.create(
        extra_headers=headers,
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    content = completion.choices[0].message.content
    print(">>>> LLM Response", content)
    return content
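

# A hedged sketch, not part of the original module: the same OpenRouter
# endpoint supports the OpenAI SDK's streaming mode via stream=True, where
# each chunk carries an incremental text delta. The function name and
# structure below are illustrative assumptions.
def get_chat_completion_streamed(
    user_prompt: str,
    system_prompt: str = "You are a helpful AI assistant.",
    model: str = "deepseek/deepseek-chat-v3-0324:free",
    api_key: str = DEEPSEEK_API_KEY,
) -> str:
    client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=api_key)
    stream = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        stream=True,
    )
    # Accumulate the streamed deltas into one string; deltas may be None.
    parts = []
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            parts.append(delta)
    return "".join(parts)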


# Example usage:
if __name__ == "__main__":
    response = get_chat_completion(
        user_prompt="What's the capital of France?",
        system_prompt="You are a knowledgeable geography assistant.",
    )
    print(response)
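
    # A minimal sketch of error handling, not part of the original module:
    # the OpenAI SDK raises subclasses of openai.APIError (for example
    # openai.RateLimitError) when a request fails, so the call can be
    # wrapped like this. The fallback message is purely illustrative.
    import openai

    try:
        answer = get_chat_completion(user_prompt="Ping?")
    except openai.APIError as exc:
        answer = f"Request to OpenRouter failed: {exc}"
    print(answer)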