import json
import os
import random
import time
import uuid
import asyncio
import httpx
from starlette.responses import Response, StreamingResponse
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
# Configuration constants
CONFIG = {
"API": {
"BASE_URL": "https://fragments.e2b.dev",
"API_KEY": os.getenv("API_KEY","sk-123456")
},
"RETRY": {
"MAX_ATTEMPTS": 1,
"DELAY_BASE": 1000
},
"MODEL_CONFIG": {
"o1": {
"id": "o1",
"provider": "OpenAI",
"providerId": "openai",
"name": "o1",
"multiModal": True,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 2,
"max_tokensMax": 0,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
},
"o1-mini": {
"id": "o1",
"provider": "OpenAI",
"providerId": "openai",
"name": "o1-mini",
"multiModal": False,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 2,
"max_tokensMax": 0,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
},
"o3-mini": {
"id": "o3-mini",
"provider": "OpenAI",
"providerId": "openai",
"name": "o3 mini",
"multiModal": False,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 2,
"max_tokensMax": 0,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
},
"gpt-4.5-preview": {
"id": "gpt-4.5-preview",
"provider": "OpenAI",
"providerId": "openai",
"name": "GPT-4.5",
"multiModal": True,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 2,
"max_tokensMax": 0,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
},
"gpt-4o": {
"id": "gpt-4o",
"provider": "OpenAI",
"providerId": "openai",
"name": "GPT-4o",
"multiModal": True,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 2,
"max_tokensMax": 16380,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
},
"gpt-4-turbo": {
"id": "gpt-4-turbo",
"provider": "OpenAI",
"providerId": "openai",
"name": "GPT-4 Turbo",
"multiModal": True,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 2,
"max_tokensMax": 16380,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
},
"gemini-1.5-pro": {
"id": "gemini-1.5-pro-002",
"provider": "Google Vertex AI",
"providerId": "vertex",
"name": "Gemini 1.5 Pro",
"multiModal": True,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 2,
"max_tokensMax": 8192,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
},
"gemini-exp-1121": {
"id": "gemini-exp-1121",
"provider": "Google Generative AI",
"providerId": "google",
"name": "Gemini Experimental 1121",
"multiModal": True,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 2,
"max_tokensMax": 8192,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 40
}
},
"gemini-2.0-flash-exp": {
"id": "models/gemini-2.0-flash-exp",
"provider": "Google Generative AI",
"providerId": "google",
"name": "Gemini 2.0 Flash",
"multiModal": True,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 2,
"max_tokensMax": 8192,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 40
}
},
"claude-3-7-sonnet-latest": {
"id": "claude-3-5-sonnet-latest",
"provider": "Anthropic",
"providerId": "anthropic",
"name": "Claude 3.7 Sonnet",
"multiModal": True,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 1,
"max_tokensMax": 8192,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
},
"claude-3-5-sonnet-latest": {
"id": "claude-3-5-sonnet-latest",
"provider": "Anthropic",
"providerId": "anthropic",
"name": "Claude 3.5 Sonnet",
"multiModal": True,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 1,
"max_tokensMax": 8192,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
},
"claude-3-5-haiku-latest": {
"id": "claude-3-5-haiku-latest",
"provider": "Anthropic",
"providerId": "anthropic",
"name": "Claude 3.5 Haiku",
"multiModal": False,
"Systemprompt": "",
"opt_max": {
"temperatureMax": 1,
"max_tokensMax": 8192,
"presence_penaltyMax": 2,
"frequency_penaltyMax": 2,
"top_pMax": 1,
"top_kMax": 500
}
}
},
"DEFAULT_HEADERS": {
'accept': '*/*',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
'cache-control': 'no-cache',
'content-type': 'application/json',
'origin': 'https://fragments.e2b.dev',
'pragma': 'no-cache',
'priority': 'u=1, i',
'referer': 'https://fragments.e2b.dev/',
'sec-ch-ua': '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0'
},
"MODEL_PROMPT": "Chatting with users and starting role-playing, the most important thing is to pay attention to their latest messages, use only 'text' to output the chat text reply content generated for user messages, and finally output it in code"
}
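# CONFIG drives the whole service: the upstream base URL, the access key that
# clients must present as a Bearer token, per-model limits used to clamp
# sampling parameters, and the browser-like headers sent upstream. A minimal
# sketch of how the key is usually supplied when running locally (the value
# shown is only the placeholder default from the os.getenv call above):
#
#   export API_KEY="sk-123456"
#   python app.py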
# Utility helpers
class Utils:
@staticmethod
def uuidv4():
return str(uuid.uuid4())
@staticmethod
async def config_opt(params, model_config):
if not model_config.get("opt_max"):
return None
options_map = {
"temperature": "temperatureMax",
"max_tokens": "max_tokensMax",
"presence_penalty": "presence_penaltyMax",
"frequency_penalty": "frequency_penaltyMax",
"top_p": "top_pMax",
"top_k": "top_kMax"
}
constrained_params = {}
for key, value in params.items():
max_key = options_map.get(key)
if (max_key and
max_key in model_config["opt_max"] and
value is not None):
constrained_params[key] = min(value, model_config["opt_max"][max_key])
return constrained_params
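# A sketch of what Utils.config_opt does with gpt-4o's opt_max (illustrative
# values, not executed at import time): temperature is clamped to 2,
# max_tokens to 16380, and parameters passed as None are dropped.
#
#   await Utils.config_opt(
#       {"temperature": 5, "max_tokens": 999999, "top_p": None},
#       CONFIG["MODEL_CONFIG"]["gpt-4o"],
#   )
#   # -> {"temperature": 2, "max_tokens": 16380}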
# API client
class ApiClient:
def __init__(self, model_id):
if model_id not in CONFIG["MODEL_CONFIG"]:
raise ValueError(f"不支持的模型: {model_id}")
self.model_config = CONFIG["MODEL_CONFIG"][model_id]
def process_message_content(self, content):
if isinstance(content, str):
return content
if isinstance(content, list):
return "\n".join([item["text"] for item in content if item["type"] == "text"])
if isinstance(content, dict):
return content.get("text")
return None
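    # process_message_content accepts the content shapes OpenAI-style clients
    # commonly send: a plain string, a list of {"type": "text", "text": ...}
    # parts (non-text parts such as image_url are dropped), or a single dict
    # with a "text" key. For example, the multimodal list
    #   [{"type": "text", "text": "hi"}, {"type": "image_url", "image_url": {...}}]
    # is reduced to the string "hi".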
async def prepare_chat_request(self, request, config=None):
opt_config = config or {"model": self.model_config["id"]}
return {
"userID": Utils.uuidv4(),
"messages": await self.transform_messages(request),
"template": {
"text": {
"name": CONFIG["MODEL_PROMPT"],
"lib": [""],
"file": "pages/ChatWithUsers.txt",
"instructions": self.model_config["Systemprompt"],
"port": None
}
},
"model": {
"id": self.model_config["id"],
"provider": self.model_config["provider"],
"providerId": self.model_config["providerId"],
"name": self.model_config["name"],
"multiModal": self.model_config["multiModal"]
},
"config": opt_config
}
async def transform_messages(self, request):
merged_messages = []
for current in request["messages"]:
current_content = self.process_message_content(current["content"])
if current_content is None:
continue
if (merged_messages and
current and
merged_messages[-1]["role"] == current["role"]):
last_content = self.process_message_content(merged_messages[-1]["content"])
if last_content is not None:
merged_messages[-1]["content"] = f"{last_content}\n{current_content}"
continue
merged_messages.append(current)
messages = []
for msg in merged_messages:
if msg["role"] in ["system", "user"]:
messages.append({
"role": "user",
"content": [{
"type": "text",
"text": msg["content"]
}]
})
elif msg["role"] == "assistant":
messages.append({
"role": "assistant",
"content": [{
"type": "text",
"text": msg["content"]
}]
})
return messages
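# transform_messages first merges consecutive messages that share a role, then
# rewrites every "system" or "user" message as a user turn with a single text
# part. A sketch of the mapping (not executed here):
#
#   [{"role": "system", "content": "Be brief."},
#    {"role": "user", "content": "Hi"},
#    {"role": "user", "content": "How are you?"}]
#
# becomes two user turns whose text parts are "Be brief." and "Hi\nHow are you?".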
# Response handlers
class ResponseHandler:
@staticmethod
async def handle_stream_response(chat_message, model):
async def stream_generator():
index = 0
while True:
                # All content has been sent; terminate the SSE stream
                if index >= len(chat_message):
                    yield "data: [DONE]\n\n"
                    break
chunk_size = random.randint(15, 29)
chunk = chat_message[index:index + chunk_size]
event_data = {
"id": Utils.uuidv4(),
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": model,
"choices": [{
"index": 0,
"delta": {"content": chunk},
"finish_reason": "stop" if index + chunk_size >= len(chat_message) else None
}]
}
try:
payload = f"data: {json.dumps(event_data)}\n\n"
yield payload
except Exception as error:
raise Exception(f"json转换失败: {error}")
index += chunk_size
await asyncio.sleep(0.05) # 50ms delay
return StreamingResponse(
stream_generator(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
}
)
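    # Note on the streaming path: the upstream reply has already arrived in
    # full, so the generator above only simulates streaming by re-chunking the
    # finished text into SSE "chat.completion.chunk" events of 15-29 characters
    # with a 50 ms delay between them, finishing with "data: [DONE]".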
@staticmethod
async def handle_normal_response(chat_message, model):
response_data = {
"id": Utils.uuidv4(),
"object": "chat.completion",
"created": int(time.time()),
"model": model,
"choices": [{
"index": 0,
"message": {
"role": "assistant",
"content": chat_message
},
"finish_reason": "stop"
}],
"usage": None
}
return Response(
content=json.dumps(response_data),
media_type="application/json"
)
# FastAPI application
app = FastAPI()
# Add CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Model list endpoint
@app.get("/hf/v1/models")
async def get_models():
return {
"object": "list",
"data": [
{
"id": model,
"object": "model",
"created": int(time.time()),
"owned_by": "e2b",
}
for model in CONFIG["MODEL_CONFIG"].keys()
]
}
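# Example request against the model list endpoint (a sketch assuming the
# default local port 7860 used by uvicorn below; no Authorization header is
# required for this route):
#
#   curl http://localhost:7860/hf/v1/models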
# Chat completions endpoint
@app.post("/hf/v1/chat/completions")
async def chat_completions(request: Request):
try:
        # Validate the Authorization header
auth_header = request.headers.get("authorization", "")
auth_token = auth_header.replace("Bearer ", "") if auth_header else ""
if auth_token != CONFIG["API"]["API_KEY"]:
return Response(
content=json.dumps({"error": "Unauthorized"}),
status_code=401,
media_type="application/json"
)
        # Parse the request body
request_body = await request.json()
model = request_body.get("model")
temperature = request_body.get("temperature")
max_tokens = request_body.get("max_tokens")
presence_penalty = request_body.get("presence_penalty")
frequency_penalty = request_body.get("frequency_penalty")
top_p = request_body.get("top_p")
top_k = request_body.get("top_k")
stream = request_body.get("stream", False)
        # Clamp sampling options to the model's limits
config_opt = await Utils.config_opt(
{
"temperature": temperature,
"max_tokens": max_tokens,
"presence_penalty": presence_penalty,
"frequency_penalty": frequency_penalty,
"top_p": top_p,
"top_k": top_k
},
CONFIG["MODEL_CONFIG"][model]
)
#config_opt = {"model": model if CONFIG["MODEL_CONFIG"].get(model) else "gpt-4o"}
        # Build the upstream request payload
api_client = ApiClient(model)
request_payload = await api_client.prepare_chat_request(request_body, config_opt)
        # Send the request to the upstream service
        async with httpx.AsyncClient(timeout=300) as client:  # upstream generations can exceed httpx's 5 s default timeout
            response = await client.post(
                f"{CONFIG['API']['BASE_URL']}/api/chat",
                headers=CONFIG["DEFAULT_HEADERS"],
                json=request_payload
            )
response_data = response.json()
        # Extract the reply text from the upstream response
        if isinstance(response_data, dict):
            chat_message = (response_data.get("code") or response_data.get("text") or "").strip() or None
        else:
            chat_message = (str(response_data).strip() or None) if response_data else None
if not chat_message:
raise Exception("No response from upstream service")
        # Return a streaming or a plain JSON response
if stream:
return await ResponseHandler.handle_stream_response(chat_message, model)
else:
return await ResponseHandler.handle_normal_response(chat_message, model)
except Exception as error:
return Response(
content=json.dumps({
"error": {
"message": f"{str(error)} 请求失败,可能是上下文超出限制或其他错误,请稍后重试。",
"type": "server_error",
"param": None,
"code": getattr(error, "code", None)
}
}),
status_code=500,
media_type="application/json"
)
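# Example request against the chat completions endpoint (a sketch assuming the
# default local port 7860 and the placeholder key sk-123456 from CONFIG):
#
#   curl http://localhost:7860/hf/v1/chat/completions \
#        -H "Authorization: Bearer sk-123456" \
#        -H "Content-Type: application/json" \
#        -d '{"model": "gpt-4o", "stream": false,
#             "messages": [{"role": "user", "content": "Hello"}]}'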
# Catch-all 404 handler
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def not_found(request: Request, path: str):
return Response(
content="服务运行成功,请使用正确请求路径",
status_code=404,
headers={"Access-Control-Allow-Origin": "*"}
)
# Start the server
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=7860)
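# The endpoints follow the OpenAI wire format, so any OpenAI-compatible client
# can point at this service. A minimal sketch with the official openai Python
# package (assumes a local deployment on port 7860 and the placeholder key):
#
#   from openai import OpenAI
#   client = OpenAI(base_url="http://localhost:7860/hf/v1", api_key="sk-123456")
#   resp = client.chat.completions.create(
#       model="gpt-4o",
#       messages=[{"role": "user", "content": "Hello"}],
#   )
#   print(resp.choices[0].message.content)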