Spaces:
Runtime error
Runtime error
Commit
·
1337025
1
Parent(s):
b248825
update vllm
Browse files- README.md +68 -1
- app.py +145 -4
- packages.txt +1 -0
- requirements.txt +6 -0
README.md
CHANGED
@@ -11,4 +11,71 @@ license: mit
|
|
11 |
short_description: NanoV
|
12 |
---
|
13 |
|
14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
short_description: NanoV
|
12 |
---
|
13 |
|
14 |
+
# vLLM OpenAI 兼容API服务
|
15 |
+
|
16 |
+
这个 Hugging Face Space 提供了一个基于 vLLM 的 OpenAI 兼容 API 服务。vLLM 是一个高性能的 LLM 推理引擎,提供了与 OpenAI API 兼容的接口,让你可以使用与 OpenAI 相同的客户端代码来调用开源模型。
|
17 |
+
|
18 |
+
## 特点
|
19 |
+
|
20 |
+
- 提供完全兼容 OpenAI 的 API 接口
|
21 |
+
- 支持 Chat Completions API (/v1/chat/completions)
|
22 |
+
- 支持 Completions API (/v1/completions)
|
23 |
+
- 包含一个简单的控制面板来管理服务
|
24 |
+
|
25 |
+
## 使用方法
|
26 |
+
|
27 |
+
### 控制面板
|
28 |
+
|
29 |
+
Space 启动后,会显示一个简单的控制面板,你可以:
|
30 |
+
|
31 |
+
1. 启动/停止 vLLM 服务
|
32 |
+
2. 查看服务状态
|
33 |
+
3. 查看服务日志
|
34 |
+
4. 获取 API 测试信息
|
35 |
+
|
36 |
+
### API 使用
|
37 |
+
|
38 |
+
你可以使用任何支持 OpenAI API 的客户端库来调用这个服务。例如,使用官方的 Python 客户端:
|
39 |
+
|
40 |
+
```python
|
41 |
+
from openai import OpenAI
|
42 |
+
|
43 |
+
client = OpenAI(
|
44 |
+
base_url="https://你的HF_SPACE_URL/v1",
|
45 |
+
api_key="你设置的API_KEY(如果有)",
|
46 |
+
)
|
47 |
+
|
48 |
+
completion = client.chat.completions.create(
|
49 |
+
model="模型名称",
|
50 |
+
messages=[
|
51 |
+
{"role": "user", "content": "Hello!"}
|
52 |
+
]
|
53 |
+
)
|
54 |
+
|
55 |
+
print(completion.choices[0].message)
|
56 |
+
```
|
57 |
+
|
58 |
+
## 环境变量
|
59 |
+
|
60 |
+
你可以在 Space 设置页面中设置以下环境变量来自定义服务:
|
61 |
+
|
62 |
+
- `MODEL_NAME`: 要加载的模型名称(默认: "NousResearch/Nous-Hermes-2-Yi-9B")
|
63 |
+
- `API_KEY`: API 访问密钥,如果设置了,则需要在请求中提供
|
64 |
+
- `API_PORT`: API 服务端口(默认: 8000)
|
65 |
+
- `GRADIO_PORT`: Gradio UI 端口(默认: 7860)
|
66 |
+
|
67 |
+
## 注意事项
|
68 |
+
|
69 |
+
- 首次启动服务时,需要下载模型,这可能需要几分钟时间
|
70 |
+
- 请确保选择了足够的 GPU 资源来运行模型
|
71 |
+
- 如果模型加载失败,请检查日志并考虑使用更小的模型
|
72 |
+
|
73 |
+
## 支持的 API
|
74 |
+
|
75 |
+
- Chat Completions API (/v1/chat/completions)
|
76 |
+
- Completions API (/v1/completions)
|
77 |
+
- 其他 vLLM 支持的 OpenAI 兼容 API
|
78 |
+
|
79 |
+
## 技术细节
|
80 |
+
|
81 |
+
这个 Space 使用 vLLM 的 OpenAI 兼容服务器功能,详情可参考 [vLLM 文档](https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html)。
|
app.py
CHANGED
@@ -1,7 +1,148 @@
|
|
|
|
|
|
1 |
import gradio as gr
|
|
|
2 |
|
3 |
-
|
4 |
-
|
|
|
|
|
5 |
|
6 |
-
|
7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import sys
from subprocess import PIPE, Popen, TimeoutExpired
import select

import gradio as gr
5 |
# Runtime configuration, overridable via environment variables on the Space.
MODEL_NAME = os.environ.get("MODEL_NAME", "NousResearch/Nous-Hermes-2-Yi-9B")
API_PORT = int(os.environ.get("API_PORT", 8000))
GRADIO_PORT = int(os.environ.get("GRADIO_PORT", 7860))

# Handle to the vLLM server subprocess (None while not running).
vllm_process = None
def start_vllm_server():
    """Launch the vLLM OpenAI-compatible server as a subprocess.

    Returns a human-readable (Chinese) status string for the UI. The
    subprocess handle is stored in the module-level ``vllm_process`` so the
    other control functions can manage it.
    """
    global vllm_process
    # Treat the service as running only if the process exists AND is alive.
    # The original check (``vllm_process is not None``) kept reporting
    # "already running" after the child had died, making restart impossible.
    if vllm_process is not None and vllm_process.poll() is None:
        return "vLLM 服务已经在运行"

    # Build the launch command.
    cmd = [
        "vllm",
        "serve",
        MODEL_NAME,
        "--host", "0.0.0.0",
        "--port", str(API_PORT),
        "--dtype", "auto",
        "--max-model-len", "2048",  # cap context length to limit memory use
        "--gpu-memory-utilization", "0.9",  # use 90% of GPU memory
    ]

    # Require an API key only when one is configured.
    api_key = os.environ.get("API_KEY", "")
    if api_key:
        cmd.extend(["--api-key", api_key])

    # Log the launch command for debugging.
    print(f"启动命令: {' '.join(cmd)}")

    # Start the vLLM server.
    try:
        vllm_process = Popen(cmd, stdout=PIPE, stderr=PIPE, text=True)
        return "vLLM 服务器已启动!请等待模型加载完成..."
    except Exception as e:
        vllm_process = None  # keep state consistent when the launch fails
        return f"启动vLLM服务器时出错: {str(e)}"
def stop_vllm_server():
    """Terminate the vLLM subprocess, if any, and clear the handle.

    Returns a human-readable (Chinese) status string for the UI.
    """
    global vllm_process
    if vllm_process is None:
        return "vLLM 服务未运行"

    vllm_process.terminate()
    try:
        # Reap the child so it does not linger as a zombie (the original
        # never called wait()); escalate to SIGKILL if SIGTERM is ignored.
        vllm_process.wait(timeout=10)
    except TimeoutExpired:
        vllm_process.kill()
        vllm_process.wait()
    vllm_process = None
    return "vLLM 服务已停止"
def check_server_status():
    """Report the state of the vLLM subprocess as a display string."""
    proc = vllm_process
    if proc is None:
        return "未运行"

    # poll() is None while the child is still alive.
    rc = proc.poll()
    return "运行中" if rc is None else f"已停止 (返回码: {rc})"
def get_server_logs():
    """Drain any pending stdout/stderr lines from the vLLM subprocess.

    The original implementation called ``readline()`` unconditionally on both
    pipes, which blocks the Gradio handler forever while the server is alive
    but quiet, and it appended an extra ``"\\n"`` to lines that already end in
    a newline. ``select`` is used here to read only pipes that actually have
    data ready (the Space runs on Linux, where select works on pipes).
    """
    if vllm_process is None:
        return "服务未运行,无日志可显示"

    pipes = [p for p in (vllm_process.stdout, vllm_process.stderr) if p is not None]
    chunks = []
    while pipes:
        # Zero timeout: return immediately with whatever is ready.
        readable, _, _ = select.select(pipes, [], [], 0)
        if not readable:
            break
        for pipe in readable:
            line = pipe.readline()
            if not line:
                pipes.remove(pipe)  # EOF on this pipe
                continue
            prefix = "[ERROR] " if pipe is vllm_process.stderr else ""
            chunks.append(prefix + line.rstrip("\n"))

    return "\n".join(chunks) if chunks else "暂无新日志"
def serve_test_ui():
    """Build and return the Gradio control-panel UI for the vLLM service."""
    # Pre-compute values interpolated into the markdown below: nesting
    # double-quoted os.environ.get("...") expressions inside a double-quoted
    # f-string is a SyntaxError before Python 3.12.
    api_key_display = os.environ.get("API_KEY", "未设置")
    api_key_example = os.environ.get("API_KEY", "None")

    with gr.Blocks(title="vLLM OpenAI兼容API服务") as demo:
        with gr.Row():
            with gr.Column():
                gr.Markdown("# vLLM OpenAI 兼容API服务控制面板")

        with gr.Row():
            start_btn = gr.Button("启动服务", variant="primary")
            stop_btn = gr.Button("停止服务", variant="stop")

        status_text = gr.Textbox(label="服务状态", value="未运行", interactive=False)
        refresh_btn = gr.Button("刷新状态")

        logs_text = gr.Textbox(label="服务日志", interactive=False, lines=15)
        logs_refresh_btn = gr.Button("刷新日志")

        # API information / quick-start section.
        gr.Markdown("## API 信息")
        # NOTE: literal braces in the example payload must be doubled
        # ({{ }}) inside an f-string; the original single braces were a
        # SyntaxError, which crashed the app on import.
        api_info = gr.Markdown(f"""
API地址: `http://localhost:{API_PORT}/v1/...`

Hugging Face Space公开URL: 部署后查看Space详情获取

当前加载模型: `{MODEL_NAME}`

API密钥: `{api_key_display}`

## 测试命令
```python
from openai import OpenAI

client = OpenAI(
    base_url="http://你的HF_SPACE_URL/v1",
    api_key="{api_key_example}",
)

completion = client.chat.completions.create(
    model="{MODEL_NAME}",
    messages=[
        {{"role": "user", "content": "Hello!"}}
    ]
)

print(completion.choices[0].message)
```
""")

        # Wire up the controls.
        start_btn.click(start_vllm_server, inputs=[], outputs=status_text)
        stop_btn.click(stop_vllm_server, inputs=[], outputs=status_text)
        refresh_btn.click(check_server_status, inputs=[], outputs=status_text)
        logs_refresh_btn.click(get_server_logs, inputs=[], outputs=logs_text)

        # Auto-start the service when the page loads.
        demo.load(start_vllm_server, inputs=[], outputs=status_text)

    return demo
|
# Script entry point: build the control-panel UI and serve it.
if __name__ == "__main__":
    ui = serve_test_ui()
    ui.queue().launch(server_name="0.0.0.0", server_port=GRADIO_PORT, share=True)
packages.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
build-essential
|
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
vllm>=0.8.2
|
2 |
+
gradio>=4.0.0
|
3 |
+
openai>=1.0.0
|
4 |
+
pydantic>=2.0.0
|
5 |
+
fastapi>=0.100.0
|
6 |
+
uvicorn>=0.22.0
|