abdullahalioo committed on
Commit
fa8e2ce
·
verified ·
1 Parent(s): 9ba7021

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -43
app.py CHANGED
@@ -1,15 +1,13 @@
1
  import os
2
  from fastapi import FastAPI, HTTPException
3
- from fastapi.responses import StreamingResponse, Response
4
  from openai import AsyncOpenAI
5
 
6
  app = FastAPI()
7
 
8
- # Initialize global token
9
- token = os.getenv("GITHUB_TOKEN")
10
-
11
  async def generate_ai_response(prompt: str):
12
- global token
 
13
  if not token:
14
  raise HTTPException(status_code=500, detail="GitHub token not configured")
15
 
@@ -21,7 +19,7 @@ async def generate_ai_response(prompt: str):
21
  try:
22
  stream = await client.chat.completions.create(
23
  messages=[
24
- {"role": "system", "content": "You are a helpful assistant."},
25
  {"role": "user", "content": prompt}
26
  ],
27
  model=model,
@@ -38,50 +36,15 @@ async def generate_ai_response(prompt: str):
38
  yield f"Error: {str(err)}"
39
  raise HTTPException(status_code=500, detail="AI generation failed")
40
 
41
- class CustomStreamingResponse(Response):
42
- def __init__(self, content, token, media_type="text/event-stream", status_code=200):
43
- super().__init__(content=content, media_type=media_type, status_code=status_code)
44
- self.token = token
45
-
46
- async def __call__(self, scope, receive, send):
47
- await send({
48
- "type": "http.response.start",
49
- "status": self.status_code,
50
- "headers": [
51
- (b"content-type", self.media_type.encode()),
52
- (b"x-token-value", self.token.encode())
53
- ]
54
- })
55
- async for chunk in self.body_iterator:
56
- await send({
57
- "type": "http.response.body",
58
- "body": chunk.encode() if isinstance(chunk, str) else chunk,
59
- "more_body": True
60
- })
61
- await send({
62
- "type": "http.response.body",
63
- "body": b"",
64
- "more_body": False
65
- })
66
-
67
  @app.post("/generate")
68
  async def generate_response(prompt: str):
69
  if not prompt:
70
  raise HTTPException(status_code=400, detail="Prompt cannot be empty")
71
 
72
- global token
73
- return CustomStreamingResponse(
74
- content=generate_ai_response(prompt),
75
- token=token,
76
  media_type="text/event-stream"
77
  )
78
 
79
- @app.get("/get-token")
80
- async def get_token():
81
- global token
82
- if not token:
83
- raise HTTPException(status_code=500, detail="GitHub token not configured")
84
- return {"token": token}
85
-
86
  def get_app():
87
  return app
 
1
  import os
2
  from fastapi import FastAPI, HTTPException
3
+ from fastapi.responses import StreamingResponse
4
  from openai import AsyncOpenAI
5
 
6
  app = FastAPI()
7
 
 
 
 
8
  async def generate_ai_response(prompt: str):
9
+ # Configuration for unofficial GitHub AI endpoint
10
+ token = os.getenv("GITHUB_TOKEN")
11
  if not token:
12
  raise HTTPException(status_code=500, detail="GitHub token not configured")
13
 
 
19
  try:
20
  stream = await client.chat.completions.create(
21
  messages=[
22
+ {"role": "system", "content": "You are a helpful assistant. named Orion and made by Abdullah Ali"},
23
  {"role": "user", "content": prompt}
24
  ],
25
  model=model,
 
36
  yield f"Error: {str(err)}"
37
  raise HTTPException(status_code=500, detail="AI generation failed")
38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
@app.post("/generate")
async def generate_response(prompt: str):
    """Stream an AI-generated completion for *prompt* as a server-sent-event response.

    Raises:
        HTTPException(400): when the prompt is empty.
    """
    # NOTE(review): a bare `prompt: str` parameter binds as a *query* parameter
    # in FastAPI — confirm clients send `?prompt=...` rather than a JSON body.
    if not prompt:
        raise HTTPException(status_code=400, detail="Prompt cannot be empty")

    # Hand the async generator to Starlette, which drives it chunk by chunk.
    token_stream = generate_ai_response(prompt)
    return StreamingResponse(token_stream, media_type="text/event-stream")
48
 
 
 
 
 
 
 
 
49
def get_app():
    """Return the module-level FastAPI application (ASGI factory hook)."""
    return app