Update app.py
app.py (CHANGED)
@@ -2,6 +2,7 @@
 
 import os
 import time
+import traceback
 import requests
 import pandas as pd
 import gradio as gr
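The change below also relies on module-level names that are defined outside these hunks: MODEL_ID, API_URL, and HF_TOKEN_ENV. As a point of reference, here is a minimal sketch of what that configuration block could look like; the model id and URL are placeholders, not values taken from the file:

import os

# Placeholder values; the real definitions live elsewhere in app.py.
MODEL_ID = "org-name/instruct-model"                  # model queried by GAIAAgent
API_URL = "https://example-scoring-api.hf.space"      # GAIA scoring service base URL
HF_TOKEN_ENV = os.getenv("HUGGINGFACEHUB_API_TOKEN")  # token injected via Space secrets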
@@ -27,10 +28,7 @@ class GAIAAgent:
     def answer(self, prompt: str) -> str:
         payload = {
             "inputs": prompt,
-            "parameters": {
-                "max_new_tokens": 512,
-                "temperature": 0.2
-            }
+            "parameters": {"max_new_tokens": 512, "temperature": 0.2}
         }
         url = f"https://api-inference.huggingface.co/models/{self.model_id}"
         resp = requests.post(url, headers=self.headers, json=payload, timeout=60)
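For context, the payload assembled in answer() targets the serverless Inference API text-generation endpoint. The standalone sketch below issues the same request and parses the reply; the Authorization header and the [{"generated_text": ...}] response shape are assumptions here, since __init__ and the rest of answer() sit outside the shown hunk.

import requests

def query_model(prompt: str, model_id: str, hf_token: str) -> str:
    # Same payload shape as GAIAAgent.answer() after this change.
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": 512, "temperature": 0.2},
    }
    url = f"https://api-inference.huggingface.co/models/{model_id}"
    headers = {"Authorization": f"Bearer {hf_token}"}  # assumed header construction
    resp = requests.post(url, headers=headers, json=payload, timeout=60)
    resp.raise_for_status()
    data = resp.json()
    # Text-generation models usually reply with [{"generated_text": "..."}];
    # fall back to the raw JSON if the shape differs.
    if isinstance(data, list) and data and isinstance(data[0], dict):
        return data[0].get("generated_text", str(data))
    return str(data)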
@@ -42,59 +40,62 @@ class GAIAAgent:
 
 # --- Gradio callback ---
 def run_and_submit_all(profile: gr.OAuthProfile | None):
-    [old lines 45-53 not rendered in this view]
-        )
-
-    [old lines 56-59 not rendered in this view]
-    if not questions:
-        return "❌ No questions found. Check your API_URL.", pd.DataFrame()
-
-    # 2) Init agent
-    agent = GAIAAgent(MODEL_ID, hf_token)
-
-    # 3) Answer each
-    results = []
-    payload = []
-    for item in questions:
-        tid = item.get("task_id")
-        qtxt = item.get("question", "")
-        try:
-            ans = agent.answer(qtxt)
-        except Exception as e:
-            ans = f"ERROR: {e}"
-        results.append({"Task ID": tid, "Question": qtxt, "Answer": ans})
-        payload.append({"task_id": tid, "submitted_answer": ans})
-        time.sleep(0.5)
-
-    # 4) Submit (no agent_code)
-    submission = {
-        "username": username,
-        "answers": payload
-    }
-    s_resp = requests.post(f"{API_URL}/submit", json=submission, timeout=60)
-    s_resp.raise_for_status()
-    data = s_resp.json()
-
-    # 5) Build status text
-    status = (
-        f"✅ **Submission Successful!**\n\n"
-        f"**User:** {data.get('username')}\n"
-        f"**Score:** {data.get('score')}% "
-        f"({data.get('correct_count')}/{data.get('total_attempted')} correct)\n"
-        f"**Message:** {data.get('message')}"
-    )
-    return status, pd.DataFrame(results)
+    try:
+        if profile is None:
+            return ("⚠️ Please log in with your Hugging Face account.", pd.DataFrame())
+        username = profile.username
+        hf_token = HF_TOKEN_ENV or getattr(profile, "access_token", None)
+        if not hf_token:
+            return (
+                "❌ No Hugging Face token found.\n"
+                "Set HUGGINGFACEHUB_API_TOKEN in Secrets or log in via the button.",
+                pd.DataFrame()
+            )
+
+        # 1) Fetch GAIA questions
+        q_resp = requests.get(f"{API_URL}/questions", timeout=15)
+        q_resp.raise_for_status()
+        questions = q_resp.json() or []
+        if not questions:
+            return ("❌ No questions found. Check your API_URL.", pd.DataFrame())
+
+        # 2) Init agent
+        agent = GAIAAgent(MODEL_ID, hf_token)
+
+        # 3) Answer each
+        results = []
+        payload = []
+        for item in questions:
+            tid = item.get("task_id")
+            qtxt = item.get("question", "")
+            try:
+                ans = agent.answer(qtxt)
+            except Exception as e:
+                ans = f"ERROR: {e}"
+            results.append({"Task ID": tid, "Question": qtxt, "Answer": ans})
+            payload.append({"task_id": tid, "submitted_answer": ans})
+            time.sleep(0.5)
+
+        # 4) Submit
+        submission = {"username": username, "answers": payload}
+        s_resp = requests.post(f"{API_URL}/submit", json=submission, timeout=60)
+        s_resp.raise_for_status()
+        data = s_resp.json()
+
+        # 5) Build status text
+        status = (
+            f"✅ **Submission Successful!**\n\n"
+            f"**User:** {data.get('username')}\n"
+            f"**Score:** {data.get('score')}% "
+            f"({data.get('correct_count')}/{data.get('total_attempted')} correct)\n"
+            f"**Message:** {data.get('message')}"
+        )
+        return status, pd.DataFrame(results)
+
+    except Exception as e:
+        tb = traceback.format_exc()
+        print("[ERROR]", tb)
+        return (f"❌ Unexpected error:\n{e}", pd.DataFrame())
 
 # --- Gradio UI ---
 with gr.Blocks() as demo:
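The rewritten callback talks to the scoring service in two steps: a GET to {API_URL}/questions for the task list, then a POST to {API_URL}/submit with the username and a list of {task_id, submitted_answer} records. Below is a standalone sketch of that exchange, independent of Gradio; API_URL and the username are placeholders, not values from the file:

import requests

API_URL = "https://example-scoring-api.hf.space"  # placeholder
USERNAME = "your-hf-username"                     # placeholder

# 1) Pull the question list.
questions = requests.get(f"{API_URL}/questions", timeout=15).json() or []

# 2) Build the submission body in the shape run_and_submit_all() produces.
submission = {
    "username": USERNAME,
    "answers": [
        {"task_id": q.get("task_id"), "submitted_answer": "placeholder answer"}
        for q in questions
    ],
}

# 3) Submit and read back the fields the status text reports.
resp = requests.post(f"{API_URL}/submit", json=submission, timeout=60)
resp.raise_for_status()
data = resp.json()
print(data.get("score"), data.get("correct_count"), data.get("total_attempted"))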