Commit 67741f2
Clémentine committed
1 parent: 570d85c

added evaluation + leaderboard generation + reorg of the viz
Files changed:
- yourbench_space/app.py +89 -53
- yourbench_space/evaluation.py +89 -0
- yourbench_space/leaderboard_space/app.py +31 -0
- yourbench_space/leaderboard_space/env.py +16 -0
- yourbench_space/leaderboard_space/requirements.txt +2 -0
- yourbench_space/leaderboard_space/utils.py +198 -0
- yourbench_space/utils.py +24 -11
yourbench_space/app.py
CHANGED
@@ -1,10 +1,13 @@
+import asyncio
 import os
 import sys
 import time
 import gradio as gr
-
-from …
+
+from datasets import load_dataset
 from huggingface_hub import whoami
+from loguru import logger
+from pathlib import Path
 
 from yourbench_space.config import generate_and_save_config
 from yourbench_space.utils import (
@@ -15,6 +18,8 @@ from yourbench_space.utils import (
     update_dataset,
     STAGES,
 )
+from yourbench_space.evaluation import create_eval_file, run_evaluations
+from yourbench_space.leaderboard_space.env import HF_TOKEN
 
 project_description = """
 # YourBench 🚀
@@ -74,11 +79,11 @@ def update_process_status():
 
     return gr.update(value=True, label="Process Status: Running")
 
-def prepare_task(oauth_token: gr.OAuthToken | None, …
+def prepare_task(oauth_token: gr.OAuthToken | None, hf_dataset_name: str, _=None):
     new_env = os.environ.copy()
     if oauth_token:
         new_env["HF_TOKEN"] = oauth_token.token
-    new_env["DATASET_PREFIX"] = …
+    new_env["DATASET_PREFIX"] = hf_dataset_name
     manager.start_process(custom_env=new_env)
 
 
@@ -104,31 +109,55 @@ def switch_to_run_generation_tab():
 def enable_button(files):
     return gr.update(interactive=bool(files))
 
+def run_evaluation_pipeline(oauth_token: gr.OAuthToken | None, org_name, eval_name):
+    eval_ds_name = f"{org_name}/{eval_name}"
+    # Test dataset existence
+    try:
+        load_dataset(eval_ds_name, streaming=True)
+    except Exception as e:
+        print(f"Error while loading the dataset: {e}")
+        return
+    # Run evaluations
+    create_eval_file(eval_ds_name)
+    status = asyncio.run(run_evaluations(eval_ds_name=eval_ds_name, org=org_name))
+    # Create the leaderboard space
+    from huggingface_hub import HfApi
+    repo_id = f"{org_name}/leaderboard_yourbench_{eval_ds_name.replace('/', '_')}"
+    api = HfApi()
+
+    try:
+        api.create_repo(repo_id=repo_id, repo_type="space", space_sdk="gradio")
+        api.upload_folder(repo_id=repo_id, repo_type="space", folder_path="src/")
+        api.add_space_secret(repo_id=repo_id, key="HF_TOKEN", value=HF_TOKEN)
+        api.add_space_variable(repo_id=repo_id, key="TASK", value=eval_ds_name)
+        api.add_space_variable(repo_id=repo_id, key="ORG_NAME", value=org_name)
+    except Exception as e:
+        # cast the exception to str before concatenating it into the status message
+        status = "Evaluation" + status + "\nLeaderboard creation:" + str(e)
+    return status
+
 
 with gr.Blocks(theme=gr.themes.Default()) as app:
     gr.Markdown(project_description)
 
-    with gr.Row():
-        gr.Markdown("## ⚙️ YourBench Setup", elem_id="setup-title")
-        with gr.Column(scale=0, min_width=150):
-            login_btn = gr.LoginButton()
-
     with gr.Tabs() as tabs:
         with gr.Tab("Setup", id=0):
             with gr.Row():
-                with gr.… (rest of the removed block is truncated in the source view)
+                with gr.Column():
+                    login_btn = gr.LoginButton()
+                    with gr.Accordion("Hugging Face Settings"):
+                        hf_org_dropdown = gr.Dropdown(
+                            choices=[], label="Organization", allow_custom_value=True
+                        )
+                        app.load(
+                            update_hf_org_dropdown, inputs=None, outputs=hf_org_dropdown
+                        )
+
+                        hf_dataset_name = gr.Textbox(
+                            label="Dataset name",
+                            value="yourbench",
+                            info="Name of your new evaluation dataset",
+                        )
 
                 with gr.Accordion("Upload documents"):
                     file_input = gr.File(
@@ -142,18 +171,18 @@ with gr.Blocks(theme=gr.themes.Default()) as app:
                         file_input,
                         output,
                     )
-                    … (removed lines truncated in the source view)
+                with gr.Row():
+                    preview_button = gr.Button("Generate New Config", interactive=False)
+                    log_message = gr.Textbox(label="Log Message", visible=True)
+                    download_button = gr.File(
+                        label="Download Config", visible=False, interactive=False
+                    )
 
             file_input.change(enable_button, inputs=file_input, outputs=preview_button)
 
             preview_button.click(
                 generate_and_return,
-                inputs=[hf_org_dropdown, …
+                inputs=[hf_org_dropdown, hf_dataset_name],
                 outputs=[log_message, download_button],
             )
             preview_button.click(
@@ -164,9 +193,26 @@ with gr.Blocks(theme=gr.themes.Default()) as app:
 
         with gr.Tab("Run Generation", id=1):
             with gr.Row():
-                … (two removed lines truncated in the source view)
+                start_button = gr.Button("Start Task")
+                start_button.click(prepare_task, inputs=[login_btn, hf_dataset_name])
+
+                stop_button = gr.Button("Stop Task")
+                stop_button.click(manager.stop_process)
+
+                kill_button = gr.Button("Kill Task")
+                kill_button.click(manager.kill_process)
 
+            with gr.Column():
+                with gr.Row():
+                    with gr.Accordion("Log Output", open=True):
+                        log_output = gr.Code(language=None, lines=20, interactive=False)
+
+                with gr.Row():
+                    process_status = gr.Checkbox(label="Process Status", interactive=False)
+                    status_timer = gr.Timer(1.0, active=True)
+                    status_timer.tick(update_process_status, outputs=process_status)
+
+            with gr.Column():
                 with gr.Accordion("Stages", open=True):
                     stages_table = gr.CheckboxGroup(
                         choices=STAGES,
@@ -175,27 +221,6 @@ with gr.Blocks(theme=gr.themes.Default()) as app:
                         interactive=False,
                     )
 
-            log_timer = gr.Timer(1.0, active=True)
-            log_timer.tick(
-                manager.read_and_get_output, outputs=[log_output, stages_table]
-            )
-
-            with gr.Row():
-                process_status = gr.Checkbox(label="Process Status", interactive=False)
-                status_timer = gr.Timer(1.0, active=True)
-                status_timer.tick(update_process_status, outputs=process_status)
-
-            with gr.Row():
-                start_button = gr.Button("Start Task")
-                start_button.click(prepare_task, inputs=[login_btn, hf_dataset_prefix])
-
-                stop_button = gr.Button("Stop Task")
-                stop_button.click(manager.stop_process)
-
-                kill_button = gr.Button("Kill Task")
-                kill_button.click(manager.kill_process)
-
-            with gr.Row():
                 with gr.Accordion("Ingestion"):
                     ingestion_df = gr.DataFrame()
 
@@ -209,8 +234,19 @@ with gr.Blocks(theme=gr.themes.Default()) as app:
                     answers_df = gr.DataFrame()
 
             stages_table.change(
-                update_dataset, inputs=[stages_table, hf_org_dropdown, …
+                update_dataset, inputs=[stages_table, hf_org_dropdown, hf_dataset_name], outputs=[ingestion_df, summarization_df, single_hop, answers_df]
             )
 
+            log_timer = gr.Timer(1.0, active=True)
+            log_timer.tick(
+                manager.read_and_get_output, outputs=[log_output, stages_table]
+            )
+        with gr.Tab("Evaluate", id=2):
+            with gr.Row():
+                btn_launch_evals = gr.Button("Launch evaluations")
+                status = gr.Textbox(label="Status")
+
+            btn_launch_evals.click(run_evaluation_pipeline, [hf_org_dropdown, hf_dataset_name], status)
+
 
 app.launch(allowed_paths=["/app"])
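Taken together, the new Evaluate tab chains three steps: validate that the generated dataset exists, run lighteval on every configured model, then publish a leaderboard Space. A minimal sketch of the same flow outside Gradio, with placeholder org/dataset names (running it requires a valid HF_TOKEN and the lighteval CLI installed):

import asyncio

from datasets import load_dataset
from huggingface_hub import HfApi

from yourbench_space.evaluation import create_eval_file, run_evaluations

org_name, eval_name = "my-org", "yourbench"  # placeholder values
eval_ds_name = f"{org_name}/{eval_name}"

# 1. Fail fast if the generated dataset is not on the Hub yet
load_dataset(eval_ds_name, streaming=True)

# 2. Write the custom lighteval task file, then evaluate every configured model
create_eval_file(eval_ds_name)
status = asyncio.run(run_evaluations(eval_ds_name=eval_ds_name, org=org_name))

# 3. Publish the results as a dedicated leaderboard Space
api = HfApi()
repo_id = f"{org_name}/leaderboard_yourbench_{eval_ds_name.replace('/', '_')}"
api.create_repo(repo_id=repo_id, repo_type="space", space_sdk="gradio")
print(status)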
yourbench_space/evaluation.py
ADDED
@@ -0,0 +1,89 @@
import asyncio
import os

from src.env import INIT_MODELS

ON_SPACES = os.environ.get("SYSTEM") == "spaces"
OUTPUT_DIR = "/data" if ON_SPACES else "."

def create_eval_file(eval_ds_name):
    # TODO: replace with Nathan's call
    content = """
from aenum import extend_enum

from lighteval.metrics.metrics import Metrics
from lighteval.metrics.utils.metric_utils import (
    CorpusLevelMetricGrouping,
    MetricCategory,
    MetricUseCase,
)
from lighteval.tasks.lighteval_task import LightevalTaskConfig
from lighteval.tasks.extended.hle.main import JudgeLLMHLE
from lighteval.tasks.requests import Doc


def prompt_function(line, task_name: str = None):
    if line["image"] not in [None, ""]:
        return

    return Doc(
        task_name=task_name,
        query="Question: " + line["question"] + "\\nAnswer:",
        choices=[line["answer"]],
        gold_index=0,
        specific={"question": line["question"]},
    )
""" + f"""

hle = LightevalTaskConfig(
    name="{eval_ds_name.replace('/', '_')}",
    suite=["custom"],
    prompt_function=prompt_function,
    hf_repo="{eval_ds_name}",
    hf_subset="default",
    hf_avail_splits=["test"],
    evaluation_splits=["test"],
    few_shots_split=None,
    few_shots_select=None,
    generation_size=8192,
    metric=[Metrics.exact_match],
    stop_sequence=[],
    trust_dataset=True,
    version=0,
)


TASKS_TABLE = [hle]
"""

    with open(f"{OUTPUT_DIR}/custom_task.py", "w") as f:
        f.write(content)

async def run_process(args: list) -> dict:
    process = await asyncio.create_subprocess_exec(
        *args,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE
    )
    # communicate() drains stdout/stderr while waiting, avoiding pipe-buffer deadlocks
    stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=180)
    return {
        'pid': process.pid,
        'stdout': stdout.decode(),
        'stderr': stderr.decode()
    }

async def run_evaluations(eval_ds_name: str, org: str) -> str:
    tasks = []
    for model_name, provider in INIT_MODELS:
        args = [
            "lighteval",
            "endpoint", "inference-providers", f"model={model_name},provider={provider}",
            f"custom|{eval_ds_name.replace('/', '_')}|0|0", "--custom-tasks", f"{OUTPUT_DIR}/custom_task.py", "--max-samples", "10",
            "--output-dir", f"{OUTPUT_DIR}", "--save-details", "--results-org", org, "--push-to-hub"
        ]
        tasks.append(run_process(args))
    # return_exceptions=True captures per-task failures instead of aborting the whole gather
    processes = await asyncio.gather(*tasks, return_exceptions=True)
    if all(not isinstance(result, Exception) for result in processes):
        return "✅"
    return "At least one model failed"
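For a single model, run_evaluations boils down to one lighteval subprocess per (model, provider) pair. A sketch of running just one of them directly; the dataset and org names are placeholders, and the model/provider pair is taken from INIT_MODELS:

import asyncio

from yourbench_space.evaluation import create_eval_file, run_process

eval_ds_name = "my-org/yourbench"  # placeholder dataset
create_eval_file(eval_ds_name)  # writes custom_task.py into OUTPUT_DIR ("." locally)

args = [
    "lighteval",
    "endpoint", "inference-providers", "model=Qwen/Qwen2.5-72B-Instruct,provider=sambanova",
    f"custom|{eval_ds_name.replace('/', '_')}|0|0",
    "--custom-tasks", "./custom_task.py",
    "--max-samples", "10",
    "--output-dir", ".", "--save-details", "--results-org", "my-org", "--push-to-hub",
]
result = asyncio.run(run_process(args))
print(result["stdout"][-500:])  # tail of the subprocess log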
yourbench_space/leaderboard_space/app.py
ADDED
@@ -0,0 +1,31 @@
import gradio as gr

from utils import run_pipeline, update_examples
from env import TASK

with gr.Blocks(
    title="YourBench Leaderboard",
    theme=gr.themes.Soft(),
    css="button { margin: 0 10px; padding: 5px 15px; }",
) as demo:
    # DISPLAY TABLE AND ANALYSIS
    title = gr.Markdown(f"YourBench auto-Leaderboard for {TASK}")
    leaderboard = gr.DataFrame(label="Results", interactive=False)
    samples_ix = gr.Number(
        label="Example Index",
        value=0,
        step=1,
        info="Navigate through different examples"
    )
    with gr.Tab("Hardest samples"):
        hard_samples = gr.HTML()
    with gr.Tab("Easiest samples"):
        easy_samples = gr.HTML()
    with gr.Tab("All samples"):
        all_samples = gr.HTML()

    samples_ix.change(update_examples, samples_ix, [easy_samples, hard_samples, all_samples])

    demo.load(run_pipeline, [samples_ix], [leaderboard, easy_samples, hard_samples, all_samples])

demo.launch()
yourbench_space/leaderboard_space/env.py
ADDED
@@ -0,0 +1,16 @@
import os

INIT_MODELS = [
    # 70B
    ("Qwen/Qwen2.5-72B-Instruct", "sambanova"),
    ("meta-llama/Llama-3.3-70B-Instruct", "together"),
    ("deepseek-ai/DeepSeek-R1-Distill-Llama-70B", "sambanova"),
    # 20 to 30B
    ("Qwen/QwQ-32B", "sambanova"),
    ("mistralai/Mistral-Small-24B-Instruct-2501", "together"),
    #("allenai/OLMo-2-0325-32B-Instruct", "hf-inference")
]
MODELS = [m[0] for m in INIT_MODELS]
TASK = os.getenv("TASK")
# With storage
HF_TOKEN = os.getenv("HF_TOKEN")
ORG_NAME = os.getenv("ORG_NAME")
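The leaderboard app reads all of its configuration from these variables, which run_evaluation_pipeline sets on the Space via add_space_secret/add_space_variable. A hypothetical local launch therefore only needs them exported first; every value below is a placeholder:

import os

os.environ.setdefault("TASK", "my-org/yourbench")  # evaluation dataset shown in the title
os.environ.setdefault("ORG_NAME", "my-org")        # org holding the details_* results datasets
os.environ.setdefault("HF_TOKEN", "hf_...")        # token with read access to those datasets

# Run from inside yourbench_space/leaderboard_space/: importing app builds the
# Blocks UI and calls demo.launch() at import time.
import app  # noqa: F401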
yourbench_space/leaderboard_space/requirements.txt
ADDED
@@ -0,0 +1,2 @@
datasets
huggingface_hub
yourbench_space/leaderboard_space/utils.py
ADDED
@@ -0,0 +1,198 @@
from datasets import load_dataset, Dataset
from functools import lru_cache
from typing import Tuple
import ast
import gradio as gr
import json

from env import MODELS, TASK, ORG_NAME

def aggregate_results() -> list:
    """From the outputs path and model list, extracts the current scores and stores them in a list of dicts with model, score, and time as keys."""
    all_results = []
    for org_model in MODELS:
        try:
            path = f"{ORG_NAME}/details_{org_model.replace('/', '__')}_private"
            ds = load_dataset(path, "results", split="latest")
            config = json.loads(ds["config_general"][0])
            results = json.loads(ds["results"][0])

            # Model data
            org, model = org_model.split("/")

            cur_result = {
                "Org": org,
                "Model": model,
                "Duration (s)": config["end_time"] - config["start_time"]
            }

            # Extract the task metrics from the JSON data
            for k_metric, v_dict in results.items():
                if k_metric != "all":
                    for k, v in v_dict.items():
                        cur_result[f"{k}({k_metric})"] = v
            all_results.append(cur_result)
        except Exception as e:
            print(f"Error processing {org_model} {ORG_NAME}: {e}")
    return all_results

def extract_dataviz() -> Tuple[list, list, list]:
    """From the outputs path and model list, extracts the easiest and hardest samples from the per-sample details."""
    all_samples = {}
    for org_model in MODELS:
        try:
            path = f"{ORG_NAME}/details_{org_model.replace('/', '__')}_private"
            ds = load_dataset(path, f"custom_{TASK.replace('/', '_')}_0", split="latest")

            for ix, row in enumerate(ds):
                prompt = row["full_prompt"]
                gold = row["gold"]
                score = list(row["metrics"].values())[0]
                prediction = row["predictions"][0]

                # We store flattened samples in a dict:
                # ix -> ix, prompt, gold, model_score and model_prediction for each model,
                # plus two lists (model_scores and models) to make aggregation easier
                if ix not in all_samples:
                    all_samples[ix] = {
                        "ix": ix,
                        "prompt": prompt,
                        "gold": gold[0] if isinstance(gold, list) else gold,
                        # A bit redundant, but kept in their own boxes for simplicity of access later
                        "model_scores": [],
                        "models": []
                    }
                if org_model not in all_samples[ix]["models"]:
                    all_samples[ix][f"{org_model}_score"] = row["metrics"]
                    all_samples[ix][f"{org_model}_prediction"] = prediction
                    all_samples[ix]["model_scores"].append(score)
                    all_samples[ix]["models"].append(org_model)

        except Exception as e:
            print(f"Error processing {org_model}: {e}")

    full_samples = sorted(all_samples.values(), key=lambda r: r["ix"])
    # Hardest: every model scored 0; easiest: every model scored 1
    hard_samples = sorted([sample for sample in all_samples.values() if sum(sample["model_scores"]) == 0], key=lambda r: r["ix"])
    easy_samples = sorted([sample for sample in all_samples.values() if sum(sample["model_scores"]) == len(sample["model_scores"])], key=lambda r: r["ix"])
    return easy_samples, hard_samples, full_samples

def samples_to_box_display(samples: list, example_index: int = 0):
    """Adapted from Nathan's code in https://huggingface.co/spaces/SaylorTwift/OpenEvalsModelDetails/"""
    if len(samples) == 0:
        return "No samples in this category!"
    outputs = []
    sample = samples[example_index]
    for model in sample["models"]:
        try:
            outputs.append({
                'Model': model,
                'Prediction': sample[f'{model}_prediction'],
                'Prompt': sample['prompt'],
                'Metrics': sample[f'{model}_score'],
                'Gold': sample['gold']
            })
        except (KeyError, IndexError):
            continue

    if not outputs:
        return "No results found for the selected combination."

    # Create HTML output with all models
    html_output = "<div style='max-width: 800px; margin: 0 auto;'>\n\n"

    # Show gold answer at the top with distinct styling
    if outputs:
        html_output += "<div style='background: #e6f3e6; padding: 20px; border-radius: 10px; margin-bottom: 20px;'>\n"
        html_output += "<h3 style='margin-top: 0;'>Ground Truth</h3>\n"
        html_output += "<div style='overflow-x: auto; max-width: 100%;'>\n"
        html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 0;'><code>{outputs[0]['Gold']}</code></pre>\n"
        html_output += "</div>\n"
        html_output += "</div>\n"

    for output in outputs:
        html_output += "<div style='background: #f5f5f5; padding: 20px; margin-bottom: 20px; border-radius: 10px;'>\n"
        html_output += f"<h2 style='margin-top: 0;'>{output['Model']}</h2>\n"

        # Format metrics as a clean table
        html_output += "<details open style='margin-bottom: 15px;'>\n"
        html_output += "<summary><h3 style='display: inline; margin: 0;'>Metrics</h3></summary>\n"
        metrics = output['Metrics']
        if isinstance(metrics, str):
            # literal_eval parses the dict repr without executing arbitrary code, unlike eval
            metrics = ast.literal_eval(metrics)
        html_output += "<div style='overflow-x: auto;'>\n"
        html_output += "<table style='width: 100%; margin: 10px 0; border-collapse: collapse;'>\n"
        for key, value in metrics.items():
            if isinstance(value, float):
                value = f"{value:.3f}"
            html_output += f"<tr><td style='padding: 5px; border-bottom: 1px solid #ddd;'><strong>{key}</strong></td><td style='padding: 5px; border-bottom: 1px solid #ddd;'>{value}</td></tr>\n"
        html_output += "</table>\n"
        html_output += "</div>\n"
        html_output += "</details>\n\n"

        # Handle prompt formatting with better styling
        html_output += "<details style='margin-bottom: 15px;'>\n"
        html_output += "<summary><h3 style='display: inline; margin: 0;'>Prompt</h3></summary>\n"
        html_output += "<div style='background: #ffffff; padding: 15px; border-radius: 5px; margin-top: 10px;'>\n"

        prompt_text = output['Prompt']
        if isinstance(prompt_text, list):
            for i, msg in enumerate(prompt_text):
                if isinstance(msg, dict) and 'content' in msg:
                    role = msg.get('role', 'message').title()
                    html_output += "<div style='margin-bottom: 10px;'>\n"
                    html_output += f"<strong>{role}:</strong>\n"
                    html_output += "<div style='overflow-x: auto;'>\n"
                    html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 5px 0;'><code>{msg['content']}</code></pre>\n"
                    html_output += "</div>\n"
                    html_output += "</div>\n"
                else:
                    html_output += "<div style='margin-bottom: 10px;'>\n"
                    html_output += "<div style='overflow-x: auto;'>\n"
                    html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 5px 0;'><code>{json.dumps(msg, indent=2)}</code></pre>\n"
                    html_output += "</div>\n"
                    html_output += "</div>\n"
        else:
            html_output += "<div style='overflow-x: auto;'>\n"
            if isinstance(prompt_text, dict) and 'content' in prompt_text:
                html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 5px 0;'><code>{prompt_text['content']}</code></pre>\n"
            else:
                html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 5px 0;'><code>{prompt_text}</code></pre>\n"
            html_output += "</div>\n"

        html_output += "</div>\n"
        html_output += "</details>\n\n"

        # Style prediction output in a collapsible section
        html_output += "<details open style='margin-bottom: 15px;'>\n"
        html_output += "<summary><h3 style='display: inline; margin: 0;'>Prediction</h3>"
        # Add the word count in a muted style
        word_count = len(output['Prediction'].split())
        html_output += f"<span style='color: #666; font-size: 0.8em; margin-left: 10px;'>({word_count} words)</span>"
        html_output += "</summary>\n"
        html_output += "<div style='background: #ffffff; padding: 15px; border-radius: 5px; margin-top: 10px;'>\n"
        html_output += "<div style='overflow-x: auto;'>\n"
        html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 0;'><code>{output['Prediction']}</code></pre>\n"
        html_output += "</div>\n"
        html_output += "</div>\n"
        html_output += "</details>\n"
        html_output += "</div>\n\n"

    html_output += "</div>"
    return html_output

def run_pipeline(samples_ix: int = 0):
    results = aggregate_results()
    best_samples, worst_samples, all_samples = extract_dataviz()
    return gr.Dataframe(Dataset.from_list(results).to_pandas(), visible=True), \
        gr.HTML(samples_to_box_display(best_samples, samples_ix), label="Easiest samples (always found)", visible=True), \
        gr.HTML(samples_to_box_display(worst_samples, samples_ix), label="Hardest samples (always failed)", visible=True), \
        gr.HTML(samples_to_box_display(all_samples, samples_ix), label="All samples", visible=True)

def update_examples(samples_ix: int = 0):
    best_samples, worst_samples, all_samples = extract_dataviz()
    return samples_to_box_display(best_samples, samples_ix), \
        samples_to_box_display(worst_samples, samples_ix), \
        samples_to_box_display(all_samples, samples_ix)
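samples_to_box_display only reads the keys it is handed, so the HTML rendering can be smoke-tested offline with a hand-built sample; every value below is made up:

from utils import samples_to_box_display

fake_sample = {
    "ix": 0,
    "prompt": "Question: What is 2 + 2?\nAnswer:",
    "gold": "4",
    "models": ["my-org/fake-model"],
    "model_scores": [1],
    "my-org/fake-model_score": {"exact_match": 1.0},
    "my-org/fake-model_prediction": "4",
}

html = samples_to_box_display([fake_sample], example_index=0)
assert "Ground Truth" in html and "my-org/fake-model" in html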
yourbench_space/utils.py
CHANGED
@@ -22,6 +22,8 @@ STAGES = [
     "chunking",
     "single_shot_question_generation",
     "answer_generation",
+    #"evaluate_models",
+    #"create_leaderboard"
     # "judge_answers", # to uncomment when fixed
 ]
 
@@ -55,20 +57,31 @@ def update_dataset(stages, hf_org, hf_prefix):
     """
     Updates the dataset based on the provided stages and dataset configuration.
     """
-    … (four removed lines truncated in the source view)
+    ingestion_df = pd.DataFrame()
+    summarization_df = pd.DataFrame()
+    single_hop_df = pd.DataFrame()
+    answers_df = pd.DataFrame()
+
     # Construct dataset name from config
     dataset_name = f"{hf_org}/{hf_prefix}"
 
-    ingestion_df = pd.DataFrame(load_dataset(dataset_name, name="ingested", split="train", streaming=True))
-    summarization_df = pd.DataFrame(load_dataset(dataset_name, name="summarization", split="train", streaming=True))
-    single_hop = pd.DataFrame(load_dataset(dataset_name, name="single_shot_question_generation", split="train", streaming=True))
-    answers_df = pd.DataFrame(load_dataset(dataset_name, name="answer_generation", split="train", streaming=True))
-
-    return (ingestion_df, summarization_df, single_hop, answers_df)
+    # TODO: add cache dir
+    # Will be able to group everything in one pass once the names get homogenized
+    if "ingestion" in stages:
+        # TODO: why is the key "ingested" and not "ingestion"? (does not match the other splits)
+        ingestion_ds = load_dataset(dataset_name, name="ingested", split="train", streaming=True)
+        ingestion_df = pd.DataFrame(list(ingestion_ds.take(5)))  # preview only the first five streamed rows
+    if "summarization" in stages:
+        summarization_ds = load_dataset(dataset_name, name="summarization", split="train", streaming=True)
+        summarization_df = pd.DataFrame(list(summarization_ds.take(5)))
+    if "single_shot_question_generation" in stages:
+        single_hop_ds = load_dataset(dataset_name, name="single_shot_question_generation", split="train", streaming=True)
+        single_hop_df = pd.DataFrame(list(single_hop_ds.take(5)))
+    if "answer_generation" in stages:
+        answers_ds = load_dataset(dataset_name, name="answer_generation", split="train", streaming=True)
+        answers_df = pd.DataFrame(list(answers_ds.take(5)))
 
+    return (ingestion_df, summarization_df, single_hop_df, answers_df)
 
 class SubprocessManager:
     def __init__(self, command):