clefourrier (HF Staff) committed
Commit f4ad64c · verified · 1 Parent(s): c3b2c51

Upload folder using huggingface_hub
__pycache__/display_samples.cpython-310.pyc ADDED
Binary file (4.53 kB)

__pycache__/env.cpython-310.pyc ADDED
Binary file (628 Bytes)

__pycache__/evaluation.cpython-310.pyc ADDED
Binary file (2.6 kB)

__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.76 kB)
app.py ADDED
@@ -0,0 +1,30 @@
+import gradio as gr
+
+from utils import run_pipeline, update_examples
+
+with gr.Blocks(
+    title="YourBench Leaderboard",
+    theme=gr.themes.Soft(),
+    css="button { margin: 0 10px; padding: 5px 15px; }",
+) as demo:
+    # DISPLAY TABLE AND ANALYSIS
+    with gr.Column(visible=False) as col:
+        leaderboard = gr.DataFrame(interactive=False)
+        samples_ix = gr.Number(
+            label="Example Index",
+            value=0,
+            step=1,
+            info="Navigate through different examples"
+        )
+        with gr.Tab("Hardest samples"):
+            hard_samples = gr.HTML()
+        with gr.Tab("Easiest samples"):
+            easy_samples = gr.HTML()
+        with gr.Tab("All samples"):
+            all_samples = gr.HTML()
+
+    samples_ix.change(update_examples, samples_ix, [easy_samples, hard_samples, all_samples])
+
+    demo.load(run_pipeline, [samples_ix], [leaderboard, easy_samples, hard_samples, all_samples])
+
+demo.launch()
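
The Blocks wiring above drives everything through two callbacks: demo.load runs the full pipeline once at startup, and samples_ix.change re-renders the three sample tabs. For a quick check outside the UI, the same functions can be exercised headlessly. A minimal sketch, assuming the TASK, ORG_NAME, and HF_TOKEN environment variables are set and the details datasets exist on the Hub (preview.html is a hypothetical output path, not part of the commit):

# Headless smoke test for the pipeline functions.
from utils import aggregate_results, extract_dataviz, samples_to_box_display

results = aggregate_results()
easy, hard, full = extract_dataviz()
print(f"{len(results)} models scored, {len(full)} samples collected")

# Render the hardest samples to a local file for inspection.
with open("preview.html", "w") as f:
    f.write(samples_to_box_display(hard, example_index=0))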
env.py ADDED
@@ -0,0 +1,16 @@
+import os
+INIT_MODELS = [
+    # 70B
+    ("Qwen/Qwen2.5-72B-Instruct", "sambanova"),
+    ("meta-llama/Llama-3.3-70B-Instruct", "together"),
+    ("deepseek-ai/DeepSeek-R1-Distill-Llama-70B", "sambanova"),
+    # 20 to 30B
+    ("Qwen/QwQ-32B", "sambanova"),
+    ("mistralai/Mistral-Small-24B-Instruct-2501", "together"),
+    # ("allenai/OLMo-2-0325-32B-Instruct", "hf-inference")
+]
+MODELS = [m[0] for m in INIT_MODELS]
+TASK = os.getenv("TASK")
+# With storage
+HF_TOKEN = os.getenv("HF_TOKEN")
+ORG_NAME = os.getenv("ORG_NAME")
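
TASK, ORG_NAME, and HF_TOKEN are all read from the environment, so a local run needs them set before app.py starts. A minimal sketch with placeholder values (none of these values come from the repo):

# Hypothetical local configuration; replace each placeholder with a real value.
import os

os.environ["TASK"] = "some_org/some_task"  # task name used in the details configs
os.environ["ORG_NAME"] = "some-org"        # org hosting the details_* datasets
os.environ["HF_TOKEN"] = "hf_..."          # token with read access to those datasets

On a hosted Space these would instead be configured as Space secrets and variables.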
requirements.txt ADDED
@@ -0,0 +1,2 @@
+datasets
+huggingface_hub
utils.py ADDED
@@ -0,0 +1,198 @@
+from datasets import load_dataset, Dataset
+from typing import Tuple
+import gradio as gr
+import ast
+import json
+
+from env import MODELS, TASK, ORG_NAME
+
+def aggregate_results() -> list:
+    """Extract the current scores from the details datasets and store them in a list of dicts with model, score, and duration as keys.
+    """
+    all_results = []
+    for org_model in MODELS:
+        try:
+            path = f"{ORG_NAME}/details_{org_model.replace('/', '__')}_private"
+            ds = load_dataset(path, "results", split="latest")
+            config = json.loads(ds["config_general"][0])
+            results = json.loads(ds["results"][0])
+
+            # Model data
+            org, model = org_model.split("/")
+
+            cur_result = {
+                "Org": org,
+                "Model": model,
+                "Duration (s)": config["end_time"] - config["start_time"]
+            }
+
+            # Extract the task metrics from the JSON data
+            for k_metric, v_dict in results.items():
+                if k_metric != "all":
+                    for k, v in v_dict.items():
+                        cur_result[f"{k}({k_metric})"] = v
+            all_results.append(cur_result)
+        except Exception as e:
+            print(f"Error processing {org_model} {ORG_NAME}: {e}")  # org_model, not model, which may be unbound here
+    return all_results
+
+def extract_dataviz() -> Tuple[list, list, list]:
+    """Extract the easiest samples (solved by all models) and hardest samples (failed by all models) from the details datasets.
+    """
+    all_samples = {}
+    for org_model in MODELS:
+        try:
+            path = f"{ORG_NAME}/details_{org_model.replace('/', '__')}_private"
+            ds = load_dataset(path, f"custom_{TASK.replace('/', '_')}_0", split="latest")
+
+            for ix, row in enumerate(ds):
+                prompt = row["full_prompt"]
+                gold = row["gold"]
+                score = list(row["metrics"].values())[0]
+                prediction = row["predictions"][0]
+
+
+                # We store flattened samples in a dict:
+                # ix -> ix, prompt, gold, model_score and model_prediction for each model,
+                # plus 2 lists, model_scores and models, to aggregate more easily
+                if ix not in all_samples:
+                    all_samples[ix] = {
+                        "ix": ix,
+                        "prompt": prompt,
+                        "gold": gold[0] if isinstance(gold, list) else gold,
+                        # A bit redundant, but kept in their own boxes for simplicity of access later
+                        "model_scores": [],
+                        "models": []
+                    }
+                if org_model not in all_samples[ix]["models"]:
+                    all_samples[ix][f"{org_model}_score"] = row["metrics"]
+                    all_samples[ix][f"{org_model}_prediction"] = prediction
+                    all_samples[ix]["model_scores"].append(score)
+                    all_samples[ix]["models"].append(org_model)
+
+        except Exception as e:
+            print(f"Error processing {org_model}: {e}")
+
+    full_samples = sorted(all_samples.values(), key=lambda r: r["ix"])
+    hard_samples = sorted([sample for sample in all_samples.values() if sum(sample["model_scores"]) == 0], key=lambda r: r["ix"])
+    easy_samples = sorted([sample for sample in all_samples.values() if sum(sample["model_scores"]) == len(sample["model_scores"])], key=lambda r: r["ix"])
+    return easy_samples, hard_samples, full_samples
+
+def samples_to_box_display(samples: list, example_index: int = 0):
+    """Adapted from Nathan's code in https://huggingface.co/spaces/SaylorTwift/OpenEvalsModelDetails/
+    """
+    if len(samples) == 0:
+        return "No samples in this category!"
+    outputs = []
+    sample = samples[example_index]
+    for model in sample["models"]:
+        try:
+            outputs.append({
+                "Model": model,
+                "Prediction": sample[f"{model}_prediction"],
+                "Prompt": sample["prompt"],
+                "Metrics": sample[f"{model}_score"],
+                "Gold": sample["gold"]
+            })
+        except (KeyError, IndexError):
+            continue
+
+    if not outputs:
+        return "No results found for the selected combination."
+
+    # Create HTML output with all models
+    html_output = "<div style='max-width: 800px; margin: 0 auto;'>\n\n"
+
+    # Show gold answer at the top with distinct styling
+    if outputs:
+        html_output += "<div style='background: #e6f3e6; padding: 20px; border-radius: 10px; margin-bottom: 20px;'>\n"
+        html_output += "<h3 style='margin-top: 0;'>Ground Truth</h3>\n"
+        html_output += "<div style='overflow-x: auto; max-width: 100%;'>\n"
+        html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 0;'><code>{outputs[0]['Gold']}</code></pre>\n"
+        html_output += "</div>\n"
+        html_output += "</div>\n"
+
+    for output in outputs:
+        html_output += "<div style='background: #f5f5f5; padding: 20px; margin-bottom: 20px; border-radius: 10px;'>\n"
+        html_output += f"<h2 style='margin-top: 0;'>{output['Model']}</h2>\n"
+
+        # Format metrics as a clean table
+        html_output += "<details open style='margin-bottom: 15px;'>\n"
+        html_output += "<summary><h3 style='display: inline; margin: 0;'>Metrics</h3></summary>\n"
+        metrics = output["Metrics"]
+        if isinstance(metrics, str):
+            metrics = ast.literal_eval(metrics)  # parse a stringified dict without eval's arbitrary-code risk
+        html_output += "<div style='overflow-x: auto;'>\n"
+        html_output += "<table style='width: 100%; margin: 10px 0; border-collapse: collapse;'>\n"
+        for key, value in metrics.items():
+            if isinstance(value, float):
+                value = f"{value:.3f}"
+            html_output += f"<tr><td style='padding: 5px; border-bottom: 1px solid #ddd;'><strong>{key}</strong></td><td style='padding: 5px; border-bottom: 1px solid #ddd;'>{value}</td></tr>\n"
+        html_output += "</table>\n"
+        html_output += "</div>\n"
+        html_output += "</details>\n\n"
+
+        # Handle prompt formatting with better styling
+        html_output += "<details style='margin-bottom: 15px;'>\n"
+        html_output += "<summary><h3 style='display: inline; margin: 0;'>Prompt</h3></summary>\n"
+        html_output += "<div style='background: #ffffff; padding: 15px; border-radius: 5px; margin-top: 10px;'>\n"
+
+        prompt_text = output["Prompt"]
+        if isinstance(prompt_text, list):
+            for msg in prompt_text:
+                if isinstance(msg, dict) and "content" in msg:
+                    role = msg.get("role", "message").title()
+                    html_output += "<div style='margin-bottom: 10px;'>\n"
+                    html_output += f"<strong>{role}:</strong>\n"
+                    html_output += "<div style='overflow-x: auto;'>\n"
+                    html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 5px 0;'><code>{msg['content']}</code></pre>\n"
+                    html_output += "</div>\n"
+                    html_output += "</div>\n"
+                else:
+                    html_output += "<div style='margin-bottom: 10px;'>\n"
+                    html_output += "<div style='overflow-x: auto;'>\n"
+                    html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 5px 0;'><code>{json.dumps(msg, indent=2)}</code></pre>\n"
+                    html_output += "</div>\n"
+                    html_output += "</div>\n"
+        else:
+            html_output += "<div style='overflow-x: auto;'>\n"
+            if isinstance(prompt_text, dict) and "content" in prompt_text:
+                html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 5px 0;'><code>{prompt_text['content']}</code></pre>\n"
+            else:
+                html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 5px 0;'><code>{prompt_text}</code></pre>\n"
+            html_output += "</div>\n"
+
+        html_output += "</div>\n"
+        html_output += "</details>\n\n"
+
+        # Style prediction output - now in a collapsible section
+        html_output += "<details open style='margin-bottom: 15px;'>\n"
+        html_output += "<summary><h3 style='display: inline; margin: 0;'>Prediction</h3>"
+        # Add word count in a muted style
+        word_count = len(output["Prediction"].split())
+        html_output += f"<span style='color: #666; font-size: 0.8em; margin-left: 10px;'>({word_count} words)</span>"
+        html_output += "</summary>\n"
+        html_output += "<div style='background: #ffffff; padding: 15px; border-radius: 5px; margin-top: 10px;'>\n"
+        html_output += "<div style='overflow-x: auto;'>\n"
+        html_output += f"<pre style='white-space: pre-wrap; word-wrap: break-word; margin: 0;'><code>{output['Prediction']}</code></pre>\n"
+        html_output += "</div>\n"
+        html_output += "</div>\n"
+        html_output += "</details>\n"
+        html_output += "</div>\n\n"
+
+    html_output += "</div>"
+    return html_output
+
+def run_pipeline(samples_ix: int = 0):
+    results = aggregate_results()
+    best_samples, worst_samples, all_samples = extract_dataviz()
+    return gr.DataFrame(Dataset.from_list(results).to_pandas(), visible=True), \
+        gr.HTML(samples_to_box_display(best_samples, samples_ix), label="Easiest samples (always found)", visible=True), \
+        gr.HTML(samples_to_box_display(worst_samples, samples_ix), label="Hardest samples (always failed)", visible=True), \
+        gr.HTML(samples_to_box_display(all_samples, samples_ix), label="All samples", visible=True)
+
+def update_examples(samples_ix: int = 0):
+    best_samples, worst_samples, all_samples = extract_dataviz()
+    return samples_to_box_display(best_samples, samples_ix), \
+        samples_to_box_display(worst_samples, samples_ix), \
+        samples_to_box_display(all_samples, samples_ix)
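
Because samples_to_box_display only reads plain dict keys, it can be previewed without any Hub access by hand-building one sample in the flattened shape extract_dataviz produces. A minimal sketch with made-up values (org/model-a is hypothetical):

from utils import samples_to_box_display

# One flattened sample, mirroring the keys extract_dataviz builds per row.
fake_sample = {
    "ix": 0,
    "prompt": [{"role": "user", "content": "What is 2 + 2?"}],
    "gold": "4",
    "models": ["org/model-a"],
    "model_scores": [1],
    "org/model-a_score": {"acc": 1.0},
    "org/model-a_prediction": "The answer is 4.",
}

html = samples_to_box_display([fake_sample], example_index=0)
print(html[:200])  # spot-check the rendered card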