import json
import pandas as pd
import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
from css_html_js import custom_css, trigger_plot
from parse import read_json, read_data, parse_agg
from utils import model_hyperlink, filter_RTLRepo, filter_bench, filter_bench_all, handle_special_cases, type_emoji
from typing import Union
from about import CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
import numpy as np
import plotly.graph_objects as go
import plotly.express as px
from gradio.themes.utils import colors


def filter_leaderboard(task, benchmark, model_type, search_query, max_params):
    """Filter the leaderboard by task, benchmark, model type, search query, and parameter count."""
    subset = df.copy()

    # Filter by task-specific benchmarks when 'All' benchmarks is selected
    if task == "Spec-to-RTL":
        valid_benchmarks = s2r_benchs
        if benchmark == 'All':
            subset = subset[subset['Benchmark'].isin(valid_benchmarks)]
    elif task == "Code Completion":
        valid_benchmarks = cc_benchs
        if benchmark == 'All':
            subset = subset[subset['Benchmark'].isin(valid_benchmarks)]
    elif task == "Line Completion":
        valid_benchmarks = lc_benchs
        if benchmark == 'All':
            subset = subset[subset['Benchmark'].isin(valid_benchmarks)]

    if benchmark != 'All':
        subset = df[df['Benchmark'] == benchmark]

    if model_type != 'All':  # without emojis
        subset = subset[subset['Model Type'] == model_type]

    if search_query:
        subset = subset[subset['Model'].str.contains(search_query, case=False, na=False)]

    max_params = float(max_params)
    subset = subset[subset['Params'] <= max_params]

    # Pick the aggregate column that matches the selected task/benchmark
    if benchmark == 'All':
        if task == 'Spec-to-RTL':
            return filter_bench_all(subset, df_agg, agg_column='Agg S2R')
        elif task == 'Code Completion':
            return filter_bench_all(subset, df_agg, agg_column='Agg MC')
        elif task == 'Line Completion':
            return filter_RTLRepo(subset)
    elif benchmark == 'RTL-Repo':
        return filter_RTLRepo(subset)
    else:
        agg_column = None
        if benchmark == 'VerilogEval S2R':
            agg_column = 'Agg VerilogEval S2R'
        elif benchmark == 'VerilogEval MC':
            agg_column = 'Agg VerilogEval MC'
        elif benchmark == 'RTLLM':
            agg_column = 'Agg RTLLM'
        elif benchmark == 'VeriGen':
            agg_column = 'Agg VeriGen'
        return filter_bench(subset, df_agg, agg_column)


def update_benchmarks_by_task(task):
    """Refresh the benchmark choices for the selected task and re-filter the leaderboard."""
    if task == "Spec-to-RTL":
        new_benchmarks = ["All"] + s2r_benchs
    elif task == "Code Completion":
        new_benchmarks = ["All"] + cc_benchs
    elif task == "Line Completion":
        new_benchmarks = lc_benchs
    else:
        new_benchmarks = ["All"] + benchmarks

    benchmark_value = "All" if "All" in new_benchmarks else new_benchmarks[0]
    filtered = filter_leaderboard(task, benchmark_value, model_type_dropdown.value, search_box.value, params_slider.value)
    return gr.update(value=benchmark_value, choices=new_benchmarks), filtered


def generate_scatter_plot(benchmark, metric):
    """Build a parameter-count vs. score scatter plot for the selected benchmark and metric."""
    benchmark, metric = handle_special_cases(benchmark, metric)

    subset = df[df['Benchmark'] == benchmark]
    if benchmark == "RTL-Repo":
        subset = subset[subset['Metric'].str.contains('EM', case=False, na=False)]
        detailed_scores = subset.groupby('Model', as_index=False)['Score'].mean()
        detailed_scores.rename(columns={'Score': 'Exact Matching (EM)'}, inplace=True)
    else:
        detailed_scores = subset.pivot_table(index='Model', columns='Metric', values='Score').reset_index()

    details = df[['Model', 'Params', 'Model Type']].drop_duplicates('Model')
    scatter_data = pd.merge(detailed_scores, details, on='Model', how='left').dropna(subset=['Params', metric])

    scatter_data['x'] = scatter_data['Params']
    scatter_data['y'] = scatter_data[metric]
    scatter_data['size'] = (scatter_data['x'] ** 0.3) * 40

    type_colors = {"General": "green", "Coding": "yellow", "RTL-Specific": "blue"}
    scatter_data['color'] = scatter_data['Model Type'].map(type_colors).fillna('gray')

    y_axis_limits = {
        'Functionality (FNC)': [5, 90],
        'Syntax (STX)': [20, 100],
        'Synthesis (SYN)': [5, 90],
        'Power': [0, 50],
        'Performance': [0, 50],
        'Area': [0, 50],
        'Exact Matching (EM)': [0, 50],
    }
    y_range = y_axis_limits.get(metric, [0, 80])

    fig = px.scatter(
        scatter_data,
        x='x',
        y='y',
        log_x=True,
        size='size',
        color='Model Type',
        text='Model',
        hover_data={metric: ':.2f'},
        title=f'Params vs. {metric} for {benchmark}',
        labels={'x': '# Params (Log Scale)', 'y': metric},
        template="plotly_white",
        height=600,
        width=1200,
    )
    fig.update_traces(
        textposition='top center',
        textfont_size=10,
        marker=dict(opacity=0.8, line=dict(width=0.5, color='black')),
    )
    fig.update_layout(
        xaxis=dict(
            showgrid=True,
            type='log',
            tickmode='array',
            tickvals=[8, 14, 32, 72, 200, 700],
            ticktext=['8', '14', '32', '72', '200', '700'],
        ),
        showlegend=False,
        yaxis=dict(range=y_range),
        margin=dict(l=50, r=50, t=50, b=50),
        plot_bgcolor='white',
    )
    return fig


# Force the light theme so the custom CSS renders consistently
js_func = """
function refresh() {
    const url = new URL(window.location);
    if (url.searchParams.get('__theme') !== 'light') {
        url.searchParams.set('__theme', 'light');
        window.location.href = url.href;
    }
}
"""

with gr.Blocks(css=custom_css, js=js_func, theme=gr.themes.Default(primary_hue=colors.emerald)) as app:
    df, benchmarks, metrics, default_metric = read_data()
    df_agg = parse_agg("./aggregated_scores.csv")

    tasks = ["Spec-to-RTL", "Code Completion", "Line Completion"]
    s2r_benchs = ["VerilogEval S2R", "RTLLM"]
    cc_benchs = ["VerilogEval MC", "VeriGen"]
    lc_benchs = ["RTL-Repo"]
    non_rtl_metrics = ["Syntax (STX)", "Functionality (FNC)", "Synthesis (SYN)", "Power", "Performance", "Area"]
    rtl_metrics = ["Exact Matching (EM)"]
    model_types = ['All', 'General', 'Coding', 'RTL-Specific']

    gr.HTML("""
        <p>Welcome to the TuRTLe Model Leaderboard! Use the filters below to explore different RTL benchmarks and models.</p>
        <p>If you have any inquiries or wish to collaborate: hpai@bsc.es</p>
        <p>The High-Performance Artificial Intelligence (HPAI) group is part of the Barcelona Supercomputing Center (BSC). This leaderboard is maintained by HPAI as part of our commitment to open science.</p>
        <p>Feel free to contact us:</p>
        <p>Email: hpai@bsc.es</p>