Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -2,21 +2,23 @@ import gradio as gr
 import spaces
 import torch
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
+import torch.nn.functional as F
+import torch.nn as nn
 import re
 import requests
 from urllib.parse import urlparse
 import xml.etree.ElementTree as ET
 
-#
+# Model repository and device setup
 model_path = 'ssocean/NAIP'
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
-#
+# Globals
 model = None
 tokenizer = None
 
 def fetch_arxiv_paper(arxiv_input):
-    """arXiv URL
+    """Fetch title & abstract from arXiv URL or ID."""
     try:
         if 'arxiv.org' in arxiv_input:
             parsed = urlparse(arxiv_input)
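The unchanged lines collapsed out of this hunk are the ones that pull the bare ID out of the parsed URL. A minimal standalone sketch of what that extraction is assumed to do (the helper name extract_arxiv_id is hypothetical, not part of app.py):

    from urllib.parse import urlparse

    def extract_arxiv_id(arxiv_input: str) -> str:
        # Full URL: take the last path segment, e.g. "/abs/2504.11651" -> "2504.11651".
        if 'arxiv.org' in arxiv_input:
            path = urlparse(arxiv_input).path
            return path.rstrip('/').split('/')[-1].replace('.pdf', '')
        # Otherwise treat the input as a bare arXiv ID.
        return arxiv_input.strip()

    print(extract_arxiv_id("https://arxiv.org/abs/2504.11651"))  # 2504.11651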
@@ -26,57 +28,56 @@ def fetch_arxiv_paper(arxiv_input):
         api_url = f'http://export.arxiv.org/api/query?id_list={arxiv_id}'
         resp = requests.get(api_url)
         if resp.status_code != 200:
-            return {"title":"", "abstract":"", "success":False, "message":"arXiv API
+            return {"title":"", "abstract":"", "success":False, "message":"arXiv API error"}
         root = ET.fromstring(resp.text)
-        ns = {'atom':
+        ns = {'atom':'http://www.w3.org/2005/Atom'}
         entry = root.find('.//atom:entry', ns)
         if entry is None:
-            return {"title":"", "abstract":"", "success":False, "message":"
+            return {"title":"", "abstract":"", "success":False, "message":"Paper not found"}
         title = entry.find('atom:title', ns).text.strip()
         abstract = entry.find('atom:summary', ns).text.strip()
-        return {"title":
+        return {"title":title, "abstract":abstract, "success":True, "message":"Fetched successfully"}
     except Exception as e:
-        return {"title":"", "abstract":"", "success":False, "message":f"
+        return {"title":"", "abstract":"", "success":False, "message":f"Error: {e}"}
 
 @spaces.GPU(duration=60, enable_queue=True)
 def predict(title, abstract):
-    """
+    """Predict a normalized impact score (0–1) from title & abstract."""
     global model, tokenizer
 
-    # Load the model and tokenizer on the first call
     if model is None:
-
-
-
-
-
-
-
-
-
+        # Try loading full-precision on GPU first
+        try:
+            model = AutoModelForSequenceClassification.from_pretrained(
+                model_path,
+                num_labels=1,
+                torch_dtype=torch.float32,
+                device_map="auto"
+            )
+        except RuntimeError:
+            # Fallback to CPU-only
+            model = AutoModelForSequenceClassification.from_pretrained(
+                model_path,
+                num_labels=1,
+                torch_dtype=torch.float32,
+                device_map="cpu"
+            )
         tokenizer = AutoTokenizer.from_pretrained(model_path)
-        model.to(device)
     model.eval()
 
-
-    text = (
+    prompt = (
         f"Given a certain paper,\n"
         f"Title: {title.strip()}\n"
         f"Abstract: {abstract.strip()}\n"
         f"Predict its normalized academic impact (0~1):"
     )
-
-
-
-
-
-
-
-        score = min(1.0, prob + 0.05)  # +0.05 correction, capped at 1.0
-        return round(score, 4)
-    except Exception as e:
-        print("Prediction error:", e)
-        return 0.0
+    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
+    inputs = {k: v.to(device) for k, v in inputs.items()}
+    with torch.no_grad():
+        logits = model(**inputs).logits
+        prob = torch.sigmoid(logits).item()
+    score = min(1.0, prob + 0.05)  # +0.05 adjustment
+    return round(score, 4)
 
 def get_grade_and_emoji(score):
     if score >= 0.900: return "AAA 🌟"
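The rewritten predict() replaces the old try/except scoring path with a straight-line computation: the model's single regression logit is squashed through a sigmoid and nudged by a capped +0.05 offset. The arithmetic in isolation (the logit value is made up for illustration):

    import torch

    logit = torch.tensor([[1.25]])      # hypothetical raw model output
    prob = torch.sigmoid(logit).item()  # about 0.7773
    score = min(1.0, prob + 0.05)       # +0.05 offset, capped at 1.0
    print(round(score, 4))              # 0.8273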
@@ -90,17 +91,17 @@ def get_grade_and_emoji(score):
     return "C 📄"
 
 def validate_input(title, abstract):
-    """
-
+    """Ensure title ≥3 words, abstract ≥50 words, and ASCII-only."""
+    non_ascii = re.compile(r'[^\x00-\x7F]')
     if len(title.split()) < 3:
-        return False, "
+        return False, "Title must be at least 3 words."
     if len(abstract.split()) < 50:
-        return False, "
-    if
-        return False, "
-    if
-        return False, "
-    return True, "
+        return False, "Abstract must be at least 50 words."
+    if non_ascii.search(title):
+        return False, "Title contains non-ASCII characters."
+    if non_ascii.search(abstract):
+        return False, "Abstract contains non-ASCII characters."
+    return True, "Inputs look good."
 
 def update_button_status(title, abstract):
     valid, msg = validate_input(title, abstract)
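The new validation hinges on one compiled pattern: any character outside the 7-bit ASCII range (\x00-\x7F) trips the check. A quick standalone demonstration:

    import re

    non_ascii = re.compile(r'[^\x00-\x7F]')
    print(bool(non_ascii.search("Attention Is All You Need")))    # False
    print(bool(non_ascii.search("R\u00e9sum\u00e9s of papers")))  # True: accented characters are non-ASCII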
@@ -110,13 +111,12 @@ def update_button_status(title, abstract):
 
 def process_arxiv_input(arxiv_input):
     if not arxiv_input.strip():
-        return "", "", "URL
+        return "", "", "Please enter an arXiv URL or ID"
     res = fetch_arxiv_paper(arxiv_input)
     if res["success"]:
         return res["title"], res["abstract"], res["message"]
     return "", "", res["message"]
 
-# CSS definitions
 css = """
 .gradio-container {
     font-family: 'Arial', sans-serif;
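process_arxiv_input always hands back a (title, abstract, message) triple, which is what lets one handler fill three Gradio components at once. Expected shapes, illustrative only (the success case assumes network access to the arXiv API):

    # Empty input short-circuits before any network call:
    assert process_arxiv_input("  ") == ("", "", "Please enter an arXiv URL or ID")

    # A valid ID round-trips through fetch_arxiv_paper:
    title, abstract, message = process_arxiv_input("2504.11651")
    # message == "Fetched successfully" when the lookup works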
@@ -130,17 +130,11 @@ css = """
     -webkit-background-clip: text;
     -webkit-text-fill-color: transparent;
 }
-.sub-title {
-    text-align: center;
-    color: #4b5563;
-    font-size: 1.5rem !important;
-    margin-bottom: 2rem !important;
-}
 .input-section {
-    background:
+    background: #ffffff;
     padding: 2rem;
     border-radius: 1rem;
-    box-shadow: 0 4px 6px
+    box-shadow: 0 4px 6px rgba(0,0,0,0.1);
 }
 .result-section {
     background: #f8fafc;
@@ -148,13 +142,7 @@ css = """
     border-radius: 1rem;
     margin-top: 2rem;
 }
-.methodology-section {
-    background: #ecfdf5;
-    padding: 2rem;
-    border-radius: 1rem;
-    margin-top: 2rem;
-}
-.example-section {
+.methodology-section, .example-section {
     background: #fff7ed;
     padding: 2rem;
     border-radius: 1rem;
@@ -166,139 +154,134 @@ css = """
     margin: 1rem 0;
 }
 .arxiv-input {
-    margin-bottom: 1.5rem;
-    padding: 1rem;
     background: #f3f4f6;
+    padding: 1rem;
     border-radius: 0.5rem;
+    margin-bottom: 1.5rem;
 }
 .arxiv-link {
     color: #2563eb;
     text-decoration: underline;
     font-size: 0.9em;
-    margin-top: 0.5em;
 }
 .arxiv-note {
-    color: #
+    color: #666666;
     font-size: 0.9em;
     margin-top: 0.5em;
     margin-bottom: 0.5em;
 }
 """
 
-
+example_papers = [
+    {
+        "title": "Attention Is All You Need",
+        "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train.",
+        "score": 0.982,
+        "note": "Revolutionary paper introducing the Transformer architecture."
+    },
+    {
+        "title": "Language Models are Few-Shot Learners",
+        "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches.",
+        "score": 0.956,
+        "note": "Groundbreaking GPT-3 paper on few-shot learning."
+    },
+    {
+        "title": "An Empirical Study of Neural Network Training Protocols",
+        "abstract": "This paper presents a comparative analysis of different training protocols for neural networks across various architectures. We examine the effects of learning rate schedules, batch size selection, and optimization algorithms on model convergence and final performance. Our experiments span multiple datasets and model sizes, providing practical insights for deep learning practitioners.",
+        "score": 0.623,
+        "note": "Solid empirical comparison of training protocols."
+    }
+]
+
 with gr.Blocks(theme=gr.themes.Default(), css=css) as iface:
-    gr.Markdown(
-
-
-
-
-    gr.HTML("""<a href="https://visitorbadge.io/status?path=https%3A%2F%2FVIDraft-PaperImpact.hf.space">
-    <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2FVIDraft-PaperImpact.hf.space&countColor=%23263759" />
-    </a>""")
+    gr.Markdown("<div class='main-title'>Papers Impact: AI-Powered Research Impact Predictor</div>")
+    gr.HTML("""
+    <a href="https://visitorbadge.io/status?path=https%3A%2F%2FVIDraft-PaperImpact.hf.space">
+        <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2FVIDraft-PaperImpact.hf.space&countColor=%23263759" />
+    </a>
+    """)
 
     with gr.Row():
         with gr.Column(elem_classes="input-section"):
-            gr.
-
-
-
-
-
-
-
+            with gr.Group(elem_classes="arxiv-input"):
+                gr.Markdown("### Import from arXiv")
+                arxiv_input = gr.Textbox(
+                    lines=1,
+                    placeholder="e.g. 2504.11651",
+                    label="arXiv URL or ID",
+                    value="2504.11651"
+                )
+                gr.Markdown("""
+                <p class="arxiv-note">
+                    Click to use the default example or visit <a href="https://arxiv.org" class="arxiv-link" target="_blank">arxiv.org</a>
+                </p>
+                """)
+                fetch_button = gr.Button("🔍 Fetch Paper Details", variant="secondary")
 
-            gr.Markdown("###
+            gr.Markdown("### Or Enter Paper Details Manually")
             title_input = gr.Textbox(
                 lines=2,
-                placeholder="
-                label="
+                placeholder="Enter paper title (minimum 3 words)...",
+                label="Paper Title"
             )
             abstract_input = gr.Textbox(
                 lines=5,
-                placeholder="
-                label="
+                placeholder="Enter paper abstract (minimum 50 words)...",
+                label="Paper Abstract"
             )
-
-
+            validation_status = gr.Textbox(label="Validation Status", interactive=False)
+            submit_button = gr.Button("🎯 Predict Impact", interactive=False, variant="primary")
 
         with gr.Column(elem_classes="result-section"):
-
-
+            score_output = gr.Number(label="Impact Score")
+            grade_output = gr.Textbox(label="Grade", elem_classes="grade-display")
 
     with gr.Row(elem_classes="methodology-section"):
-        gr.Markdown(
-
-
-
-
-
-
-
-        """
-        )
+        gr.Markdown("""
+        ### Scientific Methodology
+        - **Training Data**: Papers from CS.CV, CS.CL (NLP), and CS.AI fields
+        - **Optimization**: NDCG optimization with Sigmoid activation & MSE loss
+        - **Validation**: Cross-validated on historical citation data
+        - **Architecture**: Transformer-based text encoder
+        - **Metrics**: Citation-pattern analysis & research influence
+        """)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        | C | <0.299 | Limited Impact | 📄 |
-        """
-        )
+        gr.Markdown("""
+        ### Rating Scale
+        | Grade | Score Range | Description        | Emoji |
+        |-------|-------------|--------------------|-------|
+        | AAA   | 0.900–1.000 | Exceptional Impact | 🌟 |
+        | AA    | 0.800–0.899 | Very High Impact   | ⭐ |
+        | A     | 0.650–0.799 | High Impact        | ✨ |
+        | BBB   | 0.600–0.649 | Above Average      | 🔵 |
+        | BB    | 0.550–0.599 | Moderate Impact    | 📘 |
+        | B     | 0.500–0.549 | Average Impact     | 📖 |
+        | CCC   | 0.400–0.499 | Below Average      | 📝 |
+        | CC    | 0.300–0.399 | Low Impact         | ✏️ |
+        | C     | <0.300      | Limited Impact     | 📄 |
+        """)
 
     with gr.Row(elem_classes="example-section"):
-        gr.Markdown("###
-        example_papers = [
-            {
-                "title": "Attention Is All You Need",
-                "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train.",
-                "score": 0.982,
-                "note": "Revolutionary paper that introduced the Transformer architecture, fundamentally changing NLP and deep learning."
-            },
-            {
-                "title": "Language Models are Few-Shot Learners",
-                "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches.",
-                "score": 0.956,
-                "note": "Groundbreaking GPT-3 paper that demonstrated the power of large language models."
-            },
-            {
-                "title": "An Empirical Study of Neural Network Training Protocols",
-                "abstract": "This paper presents a comparative analysis of different training protocols for neural networks across various architectures. We examine the effects of learning rate schedules, batch size selection, and optimization algorithms on model convergence and final performance. Our experiments span multiple datasets and model sizes, providing practical insights for deep learning practitioners.",
-                "score": 0.623,
-                "note": "Solid research paper with useful findings but more limited scope and impact."
-            }
-        ]
+        gr.Markdown("### Example Papers")
         for paper in example_papers:
-            gr.Markdown(
-
-
-
-
-
-
-            """
-            )
+            gr.Markdown(f"""
+            #### {paper['title']}
+            **Score**: {paper['score']} | **Grade**: {get_grade_and_emoji(paper['score'])}
+            {paper['abstract']}
+            *{paper['note']}*
+            ---
+            """)
 
-    #
-    title_input.change(update_button_status, [title_input, abstract_input], [
-    abstract_input.change(update_button_status, [title_input, abstract_input], [
-
+    # Event handlers
+    title_input.change(update_button_status, [title_input, abstract_input], [validation_status, submit_button])
+    abstract_input.change(update_button_status, [title_input, abstract_input], [validation_status, submit_button])
+    fetch_button.click(process_arxiv_input, [arxiv_input], [title_input, abstract_input, validation_status])
 
-    def
+    def run_prediction(t, a):
         s = predict(t, a)
         return s, get_grade_and_emoji(s)
 
-
+    submit_button.click(run_prediction, [title_input, abstract_input], [score_output, grade_output])
 
 if __name__ == "__main__":
     iface.launch()
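The two .change handlers route update_button_status into both the validation Textbox and the submit Button, so it must return one update per wired output. Its body sits outside this diff; a sketch of the assumed shape:

    def update_button_status(title, abstract):
        valid, msg = validate_input(title, abstract)
        # One return value per output: status text, then button interactivity.
        return gr.update(value=msg), gr.update(interactive=valid)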