Spaces: Running on Zero
Update app-backup.py
app-backup.py CHANGED (+73 -27)
@@ -79,20 +79,19 @@ def predict(title, abstract):
     # 1) Load config
     config = AutoConfig.from_pretrained(model_path)

-    # 2) Remove quantization_config if it exists
-    # This ensures that 'quantization_config.to_dict()' won't be called
+    # 2) Remove quantization_config if it exists (avoid NoneType error in PEFT)
     if hasattr(config, "quantization_config"):
         del config.quantization_config

-    # 3)
+    # 3) Optionally set number of labels
     config.num_labels = 1

     # 4) Load the model
     model_loaded = AutoModelForSequenceClassification.from_pretrained(
         model_path,
-        config=config,
-        torch_dtype=torch.float32,
-        device_map=None,
+        config=config,
+        torch_dtype=torch.float32,  # float32 for stable cublasLt
+        device_map=None,
         low_cpu_mem_usage=False
     )
     model_loaded.to(device)
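For context, a standalone sketch of the load path this hunk converges on — a minimal reproduction, with a placeholder checkpoint id standing in for the Space's real model_path:

import torch
from transformers import AutoConfig, AutoModelForSequenceClassification

model_path = "your-org/your-impact-model"  # placeholder id, not the Space's real checkpoint

# Strip quantization metadata from the config before from_pretrained() tries to
# interpret it, then force a single-label head in full precision on one device.
config = AutoConfig.from_pretrained(model_path)
if hasattr(config, "quantization_config"):
    del config.quantization_config
config.num_labels = 1

model_loaded = AutoModelForSequenceClassification.from_pretrained(
    model_path,
    config=config,
    torch_dtype=torch.float32,
    device_map=None,
    low_cpu_mem_usage=False,
)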
@@ -104,7 +103,6 @@ def predict(title, abstract):
     # Assign to globals
     model, tokenizer = model_loaded, tokenizer_loaded

-    # Construct the input text prompt
     text = (
         f"Given a certain paper,\n"
         f"Title: {title.strip()}\n"
@@ -129,15 +127,23 @@ def predict(title, abstract):
 # Grading
 ##################################################
 def get_grade_and_emoji(score):
-    """Map a 0–1 score to an A/B/C style grade with an emoji."""
-    if score >= 0.900:
-
-    if score >= 0.
-
-    if score >= 0.
-
-    if score >= 0.
-
+    """Map a 0–1 score to an A/B/C style grade with an emoji indicator."""
+    if score >= 0.900:
+        return "AAA π"
+    if score >= 0.800:
+        return "AA β"
+    if score >= 0.650:
+        return "A β¨"
+    if score >= 0.600:
+        return "BBB π΅"
+    if score >= 0.550:
+        return "BB π"
+    if score >= 0.500:
+        return "B π"
+    if score >= 0.400:
+        return "CCC π"
+    if score >= 0.300:
+        return "CC βοΈ"
     return "C π"

 ##################################################
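The new ladder is equivalent to a lookup over sorted cut-offs; a minimal table-driven sketch with the same thresholds (letter grades only, since the emoji suffixes do not survive this page's encoding):

from bisect import bisect_right

THRESHOLDS = [0.300, 0.400, 0.500, 0.550, 0.600, 0.650, 0.800, 0.900]
GRADES = ["C", "CC", "CCC", "B", "BB", "BBB", "A", "AA", "AAA"]

def grade(score: float) -> str:
    # Same boundaries as get_grade_and_emoji above: >= 0.900 is AAA, < 0.300 is C.
    return GRADES[bisect_right(THRESHOLDS, score)]

assert grade(0.95) == "AAA" and grade(0.55) == "BB" and grade(0.29) == "C"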
@@ -266,7 +272,6 @@ with gr.Blocks(theme=gr.themes.Default(), css=css) as iface:
     gr.Markdown("<div class='main-title'>Papers Impact: AI-Powered Research Impact Predictor</div>")
     gr.Markdown("**Predict the potential research impact (0–1) from title & abstract.**")

-    # Row with input column + output column
     with gr.Row():
         with gr.Column(elem_classes="input-section"):
             gr.Markdown("### Import from arXiv")
@@ -277,6 +282,14 @@ with gr.Blocks(theme=gr.themes.Default(), css=css) as iface:
                 label="arXiv URL or ID",
                 value="2504.11651"
             )
+            gr.Markdown(
+                """
+                <p>
+                Enter an arXiv ID or URL. For example:
+                <code>2504.11651</code> or <code>https://arxiv.org/pdf/2504.11651</code>
+                </p>
+                """
+            )
             fetch_btn = gr.Button("π Fetch Paper Details", variant="secondary")

             gr.Markdown("### Or Enter Manually")
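The fetch handler itself is outside this diff; a hypothetical helper (name and regex are assumptions, not code from the Space) showing how both accepted input forms could be normalized to a bare ID:

import re

def normalize_arxiv_id(text: str) -> str | None:
    # Accept either "2504.11651" or a full URL such as
    # "https://arxiv.org/pdf/2504.11651" and return the bare arXiv ID.
    match = re.search(r"(\d{4}\.\d{4,5})(v\d+)?", text.strip())
    return match.group(1) if match else None

assert normalize_arxiv_id("https://arxiv.org/pdf/2504.11651") == "2504.11651"
assert normalize_arxiv_id("2504.11651") == "2504.11651"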
@@ -297,6 +310,49 @@ with gr.Blocks(theme=gr.themes.Default(), css=css) as iface:
             score_box = gr.Number(label="Impact Score")
             grade_box = gr.Textbox(label="Grade", elem_classes="grade-display")

+    ############## METHODOLOGY EXPLANATION ##############
+    gr.Markdown(
+        """
+        ### Scientific Methodology
+        - **Training Data**: Model trained on an extensive dataset of published papers in CS.CV, CS.CL, CS.AI
+        - **Optimization**: NDCG optimization with Sigmoid activation and MSE loss
+        - **Validation**: Cross-validated against historical citation data
+        - **Architecture**: Advanced transformer-based (LLaMA derivative) textual encoder
+        - **Metrics**: Quantitative analysis of citation patterns and research influence
+        """
+    )
+
+    ############## RATING SCALE ##############
+    gr.Markdown(
+        """
+        ### Rating Scale
+        | Grade | Score Range | Description | Emoji |
+        |-------|-------------|---------------------|-------|
+        | AAA | 0.900–1.000 | **Exceptional** | π |
+        | AA | 0.800–0.899 | **Very High** | β |
+        | A | 0.650–0.799 | **High** | β¨ |
+        | BBB | 0.600–0.649 | **Above Average** | π΅ |
+        | BB | 0.550–0.599 | **Moderate** | π |
+        | B | 0.500–0.549 | **Average** | π |
+        | CCC | 0.400–0.499 | **Below Average** | π |
+        | CC | 0.300–0.399 | **Low** | βοΈ |
+        | C | <0.300 | **Limited** | π |
+        """
+    )
+
+    ############## EXAMPLE PAPERS ##############
+    gr.Markdown("### Example Papers")
+    for paper in example_papers:
+        gr.Markdown(
+            f"**{paper['title']}** \n"
+            f"Score: {paper['score']} | Grade: {get_grade_and_emoji(paper['score'])} \n"
+            f"{paper['abstract']} \n"
+            f"*{paper['note']}*\n---"
+        )
+
+    ##################################################
+    # Events
+    ##################################################
     # Validation triggers
     title_input.change(update_button_status, [title_input, abs_input], [status_box, predict_btn])
     abs_input.change(update_button_status, [title_input, abs_input], [status_box, predict_btn])
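The methodology blurb added here names a sigmoid head trained with MSE; as a rough illustration only (not the Space's training code), that objective looks like:

import torch

# Hypothetical illustration: one logit per paper (num_labels = 1), squashed to
# the 0-1 range with a sigmoid and regressed against impact targets using MSE.
logits = torch.randn(4, 1)
targets = torch.tensor([[0.91], [0.42], [0.63], [0.55]])
loss = torch.nn.functional.mse_loss(torch.sigmoid(logits), targets)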
@@ -311,16 +367,6 @@ with gr.Blocks(theme=gr.themes.Default(), css=css) as iface:

     predict_btn.click(run_predict, [title_input, abs_input], [score_box, grade_box])

-    # Example papers
-    gr.Markdown("### Example Papers")
-    for paper in example_papers:
-        gr.Markdown(
-            f"**{paper['title']}** \n"
-            f"Score: {paper['score']} | Grade: {get_grade_and_emoji(paper['score'])} \n"
-            f"{paper['abstract']} \n"
-            f"*{paper['note']}*\n---"
-        )
-
 ##################################################
 # Launch
 ##################################################
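The Example Papers loop (added in an earlier hunk and removed from its old spot here) reads four keys from each entry; a placeholder showing the expected shape, with illustrative values rather than the Space's real data:

# Illustrative only: the keys the Example Papers loop expects per entry.
example_papers = [
    {
        "title": "Placeholder Paper Title",
        "score": 0.873,
        "abstract": "Short abstract text rendered under the title.",
        "note": "Why this example is listed.",
    },
]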