Update app.py
app.py CHANGED
@@ -3,38 +3,34 @@ import requests
 import pandas as pd
 import textstat
 import os
-from transformers import pipeline
 
 # Récupération du token Hugging Face
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-# Fonction pour appeler l'API Zephyr-7B
+# Fonction pour appeler l'API Zephyr-7B
 def call_zephyr_api(prompt, hf_token=HF_TOKEN):
     API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
     headers = {"Authorization": f"Bearer {hf_token}"}
-    payload = {"inputs": prompt
+    payload = {"inputs": prompt}
 
     try:
         response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
         response.raise_for_status()
         return response.json()[0]["generated_text"].strip()
     except Exception as e:
-        raise gr.Error(f"Erreur d'appel API Hugging Face : {str(e)}")
-
-# Pipeline d'analyse de sentiment initial (optionnel)
-classifier = pipeline("sentiment-analysis", model="mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")
+        raise gr.Error(f"❌ Erreur d'appel API Hugging Face : {str(e)}")
 
 # Fonction principale d'analyse
 def full_analysis(text, history):
     if not text:
-        return "Entrez une phrase.", "", 0, history,
+        return "Entrez une phrase.", "", 0, history, 0
 
-    # 1. Demander à Zephyr de
+    # 1. Demander à Zephyr de détecter le sentiment
     prompt_sentiment = f"""
 You are a financial news sentiment detector.
 
 Given the following news text:
-"{text}"
+\"{text}\"
 
 Respond only with one word: positive, neutral, or negative.
 
@@ -45,12 +41,12 @@ Do not add any explanation or extra text.
     if detected_sentiment not in ["positive", "neutral", "negative"]:
         detected_sentiment = "neutral"
 
-    # 2. Demander
+    # 2. Demander à Zephyr d'expliquer
     prompt_explanation = f"""
 You are a financial analyst AI.
 
 Given the following financial news:
-"{text}"
+\"{text}\"
 
 The detected sentiment is: {detected_sentiment}.
 
@@ -59,11 +55,11 @@ Write a concise paragraph.
 """
     explanation = call_zephyr_api(prompt_explanation)
 
-    # 3. Calculer
+    # 3. Calculer la clarté
     clarity_score = textstat.flesch_reading_ease(explanation)
-    clarity_score = max(0, min(clarity_score, 100)) #
+    clarity_score = max(0, min(clarity_score, 100)) # Limité entre 0-100
 
-    # 4.
+    # 4. Sauvegarder dans l'historique
     history.append({
         "Texte": text,
         "Sentiment": detected_sentiment.capitalize(),
@@ -71,20 +67,7 @@ Write a concise paragraph.
         "Explication": explanation
     })
 
-    return detected_sentiment.capitalize(), explanation, clarity_score, history, clarity_score
-
-# Fonction pour générer la barre de clarté
-def generate_clarity_bar(score, sentiment):
-    color = "green" if sentiment.lower() == "positive" else ("red" if sentiment.lower() == "negative" else "gray")
-    return gr.BarPlot.update(
-        value=[["Clarity", score]],
-        x="label",
-        y="value",
-        colors=[color],
-        width=400,
-        height=50,
-        y_lim=[0, 100]
-    )
+    return detected_sentiment.capitalize(), explanation, clarity_score, history, int(clarity_score)
 
 # Fonction pour télécharger l'historique
 def download_history(history):
@@ -107,12 +90,10 @@ def launch_app():
         analyze_btn = gr.Button("Analyser")
         download_btn = gr.Button("Télécharger l'historique")
 
-
-        sentiment_output = gr.Textbox(label="Sentiment Détecté")
-        clarity_bar = gr.BarPlot()
-
+        sentiment_output = gr.Textbox(label="Sentiment Détecté")
         explanation_output = gr.Textbox(label="Explication de l'IA", lines=5)
         clarity_score_text = gr.Textbox(label="Score de Clarté (%)")
+        clarity_slider = gr.Slider(0, 100, label="Clarté (%)", interactive=False)
         file_output = gr.File(label="Fichier CSV")
 
         history = gr.State([])
@@ -120,7 +101,7 @@ def launch_app():
         analyze_btn.click(
            full_analysis,
            inputs=[input_text, history],
-           outputs=[sentiment_output, explanation_output, clarity_score_text, history,
+           outputs=[sentiment_output, explanation_output, clarity_score_text, history, clarity_slider]
        )
 
        download_btn.click(
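For context, a minimal self-contained sketch of the wiring this change arrives at, assuming Gradio 4.x and the component names used in the diff: the click handler returns five values, and the fifth (an integer clarity score) now drives the read-only clarity_slider that replaces the removed gr.BarPlot. The fake_analysis stub below is hypothetical and stands in for the real Zephyr call so the sketch runs without an HF_TOKEN.

import gradio as gr

# Hypothetical stand-in for full_analysis: same five return values
# (sentiment, explanation, clarity score text, history, slider value),
# but no call to the Hugging Face Inference API.
def fake_analysis(text, history):
    if not text:
        return "Entrez une phrase.", "", 0, history, 0
    explanation = f"Stub explanation for: {text}"
    clarity_score = 72  # stands in for the clamped flesch_reading_ease() result
    history.append({"Texte": text, "Sentiment": "Neutral", "Explication": explanation})
    return "Neutral", explanation, clarity_score, history, int(clarity_score)

with gr.Blocks() as demo:
    input_text = gr.Textbox(label="Texte")
    analyze_btn = gr.Button("Analyser")
    sentiment_output = gr.Textbox(label="Sentiment Détecté")
    explanation_output = gr.Textbox(label="Explication de l'IA", lines=5)
    clarity_score_text = gr.Textbox(label="Score de Clarté (%)")
    clarity_slider = gr.Slider(0, 100, label="Clarté (%)", interactive=False)
    history = gr.State([])

    # Five outputs, one per return value of the handler; the last one
    # feeds the non-interactive slider.
    analyze_btn.click(
        fake_analysis,
        inputs=[input_text, history],
        outputs=[sentiment_output, explanation_output, clarity_score_text, history, clarity_slider],
    )

if __name__ == "__main__":
    demo.launch()

Returning a plain number to a Slider output sets its value directly, so no component-level update call is needed.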