DinoFrog committed
Commit 15a1d6d · verified · 1 Parent(s): 3e473a8

Update app.py

Files changed (1)
  1. app.py +26 -53
app.py CHANGED
@@ -1,29 +1,21 @@
 import gradio as gr
-import requests
 from transformers import pipeline
 from langdetect import detect
+from huggingface_hub import InferenceClient
 import pandas as pd
-import textstat
-import matplotlib.pyplot as plt
 import os
 
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-
 # Fonction pour appeler l'API Zephyr
 def call_zephyr_api(prompt, hf_token=HF_TOKEN):
-    API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
-    headers = {"Authorization": f"Bearer {hf_token}"}
-    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 300}}
-
+    client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
     try:
-        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
-        response.raise_for_status()
-        return response.json()[0]["generated_text"]
+        response = client.text_generation(prompt, max_new_tokens=300)
+        return response
     except Exception as e:
         raise gr.Error(f"❌ Erreur d'appel API Hugging Face : {str(e)}")
 
-
 # Chargement du modèle de sentiment
 classifier = pipeline("sentiment-analysis", model="mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")
 
@@ -41,25 +33,10 @@ def suggest_model(text):
     else:
         return "Précis"
 
-# Fonction pour générer un graphique de clarté
-def plot_clarity(clarity_scores):
-    plt.figure(figsize=(8, 4))
-    plt.plot(range(1, len(clarity_scores) + 1), clarity_scores, marker='o')
-    plt.title("Évolution du Score de Clarté")
-    plt.xlabel("Numéro d'analyse")
-    plt.ylabel("Score de Clarté")
-    plt.ylim(0, 100)
-    plt.grid(True)
-    return plt.gcf()
-
-# Fonction pour reset le graphique
-def reset_clarity_graph():
-    return [], plot_clarity([])
-
 # Fonction d'analyse
-def full_analysis(text, mode, detail_mode, count, history, clarity_scores):
+def full_analysis(text, mode, detail_mode, count, history):
     if not text:
-        return "Entrez une phrase.", "", "", 0, history, clarity_scores, None, None
+        return "Entrez une phrase.", "", "", 0, history, None
 
     try:
         lang = detect(text)
@@ -72,30 +49,35 @@ def full_analysis(text, mode, detail_mode, count, history, clarity_scores):
     result = classifier(text)[0]
     sentiment_output = f"Sentiment : {result['label']} (Score: {result['score']:.2f})"
 
-    prompt = f"""
-    You are a financial analyst AI.
-    Based on the following financial news: \"{text}\",
-    explain clearly why the sentiment is {result['label'].lower()}.
-    {"Write a concise paragraph." if detail_mode == "Normal" else "Write a detailed explanation over multiple paragraphs."}
-    """
-
+    prompt = f"""<|system|>
+You are a professional financial analyst AI.
+</s>
+<|user|>
+Analyze the following financial news carefully:
+"{text}"
+
+The detected sentiment for this news is: {result['label'].lower()}.
+
+Now, explain why the sentiment is {result['label'].lower()} using a logical, fact-based explanation.
+Base your reasoning only on the given news text.
+Do not repeat the news text or the prompt.
+Respond only with your financial analysis in one clear paragraph.
+Write in a clear and professional tone.
+</s>
+<|assistant|>"""
     explanation_en = call_zephyr_api(prompt)
     explanation_fr = translator_to_fr(explanation_en, max_length=512)[0]['translation_text']
 
-    clarity_score = textstat.flesch_reading_ease(explanation_en)
-    clarity_scores.append(clarity_score)
-
     count += 1
     history.append({
         "Texte": text,
         "Sentiment": result['label'],
         "Score": f"{result['score']:.2f}",
         "Explication_EN": explanation_en,
-        "Explication_FR": explanation_fr,
-        "Clarté": f"{clarity_score:.1f}"
+        "Explication_FR": explanation_fr
     })
 
-    return sentiment_output, explanation_en, explanation_fr, clarity_score, count, history, clarity_scores, plot_clarity(clarity_scores)
+    return sentiment_output, explanation_en, explanation_fr, count, history
 
 # Fonction pour télécharger historique CSV
 def download_history(history):
@@ -109,13 +91,11 @@ def download_history(history):
 # Interface Gradio
 def launch_app():
     with gr.Blocks(theme=gr.themes.Base(), css="body {background-color: #0D1117; color: white;} .gr-button {background-color: #161B22; border: 1px solid #30363D;}") as iface:
-
         gr.Markdown("# 📈 Analyse Financière Premium + Explication IA", elem_id="title")
         gr.Markdown("Entrez une actualité financière. L'IA analyse et explique en anglais/français. Choisissez votre mode d'explication.")
 
         count = gr.State(0)
         history = gr.State([])
-        clarity_scores = gr.State([])
 
         with gr.Row():
             input_text = gr.Textbox(lines=4, placeholder="Entrez une actualité ici...", label="Texte à analyser")
@@ -145,21 +125,14 @@ def launch_app():
             with gr.Column():
                 explanation_output_fr = gr.Textbox(label="Explication en Français")
 
-        clarity_score_output = gr.Textbox(label="Score de Clarté (Flesch Reading Ease)")
-        clarity_plot = gr.Plot(label="Graphique des Scores de Clarté")
         download_file = gr.File(label="Fichier CSV")
 
         input_text.change(lambda t: gr.update(value=suggest_model(t)), inputs=[input_text], outputs=[mode_selector])
 
         analyze_btn.click(
             full_analysis,
-            inputs=[input_text, mode_selector, detail_mode_selector, count, history, clarity_scores],
-            outputs=[sentiment_output, explanation_output_en, explanation_output_fr, clarity_score_output, count, history, clarity_scores, clarity_plot]
-        )
-
-        reset_graph_btn.click(
-            reset_clarity_graph,
-            outputs=[clarity_scores, clarity_plot]
+            inputs=[input_text, mode_selector, detail_mode_selector, count, history],
+            outputs=[sentiment_output, explanation_output_en, explanation_output_fr, count, history]
         )
 
         download_btn.click(
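Taken together, the commit drops the clarity-score and matplotlib features and replaces the raw requests call to the Inference API with huggingface_hub.InferenceClient, while the prompt moves to Zephyr's <|system|> / <|user|> / <|assistant|> chat format. Below is a minimal standalone sketch of the new call path; the sample headline, the hard-coded sentiment label, and the shortened prompt are illustrative assumptions, not part of the commit.

import os
from huggingface_hub import InferenceClient

# Assumptions for this sketch: the token is read from the environment,
# the headline and the sentiment label are made up for illustration.
HF_TOKEN = os.getenv("HF_TOKEN")
news = "Shares of Acme Corp fell 8% after the company cut its full-year guidance."
label = "negative"

# Zephyr chat-template prompt, in the same format full_analysis() now builds.
prompt = f"""<|system|>
You are a professional financial analyst AI.
</s>
<|user|>
Analyze the following financial news carefully:
"{news}"

The detected sentiment for this news is: {label}.

Now, explain why the sentiment is {label} using a logical, fact-based explanation.
</s>
<|assistant|>"""

# Same call path as the updated call_zephyr_api(): text_generation() returns the
# generated string directly, so the response.json()[0]["generated_text"] indexing
# from the old requests-based version is no longer needed.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=HF_TOKEN)
explanation_en = client.text_generation(prompt, max_new_tokens=300)
print(explanation_en)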