Update app.py
Browse files
app.py
CHANGED
@@ -7,11 +7,21 @@ import os
|
|
7 |
|
8 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
9 |
|
10 |
-
# Fonction pour appeler l'API Zephyr
|
11 |
-
def call_zephyr_api(prompt, hf_token=HF_TOKEN):
|
12 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
|
13 |
try:
|
14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
return response
|
16 |
except Exception as e:
|
17 |
raise gr.Error(f"❌ Erreur d'appel API Hugging Face : {str(e)}")
|
@@ -84,14 +94,14 @@ Given the following question about a potential economic event: "{text}"
|
|
84 |
Assume the event happens (e.g., if the question is "Will the Federal Reserve raise interest rates?", assume they do raise rates). What would be the likely economic impact of this event? Provide a concise explanation in one paragraph, focusing on the potential positive or negative effects on the economy. Do not repeat the question or the prompt in your response.
|
85 |
</s>
|
86 |
<|assistant|>"""
|
87 |
-
prediction_response = call_zephyr_api(prediction_prompt)
|
88 |
|
89 |
# Étape 2 : Analyser le sentiment de la réponse de Zephyr
|
90 |
result = classifier(prediction_response)[0]
|
91 |
sentiment_output = f"Sentiment prédictif : {result['label']} (Score: {result['score']:.2f})"
|
92 |
sentiment_gauge = create_sentiment_gauge(result['label'], result['score'])
|
93 |
|
94 |
-
# Étape 3 : Générer une explication détaillée
|
95 |
explanation_prompt = f"""<|system|>
|
96 |
You are a professional financial analyst AI.
|
97 |
</s>
|
@@ -102,10 +112,10 @@ Based on your prediction of the economic impact, which is: "{prediction_response
|
|
102 |
|
103 |
The predicted sentiment for this impact is: {result['label'].lower()}.
|
104 |
|
105 |
-
Now, explain why the sentiment is {result['label'].lower()} using a logical, fact-based explanation. Base your reasoning only on the predicted economic impact. Respond only with your financial analysis in one clear paragraph. Write in a clear and professional tone.
|
106 |
</s>
|
107 |
<|assistant|>"""
|
108 |
-
explanation_en = call_zephyr_api(explanation_prompt)
|
109 |
explanation_fr = translator_to_fr(explanation_en, max_length=512)[0]['translation_text']
|
110 |
|
111 |
count += 1
|
@@ -144,12 +154,12 @@ def launch_app():
|
|
144 |
mode_selector = gr.Dropdown(
|
145 |
choices=["Rapide", "Équilibré", "Précis"],
|
146 |
value="Équilibré",
|
147 |
-
label="Mode"
|
148 |
)
|
149 |
detail_mode_selector = gr.Dropdown(
|
150 |
choices=["Normal", "Expert"],
|
151 |
value="Normal",
|
152 |
-
label="Niveau de détail"
|
153 |
)
|
154 |
|
155 |
analyze_btn = gr.Button("Analyser")
|
|
|
7 |
|
8 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
9 |
|
10 |
+
# Fonction pour appeler l'API Zephyr avec des paramètres ajustés
|
11 |
+
def call_zephyr_api(prompt, mode, hf_token=HF_TOKEN):
|
12 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
|
13 |
try:
|
14 |
+
# Ajuster les paramètres en fonction du mode
|
15 |
+
if mode == "Rapide":
|
16 |
+
max_new_tokens = 100 # Réponse courte
|
17 |
+
temperature = 0.5 # Moins de créativité
|
18 |
+
elif mode == "Équilibré":
|
19 |
+
max_new_tokens = 200 # Réponse moyenne
|
20 |
+
temperature = 0.7 # Créativité modérée
|
21 |
+
else: # Précis
|
22 |
+
max_new_tokens = 300 # Réponse longue
|
23 |
+
temperature = 0.9 # Plus de créativité
|
24 |
+
response = client.text_generation(prompt, max_new_tokens=max_new_tokens, temperature=temperature)
|
25 |
return response
|
26 |
except Exception as e:
|
27 |
raise gr.Error(f"❌ Erreur d'appel API Hugging Face : {str(e)}")
|
|
|
94 |
Assume the event happens (e.g., if the question is "Will the Federal Reserve raise interest rates?", assume they do raise rates). What would be the likely economic impact of this event? Provide a concise explanation in one paragraph, focusing on the potential positive or negative effects on the economy. Do not repeat the question or the prompt in your response.
|
95 |
</s>
|
96 |
<|assistant|>"""
|
97 |
+
prediction_response = call_zephyr_api(prediction_prompt, mode)
|
98 |
|
99 |
# Étape 2 : Analyser le sentiment de la réponse de Zephyr
|
100 |
result = classifier(prediction_response)[0]
|
101 |
sentiment_output = f"Sentiment prédictif : {result['label']} (Score: {result['score']:.2f})"
|
102 |
sentiment_gauge = create_sentiment_gauge(result['label'], result['score'])
|
103 |
|
104 |
+
# Étape 3 : Générer une explication détaillée en fonction du niveau de détail
|
105 |
explanation_prompt = f"""<|system|>
|
106 |
You are a professional financial analyst AI.
|
107 |
</s>
|
|
|
112 |
|
113 |
The predicted sentiment for this impact is: {result['label'].lower()}.
|
114 |
|
115 |
+
Now, explain why the sentiment is {result['label'].lower()} using a logical, fact-based explanation. Base your reasoning only on the predicted economic impact. Respond only with your financial analysis in one clear paragraph. Write in a clear and professional tone. {"Use simple language for a general audience." if detail_mode == "Normal" else "Use detailed financial terminology for an expert audience."}
|
116 |
</s>
|
117 |
<|assistant|>"""
|
118 |
+
explanation_en = call_zephyr_api(explanation_prompt, mode)
|
119 |
explanation_fr = translator_to_fr(explanation_en, max_length=512)[0]['translation_text']
|
120 |
|
121 |
count += 1
|
|
|
154 |
mode_selector = gr.Dropdown(
|
155 |
choices=["Rapide", "Équilibré", "Précis"],
|
156 |
value="Équilibré",
|
157 |
+
label="Mode (longueur et style de réponse)"
|
158 |
)
|
159 |
detail_mode_selector = gr.Dropdown(
|
160 |
choices=["Normal", "Expert"],
|
161 |
value="Normal",
|
162 |
+
label="Niveau de détail (simplicité ou technicité)"
|
163 |
)
|
164 |
|
165 |
analyze_btn = gr.Button("Analyser")
|