# Source: Hugging Face Space app.py (author: DinoFrog, commit 52ab5c8)
import asyncio
import os
import tempfile

import gradio as gr
import nltk
import pandas as pd
from huggingface_hub import InferenceClient
from langdetect import detect
from nltk.tokenize import sent_tokenize
from transformers import pipeline
# Download the NLTK "punkt_tab" tokenizer data required by sent_tokenize().
# The hard-coded system directory may not be writable outside the Space
# container, so fall back to NLTK's default (user-writable) data path
# before giving up — previously any failure crashed the app at import time.
try:
    nltk.download('punkt_tab', download_dir='/usr/local/share/nltk_data')
except Exception:
    try:
        # Fallback: let NLTK choose its default download directory.
        nltk.download('punkt_tab')
    except Exception as e:
        raise Exception(f"Erreur lors du téléchargement de punkt_tab : {str(e)}. Veuillez vérifier votre connexion réseau et les permissions du répertoire /usr/local/share/nltk_data.")
# Hugging Face API token read from the environment (typically a Space secret).
HF_TOKEN = os.getenv("HF_TOKEN")
# Query the Zephyr model through the HF Inference API with mode-dependent
# generation parameters.
async def call_zephyr_api(prompt, mode, hf_token=HF_TOKEN):
    """Generate text with HuggingFaceH4/zephyr-7b-beta for *prompt*.

    *mode* selects a (max_new_tokens, temperature) preset:
    "Rapide" -> (50, 0.3), "Équilibré" -> (100, 0.5), anything else
    (i.e. "Précis") -> (150, 0.7). Raises gr.Error on any API failure.
    """
    presets = {
        "Rapide": (50, 0.3),
        "Équilibré": (100, 0.5),
    }
    # Unknown modes fall back to the "Précis" preset.
    max_new_tokens, temperature = presets.get(mode, (150, 0.7))
    client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
    try:
        # text_generation is blocking; run it in a worker thread so the
        # event loop stays responsive.
        return await asyncio.to_thread(
            client.text_generation,
            prompt,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
        )
    except Exception as e:
        raise gr.Error(f"❌ Erreur d'appel API Hugging Face : {str(e)}")
# Sentiment model used to score the (English) input text.
classifier = pipeline("sentiment-analysis", model="mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")
# Translation models: any language -> English, and English -> French.
translator_to_en = pipeline("translation", model="Helsinki-NLP/opus-mt-mul-en")
translator_to_fr = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
# French translation with Helsinki-NLP, done sentence by sentence.
def safe_translate_to_fr(text, max_length=512):
    """Return *text* translated into French, or an error string on failure.

    The text is first split into sentences so that each chunk stays
    within the translation model's length limit.
    """
    try:
        fragments = [
            translator_to_fr(sentence, max_length=max_length)[0]['translation_text']
            for sentence in sent_tokenize(text)
        ]
        return " ".join(fragments)
    except Exception as e:
        # Best-effort: surface the problem as text instead of crashing the UI.
        return f"Erreur de traduction : {str(e)}"
# Suggest a response mode based on the length of the question.
def suggest_model(text):
    """Return "Rapide" (< 50 words), "Équilibré" (50-200) or "Précis" (> 200)."""
    n_words = len(text.split())
    if n_words >= 50:
        return "Équilibré" if n_words <= 200 else "Précis"
    return "Rapide"
# Render a small HTML gauge visualising the sentiment score.
def create_sentiment_gauge(sentiment, score):
    """Return an HTML snippet showing *score* (0..1) as a coloured bar.

    Green for "positive", red for "negative", grey for anything else
    (case-insensitive label match).
    """
    palette = {"positive": "#2E8B57", "negative": "#DC143C"}
    color = palette.get(sentiment.lower(), "#A9A9A9")
    score_percentage = score * 100
    return f"""
<div style='width: 100%; max-width: 300px; margin: 10px 0;'>
<div style='background-color: #D3D3D3; border-radius: 5px; height: 20px; position: relative;'>
<div style='background-color: {color}; width: {score_percentage}%; height: 100%; border-radius: 5px;'></div>
<span style='position: absolute; top: 0; left: 50%; transform: translateX(-50%); font-weight: bold;'>{score_percentage:.1f}%</span>
</div>
<div style='text-align: center; margin-top: 5px;'>Sentiment : {sentiment}</div>
</div>
"""
# Main analysis pipeline, streamed step by step to the Gradio UI.
async def full_analysis(text, mode, detail_mode, count, history):
    """Run the full analysis for *text* and stream progress to the UI.

    Yields 8-tuples of (sentiment text, echoed question, EN explanation,
    FR explanation, analysis count, history, gauge HTML, progress message)
    after each step: language detection, sentiment classification,
    Zephyr explanation, French translation.

    NOTE(review): *detail_mode* is currently unused — confirm whether it
    should influence the explanation prompt.
    """
    if not text:
        yield "Entrez une phrase.", "", "", "", 0, history, "", "Aucune analyse effectuée."
        return
    yield "Analyse en cours... (Étape 1 : Détection de la langue)", "", "", "", count, history, "", "Détection de la langue"
    try:
        lang = detect(text)
    except Exception:  # was a bare except: — don't swallow SystemExit/KeyboardInterrupt
        lang = "unknown"
    # The sentiment classifier expects English input; translate if needed.
    if lang != "en":
        text_en = translator_to_en(text, max_length=512)[0]['translation_text']
    else:
        text_en = text
    yield "Analyse en cours... (Étape 2 : Analyse du sentiment)", "", "", "", count, history, "", "Analyse du sentiment"
    # Blocking pipeline call — run in a worker thread.
    result = await asyncio.to_thread(classifier, text_en)
    result = result[0]
    sentiment_output = f"Sentiment prédictif : {result['label']} (Score: {result['score']:.2f})"
    sentiment_gauge = create_sentiment_gauge(result['label'], result['score'])
    yield "Analyse en cours... (Étape 3 : Explication IA)", "", "", "", count, history, "", "Génération de l'explication"
    # Zephyr chat-template prompt; the original (untranslated) question is used.
    explanation_prompt = f"""<|system|>
You are a professional financial analyst AI with expertise in economic forecasting.
</s>
<|user|>
Given the following question about a potential economic event: "{text}"
The predicted sentiment for this event is: {result['label'].lower()}.
Assume the event happens. Explain why this event would likely have a {result['label'].lower()} economic impact.
</s>
<|assistant|>"""
    explanation_en = await call_zephyr_api(explanation_prompt, mode)
    yield "Analyse en cours... (Étape 4 : Traduction en français)", "", "", "", count, history, "", "Traduction en français"
    explanation_fr = safe_translate_to_fr(explanation_en)
    count += 1
    # Append to the shared gr.State history (mutated in place, then re-yielded).
    history.append({
        "Texte": text,
        "Sentiment": result['label'],
        "Score": f"{result['score']:.2f}",
        "Explication_EN": explanation_en,
        "Explication_FR": explanation_fr
    })
    yield sentiment_output, text, explanation_en, explanation_fr, count, history, sentiment_gauge, "✅ Analyse terminée."
# Export the analysis history as a CSV file for download.
def download_history(history):
    """Write *history* (list of dicts) to a CSV file and return its path.

    Returns None when the history is empty, so the UI shows no file.
    """
    if not history:
        return None
    df = pd.DataFrame(history)
    # Use the platform temp directory instead of a hard-coded "/tmp"
    # (the original path breaks on non-POSIX hosts).
    file_path = os.path.join(tempfile.gettempdir(), "analysis_history.csv")
    df.to_csv(file_path, index=False)
    return file_path
# Build and launch the Gradio interface (restored to the previous layout).
def launch_app():
    """Assemble the Gradio Blocks UI, wire the events, and launch with a share link."""
    # NOTE(review): the original file's indentation was lost; the widget
    # grouping below is a reconstruction — confirm against the deployed Space.
    custom_css = """
/* CSS restauré à la version précédente, avant les changements esthétiques non demandés */
body {
background: linear-gradient(135deg, #0A1D37 0%, #1A3C34 100%);
font-family: 'Inter', sans-serif;
color: #E0E0E0;
padding: 20px;
}
.gr-box {
background: #2A4A43 !important;
border: 1px solid #FFD700 !important;
border-radius: 12px !important;
padding: 20px !important;
box-shadow: 0px 4px 12px rgba(255, 215, 0, 0.4);
}
.gr-button {
background: linear-gradient(90deg, #FFD700, #D4AF37);
color: #0A1D37;
font-weight: bold;
border: none;
border-radius: 8px;
padding: 12px 24px;
transition: transform 0.2s;
}
.gr-button:hover {
transform: translateY(-2px);
box-shadow: 0 6px 12px rgba(255, 215, 0, 0.5);
}
"""
    with gr.Blocks(theme=gr.themes.Base(), css=custom_css) as iface:
        gr.Markdown("# 📈 Analyse Financière Premium avec IA")
        gr.Markdown("**Posez une question économique.** L'IA analyse et explique l'impact.")
        # Per-session state: number of analyses run, and the analysis history.
        count = gr.State(0)
        history = gr.State([])
        with gr.Row():
            with gr.Column(scale=2):
                input_text = gr.Textbox(lines=4, label="Votre question économique")
            with gr.Column(scale=1):
                mode_selector = gr.Dropdown(choices=["Rapide", "Équilibré", "Précis"], value="Équilibré", label="Mode de réponse")
                detail_mode_selector = gr.Dropdown(choices=["Normal", "Expert"], value="Normal", label="Niveau de détail")
                analyze_btn = gr.Button("Analyser")
                download_btn = gr.Button("Télécharger l'historique")
        with gr.Row():
            sentiment_output = gr.Textbox(label="Sentiment prédictif")
            displayed_prompt = gr.Textbox(label="Votre question", interactive=False)
        explanation_output_en = gr.Textbox(label="Explication en anglais")
        explanation_output_fr = gr.Textbox(label="Explication en français")
        sentiment_gauge = gr.HTML()
        progress_message = gr.Textbox(label="Progression", interactive=False)
        download_file = gr.File(label="Fichier CSV")
        # Auto-suggest the response mode while the user types.
        input_text.change(lambda t: gr.update(value=suggest_model(t)), inputs=[input_text], outputs=[mode_selector])
        # full_analysis is an async generator: each yield streams one step to the UI.
        analyze_btn.click(
            full_analysis,
            inputs=[input_text, mode_selector, detail_mode_selector, count, history],
            outputs=[sentiment_output, displayed_prompt, explanation_output_en, explanation_output_fr, count, history, sentiment_gauge, progress_message]
        )
        download_btn.click(
            download_history,
            inputs=[history],
            outputs=[download_file]
        )
    iface.launch(share=True)


if __name__ == "__main__":
    launch_app()