import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import torch.nn.functional as F

# Load the ABSA model and tokenizer
model_name = "yangheng/deberta-v3-base-absa-v1.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

labels = ["Negative", "Neutral", "Positive"]

# Analysis function: classify the sentiment toward each aspect in the sentence
def analyze_sentiment(sentence, aspects_text):
    aspects = [a.strip() for a in aspects_text.split(",") if a.strip()]
    output = ""
    for aspect in aspects:
        # Pair the sentence with the current aspect so the model scores only this aspect
        combined = f"{sentence} [ASP] {aspect}"
        inputs = tokenizer(combined, return_tensors="pt", truncation=True)
        with torch.no_grad():
            outputs = model(**inputs)
        probs = F.softmax(outputs.logits, dim=1)
        pred = torch.argmax(probs, dim=1).item()
        sentiment = labels[pred]
        output += f"- **{aspect}** → **{sentiment}**\n"
    return output

# Gradio interface
iface = gr.Interface(
    fn=analyze_sentiment,
    inputs=[
        gr.Textbox(label="Input sentence", placeholder="e.g. The battery is good but the screen is dim."),
        gr.Textbox(label="Aspect list (comma-separated)", placeholder="e.g. battery, screen"),
    ],
    outputs=gr.Markdown(label="Sentiment analysis result"),
    title="ABSA Sentiment Analyzer",
    description="Uses a Hugging Face Transformers model",
)

iface.launch()
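
# --- Optional sanity check (a minimal sketch; uncomment and run instead of launching
# the UI). It only assumes the model and analyze_sentiment defined above, and prints a
# Markdown bullet list with one "- **aspect** → **sentiment**" line per aspect. ---
# print(analyze_sentiment(
#     "The battery is good but the screen is dim.",
#     "battery, screen",
# ))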