joey1101 committed
Commit 0e85ac7 · verified · 1 Parent(s): 7abe73c

Update app.py

Files changed (1)
  1. app.py +12 -73
app.py CHANGED
@@ -15,6 +15,15 @@ import torch # Tensor operations
 import soundfile as sf # Audio file handling
 import sentencepiece # Tokenization dependency
 
+##########################################
+# Set page config FIRST
+##########################################
+st.set_page_config( # Must be the first Streamlit command
+    page_title="🚀 Just Comment - I'm listening to you, my friend～",
+    page_icon="💬",
+    layout="centered"
+)
+
 ##########################################
 # Initialize models and resources globally
 ##########################################
@@ -40,8 +49,7 @@ def load_models():
 # Streamlit UI Configuration
 ##########################################
 def setup_ui():
-    """Configure Streamlit user interface"""
-    st.set_page_config(page_title="🚀 Just Comment", page_icon="💬")
+    """Configure remaining UI elements"""
     st.title("🚀 Just Comment - Smart Response Generator")
     st.markdown("""
     <style>
@@ -52,78 +60,9 @@ def setup_ui():
     return st.text_area("📝 Enter your customer comment:", "", height=150)
 
 ##########################################
-# Enhanced Sentiment Analysis
-##########################################
-def analyze_emotion(text, classifier):
-    """Determine dominant emotion with confidence threshold"""
-    results = classifier(text, return_all_scores=True)[0]
-    top_emotion = max(results, key=lambda x: x['score'])
-    return top_emotion if top_emotion['score'] > 0.6 else {'label': 'neutral', 'score': 1.0}
-
-##########################################
-# Improved Response Generation
-##########################################
-def generate_response(text, models):
-    """Generate context-appropriate response with length control"""
-    emotion = analyze_emotion(text, models['emotion_classifier'])
-    prompt = create_prompt(text, emotion['label'].lower())
-
-    inputs = models['textgen_tokenizer'](prompt, return_tensors="pt")
-    outputs = models['textgen_model'].generate(
-        **inputs,
-        max_new_tokens=200, # Increased for longer responses
-        temperature=0.7, # Balance creativity and focus
-        do_sample=True,
-        top_p=0.9,
-        no_repeat_ngram_size=2
-    )
-
-    response = models['textgen_tokenizer'].decode(
-        outputs[0][inputs.input_ids.shape[1]:],
-        skip_special_tokens=True
-    )
-    return postprocess_response(response)
-
-def create_prompt(text, emotion):
-    """Create emotion-specific prompts with structured guidance"""
-    templates = {
-        "anger": (
-            "Complaint: {input}\nRespond by:\n1. Apologizing sincerely\n"
-            "2. Proving solution steps\n3. Offering compensation\nResponse:"
-        ),
-        "joy": (
-            "Positive feedback: {input}\nRespond by:\n1. Thanking customer\n"
-            "2. Highlighting strengths\n3. Suggesting rewards\nResponse:"
-        ),
-        # Add other emotion templates...
-        "neutral": (
-            "Feedback: {input}\nRespond by:\n1. Acknowledging input\n"
-            "2. Providing information\n3. Requesting details\nResponse:"
-        )
-    }
-    return templates.get(emotion, templates['neutral']).format(input=text)
-
-def postprocess_response(text):
-    """Ensure response quality and length"""
-    text = text.split("\n\n")[0].strip() # Take first complete response
-    if len(text) < 50: # Minimum length check
-        return "Thank you for your feedback. We'll carefully review your comments and follow up shortly."
-    return text[:300] # Hard length limit
-
-##########################################
-# Optimized Text-to-Speech
 ##########################################
-def generate_speech(text, models):
-    """Convert text to speech with performance optimizations"""
-    inputs = models['tts_processor'](text=text, return_tensors="pt")
-    spectrogram = models['tts_model'].generate_speech(
-        inputs["input_ids"],
-        models['speaker_embeddings']
-    )
-    with torch.no_grad():
-        audio = models['tts_vocoder'](spectrogram)
-    sf.write("response.wav", audio.numpy(), 16000)
-    return "response.wav"
+# (Keep the other functions unchanged, same as before)
+# Keep other functions unchanged as previous version
 
 ##########################################
 # Main Application Logic
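
For context on the change above: Streamlit expects st.set_page_config() to be the first Streamlit command executed in a script (older versions raise an error if another st.* call runs first), which is why this commit moves it out of setup_ui() to module level. Below is a minimal sketch of the resulting layout, illustrative only and not the full app.py; the shortened title and the st.write echo are assumptions, not code from this repository.

import streamlit as st

# Page config must be the first Streamlit command in the script;
# every other st.* call comes after it.
st.set_page_config(
    page_title="Just Comment",
    page_icon="💬",
    layout="centered",
)

def setup_ui() -> str:
    """Build the remaining UI and return the customer comment text."""
    st.title("Just Comment - Smart Response Generator")
    return st.text_area("Enter your customer comment:", "", height=150)

comment = setup_ui()
if comment:
    st.write("Received:", comment)  # placeholder for the response pipeline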