isha0110 committed
Commit 43b9c59 · verified · 1 Parent(s): c1db3b1

Update app.py

Files changed (1): app.py (+40 -121)

app.py CHANGED
@@ -49,24 +49,13 @@ except Exception as e:
     print(f"⚠️ Error loading model: {e}")
     raise e
 
-# Optimized thresholds from training (you can update these after validation)
-BEST_THRESHOLDS = np.array([0.5, 0.5, 0.5, 0.5, 0.5]) # Default thresholds
+# Optimized thresholds from training
+BEST_THRESHOLDS = np.array([0.5, 0.5, 0.5, 0.5, 0.5])
 
 def predict_emotions(text):
-    """
-    Predict emotions from text
-
-    Args:
-        text: Input text string
-
-    Returns:
-        Dictionary with emotion predictions and probabilities
-    """
+    """Predict emotions from text"""
     if not text or not text.strip():
-        return {
-            "⚠️ Error": "Please enter some text to analyze",
-            "Detected Emotions": "None"
-        }
+        return "⚠️ Please enter some text to analyze"
 
     try:
         # Tokenize
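Note: the tokenize/forward part of `predict_emotions` is unchanged and not shown in this diff. For context, a minimal sketch of how the `probs` and `predictions` arrays used in the next hunk are presumably derived from `BEST_THRESHOLDS` (the probability values below are invented):

```python
import numpy as np

# Illustrative sigmoid outputs for one input, ordered like EMOTIONS
# (anger, fear, joy, sadness, surprise) -- values are made up.
probs = np.array([0.12, 0.64, 0.81, 0.07, 0.33])
BEST_THRESHOLDS = np.array([0.5, 0.5, 0.5, 0.5, 0.5])

# Multi-label decision: each class is compared against its own threshold,
# so several emotions can be "detected" at once.
predictions = (probs >= BEST_THRESHOLDS).astype(int)
print(predictions)  # [0 1 1 0 0] -> fear and joy
```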
@@ -91,32 +80,27 @@ def predict_emotions(text):
 
         # Format results
         detected = []
-        all_probs = {}
+        output = "## 🎯 Detected Emotions:\n\n"
 
-        for i, (emotion, emoji, prob, pred) in enumerate(zip(EMOTIONS, EMOTION_EMOJIS, probs, predictions)):
-            all_probs[f"{emoji} {emotion.capitalize()}"] = float(prob)
+        for emotion, emoji, prob, pred in zip(EMOTIONS, EMOTION_EMOJIS, probs, predictions):
             if pred == 1:
-                detected.append(f"{emoji} {emotion.capitalize()} ({prob:.1%})")
+                detected.append(f"{emoji} **{emotion.capitalize()}**")
 
-        if not detected:
-            detected_str = "No strong emotions detected (all probabilities below threshold)"
+        if detected:
+            output += ", ".join(detected) + "\n\n"
         else:
-            detected_str = " ".join(detected)
+            output += "*No strong emotions detected (all below threshold)*\n\n"
 
-        result = {
-            "🎯 Detected Emotions": detected_str,
-            "📊 All Probabilities": all_probs,
-            "📝 Text Length": f"{len(text)} characters",
-            "🔍 Analysis": f"Analyzed with RoBERTa-base model"
-        }
+        output += "## 📊 All Probabilities:\n\n"
+        for emotion, emoji, prob in zip(EMOTIONS, EMOTION_EMOJIS, probs):
+            bar_length = int(prob * 20)
+            bar = "█" * bar_length + "░" * (20 - bar_length)
+            output += f"{emoji} **{emotion.capitalize()}**: {bar} {prob:.1%}\n\n"
 
-        return result
+        return output
 
     except Exception as e:
-        return {
-            "⚠️ Error": f"Prediction failed: {str(e)}",
-            "Detected Emotions": "Error"
-        }
+        return f"⚠️ Error: {str(e)}"
 
 # Example texts
 examples = [
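For reference, the new Markdown bar formatting renders like this (standalone snippet with an invented probability value):

```python
prob = 0.73  # invented value
bar_length = int(prob * 20)                      # 14 of 20 cells filled
bar = "█" * bar_length + "░" * (20 - bar_length)
print(f"😊 **Joy**: {bar} {prob:.1%}")
# 😊 **Joy**: ██████████████░░░░░░ 73.0%
```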
@@ -129,17 +113,14 @@ examples = [
 ]
 
 # Create Gradio Interface
-with gr.Blocks(theme=gr.themes.Soft(), title="Multi-Label Emotion Detection") as demo:
+with gr.Blocks() as demo:
     gr.Markdown(
         """
        # 😊 Multi-Label Emotion Classification
 
-        This model detects **multiple emotions** in text using a fine-tuned RoBERTa transformer.
-
-        ### Emotions Detected:
-        😠 Anger | 😨 Fear | 😊 Joy | 😢 Sadness | 😲 Surprise
+        Detect **multiple emotions** in text using a fine-tuned RoBERTa transformer.
 
-        ---
+        **Emotions:** 😠 Anger | 😨 Fear | 😊 Joy | 😢 Sadness | 😲 Surprise
         """
     )
 
@@ -148,25 +129,18 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Multi-Label Emotion Detection") as demo:
             text_input = gr.Textbox(
                 label="Enter your text",
                 placeholder="Type or paste text here to analyze emotions...",
-                lines=5,
-                max_lines=10
+                lines=5
             )
-
-            with gr.Row():
-                clear_btn = gr.Button("🗑️ Clear", variant="secondary")
-                analyze_btn = gr.Button("🔮 Analyze Emotions", variant="primary", scale=2)
+            analyze_btn = gr.Button("🔮 Analyze Emotions", variant="primary")
+            clear_btn = gr.Button("🗑️ Clear")
 
         with gr.Column():
-            output_json = gr.JSON(
-                label="📊 Analysis Results",
-                show_label=True
-            )
+            output = gr.Markdown(label="Analysis Results")
 
-    gr.Markdown("### 💡 Try these examples:")
     gr.Examples(
         examples=examples,
         inputs=text_input,
-        outputs=output_json,
+        outputs=output,
         fn=predict_emotions,
         cache_examples=False
     )
@@ -174,86 +148,31 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Multi-Label Emotion Detection") as demo:
     gr.Markdown(
         """
         ---
-        ### 📈 How It Works
+        ## 📈 How It Works
 
-        This is a **multi-label classification** model, meaning:
-        - Each text can have **multiple emotions** simultaneously
-        - For example, "I'm excited but nervous" → Both Joy ✅ and Fear ✅
-        - Each emotion is predicted independently with a probability score
-        - Emotions above the threshold are marked as "detected"
+        This is a **multi-label classification** model - each text can have multiple emotions!
 
         ### 🎯 Model Details
+        - **Architecture**: RoBERTa-base (125M parameters)
+        - **Max Length**: 200 tokens
+        - **Training**: BCE Loss + Label Smoothing (0.05)
+        - **Evaluation**: Macro F1-Score with per-class threshold tuning
 
-        | Component | Details |
-        |-----------|---------|
-        | **Architecture** | RoBERTa-base (125M parameters) |
-        | **Training Data** | Multi-label emotion dataset |
-        | **Max Sequence Length** | 200 tokens |
-        | **Evaluation Metric** | Macro F1-Score |
-        | **Framework** | PyTorch + Transformers |
-
-        ### 🏗️ Architecture
+        ### 🏗️ Architecture Flow
         ```
-        Input Text
-
-        RoBERTa Tokenizer (BPE)
-
-        RoBERTa Encoder (12 layers)
-
-        [CLS] Token Pooling
-
-        Dropout (0.35)
-
-        Linear Layer (768 → 5)
-
-        Sigmoid Activation
-
-        5 Emotion Probabilities
+        Input Text → Tokenizer → RoBERTa Encoder → [CLS] Pooling →
+        Dropout (0.35) → Linear (768→5) → Sigmoid → 5 Emotion Probabilities
         ```
 
-        ### ⚙️ Training Configuration
-        - **Optimizer**: AdamW (lr=2e-5, weight_decay=0.02)
-        - **Scheduler**: Linear warmup (10% of steps)
-        - **Loss Function**: BCE with Logits + Label Smoothing (0.05)
-        - **Batch Size**: 8 (with 4x gradient accumulation = effective 32)
-        - **Epochs**: 8 (with early stopping, patience=3)
-        - **Validation**: Threshold tuning per emotion class
-
-        ### 📊 Performance Optimization
-        - **Stratified split** by label distribution
-        - **Per-class threshold tuning** for optimal F1-score
-        - **Label smoothing** to prevent overconfidence
-        - **Early stopping** to prevent overfitting
-
         ---
-
-        ### 🔗 Resources
-        - **Model**: RoBERTa-base ([Hugging Face](https://huggingface.co/roberta-base))
-        - **Framework**: PyTorch + Transformers
-        - **Project**: 2025 Sep DLGenAI Course
-
-        ---
-
-        *Built with ❤️ using PyTorch, Transformers, and Gradio*
+        **Project**: 2025 Sep DLGenAI Course | **Built with**: PyTorch + Transformers + Gradio
         """
     )
 
-    # Button actions
-    analyze_btn.click(
-        fn=predict_emotions,
-        inputs=text_input,
-        outputs=output_json
-    )
-
-    clear_btn.click(
-        fn=lambda: ("", None),
-        inputs=None,
-        outputs=[text_input, output_json]
-    )
+    # Event handlers
+    analyze_btn.click(fn=predict_emotions, inputs=text_input, outputs=output)
+    clear_btn.click(fn=lambda: ("", ""), inputs=None, outputs=[text_input, output])
+    text_input.submit(fn=predict_emotions, inputs=text_input, outputs=output)
 
-# Launch the app
 if __name__ == "__main__":
-    demo.launch(
-        share=False,
-        show_error=True
-    )
+    demo.launch()
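The model class itself is not part of this commit. A minimal PyTorch sketch of the head described in the "Architecture Flow" section above (the class name `EmotionClassifier` and loading via `AutoModel` are assumptions, not the project's actual training code):

```python
import torch.nn as nn
from transformers import AutoModel

class EmotionClassifier(nn.Module):
    """RoBERTa encoder -> [CLS] pooling -> Dropout(0.35) -> Linear(768 -> 5)."""

    def __init__(self, num_labels: int = 5):
        super().__init__()
        self.encoder = AutoModel.from_pretrained("roberta-base")
        self.dropout = nn.Dropout(0.35)
        self.classifier = nn.Linear(self.encoder.config.hidden_size, num_labels)  # 768 -> 5

    def forward(self, input_ids, attention_mask):
        hidden = self.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        cls = hidden[:, 0]                          # <s>/[CLS] token pooling
        return self.classifier(self.dropout(cls))   # raw logits; apply sigmoid at inference
```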
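The "per-class threshold tuning" referenced in the Model Details section happens outside app.py and is not shown here. A sketch of one common approach, sweeping candidate thresholds per class against validation F1 (function and variable names are illustrative):

```python
import numpy as np
from sklearn.metrics import f1_score

def tune_thresholds(val_probs: np.ndarray, val_labels: np.ndarray) -> np.ndarray:
    """Pick one decision threshold per emotion by maximizing that class's F1 on validation data."""
    candidates = np.arange(0.10, 0.90, 0.05)
    best = np.full(val_probs.shape[1], 0.5)
    for c in range(val_probs.shape[1]):
        scores = [f1_score(val_labels[:, c], (val_probs[:, c] >= t).astype(int), zero_division=0)
                  for t in candidates]
        best[c] = candidates[int(np.argmax(scores))]
    return best

# The tuned array would then replace the default BEST_THRESHOLDS = np.array([0.5] * 5).
```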