File size: 7,251 Bytes
8e5b86d
 
 
 
 
 
cbd3da5
 
 
 
 
8e5b86d
 
 
 
 
 
 
 
 
 
 
 
cbd3da5
 
 
 
8e5b86d
cbd3da5
8e5b86d
cbd3da5
 
8e5b86d
 
 
 
 
 
cbd3da5
 
8e5b86d
 
 
 
cbd3da5
8e5b86d
 
 
 
 
 
cbd3da5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8e5b86d
cbd3da5
 
 
 
8e5b86d
cbd3da5
 
 
 
8e5b86d
cbd3da5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8e5b86d
cbd3da5
 
8e5b86d
cbd3da5
 
8e5b86d
 
 
 
cbd3da5
 
8e5b86d
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
import html
import json
import pickle
import time

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from matplotlib import cm
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load the trained RNN sentiment model (Keras HDF5 format) from disk.
model = tf.keras.models.load_model('sentiment_rnn.h5')

# Load the tokenizer that was fitted at training time.
# NOTE(review): unpickling is only safe for trusted files — do not point
# this at user-supplied data.
with open('tokenizer.pkl', 'rb') as f:
    tokenizer = pickle.load(f)

# Recreate the label encoder for mapping class indices back to names.
# NOTE(review): LabelEncoder sorts classes alphabetically, so
# classes_ == ['Happy', 'Neutral', 'Sad'] regardless of the order given
# here — confirm this matches the index order the model was trained
# with; predict_sentiment's hard-coded index map assumes
# [Happy, Sad, Neutral], which disagrees.
label_encoder = LabelEncoder()
label_encoder.fit(["Happy", "Sad", "Neutral"])

# Sample dataset loaded at import time.
# NOTE(review): not referenced anywhere else in this file — presumably
# intended to seed the UI examples; verify before removing.
sample_data = pd.read_csv("sentiment_dataset_1000.csv")

def predict_sentiment(text, show_details=False):
    """
    Predict the sentiment of *text* with the module-level RNN model.

    Parameters
    ----------
    text : str
        Raw input text to classify.
    show_details : bool, optional
        Accepted for interface compatibility; currently unused.

    Returns
    -------
    dict
        Keys: "sentiment" (predicted label), "confidence" (percent,
        2 dp), "confidences" (label -> probability), "processing_time"
        (milliseconds, 2 dp), "word_count", "char_count", and "plot"
        (matplotlib Figure from create_confidence_plot).
    """
    start_time = time.time()

    # Tokenize and pad to a fixed length.
    # NOTE(review): maxlen=50 is assumed to match the model's training
    # input length — confirm against the training script.
    sequence = tokenizer.texts_to_sequences([text])
    padded = pad_sequences(sequence, maxlen=50)

    # Single-sample inference; [0] strips the batch dimension.
    prediction = model.predict(padded, verbose=0)[0]
    processing_time = time.time() - start_time

    predicted_class = int(np.argmax(prediction))
    sentiment = label_encoder.inverse_transform([predicted_class])[0]
    confidence = float(prediction[predicted_class])

    # BUG FIX: the per-class scores were hard-coded as
    # {0: Happy, 1: Sad, 2: Neutral}, but LabelEncoder orders classes
    # alphabetically (Happy, Neutral, Sad), so indices 1 and 2 were
    # swapped relative to the label reported above. Derive the mapping
    # from the encoder itself so the two always agree.
    confidences = {
        cls: float(score)
        for cls, score in zip(label_encoder.classes_, prediction)
    }

    # Bar-chart visualization of the class probabilities.
    fig = create_confidence_plot(confidences)

    # Simple surface statistics for the UI summary.
    word_count = len(text.split())
    char_count = len(text)

    return {
        "sentiment": sentiment,
        "confidence": round(confidence * 100, 2),
        "confidences": confidences,
        "processing_time": round(processing_time * 1000, 2),
        "word_count": word_count,
        "char_count": char_count,
        "plot": fig,
    }

def create_confidence_plot(confidences):
    """Render a horizontal bar chart of per-class confidence scores.

    Parameters
    ----------
    confidences : dict[str, float]
        Mapping of class label -> probability in [0, 1].

    Returns
    -------
    matplotlib.figure.Figure
        Figure with one labeled bar per class on a light background.
    """
    labels = list(confidences.keys())
    values = list(confidences.values())

    # FIX: matplotlib.cm.get_cmap was deprecated in 3.7 and removed in
    # 3.9; plt.get_cmap is the stable spelling across versions. Sample
    # the RdYlGn colormap away from its extremes for softer colors.
    colors = plt.get_cmap('RdYlGn')(np.linspace(0.2, 0.8, len(labels)))

    fig, ax = plt.subplots(figsize=(8, 4))
    bars = ax.barh(labels, values, color=colors)

    # Annotate each bar with its value as a percentage, just past the end.
    for bar in bars:
        width = bar.get_width()
        ax.text(width + 0.02, bar.get_y() + bar.get_height()/2,
                f'{width:.2%}',
                ha='left', va='center', fontsize=10)

    ax.set_xlim(0, 1)
    ax.set_title('Sentiment Confidence Scores', pad=20)
    # Hide the box outline; a dashed x-grid carries the scale instead.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.grid(axis='x', linestyle='--', alpha=0.7)
    # Match the dashboard's light-gray card background.
    ax.set_facecolor('#f8f9fa')
    fig.patch.set_facecolor('#f8f9fa')

    return fig

def get_sentiment_emoji(sentiment):
    """Return the display emoji for a sentiment label, or "" if unknown."""
    if sentiment == "Happy":
        return "😊"
    if sentiment == "Sad":
        return "😒"
    if sentiment == "Neutral":
        return "😐"
    return ""

def analyze_text(text):
    """Run sentiment prediction on *text* and build the Gradio outputs.

    Parameters
    ----------
    text : str
        User-supplied text from the input textbox.

    Returns
    -------
    tuple
        (summary HTML string, matplotlib Figure of confidences,
        JSON string of per-class confidence scores).
    """
    result = predict_sentiment(text)

    emoji = get_sentiment_emoji(result["sentiment"])

    # SECURITY FIX: the raw user text was previously interpolated into
    # the HTML output unescaped, allowing markup/script injection into
    # the gr.HTML component. Escape it before display; truncate long
    # inputs to 200 characters with an ellipsis, as before.
    snippet = html.escape(text[:200]) + ('...' if len(text) > 200 else '')

    # Green for Happy, red for Sad, blue otherwise (Neutral).
    label_colors = {"Happy": "#27ae60", "Sad": "#e74c3c"}
    label_color = label_colors.get(result['sentiment'], "#3498db")

    html_output = f"""
    <div style="background-color:#f8f9fa; padding:20px; border-radius:10px; margin-bottom:20px;">
        <h2 style="color:#2c3e50; margin-top:0;">Analysis Result {emoji}</h2>
        <p><strong>Text:</strong> {snippet}</p>
        <p><strong>Sentiment:</strong> <span style="font-weight:bold; color:{label_color}">{result['sentiment']}</span></p>
        <p><strong>Confidence:</strong> {result['confidence']}%</p>
        <p><strong>Processing Time:</strong> {result['processing_time']} ms</p>
        <p><strong>Word Count:</strong> {result['word_count']}</p>
        <p><strong>Character Count:</strong> {result['char_count']}</p>
    </div>
    """

    return html_output, result['plot'], json.dumps(result['confidences'], indent=2)

# Create Gradio interface: two-column layout with inputs and examples on
# the left, tabbed results (summary / raw scores / about) on the right.
with gr.Blocks(theme=gr.themes.Soft(), title="Sentiment Analysis Dashboard") as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # FIX: the header emoji was mojibake ("πŸ“Š", UTF-8 bytes of
            # the bar-chart emoji misdecoded as Latin-1); restored to 📊.
            gr.Markdown("""
            # 📊 Sentiment Analysis Dashboard
            **Analyze text for emotional sentiment** using our advanced RNN model.
            """)
            
            with gr.Group():
                text_input = gr.Textbox(
                    label="Enter your text",
                    placeholder="Type something to analyze its sentiment...",
                    lines=4,
                    max_lines=8
                )
                
                analyze_btn = gr.Button("Analyze", variant="primary")
            
            # NOTE(review): this checkbox is not wired to any event and
            # is never passed to analyze_text, so toggling it currently
            # has no effect.
            with gr.Accordion("Advanced Settings", open=False):
                show_details = gr.Checkbox(
                    label="Show detailed analysis",
                    value=True
                )
            
            gr.Markdown("### Try these examples:")
            # One-click examples that populate the input textbox.
            examples = gr.Examples(
                examples=[
                    ["I'm feeling great today!"],
                    ["My dog passed away..."],
                    ["The office is closed tomorrow."],
                    ["This is the best day ever!"],
                    ["I feel completely devastated."],
                    ["The meeting is scheduled for 2 PM."]
                ],
                inputs=[text_input],
                label="Quick Examples"
            )
        
        with gr.Column(scale=2):
            # Outputs are filled by analyze_text: HTML summary, a
            # confidence bar chart, and the raw scores as JSON.
            with gr.Tab("Results"):
                html_output = gr.HTML(label="Analysis Summary")
                plot_output = gr.Plot(label="Confidence Distribution")
            
            with gr.Tab("Raw Data"):
                json_output = gr.JSON(label="Confidence Scores")
            
            with gr.Tab("About"):
                gr.Markdown("""
                ## About This Dashboard
                
                This sentiment analysis tool uses a **Recurrent Neural Network (RNN)** with **LSTM** layers to classify text into three sentiment categories:
                
                - 😊 Happy (Positive)
                - 😒 Sad (Negative)
                - 😐 Neutral
                
                **Model Details:**
                - Trained on 1,000 labeled examples
                - 64-unit LSTM layer with regularization
                - 92% test accuracy
                
                **How to use:**
                1. Type or paste text in the input box
                2. Click "Analyze" or press Enter
                3. View the sentiment analysis results
                
                **Try the examples above for quick testing!**
                """)
    
    # Event handlers: both the button click and pressing Enter in the
    # textbox run the same analysis into the same three outputs.
    analyze_btn.click(
        fn=analyze_text,
        inputs=[text_input],
        outputs=[html_output, plot_output, json_output]
    )
    
    text_input.submit(
        fn=analyze_text,
        inputs=[text_input],
        outputs=[html_output, plot_output, json_output]
    )

# Launch the app only when run as a script (not when imported), so the
# module can be imported for testing without starting a server.
if __name__ == "__main__":
    demo.launch()