Commit b976ff0
Parent(s): 2f687cc
was getting negative irrespective of input

app.py CHANGED
@@ -1,37 +1,35 @@
-from tensorflow import keras
-import tensorflow as tf
-from tensorflow.keras.datasets import imdb
-import numpy as np
 import gradio as gr

-number_of_words = 3000
-words_per_view = 30

-    encoded_word[words_per_view - len(words) - 1] = 1
-    for i, word in enumerate(words):
-        index = words_per_view - len(words) + i
-        encoded_word[index] = word_to_index.get(word, 0) + 3
-    encoded_word = np.expand_dims(encoded_word, axis=0)
-    prediction = model.predict(encoded_word)
-    return prediction

-    if result > 0.5:
-        answer = 'positive review'
-    else: answer = 'negative review'
-    return answer
-UserInputPage = gr.Interface(
-    fn=analyze_sentiment,
-    inputs = ["text"],
-    outputs=["text"]
-)
-tabbed_Interface = gr.TabbedInterface([UserInputPage], ["Check user input"])
-tabbed_Interface.launch()
 import gradio as gr
+import torch
+import tensorflow as tf
+from transformers import AutoTokenizer
+from model import SentimentClassifier
+
+model_state_dict = tf.keras.load_model('sentimentality.h5')
+model = SentimentClassifier(2)
+model.load_state_dict(model_state_dict)
+model.eval()
+
+tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

+def preprocess(text):
+    inputs = tokenizer(text, padding='max_length',
+                       truncation=True, max_length=512, return_tensors='pt')
+    return inputs
+
+# Define a function to use the model to make predictions
+def predict(review):
+    inputs = preprocess(review)
+    with torch.no_grad():
+        outputs = model(inputs['input_ids'], inputs['attention_mask'])
+    predicted_class = torch.argmax(outputs[0]).item()
+    if(predicted_class==0):
+        return "It was a negative review"
+    return "It was a positive review"

+# Create a Gradio interface
+input_text = gr.inputs.Textbox(label="Input Text")
+output_text = gr.outputs.Textbox(label="Output Text")
+interface = gr.Interface(fn=predict, inputs=input_text, outputs=output_text)

+# Run the interface
+interface.launch()
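A note on the model-loading step in the new app.py: tf.keras exposes load_model under tf.keras.models, and PyTorch's load_state_dict expects a state dict rather than a Keras model object, so those two lines only fit together if sentimentality.h5 is actually a PyTorch checkpoint. Under that assumption, a minimal sketch of the usual PyTorch loading path (the SentimentClassifier class is taken from the diff; its definition is not shown here):

import torch
from model import SentimentClassifier  # class referenced in the diff; defined in model.py

# Assumption: sentimentality.h5 holds a PyTorch state_dict saved with torch.save().
model = SentimentClassifier(2)
state_dict = torch.load('sentimentality.h5', map_location='cpu')
model.load_state_dict(state_dict)
model.eval()  # inference mode: disables dropout and freezes batch-norm statistics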
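For context on preprocess(): with return_tensors='pt' the Hugging Face tokenizer returns PyTorch tensors padded and truncated to max_length=512, which is what predict() then hands to the model. A small sketch of what that call produces, assuming 'bert-base-uncased' as in the diff:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
enc = tokenizer("great movie", padding='max_length', truncation=True,
                max_length=512, return_tensors='pt')
print(enc['input_ids'].shape)       # torch.Size([1, 512])
print(enc['attention_mask'].shape)  # torch.Size([1, 512])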
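Inside predict(), the forward pass runs under torch.no_grad() so no gradients are tracked during inference, and torch.argmax(outputs[0]).item() maps the two-class output to 0 (negative) or 1 (positive). A hypothetical smoke test of that path, assuming the model and tokenizer above loaded correctly (both review strings are made up):

# Hypothetical check: class 0 is reported as negative, anything else as positive.
for review in ["Absolutely loved this film.", "A dull, predictable mess."]:
    print(review, "->", predict(review))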
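One last note on the interface: gr.inputs.Textbox and gr.outputs.Textbox belong to the old pre-3.0 Gradio namespace, which was deprecated in 3.x and dropped in later releases. On current Gradio the same interface can be built with top-level components; a sketch assuming the predict function from the diff:

import gradio as gr

interface = gr.Interface(
    fn=predict,                            # predict() as defined in app.py above
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Output Text"),
)
interface.launch()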