import streamlit as st
import torch
from transformers import BertConfig, BertForSequenceClassification, BertTokenizer
import numpy as np
# Load the model and tokenizer
def load_model():
    tokenizer = BertTokenizer.from_pretrained('beomi/kcbert-base')
    config = BertConfig.from_pretrained('beomi/kcbert-base', num_labels=7)
    model = BertForSequenceClassification.from_pretrained('beomi/kcbert-base', config=config)
    model_state_dict = torch.load('sentiment7_model_acc8878.pth', map_location=torch.device('cpu'))  # run on CPU
    model.load_state_dict(model_state_dict)
    model.eval()
    return model, tokenizer
model, tokenizer = load_model()
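# Caching note (an optional sketch, not part of the original file): Streamlit
# re-runs this script on every widget interaction, so load_model() above is
# called again on each rerun. Decorating the loader with st.cache_resource
# would keep one model/tokenizer pair in memory across reruns:
#
#     @st.cache_resource
#     def load_model():
#         ...  # body unchanged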
# Define the inference function
def inference(input_doc):
    inputs = tokenizer(input_doc, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.softmax(outputs.logits, dim=1).squeeze().tolist()
    class_idx = {'Fear': 0, 'Surprise': 1, 'Anger': 2, 'Sadness': 3, 'Neutral': 4, 'Happiness': 5, 'Disgust': 6}
    results = {class_name: prob for class_name, prob in zip(class_idx, probs)}
    # Find the class with the highest probability
    max_prob_class = max(results, key=results.get)
    # Return the strongest emotion and the full probability breakdown
    return max_prob_class, results
# Set up the Streamlit interface
st.title('Sentiment Analysis: enter some text below and the app reports, as percentages, how strongly it expresses fear, surprise, anger, sadness, neutrality, happiness, and disgust')
user_input = st.text_area("Enter your text here (100 characters or fewer recommended):")
if st.button('Start'):
    max_prob_class, results = inference(user_input)
    st.write(f"Strongest emotion: {max_prob_class}")
    for class_name, prob in results.items():
        st.write(f"{class_name}: {prob:.2%}")
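# Quick local check (an illustrative sketch, not part of the original app; it
# assumes the file is saved as app.py next to sentiment7_model_acc8878.pth).
# Importing the module loads the model once; outside `streamlit run`, the
# widget calls above should only emit warnings, so inference() can be used
# directly:
#
#     from app import inference
#     top_class, class_probs = inference('오늘 하루가 정말 행복했다')  # "Today was a really happy day" (sample text)
#     print(top_class)                      # name of the strongest emotion
#     for name, prob in class_probs.items():
#         print(f'{name}: {prob:.2%}')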