Enhance model loading function with connectivity checks and detailed error reporting
utils/prediction.py  CHANGED  (+25 -10)
@@ -3,13 +3,24 @@ from transformers import AutoTokenizer
 from sklearn.preprocessing import LabelEncoder
 from utils.BiLSTM import BiLSTMAttentionBERT
 import numpy as np
+import streamlit as st
+import requests
 
 
 
 def load_model_for_prediction():
-
     try:
-
+        st.write("Starting model loading...")
+
+        # Test Hugging Face connectivity
+        st.write("Testing connection to Hugging Face...")
+        response = requests.get("https://huggingface.co/joko333/BiLSTM_v01")
+        if response.status_code != 200:
+            st.error(f"Cannot connect to Hugging Face. Status code: {response.status_code}")
+            return None, None, None
+
+        # Load model with logging
+        st.write("Loading BiLSTM model...")
         model = BiLSTMAttentionBERT.from_pretrained(
             "joko333/BiLSTM_v01",
             hidden_dim=128,
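
A note on the connectivity check introduced above: requests.get downloads the full repository page and, since no timeout is passed, can block indefinitely when the network is down. A lighter variant, sketched below rather than taken from this commit, uses a HEAD request with an explicit timeout and treats network exceptions as a failed check (hf_repo_reachable is a hypothetical helper name; the committed code inlines the check instead):

import requests

def hf_repo_reachable(url="https://huggingface.co/joko333/BiLSTM_v01", timeout=5):
    # HEAD skips the response body; allow_redirects follows any redirect
    # Hugging Face issues for repository URLs.
    try:
        response = requests.head(url, timeout=timeout, allow_redirects=True)
        return response.status_code == 200
    except requests.RequestException:
        # Timeouts, DNS failures, and connection errors all count as unreachable.
        return False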
@@ -17,10 +28,10 @@ def load_model_for_prediction():
             num_layers=2,
             dropout=0.5
         )
+        st.write("Model loaded successfully")
 
-
-
-        # Initialize label encoder with predefined classes
+        # Initialize label encoder
+        st.write("Initializing label encoder...")
         label_encoder = LabelEncoder()
         label_encoder.classes_ = np.array(['Addition', 'Causal', 'Cause and Effect',
                                            'Clarification', 'Comparison', 'Concession',
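
For context on the hunk above: the LabelEncoder is never fitted. Assigning classes_ directly defines the index-to-label mapping that inverse_transform uses, so the array order must match the order used at training time. A minimal sketch of the round trip, using only the first six classes from the diff and made-up prediction indices:

import numpy as np
from sklearn.preprocessing import LabelEncoder

label_encoder = LabelEncoder()
# Direct assignment skips fit(); index i maps to classes_[i].
label_encoder.classes_ = np.array(['Addition', 'Causal', 'Cause and Effect',
                                   'Clarification', 'Comparison', 'Concession'])

predicted_ids = np.array([0, 3, 5])  # hypothetical model outputs
print(label_encoder.inverse_transform(predicted_ids))
# -> ['Addition' 'Clarification' 'Concession']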
@@ -30,16 +41,20 @@
                                            'Illustration', 'Inference', 'Problem Solution',
                                            'Purpose', 'Sequential', 'Summary',
                                            'Temporal Sequence'])
+        st.write("Label encoder initialized")
 
-        #
-
-
-        )
+        # Load tokenizer
+        st.write("Loading tokenizer...")
+        tokenizer = AutoTokenizer.from_pretrained('dmis-lab/biobert-base-cased-v1.2')
+        st.write("Tokenizer loaded successfully")
 
         return model, label_encoder, tokenizer
 
     except Exception as e:
-
+        st.error(f"Detailed error: {str(e)}")
+        st.error(f"Error type: {type(e).__name__}")
+        import traceback
+        st.error(f"Traceback: {traceback.format_exc()}")
         return None, None, None
 
 def predict_sentence(model, sentence, tokenizer, label_encoder):
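
The tokenizer loaded in this hunk is the stock BioBERT tokenizer from the dmis-lab/biobert-base-cased-v1.2 checkpoint. How it is applied to input sentences lives in predict_sentence, which this commit does not touch, so the call below is only an illustrative encoding, with the sentence, max_length, and padding chosen arbitrarily:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('dmis-lab/biobert-base-cased-v1.2')
# Encode one sentence into fixed-length tensors; values are illustrative.
encoded = tokenizer("Aspirin reduces the risk of stroke.",
                    truncation=True, padding='max_length',
                    max_length=128, return_tensors='pt')
print(encoded['input_ids'].shape)  # torch.Size([1, 128])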
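On the widened error reporting: traceback.format_exc() picks up the active exception inside an except block, so it needs no argument. Streamlit also offers st.exception, which renders the exception and its traceback in one call; a sketch of an equivalent handler, with load_parts as a hypothetical stand-in for the body of the try block:

import streamlit as st

def load_parts():
    # Hypothetical stand-in for the try-block body; raises to demonstrate
    # the handler.
    raise RuntimeError("demo failure")

try:
    model, label_encoder, tokenizer = load_parts()
except Exception as e:
    # st.exception renders type, message, and full traceback in one call,
    # covering what the three separate st.error calls report.
    st.exception(e)
    model, label_encoder, tokenizer = None, None, None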
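One broader observation: every Streamlit rerun executes load_model_for_prediction again, repeating the connectivity check and the st.write logging on each widget interaction. If the surrounding app does not already cache the call, a st.cache_resource wrapper (available in recent Streamlit releases; a suggestion, not part of this commit) would keep the loaded objects alive across reruns:

import streamlit as st

from utils.prediction import load_model_for_prediction

@st.cache_resource  # reuse the returned objects across reruns and sessions
def get_model_bundle():
    # Hypothetical wrapper; delegates to the function changed in this commit.
    return load_model_for_prediction()

model, label_encoder, tokenizer = get_model_bundle()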