Update app.py

app.py CHANGED
@@ -16,6 +16,10 @@ import string
 import spaces
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoConfig
+import torch
+
 
 warnings.filterwarnings("ignore")
 
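Of the four lines added here, only "from transformers import AutoConfig" is effectively new: AutoModelForCausalLM, AutoTokenizer, and torch are already imported three lines earlier, and Python treats a repeated import as a no-op.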
@@ -75,8 +79,17 @@ class SafeGeocoder:
         self.cache[location] = None
         return None
 
+
+# Replace the model loading section with this:
 def load_model():
     global tokenizer, model
+    try:
+        # First ensure we have the right tokenizer class available
+        from transformers import Qwen2Tokenizer
+    except ImportError:
+        # Fallback to AutoTokenizer if specific import fails
+        pass
+
     try:
         # Generate a random location and text each time
         random_city = random.choice(["Berlin", "Paris", "London", "Tokyo", "Rome", "Madrid"])
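The try/except added above only probes whether Qwen2Tokenizer is importable and then discards the result (the body is pass). If the goal is to actually prefer the specific class when the installed transformers ships it, the guarded import usually binds both outcomes to one name. A minimal sketch of that pattern; the model id is an assumption for illustration:

try:
    # Prefer the model-specific tokenizer when this transformers build has it.
    from transformers import Qwen2Tokenizer as TokenizerClass
except ImportError:
    # Older transformers: fall back to the generic resolver.
    from transformers import AutoTokenizer as TokenizerClass

# Assumed model id, for illustration only.
tokenizer = TokenizerClass.from_pretrained("Qwen/Qwen2-0.5B-Instruct")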
@@ -86,7 +99,22 @@ def load_model():
 
         # Initialize model if not already loaded
         if model is None:
-
+            # Load config first to check for tokenizer class
+            config = AutoConfig.from_pretrained(MODEL_NAME, trust_remote_code=True)
+
+            # Load tokenizer with explicit class if needed
+            if hasattr(config, "tokenizer_class"):
+                tokenizer = AutoTokenizer.from_pretrained(
+                    MODEL_NAME,
+                    trust_remote_code=True,
+                    tokenizer_class=config.tokenizer_class
+                )
+            else:
+                tokenizer = AutoTokenizer.from_pretrained(
+                    MODEL_NAME,
+                    trust_remote_code=True
+                )
+
             model = AutoModelForCausalLM.from_pretrained(
                 MODEL_NAME,
                 torch_dtype=TORCH_DTYPE,
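Two caveats on the branch above. PretrainedConfig sets a tokenizer_class attribute on every config (defaulting to None), so hasattr(config, "tokenizer_class") is effectively always true; the meaningful test is whether the value is non-empty. Also, AutoTokenizer.from_pretrained has no tokenizer_class parameter, so the extra keyword is most likely forwarded to the tokenizer and ignored; AutoTokenizer already resolves the class named by the checkpoint's metadata on its own. A sketch of a variant that honors the config value explicitly, with a placeholder model id:

import transformers
from transformers import AutoConfig, AutoTokenizer

MODEL_NAME = "Qwen/Qwen2-0.5B-Instruct"  # placeholder; the Space defines its own MODEL_NAME

config = AutoConfig.from_pretrained(MODEL_NAME, trust_remote_code=True)

if getattr(config, "tokenizer_class", None):
    # Look up the class the config names and instantiate it directly.
    tokenizer_cls = getattr(transformers, config.tokenizer_class)
    tokenizer = tokenizer_cls.from_pretrained(MODEL_NAME, trust_remote_code=True)
else:
    # AutoTokenizer consults the checkpoint's own metadata anyway.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)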
@@ -107,7 +135,6 @@ def load_model():
 
     except Exception as e:
         return f"❌ Fehler beim Laden des Modells: {str(e)}"
-
 @spaces.GPU
 def extract_info(template, text):
     global tokenizer, model
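The German error string translates to "Error loading the model". The single removed line is the blank line that used to separate load_model from the @spaces.GPU decorator.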
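Taken together, the commit's load path reduces to: read the config, choose a tokenizer, load the model, generate. A minimal self-contained sketch of that flow; MODEL_NAME and TORCH_DTYPE stand in for the Space's globals and their values here are assumptions:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "Qwen/Qwen2-0.5B-Instruct"  # assumption; the Space's real value is not shown
TORCH_DTYPE = torch.float16              # assumption; the Space's real value is not shown

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=TORCH_DTYPE,
    trust_remote_code=True,
)

# Exercise the pair once, in the spirit of what extract_info does downstream.
prompt = "Extract the city from: 'Anna lives in Berlin.'"
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))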