Update app.py
app.py CHANGED
@@ -115,37 +115,36 @@ def load_model():
         print(f"✅ Loaded {MODEL_NAME} on {DEVICE}")
 
         # Test the model
-        … (17 lines of the old test block; their content did not survive extraction)
-        result = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-        # Check if output contains expected text
-        if "<|output|>" in result and "Berlin" in result:
-            return "✅ Modell erfolgreich geladen und getestet!"
+        test_text = "Test in Berlin."
+        test_template = '{"test_location": ""}'
+        test_template_formatted = json.dumps(json.loads(test_template), indent=4)
+        prompt = f"<|input|>\n### Template:\n{test_template_formatted}\n### Text:\n{test_text}\n\n<|output|>"
+
+        # Create inputs with proper padding and truncation
+        inputs = tokenizer([prompt], return_tensors="pt", truncation=True, max_length=MAX_INPUT_LENGTH).to(DEVICE)
+
+        # Generate output
+        with torch.no_grad():
+            outputs = model.generate(
+                **inputs,
+                max_new_tokens=50,
+                temperature=0.0,
+                do_sample=False
+            )
 
+        result = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        # Check if output contains expected text
+        if "<|output|>" in result and "Berlin" in result:
+            return "✅ Modell erfolgreich geladen und getestet!"
+
         return "⚠️ Modell-Test nicht erfolgreich. Bitte versuchen Sie es erneut."
 
     except Exception as e:
         import traceback
         trace = traceback.format_exc()
         print(f"Error loading model: {e}\n{trace}")
         return f"❌ Fehler beim Laden des Modells: {str(e)}"
-
 @spaces.GPU
 def extract_info(template, text):
     global tokenizer, model
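
For context: the prompt built in the new test follows the NuExtract-style `<|input|>` / `### Template:` / `<|output|>` layout. A standalone sketch of what that test prompt actually renders to, using only the standard library:

import json

# Rebuild the exact test prompt from the diff and print it, to show
# what the model receives. Nothing here depends on app.py.
test_text = "Test in Berlin."
test_template = '{"test_location": ""}'
test_template_formatted = json.dumps(json.loads(test_template), indent=4)
prompt = f"<|input|>\n### Template:\n{test_template_formatted}\n### Text:\n{test_text}\n\n<|output|>"
print(prompt)
# Prints:
# <|input|>
# ### Template:
# {
#     "test_location": ""
# }
# ### Text:
# Test in Berlin.
#
# <|output|>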
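
And a minimal sketch of the added smoke test pulled out as a standalone function, for reproducing the check outside the Space. MODEL_NAME, DEVICE, and MAX_INPUT_LENGTH are defined elsewhere in app.py and are not shown in this hunk, so the values below are placeholders (the model name is a guess from the prompt format). One deliberate change: `temperature=0.0` is dropped, since `do_sample=False` already makes generation greedy and recent transformers releases warn that `temperature` is ignored in that case.

import json
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed stand-ins for module-level constants in app.py (not shown in this hunk).
MODEL_NAME = "numind/NuExtract"  # assumption, inferred from the prompt format
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
MAX_INPUT_LENGTH = 4000  # assumption

def smoke_test(tokenizer, model) -> bool:
    """Mirror the diff's load-time check: extract one field, look for it in the output."""
    test_text = "Test in Berlin."
    template = json.dumps(json.loads('{"test_location": ""}'), indent=4)
    prompt = f"<|input|>\n### Template:\n{template}\n### Text:\n{test_text}\n\n<|output|>"

    inputs = tokenizer([prompt], return_tensors="pt",
                       truncation=True, max_length=MAX_INPUT_LENGTH).to(DEVICE)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=50, do_sample=False)

    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return "<|output|>" in result and "Berlin" in result

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(DEVICE)
    print("OK" if smoke_test(tokenizer, model) else "FAILED")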