oberbics committed on
Commit
d3cd6fb
·
verified ·
1 Parent(s): 4a50fa6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -12
app.py CHANGED
@@ -13,6 +13,7 @@ from typing import List, Tuple, Optional
13
  import io
14
  import tempfile
15
  import warnings
 
16
 
17
  warnings.filterwarnings("ignore")
18
 
@@ -67,15 +68,19 @@ class SafeGeocoder:
67
 
68
  # Function to just load the model
69
 
 
70
  def load_model():
71
  try:
72
- # Use a simple but complete example to test actual extraction
 
 
 
 
73
  test_template = '{"test_location": ""}'
74
- test_text = "Heute sind wir in Berlin."
75
 
76
  prompt = f"<|input|>\n### Template:\n{test_template}\n### Text:\n{test_text}\n\n<|output|>"
77
 
78
- # Send a complete request with a simple test case
79
  payload = {
80
  "inputs": prompt,
81
  "parameters": {
@@ -84,35 +89,29 @@ def load_model():
84
  }
85
  }
86
 
87
-
88
- # Make the actual request
89
  response = requests.post(API_URL, headers=headers, json=payload)
90
 
91
- # Check loading status
92
  if response.status_code == 503:
93
  response_json = response.json()
94
  if "error" in response_json and "loading" in response_json["error"]:
95
  estimated_time = response_json.get("estimated_time", "unknown")
96
  return f"⏳ Modell lädt... (ca. {int(float(estimated_time)) if isinstance(estimated_time, (int, float, str)) else 'unbekannt'} Sekunden)"
97
 
98
- # Verify we got a proper response
99
  if response.status_code == 200:
100
  result = response.json()
101
 
102
- # Check for a properly formatted extraction result
103
  if isinstance(result, list) and len(result) > 0:
104
  result_text = result[0].get("generated_text", "")
105
 
106
- # Look for evidence of a completed extraction
107
- if "<|output|>" in result_text and "Berlin" in result_text:
108
  return "✅ Modell erfolgreich geladen und getestet! Sie können jetzt mit der Extraktion beginnen."
109
 
110
- # If we get here, the model response wasn't complete
111
  return "⚠️ Modell-Test nicht erfolgreich. Bitte versuchen Sie es erneut in einigen Sekunden."
112
 
113
  except Exception as e:
114
  return f"❌ Fehler beim Laden des Modells: {str(e)}"
115
-
116
  def extract_info(template, text):
117
  try:
118
  prompt = f"<|input|>\n### Template:\n{template}\n### Text:\n{text}\n\n<|output|>"
 
13
  import io
14
  import tempfile
15
  import warnings
16
+ import string
17
 
18
  warnings.filterwarnings("ignore")
19
 
 
68
 
69
  # Function to just load the model
70
 
71
+
72
  def load_model():
73
  try:
74
+ # Generate a random location and text each time
75
+ random_city = random.choice(["Berlin", "Paris", "London", "Tokyo", "Rome", "Madrid"])
76
+ random_suffix = ''.join(random.choices(string.ascii_lowercase, k=5))
77
+ test_text = f"Test in {random_city}_{random_suffix}."
78
+
79
  test_template = '{"test_location": ""}'
 
80
 
81
  prompt = f"<|input|>\n### Template:\n{test_template}\n### Text:\n{test_text}\n\n<|output|>"
82
 
83
+ # Send request with randomized input
84
  payload = {
85
  "inputs": prompt,
86
  "parameters": {
 
89
  }
90
  }
91
 
 
 
92
  response = requests.post(API_URL, headers=headers, json=payload)
93
 
 
94
  if response.status_code == 503:
95
  response_json = response.json()
96
  if "error" in response_json and "loading" in response_json["error"]:
97
  estimated_time = response_json.get("estimated_time", "unknown")
98
  return f"⏳ Modell lädt... (ca. {int(float(estimated_time)) if isinstance(estimated_time, (int, float, str)) else 'unbekannt'} Sekunden)"
99
 
 
100
  if response.status_code == 200:
101
  result = response.json()
102
 
 
103
  if isinstance(result, list) and len(result) > 0:
104
  result_text = result[0].get("generated_text", "")
105
 
106
+ # Check if response contains the random city we included
107
+ if "<|output|>" in result_text and random_city in result_text:
108
  return "✅ Modell erfolgreich geladen und getestet! Sie können jetzt mit der Extraktion beginnen."
109
 
 
110
  return "⚠️ Modell-Test nicht erfolgreich. Bitte versuchen Sie es erneut in einigen Sekunden."
111
 
112
  except Exception as e:
113
  return f"❌ Fehler beim Laden des Modells: {str(e)}"
114
+
115
  def extract_info(template, text):
116
  try:
117
  prompt = f"<|input|>\n### Template:\n{template}\n### Text:\n{text}\n\n<|output|>"