oberbics committed on
Commit
c66b983
·
verified ·
1 Parent(s): 38f0b3d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -9
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import gradio as gr
2
  import json
3
  import os
@@ -14,12 +15,9 @@ import tempfile
14
  import warnings
15
  import string
16
  import spaces
17
- from transformers import AutoModelForCausalLM, AutoTokenizer
18
- import torch
19
- from transformers import AutoConfig
20
  import torch
21
 
22
-
23
  warnings.filterwarnings("ignore")
24
 
25
  # Map Tile Providers with reliable sources
@@ -79,17 +77,25 @@ class SafeGeocoder:
79
  return None
80
 
81
 
82
- # Replace the model loading section with this:
83
  def load_model():
84
  global tokenizer, model
85
  try:
86
  if model is None:
87
- # Special handling for NuExtract tokenizer
 
 
 
 
88
  tokenizer = AutoTokenizer.from_pretrained(
89
  MODEL_NAME,
90
- trust_remote_code=True
 
91
  )
92
 
 
 
 
93
  model = AutoModelForCausalLM.from_pretrained(
94
  MODEL_NAME,
95
  torch_dtype=TORCH_DTYPE,
@@ -119,6 +125,9 @@ def load_model():
119
  return "⚠️ Modell-Test nicht erfolgreich. Bitte versuchen Sie es erneut."
120
 
121
  except Exception as e:
 
 
 
122
  return f"❌ Fehler beim Laden des Modells: {str(e)}"
123
 
124
  @spaces.GPU
@@ -360,8 +369,7 @@ h2 {
360
  border-top: 1px solid #eaeaea;
361
  }
362
  </style>
363
- """
364
-
365
  with gr.Blocks(css=custom_css, title="Daten Strukturieren und Analysieren") as demo:
366
  gr.HTML("""
367
  <div style="text-align: center; margin-bottom: 1rem">
 
1
+
2
  import gradio as gr
3
  import json
4
  import os
 
15
  import warnings
16
  import string
17
  import spaces
18
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
 
 
19
  import torch
20
 
 
21
  warnings.filterwarnings("ignore")
22
 
23
  # Map Tile Providers with reliable sources
 
77
  return None
78
 
79
 
80
+ # Fixed model loading function
81
  def load_model():
82
  global tokenizer, model
83
  try:
84
  if model is None:
85
+ # First, try to get configuration to check model type
86
+ config = AutoConfig.from_pretrained(MODEL_NAME, trust_remote_code=True)
87
+ print(f"Loading model config: {config.__class__.__name__}")
88
+
89
+ # Load tokenizer with appropriate options
90
  tokenizer = AutoTokenizer.from_pretrained(
91
  MODEL_NAME,
92
+ trust_remote_code=True,
93
+ use_fast=False # Try with use_fast=False if the regular tokenizer fails
94
  )
95
 
96
+ print(f"Successfully loaded tokenizer: {tokenizer.__class__.__name__}")
97
+
98
+ # Load the model
99
  model = AutoModelForCausalLM.from_pretrained(
100
  MODEL_NAME,
101
  torch_dtype=TORCH_DTYPE,
 
125
  return "⚠️ Modell-Test nicht erfolgreich. Bitte versuchen Sie es erneut."
126
 
127
  except Exception as e:
128
+ import traceback
129
+ trace = traceback.format_exc()
130
+ print(f"Error loading model: {e}\n{trace}")
131
  return f"❌ Fehler beim Laden des Modells: {str(e)}"
132
 
133
  @spaces.GPU
 
369
  border-top: 1px solid #eaeaea;
370
  }
371
  </style>
372
+ """
 
373
  with gr.Blocks(css=custom_css, title="Daten Strukturieren und Analysieren") as demo:
374
  gr.HTML("""
375
  <div style="text-align: center; margin-bottom: 1rem">