import gradio as gr
import json
import os
import pandas as pd
import folium
from folium.plugins import MeasureControl, Fullscreen, MarkerCluster
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
import time
import random
from typing import List, Tuple, Optional
import io
import tempfile
import traceback
import warnings
import string
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
import torch
warnings.filterwarnings("ignore")
# Map Tile Providers with reliable sources
MAP_TILES = {
"GreenMap": {
"url": "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}",
"attr": "Esri"
}
}
# Model configuration
MODEL_NAME = "numind/NuExtract-1.5"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
TORCH_DTYPE = torch.bfloat16 if DEVICE == "cuda" else torch.float32
MAX_INPUT_LENGTH = 20000  # Maximum number of prompt tokens; longer inputs are truncated
MAX_NEW_TOKENS = 1000
# Global model variables
tokenizer = None
model = None
try:
from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer
from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM
print("Qwen2 components successfully imported")
except ImportError:
print("Could not import Qwen2 components directly")
class SafeGeocoder:
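    """Nominatim wrapper with an in-memory cache and a one-request-per-second rate limit."""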
def __init__(self):
user_agent = f"location_mapper_v1_{random.randint(1000, 9999)}"
self.geolocator = Nominatim(user_agent=user_agent, timeout=10)
self.cache = {}
self.last_request = 0
def _respect_rate_limit(self):
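        # Nominatim's public usage policy allows at most one request per second.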
current_time = time.time()
elapsed = current_time - self.last_request
if elapsed < 1.0:
time.sleep(1.0 - elapsed)
self.last_request = current_time
def get_coords(self, location: str):
if not location or pd.isna(location):
return None
location = str(location).strip()
if location in self.cache:
return self.cache[location]
try:
self._respect_rate_limit()
result = self.geolocator.geocode(location)
if result:
coords = (result.latitude, result.longitude)
self.cache[location] = coords
return coords
self.cache[location] = None
return None
except Exception as e:
print(f"Geocoding error for '{location}': {e}")
self.cache[location] = None
return None
def process_excel(file, places_column):
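    """Geocode the `places_column` of an uploaded Excel file and return (map HTML, status text, path to the enriched Excel file)."""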
if file is None:
return None, "No file uploaded", None
try:
if hasattr(file, 'name'):
df = pd.read_excel(file.name)
elif isinstance(file, bytes):
df = pd.read_excel(io.BytesIO(file))
else:
df = pd.read_excel(file)
print(f"Spalten in der Excel-Tabelle: {list(df.columns)}")
if places_column not in df.columns:
return None, f"Spalte '{places_column}' wurde in der Excel-Datei nicht gefunden. Verfügbare Spalten: {', '.join(df.columns)}", None
# Create coordinates columns
df['latitude'] = None
df['longitude'] = None
geocoder = SafeGeocoder()
coords = []
processed_count = 0
# Geocode each location and store coordinates in the DataFrame
for idx, row in df.iterrows():
if pd.isna(row[places_column]):
continue
location = str(row[places_column]).strip()
try:
locations = [loc.strip() for loc in location.split(',') if loc.strip()]
if not locations:
locations = [location]
            except Exception:
locations = [location]
for loc in locations:
point = geocoder.get_coords(loc)
if point:
df.at[idx, 'latitude'] = point[0]
df.at[idx, 'longitude'] = point[1]
coords.append(point)
processed_count += 1
break # Use first successfully geocoded location
# Create the map
map_html, _ = create_map(df, places_column)
# Save the updated DataFrame to a new Excel file
with tempfile.NamedTemporaryFile(suffix=".xlsx", delete=False) as tmp:
processed_path = tmp.name
df.to_excel(processed_path, index=False)
total_locations = df[places_column].count()
success_rate = (processed_count / total_locations * 100) if total_locations > 0 else 0
stats = f"Gefunden: {processed_count} von {total_locations} Orten ({success_rate:.1f}%)"
return map_html, stats, processed_path
except Exception as e:
import traceback
trace = traceback.format_exc()
print(f"Error processing file: {e}\n{trace}")
return None, f"Fehler bei der Verarbeitung der Datei: {str(e)}", None
# Corrected model loading function based on official usage example
@spaces.GPU
def extract_info(template, text):
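    """Extract the fields of the JSON `template` from `text` with NuExtract-1.5; loads tokenizer and model lazily and returns (status message, extracted JSON)."""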
global tokenizer, model
# Load tokenizer if not loaded yet
if tokenizer is None:
print("Tokenizer not loaded yet, loading now...")
try:
try:
from modelscope import AutoTokenizer as MSAutoTokenizer
tokenizer = MSAutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
print("Loaded tokenizer using modelscope AutoTokenizer")
            except Exception:
# Fall back to regular tokenizer
tokenizer = AutoTokenizer.from_pretrained(
MODEL_NAME,
trust_remote_code=True,
revision="main"
)
print("Loaded tokenizer using standard AutoTokenizer")
except Exception as e:
trace = traceback.format_exc()
print(f"Error loading tokenizer: {e}\n{trace}")
return "❌ Fehler beim Laden des Tokenizers", f"{str(e)}"
try:
# Load model if not loaded yet
if model is None:
print("Model not loaded yet, loading now...")
try:
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
torch_dtype=TORCH_DTYPE,
trust_remote_code=True,
revision="main",
device_map="auto" # Let the model decide CUDA placement
).eval()
print(f"✅ Model loaded successfully")
except Exception as e:
trace = traceback.format_exc()
print(f"Error loading model: {e}\n{trace}")
return f"❌ Fehler beim Laden des Modells: {str(e)}", "{}"
print("Using model for inference...")
# Format the template as proper JSON with indentation
template_formatted = json.dumps(json.loads(template), indent=4)
# Create prompt
prompt = f"<|input|>\n### Template:\n{template_formatted}\n### Text:\n{text}\n\n<|output|>"
# Tokenize with proper settings
inputs = tokenizer(
[prompt],
return_tensors="pt",
truncation=True,
padding=True,
max_length=MAX_INPUT_LENGTH
).to(model.device) # Use model's device
# Generate output with torch.no_grad() for efficiency
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=MAX_NEW_TOKENS,
                do_sample=False  # greedy decoding; temperature is irrelevant when sampling is disabled
)
# Decode the result
result_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
# Extract the output part
if "<|output|>" in result_text:
json_text = result_text.split("<|output|>")[1].strip()
else:
json_text = result_text
# Try to parse as JSON
try:
extracted = json.loads(json_text)
return "✅ Erfolgreich extrahiert", json.dumps(extracted, indent=2)
except json.JSONDecodeError:
return "❌ JSON Parsing Fehler", json_text
except Exception as e:
import traceback
trace = traceback.format_exc()
print(f"Error in extract_info: {e}\n{trace}")
return f"❌ Fehler: {str(e)}", "{}"
@spaces.GPU
def create_map(df, location_col):
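    """Build a Folium map with one clustered marker per geocoded place in `df[location_col]` and return (map HTML, marker count)."""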
m = folium.Map(
location=[20, 0],
zoom_start=2,
control_scale=True
)
folium.TileLayer(
tiles=MAP_TILES["GreenMap"]["url"],
attr=MAP_TILES["GreenMap"]["attr"],
name="GreenMap",
overlay=False,
control=False
).add_to(m)
Fullscreen().add_to(m)
MeasureControl(position='topright', primary_length_unit='kilometers').add_to(m)
geocoder = SafeGeocoder()
coords = []
marker_cluster = MarkerCluster(name="Locations").add_to(m)
processed_count = 0
for idx, row in df.iterrows():
if pd.isna(row[location_col]):
continue
location = str(row[location_col]).strip()
additional_info = ""
for col in df.columns:
if col != location_col and not pd.isna(row[col]):
                additional_info += f"<br>{col}: {row[col]}"
try:
locations = [loc.strip() for loc in location.split(',') if loc.strip()]
if not locations:
locations = [location]
        except Exception:
locations = [location]
for loc in locations:
point = geocoder.get_coords(loc)
if point:
                # Minimal popup markup: the geocoded place name plus the row's other columns
                popup_content = f"""
                <div style="min-width: 150px">
                    <b>{loc}</b>{additional_info}
                </div>
                """
                folium.Marker(
                    location=point,
                    popup=folium.Popup(popup_content, max_width=300),
                    tooltip=loc
                ).add_to(marker_cluster)
                coords.append(point)
                processed_count += 1

    # Fit the view to all plotted markers, render the map and return it with the marker count
    if coords:
        m.fit_bounds(coords)
    return m._repr_html_(), processed_count
In this teaching unit we look at structuring unstructured historical texts and visualizing the extracted data on maps. The systematic structuring of the data is carried out with a language model trained for information extraction that builds on the question-answering method. This method makes it possible to extract information by asking a question such as "Where did the earthquake take place?", so that the location of an earthquake can be extracted even when the text also mentions other places.
The fundamental challenge in working with historical sources is that the relevant information is embedded in long running text and has to be extracted manually, which is laborious. This approach automates that process.
{"earthquake location": "", "dateline location": ""}
"earthquake location": ""
→ "Wo ist das Erdbeben passiert?""dateline location": ""
→ "Von wo wird berichtet?"{"earthquake location": "Japan, Yokohama", "dateline location": "Tokio"}
Internally, the extraction runs in several steps: the template is serialized as indented JSON and combined with the source text into a prompt of the form "<|input|> ... ### Template: ... ### Text: ... <|output|>"; the prompt is tokenized (truncated to at most 20,000 tokens), the model generates up to 1,000 new tokens with greedy decoding, and the part of the output after "<|output|>" is parsed back into JSON.
Once the place names have been extracted, the application visualizes them automatically on an interactive map: the places column of the uploaded Excel file is geocoded with Nominatim (cached and rate-limited to one request per second), the coordinates are written back into the spreadsheet, and each geocoded place becomes a marker in a clustered Folium map whose popup shows the remaining columns of its row.
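The geocoding building block can also be exercised on its own; the coordinates in the comments are approximate and depend on Nominatim's response:

geocoder = SafeGeocoder()
print(geocoder.get_coords("Yokohama"))  # approximately (35.44, 139.64) if Nominatim resolves the name
print(geocoder.get_coords("Yokohama"))  # repeated lookups are answered from the in-memory cache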
This combination of text extraction and geographic visualization opens up new possibilities for the spatial analysis of historical sources and makes it possible to recognize geographic patterns that would not be immediately visible in the text data alone.
The method allows historical data to be extracted from unstructured sources and visualized efficiently.
Use the NuExtract-1.5 language model to extract information automatically.
Upload an Excel spreadsheet to create an interactive map.
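The Gradio wiring itself is not part of the code shown above; a minimal sketch of how the two functions could be exposed (component names and layout are assumptions, not the original interface) might look like this:

with gr.Blocks() as demo:
    with gr.Tab("Extraktion"):
        template_in = gr.Textbox(label="Template (JSON)")
        text_in = gr.Textbox(label="Text", lines=10)
        status_out = gr.Textbox(label="Status")
        json_out = gr.Textbox(label="Ergebnis (JSON)")
        gr.Button("Extrahieren").click(extract_info, [template_in, text_in], [status_out, json_out])
    with gr.Tab("Karte"):
        file_in = gr.File(label="Excel-Datei")
        column_in = gr.Textbox(label="Spalte mit Orten")
        map_out = gr.HTML()
        stats_out = gr.Textbox(label="Statistik")
        file_out = gr.File(label="Excel mit Koordinaten")
        gr.Button("Karte erstellen").click(process_excel, [file_in, column_in], [map_out, stats_out, file_out])
demo.launch()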