import gradio as gr
import json
import os
import pandas as pd
import folium
from folium.plugins import MeasureControl, Fullscreen, MarkerCluster
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
import time
import random
from typing import List, Tuple, Optional
import io
import tempfile
import traceback
import warnings
import string
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
import torch

warnings.filterwarnings("ignore")
# Map tile providers (Esri World Imagery is a reliable, keyless source)
MAP_TILES = {
    "GreenMap": {
        "url": "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}",
        "attr": "Esri"
    }
}

# Model configuration
MODEL_NAME = "numind/NuExtract-1.5"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
TORCH_DTYPE = torch.bfloat16 if DEVICE == "cuda" else torch.float32
MAX_INPUT_LENGTH = 20000  # maximum prompt length in tokens; longer inputs are truncated
MAX_NEW_TOKENS = 1000
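# For reference, extract_info() below assembles the NuExtract template-filling prompt.
# The sketch here is only illustrative (template and text are placeholders):
#
#   <|input|>
#   ### Template:
#   { ...JSON template with empty string values... }
#   ### Text:
#   ...the document to extract from...
#
#   <|output|>
#
# The model completes the prompt after <|output|> with the filled-in JSON.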
# Global model variables
tokenizer = None
model = None

try:
    from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer
    from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM
    print("Qwen2 components successfully imported")
except ImportError:
    print("Could not import Qwen2 components directly")
class SafeGeocoder:
    def __init__(self):
        # Nominatim requires a distinctive user agent; a random suffix avoids collisions
        user_agent = f"location_mapper_v1_{random.randint(1000, 9999)}"
        self.geolocator = Nominatim(user_agent=user_agent, timeout=10)
        self.cache = {}
        self.last_request = 0

    def _respect_rate_limit(self):
        # Nominatim's usage policy allows roughly one request per second
        current_time = time.time()
        elapsed = current_time - self.last_request
        if elapsed < 1.0:
            time.sleep(1.0 - elapsed)
        self.last_request = current_time

    def get_coords(self, location: str):
        if not location or pd.isna(location):
            return None
        location = str(location).strip()
        if location in self.cache:
            return self.cache[location]
        try:
            self._respect_rate_limit()
            result = self.geolocator.geocode(location)
            if result:
                coords = (result.latitude, result.longitude)
                self.cache[location] = coords
                return coords
            self.cache[location] = None
            return None
        except Exception as e:
            print(f"Geocoding error for '{location}': {e}")
            self.cache[location] = None
            return None
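# Illustrative usage of SafeGeocoder (not executed by the app; the place name is an example).
# Repeated lookups for the same string are served from the in-memory cache, and calls to
# Nominatim are spaced at least one second apart:
#
#   geocoder = SafeGeocoder()
#   geocoder.get_coords("Yokohama")   # -> (lat, lon) tuple, or None if not found
#   geocoder.get_coords("Yokohama")   # cached, no second network request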
# Lazily loads the NuExtract tokenizer/model and fills the JSON template from the text
def extract_info(template, text):
    global tokenizer, model

    # Load tokenizer on first use
    if tokenizer is None:
        print("Tokenizer not loaded yet, loading now...")
        try:
            try:
                from modelscope import AutoTokenizer as MSAutoTokenizer
                tokenizer = MSAutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
                print("Loaded tokenizer using modelscope AutoTokenizer")
            except Exception:
                # Fall back to the standard transformers tokenizer
                tokenizer = AutoTokenizer.from_pretrained(
                    MODEL_NAME,
                    trust_remote_code=True,
                    revision="main"
                )
                print("Loaded tokenizer using standard AutoTokenizer")
        except Exception as e:
            trace = traceback.format_exc()
            print(f"Error loading tokenizer: {e}\n{trace}")
            return "❌ Fehler beim Laden des Tokenizers", f"{str(e)}"

    try:
        # Load model on first use
        if model is None:
            print("Model not loaded yet, loading now...")
            try:
                model = AutoModelForCausalLM.from_pretrained(
                    MODEL_NAME,
                    torch_dtype=TORCH_DTYPE,
                    trust_remote_code=True,
                    revision="main",
                    device_map="auto"  # let accelerate decide CPU/GPU placement
                ).eval()
                print("✅ Model loaded successfully")
            except Exception as e:
                trace = traceback.format_exc()
                print(f"Error loading model: {e}\n{trace}")
                return f"❌ Fehler beim Laden des Modells: {str(e)}", "{}"

        print("Using model for inference...")

        # Re-serialize the template as indented JSON (this also validates the user input)
        template_formatted = json.dumps(json.loads(template), indent=4)

        # NuExtract prompt format: template followed by the source text
        prompt = f"<|input|>\n### Template:\n{template_formatted}\n### Text:\n{text}\n\n<|output|>"

        inputs = tokenizer(
            [prompt],
            return_tensors="pt",
            truncation=True,
            padding=True,
            max_length=MAX_INPUT_LENGTH
        ).to(model.device)  # use the model's device

        # Greedy decoding; no gradients needed for inference
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=MAX_NEW_TOKENS,
                temperature=0.0,
                do_sample=False
            )

        # Decode the result
        result_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Keep only the part after the <|output|> marker
        if "<|output|>" in result_text:
            json_text = result_text.split("<|output|>")[1].strip()
        else:
            json_text = result_text

        # Return pretty-printed JSON if the output parses, otherwise the raw text
        try:
            extracted = json.loads(json_text)
            return "✅ Erfolgreich extrahiert", json.dumps(extracted, indent=2)
        except json.JSONDecodeError:
            return "❌ JSON Parsing Fehler", json_text

    except Exception as e:
        trace = traceback.format_exc()
        print(f"Error in extract_info: {e}\n{trace}")
        return f"❌ Fehler: {str(e)}", "{}"
def create_map(df, location_col):
    # Base world map with the satellite tile layer defined in MAP_TILES
    m = folium.Map(
        location=[20, 0],
        zoom_start=2,
        control_scale=True
    )
    folium.TileLayer(
        tiles=MAP_TILES["GreenMap"]["url"],
        attr=MAP_TILES["GreenMap"]["attr"],
        name="GreenMap",
        overlay=False,
        control=False
    ).add_to(m)
    Fullscreen().add_to(m)
    MeasureControl(position='topright', primary_length_unit='kilometers').add_to(m)

    geocoder = SafeGeocoder()
    coords = []
    marker_cluster = MarkerCluster(name="Locations").add_to(m)
    processed_count = 0

    for idx, row in df.iterrows():
        if pd.isna(row[location_col]):
            continue
        location = str(row[location_col]).strip()

        # Collect the remaining columns of the row for the marker popup
        additional_info = ""
        for col in df.columns:
            if col != location_col and not pd.isna(row[col]):
                additional_info += f"<br><b>{col}:</b> {row[col]}"

        # A cell may contain several comma-separated places
        try:
            locations = [loc.strip() for loc in location.split(',') if loc.strip()]
            if not locations:
                locations = [location]
        except Exception:
            locations = [location]

        for loc in locations:
            point = geocoder.get_coords(loc)
            if point:
                popup_content = f"""
                <div style="min-width: 200px; max-width: 300px">
                    <h4 style="font-family: 'Source Sans Pro', sans-serif; margin-bottom: 5px;">{loc}</h4>
                    <div style="font-family: 'Source Sans Pro', sans-serif; font-size: 14px;">
                        {additional_info}
                    </div>
                </div>
                """
                folium.Marker(
                    location=point,
                    popup=folium.Popup(popup_content, max_width=300),
                    tooltip=loc,
                    icon=folium.Icon(color="blue", icon="info-sign")
                ).add_to(marker_cluster)
                coords.append(point)
                processed_count += 1

    if coords:
        m.fit_bounds(coords)

    custom_css = """
    <style>
    @import url('https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap');
    .leaflet-container {
        font-family: 'Source Sans Pro', sans-serif;
    }
    .leaflet-popup-content {
        font-family: 'Source Sans Pro', sans-serif;
    }
    .leaflet-popup-content h4 {
        font-weight: 600;
        margin-bottom: 8px;
    }
    </style>
    """
    m.get_root().header.add_child(folium.Element(custom_css))
    return m._repr_html_(), processed_count
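# Illustrative usage of create_map() (not executed on import; column names are examples):
#
#   df = pd.DataFrame({"earthquake_locations": ["Yokohama", "Tokio"],
#                      "Quelle": ["Drahtbericht", "Telegramm"]})
#   map_html, n_markers = create_map(df, "earthquake_locations")
#   # map_html is a self-contained Leaflet snippet; n_markers counts the geocoded places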
def process_excel(file, places_column):
    if file is None:
        return None, "No file uploaded", None
    try:
        # Gradio may hand over a file object, raw bytes, or a path
        if hasattr(file, 'name'):
            df = pd.read_excel(file.name)
        elif isinstance(file, bytes):
            df = pd.read_excel(io.BytesIO(file))
        else:
            df = pd.read_excel(file)

        print(f"Spalten in der Excel-Tabelle: {list(df.columns)}")
        if places_column not in df.columns:
            return None, f"Spalte '{places_column}' wurde in der Excel-Datei nicht gefunden. Verfügbare Spalten: {', '.join(df.columns)}", None

        # Work on a copy so the uploaded data stays untouched
        result_df = df.copy()

        # Add coordinate columns if they don't exist
        if 'latitude' not in result_df.columns:
            result_df['latitude'] = None
        if 'longitude' not in result_df.columns:
            result_df['longitude'] = None

        geocoder = SafeGeocoder()
        coords = []
        marker_cluster = MarkerCluster(name="Locations")
        processed_count = 0

        # Create the map instance
        m = folium.Map(
            location=[20, 0],
            zoom_start=2,
            control_scale=True
        )
        folium.TileLayer(
            tiles=MAP_TILES["GreenMap"]["url"],
            attr=MAP_TILES["GreenMap"]["attr"],
            name="GreenMap",
            overlay=False,
            control=False
        ).add_to(m)
        Fullscreen().add_to(m)
        MeasureControl(position='topright', primary_length_unit='kilometers').add_to(m)
        marker_cluster.add_to(m)

        # Geocode each location, store the coordinates and add a marker
        for idx, row in result_df.iterrows():
            if pd.isna(row[places_column]):
                continue
            location = str(row[places_column]).strip()

            additional_info = ""
            for col in result_df.columns:
                if col != places_column and not pd.isna(row[col]):
                    additional_info += f"<br><b>{col}:</b> {row[col]}"

            try:
                locations = [loc.strip() for loc in location.split(',') if loc.strip()]
                if not locations:
                    locations = [location]
            except Exception:
                locations = [location]

            for loc in locations:
                point = geocoder.get_coords(loc)
                if point:
                    # Store coordinates in the dataframe
                    result_df.at[idx, 'latitude'] = point[0]
                    result_df.at[idx, 'longitude'] = point[1]

                    # Add marker to the map
                    popup_content = f"""
                    <div style="min-width: 200px; max-width: 300px">
                        <h4 style="font-family: 'Source Sans Pro', sans-serif; margin-bottom: 5px;">{loc}</h4>
                        <div style="font-family: 'Source Sans Pro', sans-serif; font-size: 14px;">
                            {additional_info}
                        </div>
                    </div>
                    """
                    folium.Marker(
                        location=point,
                        popup=folium.Popup(popup_content, max_width=300),
                        tooltip=loc,
                        icon=folium.Icon(color="blue", icon="info-sign")
                    ).add_to(marker_cluster)
                    coords.append(point)
                    processed_count += 1
                    break  # use the first successfully geocoded place per row

        # Fit map to coordinates if any were found
        if coords:
            m.fit_bounds(coords)

        # Add custom CSS to the map
        custom_css = """
        <style>
        @import url('https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap');
        .leaflet-container {
            font-family: 'Source Sans Pro', sans-serif;
        }
        .leaflet-popup-content {
            font-family: 'Source Sans Pro', sans-serif;
        }
        .leaflet-popup-content h4 {
            font-weight: 600;
            margin-bottom: 8px;
        }
        </style>
        """
        m.get_root().header.add_child(folium.Element(custom_css))

        # Save the updated dataframe to a temporary Excel file for download
        with tempfile.NamedTemporaryFile(suffix=".xlsx", delete=False) as tmp:
            processed_path = tmp.name
        result_df.to_excel(processed_path, index=False)

        total_locations = result_df[places_column].count()
        success_rate = (processed_count / total_locations * 100) if total_locations > 0 else 0
        stats = f"Gefunden: {processed_count} von {total_locations} Orten ({success_rate:.1f}%)"

        # Print the dataframe to debug
        print("DataFrame with coordinates:")
        print(result_df.head())

        return m._repr_html_(), stats, processed_path
    except Exception as e:
        trace = traceback.format_exc()
        print(f"Error processing file: {e}\n{trace}")
        return None, f"Fehler bei der Verarbeitung der Datei: {str(e)}", None
custom_css = """ | |
<style> | |
@import url('https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@300;400;600;700&display=swap'); | |
body, .gradio-container { | |
font-family: 'Source Sans Pro', sans-serif !important; | |
color: #333333; | |
} | |
h1 { | |
font-weight: 700 !important; | |
color: #2c6bb3 !important; | |
font-size: 2.5rem !important; | |
margin-bottom: 1rem !important; | |
} | |
h2 { | |
font-weight: 600 !important; | |
color: #4e8fd1 !important; | |
font-size: 1.5rem !important; | |
margin-top: 1rem !important; | |
margin-bottom: 0.75rem !important; | |
} | |
.gradio-button.primary { | |
background-color: #ff7518 !important; | |
} | |
.gradio-button.secondary { | |
background-color: #5a87ca !important; | |
color: white !important; | |
} | |
.info-box { | |
background-color: #e8f4fd; | |
border-left: 4px solid #2c6bb3; | |
padding: 15px; | |
margin: 15px 0; | |
border-radius: 4px; | |
} | |
.file-upload-box { | |
border: 2px dashed #e0e0e0; | |
border-radius: 8px; | |
padding: 20px; | |
text-align: center; | |
transition: all 0.3s ease; | |
} | |
/* Fix for map container spacing */ | |
#map-container { | |
height: 20vh !important; | |
margin-bottom: 0 !important; | |
padding-bottom: 0 !important; | |
} | |
/* Stats box styling */ | |
.stats-box { | |
margin-top: 10px !important; | |
margin-bottom: 0 !important; | |
padding: 10px; | |
background: #f8f9fa; | |
border-radius: 4px; | |
} | |
/* Remove extra space around components */ | |
.gr-box { | |
margin-bottom: 0 !important; | |
} | |
/* Model status styling */ | |
.model-status { | |
padding: 10px; | |
border-radius: 4px; | |
margin-bottom: 15px; | |
background-color: #f8f9fa; | |
font-size: 14px; | |
} | |
.separator { | |
margin: 20px 0; | |
border-top: 1px solid #eaeaea; | |
} | |
</style> | |
""" | |
with gr.Blocks(css=custom_css, title="Daten Strukturieren und Analysieren") as demo:
    gr.HTML("""
<div style="text-align: center; margin-bottom: 1rem"> | |
<h1>Strukturierung und Visualisierung von historischen Daten</h1> | |
</div> | |
<div style="font-family: 'Source Sans Pro', sans-serif; max-width: 1000px; margin: 0 auto; color: #333; line-height: 1.7; font-size: 1.15rem;"> | |
<p style="font-size: 1.3rem; margin-bottom: 1.8rem; color: #2c3e50; font-weight: 400; padding: 0 1rem;"> | |
In dieser Unterrichtseinheit befassen wir uns mit der Strukturierung unstrukturierter historischer Texte und der Visualisierung von extrahierten Daten auf Karten. Die systematische Strukturierung von Daten wird mit einem für Informationsextrahierung trainiertem Sprachmodell durchgeführt, das auf der Question-Answering-Methode basiert. Diese Methode erlaubt es, Informationen mit Hilfe einer Frage zu extrahieren, wie etwa „Wo fand das Erdbeben statt"? Dies ermöglicht die Extrahierung des Ortes, an dem ein Erdbeben stattfand, auch wenn im Text selbst noch andere Orte genannt werden. | |
</p> | |
<div style="font-family: 'Source Sans Pro', sans-serif; line-height: 1.7; font-size: 1.15rem; background: #f8f9fa; padding: 20px; border-radius: 8px; margin: 20px 0;"> | |
Die Katastrophe in <span style="background-color: #a8e6cf; font-weight: bold; padding: 2px 5px; border-radius: 3px;" title="Earthquake Location">Japan</span> — 3 Millionen Tote. Mtb. <span style="background-color: #ffdfba; font-weight: bold; padding: 2px 5px; border-radius: 3px;" title="Non-Earthquake Location">London</span>, 4. Sept. (Drahtbericht.) Zu dem Unglück in <span style="background-color: #a8e6cf; font-weight: bold; padding: 2px 5px; border-radius: 3px;" title="Earthquake Location">Japan</span> liegen noch folgende Nachrichten vor: Wie die japanische Gesandtschaft in <span style="background-color: #ffdfba; font-weight: bold; padding: 2px 5px; border-radius: 3px;" title="Non-Earthquake Location">Peking</span> meldet, sind Unterhandlungen mit <span style="background-color: #ffdfba; font-weight: bold; padding: 2px 5px; border-radius: 3px;" title="Non-Earthquake Location">China</span> über die sofortige Lieferung von Lebensmitteln ausgenommen worden. Von <span style="background-color: #ffdfba; font-weight: bold; padding: 2px 5px; border-radius: 3px;" title="Non-Earthquake Location">Peking</span> seien amerikanische, englische und italienische Schiffe mit Lebensmitteln nach <span style="background-color: #a8e6cf; font-weight: bold; padding: 2px 5px; border-radius: 3px;" title="Earthquake Location">Japan</span> abgegangen. | |
</div> | |
<div style="display: flex; margin-top: 20px;"> | |
<div style="display: flex; align-items: center; margin-right: 20px;"> | |
<div style="width: 20px; height: 20px; background-color: #a8e6cf; margin-right: 10px; border-radius: 3px;"></div> | |
<span>Ort des Erdbebens: Japan</span> | |
</div> | |
<div style="display: flex; align-items: center;"> | |
<div style="width: 20px; height: 20px; background-color: #ffdfba; margin-right: 10px; border-radius: 3px;"></div> | |
<span>Andere Orte: London, Peking, China</span> | |
</div> | |
</div> | |
<div style="background: #f8f9fa; padding: 2rem; border-radius: 10px; margin-bottom: 2.5rem; border-left: 5px solid #3498db;"> | |
<h3 style="margin-top: 0; color: #2c3e50; border-bottom: 2px solid #eee; padding-bottom: 0.8rem; font-size: 1.5rem;"> | |
Methodik: Vom unstrukturierten Text zur strukturierten Information | |
</h3> | |
<p style="margin-bottom: 1.8rem; font-size: 1.2rem;"> | |
Die grundlegende Herausforderung bei der Arbeit mit historischen Quellen ist, dass relevante Informationen in langen | |
Fließtexten eingebettet sind und manuell mühsam extrahiert werden müssen. Dieser Ansatz automatisiert diesen Prozess. | |
</p> | |
<h4 style="color: #2980b9; margin-top: 2rem; font-size: 1.35rem;">Wie funktioniert die Informationsextraktion?</h4> | |
<ol style="padding-left: 2rem; font-size: 1.15rem;"> | |
<li style="margin-bottom: 1.5rem;"> | |
<strong style="color: #2c3e50; font-size: 1.2rem;">Template-Definition</strong>: Sie definieren ein JSON-Template mit den Informationstypen, die Sie extrahieren möchten: | |
<pre style="background: #f5f5f5; padding: 1.2rem; border-radius: 6px; overflow-x: auto; margin: 1rem 0 1.5rem; font-size: 1.1rem;"><code>{"earthquake location": "", "dateline location": ""}</code></pre> | |
</li> | |
<li style="margin-bottom: 1.5rem;"> | |
<strong style="color: #2c3e50; font-size: 1.2rem;">Question-Answering-Methode</strong>: Das Sprachmodell interpretiert jedes leere Feld als implizite Frage: | |
<ul style="margin-top: 1rem; padding-left: 2rem; font-size: 1.15rem;"> | |
<li style="margin-bottom: 0.8rem;"><code style="background: #f0f0f0; padding: 0.3rem 0.5rem; border-radius: 4px; font-size: 1.1rem;">"earthquake location": ""</code> → "Wo ist das Erdbeben passiert?"</li> | |
<li style="margin-bottom: 0.8rem;"><code style="background: #f0f0f0; padding: 0.3rem 0.5rem; border-radius: 4px; font-size: 1.1rem;">"dateline location": ""</code> → "Von wo wird berichtet?"</li> | |
</ul> | |
</li> | |
<li style="margin-bottom: 1.5rem;"> | |
<strong style="color: #2c3e50; font-size: 1.2rem;">Sprachmodell-Verarbeitung</strong>: Das NuExtract-1.5 Modell (ein Sequence-to-Sequence Transformer) analysiert den Text vollständig und identifiziert die relevanten Informationen für jedes Template-Feld. | |
</li> | |
<li style="margin-bottom: 1rem;"> | |
<strong style="color: #2c3e50; font-size: 1.2rem;">Strukturierte Ausgabe</strong>: Das Modell füllt das Template mit den extrahierten Informationen: | |
<pre style="background: #f5f5f5; padding: 1.2rem; border-radius: 6px; overflow-x: auto; margin: 1rem 0 1.5rem; font-size: 1.1rem;"><code>{"earthquake location": "Japan, Yokohama", "dateline location": "Tokio"}</code></pre> | |
</li> | |
</ol> | |
</div> | |
<div style="background: #f8f9fa; padding: 2rem; border-radius: 10px; margin-bottom: 2.5rem; border-left: 5px solid #9b59b6;"> | |
<h4 style="color: #2980b9; margin-top: 0; font-size: 1.35rem;">Technische Funktionsweise des Sprachmodells</h4> | |
<p style="font-size: 1.2rem;">Intern erfolgt die Verarbeitung in mehreren Schritten:</p> | |
<ol style="padding-left: 2rem; font-size: 1.15rem;"> | |
<li style="margin-bottom: 1rem;"><strong style="color: #2c3e50; font-size: 1.2rem;">Tokenisierung</strong>: Der Text wird in bearbeitbare Einheiten zerlegt.</li> | |
<li style="margin-bottom: 1rem;"><strong style="color: #2c3e50; font-size: 1.2rem;">Kontextuelle Analyse</strong>: Der Transformer-Mechanismus ermöglicht die Analyse von Beziehungen zwischen allen Textteilen gleichzeitig.</li> | |
<li style="margin-bottom: 1rem;"><strong style="color: #2c3e50; font-size: 1.2rem;">Selektive Aufmerksamkeit</strong>: Das Modell fokussiert sich auf Textpassagen, die Antworten auf die impliziten Fragen enthalten könnten.</li> | |
<li style="margin-bottom: 1rem;"><strong style="color: #2c3e50; font-size: 1.2rem;">Generierung</strong>: Die erkannten Informationen werden in das vorgegebene Template eingefügt.</li> | |
</ol> | |
</div> | |
<div style="background: #f8f9fa; padding: 2rem; border-radius: 10px; margin-bottom: 2.5rem; border-left: 5px solid #27ae60;"> | |
<h3 style="margin-top: 0; color: #2c3e50; border-bottom: 2px solid #eee; padding-bottom: 0.8rem; font-size: 1.5rem;"> | |
Die Kartierungsfunktion | |
</h3> | |
<p style="margin-bottom: 1.8rem; font-size: 1.2rem;"> | |
Nach der Extraktion der Ortsangaben ermöglicht unsere Anwendung die automatische Visualisierung dieser Daten auf einer interaktiven Karte: | |
</p> | |
<ol style="padding-left: 2rem; font-size: 1.15rem;"> | |
<li style="margin-bottom: 1.2rem;"> | |
<strong style="color: #2c3e50; font-size: 1.2rem;">Geokodierung</strong>: Die extrahierten Ortsnamen werden mittels eines geografischen Dienstes in geografische Koordinaten (Längen- und Breitengrade) umgewandelt. | |
</li> | |
<li style="margin-bottom: 1.2rem;"> | |
<strong style="color: #2c3e50; font-size: 1.2rem;">Kartenerstellung</strong>: Die Koordinaten werden auf einer interaktiven Karte platziert, wobei jeder Ort durch einen Marker dargestellt wird. | |
</li> | |
<li style="margin-bottom: 1.2rem;"> | |
<strong style="color: #2c3e50; font-size: 1.2rem;">Kontextinformationen</strong>: Beim Klick auf einen Marker werden zusätzliche Informationen aus dem Originaltext angezeigt. | |
</li> | |
<li style="margin-bottom: 1rem;"> | |
<strong style="color: #2c3e50; font-size: 1.2rem;">Räumliche Analyse</strong>: Die Karte ermöglicht die visuelle Analyse der räumlichen Verteilung historischer Ereignisse. | |
</li> | |
</ol> | |
<p style="font-size: 1.2rem; margin-top: 1.5rem;"> | |
Dieser kombinierte Ansatz aus Textextraktion und geografischer Visualisierung eröffnet neue Möglichkeiten für die räumliche Analyse historischer Quellen und erlaubt es, geografische Muster zu erkennen, die in den reinen Textdaten nicht unmittelbar sichtbar wären. | |
</p> | |
</div> | |
<div style="margin-top: 2.5rem; padding: 1.5rem; background: #e8f4fd; border-radius: 10px; text-align: center; font-size: 1.1rem;"> | |
<p style="margin: 0;">Diese Methode ermöglicht die effiziente Extraktion und Visualisierung historischer Daten aus unstrukturierten Quellen.</p> | |
</div> | |
</div> | |
""") | |
    with gr.Tabs() as tabs:
        with gr.TabItem("🔍 Text Extrahierung"):
            gr.HTML("""
            <div class="info-box">
                <h3 style="margin-top: 0;">Extrahieren Sie strukturierte Daten aus unstrukturiertem Text</h3>
                <p>Verwenden Sie das Sprachmodell NuExtract-1.5, um automatisch Informationen zu extrahieren.</p>
            </div>
            """)
            with gr.Row():
                with gr.Column():
                    template = gr.Textbox(
                        label="JSON Template",
                        value='{"earthquake location": "", "dateline location": "", "source of information": "", "communication form": ""}',
                        lines=5
                    )
                    text = gr.Textbox(
                        label="Hier unstrukturierten Text einfügen",
                        value="Die Zahl der Opfer in Japan. Paris, 12. Sept. Der japanische Konsul in Marseille veröffentlicht nachstehendes offizielles Telegramm, das er heute aus Japan erhalten hat: „Die Zahl der Toten beträgt in Tokio laut einer von der Polizei vorgenommenen Zählung mehr als 60000. Die Zahl der Verwundeten beläuft sich auf ungefähr 500000. In Jokohama beträgt die Zahl der Opfer 110 000, was ungefähr ein Viertel der gesamten Bevölkerung dieser Stadt ausmacht. In den Bezirken von Chiba und Kanagama ist die Zahl der Opfer gleichfalls beträchtlich, doch wurde die Zählung noch nicht zu Ende geführt",
                        lines=8
                    )
                    extract_btn = gr.Button("Extrahieren Sie Informationen", variant="primary")
                with gr.Column():
                    status = gr.Textbox(label="Status")
                    output = gr.Textbox(label="Output", lines=10)
                    excel_download_file = gr.File(
                        label="Excel-Vorlage herunterladen",
                        value="test_dateline.xlsx",  # Replace with the actual path to your Excel file
                        visible=True,
                        interactive=False
                    )
            extract_btn.click(
                fn=extract_info,
                inputs=[template, text],
                outputs=[status, output]
            )
with gr.TabItem("📍 Visualisierung von strukturierten Daten"): | |
gr.HTML(""" | |
<div class="info-box"> | |
<h3 style="margin-top: 0;">Visualisieren Sie Daten auf Karten</h3> | |
<p>Laden Sie eine Excel-Tabelle hoch und erstelle eine interaktive Karte.</p> | |
</div> | |
""") | |
with gr.Row(): | |
with gr.Column(): | |
excel_file = gr.File( | |
label="Upload Excel File", | |
file_types=[".xlsx", ".xls"], | |
elem_classes="file-upload-box" | |
) | |
places_column = gr.Textbox( | |
label="Name der Tabellenspalte", | |
value="earthquake_locations", | |
placeholder="Füge den Namen der Spalte mit den Orten ein" | |
) | |
process_btn = gr.Button("Erstellen Sie die Karte", variant="primary") | |
stats_output = gr.Textbox( | |
label="Status", | |
lines=2, | |
elem_classes="stats-box" | |
) | |
processed_file = gr.File( | |
label="Bearbeitete Daten herunterladen", | |
visible=True, | |
interactive=False | |
) | |
with gr.Column(): | |
map_output = gr.HTML( | |
label="Interaktive Karte", | |
value=""" | |
<div style="text-align:center; height:20vh; width:100%; display:flex; align-items:center; justify-content:center; | |
background-color:#f5f5f5; border:1px solid #e0e0e0; border-radius:8px;"> | |
<div> | |
<img src="https://cdn-icons-png.flaticon.com/512/854/854878.png" width="100"> | |
<p style="margin-top:20px; color:#666;">Your map will appear here after processing</p> | |
</div> | |
</div> | |
""", | |
elem_id="map-container" | |
) | |
            def process_and_map(file, column):
                if file is None:
                    return None, "Hier bitte die Excel-Tabelle hochladen", None
                try:
                    map_html, stats, processed_path = process_excel(file, column)
                    if map_html and processed_path:
                        # Wrap the map so it fits the fixed-height container defined in the CSS
                        responsive_html = f"""
                        <div style="width:100%; height:20vh; margin:0; padding:0; border:1px solid #e0e0e0; border-radius:8px; overflow:hidden;">
                            {map_html}
                        </div>
                        """
                        return responsive_html, stats, processed_path
                    else:
                        return None, stats, None
                except Exception as e:
                    trace = traceback.format_exc()
                    print(f"Error in process_and_map: {e}\n{trace}")
                    return None, f"Error: {str(e)}", None

            process_btn.click(
                fn=process_and_map,
                inputs=[excel_file, places_column],
                outputs=[map_output, stats_output, processed_file]
            )
gr.HTML(""" | |
<div style="text-align: center; margin-top: 2rem; padding-top: 1rem; border-top: 1px solid #eee; font-size: 0.9rem; color: #666;"> | |
<p>Made with <span style="color: #e25555;">❤</span> for historical research</p> | |
</div> | |
""") | |
if __name__ == "__main__": | |
demo.launch(share=True) |