---
pretty_name: Wikidata Label Maps 2025 All Languages
dataset_name: wikidata-label-maps-2025-all-languages
license: cc0-1.0
language:
- multilingual
- en
tags:
- wikidata
- knowledge-graph
- metadata
- labels
- multilingual
- i18n
- properties
size_categories:
- 100M<n<1B
---

# Wikidata Label Maps 2025 All Languages

## Usage Examples

### Polars

```python
import polars as pl

def get_entity_label(entity_id: str, preferred_langs: list[str]) -> str:
    """Get entity label with language preference fallback."""
    df = pl.scan_parquet("qid_labels_desc.parquet")
    entity_labels = (
        df.filter(pl.col("id") == entity_id)
        .filter(pl.col("label").is_not_null())
        .collect()
    )

    # Try preferred languages in order
    for lang in preferred_langs:
        label = entity_labels.filter(pl.col("len") == lang)
        if label.height > 0:
            return label.select("label").item()

    # Fallback to any available label (take the first row; .item() would fail on multiple rows)
    if entity_labels.height > 0:
        return entity_labels["label"][0]

    return entity_id  # Return ID if no label found

# Example usage
print(get_entity_label("Q31", ["es", "en"]))  # Try Spanish first, then English
```

### pandas

```python
import pandas as pd

# Load with specific columns for memory efficiency
df = pd.read_parquet("qid_labels_desc.parquet", columns=["id", "len", "label"])

# Basic statistics
print(f"Total records: {len(df):,}")
print(f"Unique entities: {df['id'].nunique():,}")
print(f"Unique languages: {df['len'].nunique():,}")

# Language distribution
lang_counts = df['len'].value_counts()
print("Top 10 languages:")
print(lang_counts.head(10))

# Get multilingual labels for specific entities
entities_of_interest = ["Q31", "Q142", "Q183"]  # Belgium, France, Germany
multilingual = df[df['id'].isin(entities_of_interest)]
pivot_table = multilingual.pivot_table(
    index='id', columns='len', values='label', aggfunc='first'
)
print(pivot_table[['en', 'fr', 'de', 'es']].head())
```

### DuckDB SQL

```sql
-- In the duckdb shell, or via the Python API (duckdb.execute)
INSTALL httpfs;  -- only needed when reading the file over HTTP(S)/S3; local files work without it
LOAD httpfs;
PRAGMA threads=16;

-- Create view
CREATE VIEW multilingual_labels AS
SELECT * FROM parquet_scan('qid_labels_desc.parquet');

-- Language statistics
SELECT
    len as language,
    COUNT(*) as label_count,
    ROUND(COUNT(*) * 100.0 / SUM(COUNT(*)) OVER (), 2) as percentage
FROM multilingual_labels
GROUP BY len
ORDER BY label_count DESC
LIMIT 20;

-- Find entities with the widest language coverage
SELECT
    id,
    COUNT(DISTINCT len) as num_languages,
    STRING_AGG(DISTINCT len, ', ' ORDER BY len) as languages
FROM multilingual_labels
WHERE label IS NOT NULL
GROUP BY id
ORDER BY num_languages DESC
LIMIT 10;

-- Pivot table for specific entities across major languages
SELECT
    id,
    MAX(CASE WHEN len = 'en' THEN label END) as english,
    MAX(CASE WHEN len = 'fr' THEN label END) as french,
    MAX(CASE WHEN len = 'de' THEN label END) as german,
    MAX(CASE WHEN len = 'es' THEN label END) as spanish,
    MAX(CASE WHEN len = 'zh' THEN label END) as chinese
FROM multilingual_labels
WHERE id IN ('Q31', 'Q142', 'Q183', 'Q148')
GROUP BY id;
```
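The comment at the top of the SQL block mentions running these queries from Python; here is a minimal sketch of that, assuming the local file name used above and the `duckdb` Python package:

```python
import duckdb

# Open an in-memory DuckDB database and query the Parquet file directly
con = duckdb.connect()

top_languages = con.execute("""
    SELECT len AS language, COUNT(*) AS label_count
    FROM parquet_scan('qid_labels_desc.parquet')
    GROUP BY len
    ORDER BY label_count DESC
    LIMIT 10
""").fetchdf()  # returns a pandas DataFrame

print(top_languages)
```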
### PySpark

```python
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, desc

spark = SparkSession.builder.getOrCreate()
df = spark.read.parquet("qid_labels_desc.parquet")

# Basic statistics
print(f"Total records: {df.count():,}")
df.select("len").distinct().count()  # Number of languages

# Language distribution
lang_dist = (
    df.groupBy("len")
    .agg(count("*").alias("count"))
    .orderBy(desc("count"))
)
lang_dist.show(20)

# Entity multilingual coverage
entity_coverage = (
    df.filter(col("label").isNotNull())
    .groupBy("id")
    .agg(count("len").alias("num_languages"))
    .orderBy(desc("num_languages"))
)
entity_coverage.show(10)
```

## Fast Lookup Helpers

### Multilingual dictionary maps

```python
import polars as pl
from collections import defaultdict

# Load and create language-specific dictionaries
df = pl.read_parquet("qid_labels_desc.parquet", columns=["id", "len", "label"])

# Create nested dictionary: {language: {entity_id: label}}
MULTILANG_LABELS = defaultdict(dict)
for row in df.iter_rows():
    entity_id, lang, label = row
    if label:  # Skip null labels
        MULTILANG_LABELS[lang][entity_id] = label

# Usage examples
print(MULTILANG_LABELS["en"].get("Q31", "Unknown"))  # English
print(MULTILANG_LABELS["fr"].get("Q31", "Unknown"))  # French
print(MULTILANG_LABELS["zh"].get("Q31", "Unknown"))  # Chinese

# Get all available languages for an entity
entity_id = "Q31"
available_langs = [lang for lang in MULTILANG_LABELS if entity_id in MULTILANG_LABELS[lang]]
print(f"Q31 available in: {len(available_langs)} languages")
```

### Multilingual resolver class

```python
import polars as pl
from typing import Dict, List, Optional


class MultilingualWDResolver:
    def __init__(self, parquet_file: str):
        # Drop rows without a label so lookups never return None
        self.df = pl.read_parquet(parquet_file).filter(pl.col("label").is_not_null())
        self._build_indices()

    def _build_indices(self):
        """Build language-specific lookup indices"""
        self.lang_maps = {}
        for lang in self.df.select("len").unique().to_series():
            lang_df = self.df.filter(pl.col("len") == lang)
            self.lang_maps[lang] = dict(zip(
                lang_df.select("id").to_series(),
                lang_df.select("label").to_series()
            ))

    def get_label(self, entity_id: str, lang: str = "en") -> Optional[str]:
        """Get label for entity in a specific language"""
        return self.lang_maps.get(lang, {}).get(entity_id)

    def get_multilingual_labels(self, entity_id: str, langs: Optional[List[str]] = None) -> Dict[str, str]:
        """Get labels in multiple languages"""
        if langs is None:
            langs = ["en", "fr", "de", "es", "zh"]
        result = {}
        for lang in langs:
            label = self.get_label(entity_id, lang)
            if label:
                result[lang] = label
        return result

    def get_best_label(self, entity_id: str, preferred_langs: List[str] = ["en", "fr", "de"]) -> str:
        """Get the best available label with fallback"""
        for lang in preferred_langs:
            label = self.get_label(entity_id, lang)
            if label:
                return label
        # Try any available language
        for lang_map in self.lang_maps.values():
            if entity_id in lang_map:
                return lang_map[entity_id]
        return entity_id  # Fallback to ID

# Usage
resolver = MultilingualWDResolver("qid_labels_desc.parquet")
print(resolver.get_label("Q31", "en"))                    # English
print(resolver.get_multilingual_labels("Q31"))            # Multiple languages
print(resolver.get_best_label("Q31", ["ja", "ko", "en"]))  # With preference
```

## Language Coverage Analysis

### Find entities with best multilingual coverage

```python
import polars as pl

def analyze_multilingual_coverage(parquet_file: str, top_n: int = 20):
    df = pl.read_parquet(parquet_file)

    # Entities with the widest language coverage
    coverage = (
        df.filter(pl.col("label").is_not_null())
        .group_by("id")
        .agg([
            pl.col("len").count().alias("num_languages"),
            pl.col("label").first().alias("sample_label")
        ])
        .sort("num_languages", descending=True)
        .limit(top_n)
    )

    print(f"Top {top_n} entities by language coverage:")
    for row in coverage.iter_rows(named=True):
        print(f"  {row['id']}: {row['num_languages']} languages | {row['sample_label']}")

    return coverage

analyze_multilingual_coverage("qid_labels_desc.parquet")
```

### Language family analysis

```python
import polars as pl

# Define language groups (simplified; the last entry groups by writing system rather than family)
LANGUAGE_FAMILIES = {
    "Germanic": ["en", "de", "nl", "sv", "no", "da"],
    "Romance": ["fr", "es", "it", "pt", "ro", "ca"],
    "Slavic": ["ru", "pl", "cs", "sk", "uk", "bg"],
    "East Asian": ["zh", "ja", "ko"],
    "Perso-Arabic script": ["ar", "fa", "ur"],
}

def analyze_by_language_family(parquet_file: str):
    df = pl.read_parquet(parquet_file)

    for family, languages in LANGUAGE_FAMILIES.items():
        family_count = (
            df.filter(pl.col("len").is_in(languages))
            .filter(pl.col("label").is_not_null())
            .height
        )
        print(f"{family}: {family_count:,} labels")

analyze_by_language_family("qid_labels_desc.parquet")
```

## Use Cases

### Cross-lingual Knowledge Graph Analysis
- Build multilingual knowledge graphs with proper labels
- Analyze entity coverage across different languages
- Create language-specific views of Wikidata

### Machine Translation & NLP
- Training data for multilingual named entity recognition
- Cross-lingual entity linking datasets
- Evaluation of translation quality for entity names

### Internationalization (i18n)
- Localize applications with proper entity names
- Build multilingual search interfaces
- Create region-specific content

### Research Applications
- Study linguistic diversity in knowledge representation
- Analyze cultural bias in knowledge coverage
- Cross-cultural studies of entity naming patterns

## Performance Tips

- **Memory Management**: Load only the required columns (`columns=["id", "len", "label"]`)
- **Language Filtering**: Filter by language early to reduce data size
- **Lazy Loading**: Use `.scan_parquet()` with Polars for large-scale processing
- **Indexing**: Build language-specific dictionaries for frequent lookups
- **Batch Processing**: Process entities in batches for memory efficiency (see the sketch after this list)
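A minimal sketch combining the column-pruning, early language filtering, lazy-loading, and batch-lookup tips above, assuming the same local file name; the language and entity IDs are illustrative:

```python
import polars as pl

# Lazy scan: nothing is read until .collect(), so column and filter pushdown keep memory low
english_labels = (
    pl.scan_parquet("qid_labels_desc.parquet")
    .select(["id", "len", "label"])       # load only the required columns
    .filter(pl.col("len") == "en")        # filter by language early
    .filter(pl.col("label").is_not_null())
    .collect()
)

# Batch processing: resolve a batch of entities in one pass instead of one scan per entity
entity_ids = ["Q31", "Q142", "Q183", "Q148"]  # illustrative batch
batch = english_labels.filter(pl.col("id").is_in(entity_ids))
print(batch)
```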
## Data Quality Notes

- Some labels may be missing (NULL values) for certain language-entity combinations
- Description coverage is 46.8% across all language-entity pairs
- The dataset includes both major world languages and regional/minority languages
- Language codes follow ISO standards but may include some Wikidata-specific codes
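Given these notes, it can be worth measuring coverage yourself before relying on any single language. A minimal sketch, assuming the file also contains a `description` column alongside `id`, `len`, and `label` (adjust the column name if the actual schema differs):

```python
import polars as pl

# Compute the fraction of rows with a non-null label and description
coverage = (
    pl.scan_parquet("qid_labels_desc.parquet")
    .select([
        pl.len().alias("rows"),
        pl.col("label").is_not_null().mean().alias("label_coverage"),
        pl.col("description").is_not_null().mean().alias("description_coverage"),  # assumed column name
    ])
    .collect()
)
print(coverage)
```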