import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import logging

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SYSTEM_INSTRUCTION = """Convert natural language queries into boolean search queries by following these rules:

1. FIRST: Remove all meta-terms from this list (they should NEVER appear in output):
   - articles, papers, research, studies
   - examining, investigating, analyzing
   - findings, documents, literature
   - publications, journals, reviews
   Example: "Research examining X" → just "X"

2. SECOND: Remove generic implied terms that don't add search value:
   - Remove words like "practices," "techniques," "methods," "approaches," "strategies"
   - Remove words like "impacts," "effects," "influences," "role," "applications"
   - For example: "sustainable agriculture practices" → "sustainable agriculture"
   - For example: "teaching methodologies" → "teaching"
   - For example: "leadership styles" → "leadership"

3. THEN: Format the remaining terms:
   CRITICAL QUOTING RULES:
   - Multi-word phrases MUST ALWAYS be in quotes - NO EXCEPTIONS
   - Examples of correct quoting:
     - Wrong: machine learning AND deep learning
     - Right: "machine learning" AND "deep learning"
     - Wrong: natural language processing
     - Right: "natural language processing"
   - Single words must NEVER have quotes (e.g., science, research, learning)
   - Use AND to connect required concepts
   - Use OR with parentheses for alternatives
   - Provide ONLY the correct Boolean query
   """

def load_model():
    """Load the model and tokenizer."""
    logger.info("Loading model on CPU...")
    model = AutoModelForCausalLM.from_pretrained(
        "Zwounds/boolean-search-model",
        torch_dtype=torch.float32,
        device_map="cpu",
        trust_remote_code=True
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "Zwounds/boolean-search-model",
        trust_remote_code=True
    )
    
    logger.info("Model loaded successfully")
    return model, tokenizer

def extract_response(text: str) -> str:
    """Extract the actual boolean query from the model's response."""
    # Split by newlines and get last non-empty line
    lines = [line.strip() for line in text.split('\n') if line.strip()]
    return lines[-1] if lines else ""
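
# Illustrative example: given a decoded response such as
# '...prompt text...\n"deep learning" AND robotics',
# extract_response returns '"deep learning" AND robotics' (the last non-empty line).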

def get_boolean_query(query: str, model_tuple=None) -> str:
    """Generate boolean query from natural language."""
    if model_tuple is None:
        return ""
    model, tokenizer = model_tuple
    
    # Format the conversation
    conversation = [
        {"role": "system", "content": SYSTEM_INSTRUCTION},
        {"role": "user", "content": query}
    ]
    
    # Apply chat template and tokenize; add_generation_prompt appends the
    # assistant turn marker so the model produces a fresh response
    encoded = tokenizer.apply_chat_template(
        conversation,
        add_generation_prompt=True,
        return_tensors="pt"
    )
    
    # Create attention mask (all ones is valid here since the batch holds a
    # single, unpadded sequence)
    attention_mask = torch.ones_like(encoded)
    inputs = {
        "input_ids": encoded,
        "attention_mask": attention_mask
    }
    
    # Generate response (do_sample=True so the low temperature takes effect;
    # without it, transformers uses greedy decoding and ignores temperature)
    outputs = model.generate(
        **inputs,
        max_new_tokens=64,
        do_sample=True,
        temperature=0.1,
        pad_token_id=tokenizer.eos_token_id
    )
    
    # Decode only the newly generated tokens (everything after the prompt)
    # and extract the final boolean query line
    response = tokenizer.decode(outputs[0][encoded.shape[1]:], skip_special_tokens=True)
    return extract_response(response)
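
# Hedged usage sketch: once model_tuple is loaded below, the function can be
# called directly, e.g. get_boolean_query("Studies on sleep and memory", model_tuple);
# by the rules in SYSTEM_INSTRUCTION the expected output for that query would be
# something like: sleep AND memory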

# Example queries demonstrating various cases
examples = [
    # Testing removal of meta-terms
    ["Find research papers examining the long-term effects of meditation on brain structure"],
    
    # Testing removal of generic implied terms (practices, techniques, methods)
    ["Articles about deep learning techniques for natural language processing tasks"],
    
    # Testing removal of impact/effect terms
    ["Studies on the impact of early childhood nutrition on cognitive development"],
    
    # Testing handling of technology applications
    ["Information on virtual reality applications in architectural design and urban planning"],
    
    # Testing proper OR relationship with parentheses
    ["Research on electric vehicles adoption in urban environments or rural communities"],
    
    # Testing proper quoting of multi-word concepts only
    ["Articles on biodiversity loss in coral reefs and rainforest ecosystems"],
    
    # Testing removal of strategy/approach terms
    ["Studies about different teaching approaches for children with learning disabilities"],
    
    # Testing complex OR relationships
    ["Research examining social media influence on political polarization or public discourse"],
    
    # Testing implied terms in specific industries
    ["Articles about implementation strategies for blockchain in supply chain management or financial services"],
    
    # Testing qualifiers that don't add search value
    ["Research on effective leadership styles in multicultural organizations"],
    
    # Testing removal of multiple implied terms
    ["Studies on the effects of microplastic pollution techniques on marine ecosystem health"],
    
    # Testing domain-specific implied terms
    ["Articles about successful cybersecurity protection methods for critical infrastructure"],
    
    # Testing generalized vs specific concepts
    ["Research papers on quantum computing algorithms for cryptography or optimization problems"],
    
    # Testing implied terms in outcome descriptions
    ["Studies examining the relationship between sleep quality and academic performance outcomes"],
    
    # Testing complex nesting of concepts
    ["Articles about renewable energy integration challenges in developing countries or island nations"]
]

# Load model and tokenizer globally
logger.info("Initializing model...")
model_tuple = load_model()

# Create Gradio interface
title = "Natural Language to Boolean Search (CPU Version)"
description = """Convert natural language queries into boolean search expressions. The model will:

1. Remove search-related terms (like 'articles', 'research', etc.)
2. Handle generic implied terms (like 'practices', 'methods')
3. Format concepts using proper boolean syntax:
   - Multi-word phrases in quotes
   - Single words without quotes
   - AND to connect required concepts
   - OR with parentheses for alternatives
"""

demo = gr.Interface(
    fn=lambda x: get_boolean_query(x, model_tuple),
    inputs=[
        gr.Textbox(
            label="Enter your natural language query",
            placeholder="e.g., I'm looking for information about climate change and renewable energy"
        )
    ],
    outputs=gr.Textbox(label="Boolean Search Query"),
    title=title,
    description=description,
    examples=examples,
    theme=gr.themes.Soft()
)

if __name__ == "__main__":
    demo.launch(share=True)