# Multi-Modal Document Intelligence System
# Author: Spencer Purdy
# Description: An advanced document analysis tool that combines LayoutLMv3 for document understanding
# with efficient language models to extract information, summarize, and answer questions about documents.
# Optimized for Google Colab Pro performance.

import subprocess
import sys
import os
import io
from typing import List, Dict, Tuple, Optional, Any
import json
import re
import hashlib
import time

# Install required packages function
def install_packages():
    """Install all required packages for the document intelligence system"""
    packages = [
        'gradio',
        'transformers',
        'torch',
        'torchvision',
        'Pillow',
        'pytesseract',
        'pdf2image',
        'opencv-python',
        'sentencepiece',
        'accelerate'
    ]
    
    print("Installing required packages...")
    for package in packages:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package, '-q'])
    
    # Install system dependencies for PDF processing and OCR
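    # Note: calling apt-get directly assumes a root environment such as Google
    # Colab; on a local machine these commands may need sudo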
    print("Installing system dependencies...")
    subprocess.check_call(['apt-get', 'update', '-qq'])
    subprocess.check_call(['apt-get', 'install', '-y', '-qq', 'poppler-utils', 'tesseract-ocr'])

# Try importing, install if needed
try:
    import gradio as gr
    from transformers import (
        AutoProcessor, AutoModelForTokenClassification,
        AutoTokenizer, AutoModelForSeq2SeqLM,
        pipeline
    )
    import torch
    from PIL import Image
    import pytesseract
    from pdf2image import convert_from_path
    import cv2
    import numpy as np
except ImportError:
    print("Installing required packages...")
    install_packages()
    # Re-import after installation
    import gradio as gr
    from transformers import (
        AutoProcessor, AutoModelForTokenClassification,
        AutoTokenizer, AutoModelForSeq2SeqLM,
        pipeline
    )
    import torch
    from PIL import Image
    import pytesseract
    from pdf2image import convert_from_path
    import cv2
    import numpy as np

# Configure device for optimal performance
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Model initialization with optimized settings
print("Loading models...")

# Load LayoutLMv3 for document structure understanding
print("Loading LayoutLMv3...")
layoutlm_processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
layoutlm_model = AutoModelForTokenClassification.from_pretrained(
    "microsoft/layoutlmv3-base",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
).to(device)
layoutlm_model.eval()  # Set to evaluation mode for faster inference
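# Note: "microsoft/layoutlmv3-base" ships without a fine-tuned token-classification
# head (its classifier weights are newly initialized), so per-token predictions are
# not meaningful out of the box; structuring below falls back to a spatial heuristic.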

# Load an efficient Flan-T5 model for text generation (much lighter and faster than e.g. Phi-2)
print("Loading T5 model for summarization and Q&A...")
t5_model_name = "google/flan-t5-base"  # 250M parameters, efficient and effective
t5_tokenizer = AutoTokenizer.from_pretrained(t5_model_name)
t5_model = AutoModelForSeq2SeqLM.from_pretrained(
    t5_model_name,
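    # float16 can be numerically unstable for some T5 checkpoints;
    # switch to torch.float32 if generations look degraded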
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
).to(device)
t5_model.eval()  # Set to evaluation mode

print("Models loaded successfully!")

class DocumentProcessor:
    """
    Main document processing class that handles OCR, text extraction,
    summarization, and question answering for various document types.
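
    Typical usage (file path is illustrative):
        processor = DocumentProcessor()
        processor.process_document("invoice.pdf")
        print(processor.summarize_document())
        print(processor.answer_question("What is the total amount due?"))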
    """
    
    def __init__(self):
        """Initialize the document processor with empty state"""
        self.extracted_text = ""
        self.document_metadata = {}
        self.page_contents = []
        self.processing_cache = {}  # Cache for processed documents
        
    def _get_file_hash(self, file_path: str) -> str:
        """Generate a hash for the file to use as cache key"""
        with open(file_path, 'rb') as f:
            return hashlib.md5(f.read()).hexdigest()
    
    def process_pdf(self, pdf_path: str, max_pages: int = 20) -> List[Image.Image]:
        """
        Convert PDF pages to images for OCR processing
        
        Args:
            pdf_path: Path to the PDF file
            max_pages: Maximum number of pages to process (for memory management)
            
        Returns:
            List of PIL Images representing PDF pages
        """
        try:
            # Convert PDF to images with resolution optimization
            images = convert_from_path(
                pdf_path, 
                dpi=150,  # Balance between quality and performance
                first_page=1,
                last_page=min(max_pages, 100)  # Limit pages for memory
            )
            return images
        except Exception as e:
            print(f"Error processing PDF: {e}")
            return []
    
    def extract_text_from_image(self, image: Image.Image) -> Dict[str, Any]:
        """
        Extract text and layout information from an image using OCR
        
        Args:
            image: PIL Image to process
            
        Returns:
            Dictionary containing extracted text and metadata
        """
        try:
            # Resize image if too large to improve performance
            max_dimension = 2000
            if max(image.size) > max_dimension:
                ratio = max_dimension / max(image.size)
                new_size = tuple(int(dim * ratio) for dim in image.size)
                image = image.resize(new_size, Image.Resampling.LANCZOS)
            
            # Convert to numpy array for OCR
            image_np = np.array(image)
            
            # Perform OCR with Tesseract
            ocr_config = '--oem 3 --psm 6'  # OEM 3: default/LSTM engine; PSM 6: assume a single uniform block of text
            ocr_data = pytesseract.image_to_data(
                image_np, 
                output_type=pytesseract.Output.DICT,
                config=ocr_config
            )
            
            # Extract words and bounding boxes
            words = []
            boxes = []
            confidences = []
            
            for i in range(len(ocr_data['text'])):
                if ocr_data['text'][i].strip() and ocr_data['conf'][i] > 30:  # Filter by confidence
                    words.append(ocr_data['text'][i])
                    boxes.append([
                        ocr_data['left'][i],
                        ocr_data['top'][i],
                        ocr_data['left'][i] + ocr_data['width'][i],
                        ocr_data['top'][i] + ocr_data['height'][i]
                    ])
                    confidences.append(ocr_data['conf'][i])
            
            # Join words to form complete text
            text = ' '.join(words)
            
            # Process with LayoutLMv3 for structure understanding (if text found)
            structured_text = text
            if words and len(words) < 400:  # Limit for performance
                try:
                    # LayoutLMv3 expects word boxes normalized to a 0-1000 scale,
                    # so rescale pixel coordinates before encoding
                    width, height = image.size
                    normalized_boxes = [
                        [min(1000, max(0, int(1000 * coord / dim)))
                         for coord, dim in zip(box, (width, height, width, height))]
                        for box in boxes[:400]
                    ]

                    # Prepare inputs for LayoutLMv3
                    encoding = layoutlm_processor(
                        image,
                        words[:400],  # Limit words
                        boxes=normalized_boxes,
                        return_tensors="pt",
                        truncation=True,
                        padding="max_length",
                        max_length=512
                    )
                    
                    # Move to device and run inference
                    encoding = {k: v.to(device) for k, v in encoding.items()}
                    
                    with torch.no_grad():
                        outputs = layoutlm_model(**encoding)
                    
                    # Get predictions
                    predictions = outputs.logits.argmax(-1).squeeze().tolist()
                    if isinstance(predictions, int):
                        predictions = [predictions]
                    
                    # Structure text based on layout
                    structured_text = self._structure_text(words[:len(predictions)], boxes[:len(predictions)])
                except Exception as e:
                    print(f"LayoutLM processing skipped: {e}")
                    structured_text = self._simple_structure_text(words, boxes)
            else:
                structured_text = self._simple_structure_text(words, boxes)
            
            return {
                'raw_text': text,
                'words': words,
                'boxes': boxes,
                'structured_text': structured_text,
                'num_words': len(words),
                'avg_confidence': sum(confidences) / len(confidences) if confidences else 0
            }
                
        except Exception as e:
            print(f"Error extracting text: {e}")
            return {
                'raw_text': "",
                'words': [],
                'boxes': [],
                'structured_text': "",
                'num_words': 0,
                'avg_confidence': 0
            }
    
    def _simple_structure_text(self, words: List[str], boxes: List[List[int]]) -> str:
        """
        Simple text structuring based on spatial layout
        Groups words into lines based on vertical position
        """
        if not words:
            return ""
        
        # Group words by lines
        lines = []
        current_line = []
        last_y = None
        
        for word, box in zip(words, boxes):
            y_pos = box[1]  # Top position
            
            if last_y is None or abs(y_pos - last_y) < 15:  # Same line threshold
                current_line.append(word)
            else:
                if current_line:
                    lines.append(' '.join(current_line))
                current_line = [word]
            
            last_y = y_pos
        
        if current_line:
            lines.append(' '.join(current_line))
        
        return '\n'.join(lines)
    
    def _structure_text(self, words: List[str], boxes: List[List[int]]) -> str:
        """
        Structure text informed by LayoutLMv3 output. The base checkpoint's
        token labels are not reliable, so this currently delegates to the
        spatial heuristic.
        """
        return self._simple_structure_text(words, boxes)
    
    def process_document(self, file_path: str) -> str:
        """
        Process any document type (PDF or image) and extract text
        
        Args:
            file_path: Path to the document file
            
        Returns:
            Status message indicating success or failure
        """
        # Reset state
        self.extracted_text = ""
        self.page_contents = []
        self.document_metadata = {
            'filename': os.path.basename(file_path),
            'pages': 0,
            'total_words': 0
        }
        
        # Check cache
        file_hash = self._get_file_hash(file_path)
        if file_hash in self.processing_cache:
            cached_data = self.processing_cache[file_hash]
            self.extracted_text = cached_data['text']
            self.page_contents = cached_data['pages']
            self.document_metadata = cached_data['metadata']
            return f"βœ… Loaded from cache: {self.document_metadata['filename']}\n" \
                   f"πŸ“„ Pages: {self.document_metadata['pages']}\n" \
                   f"πŸ“ Words: {self.document_metadata['total_words']}"
        
        try:
            start_time = time.time()
            
            if file_path.lower().endswith('.pdf'):
                # Process PDF document
                images = self.process_pdf(file_path)
                self.document_metadata['pages'] = len(images)
                
                for i, image in enumerate(images):
                    print(f"Processing page {i+1}/{len(images)}...")
                    page_data = self.extract_text_from_image(image)
                    self.page_contents.append(page_data)
                    self.extracted_text += f"\n\n--- Page {i+1} ---\n\n"
                    self.extracted_text += page_data['structured_text']
                    self.document_metadata['total_words'] += page_data['num_words']
                    
            else:
                # Process single image
                image = Image.open(file_path).convert('RGB')
                page_data = self.extract_text_from_image(image)
                self.page_contents.append(page_data)
                self.extracted_text = page_data['structured_text']
                self.document_metadata['pages'] = 1
                self.document_metadata['total_words'] = page_data['num_words']
            
            # Cache the results
            self.processing_cache[file_hash] = {
                'text': self.extracted_text,
                'pages': self.page_contents,
                'metadata': self.document_metadata
            }
            
            processing_time = time.time() - start_time
            
            if self.document_metadata['total_words'] == 0:
                return f"⚠️ No text found in {self.document_metadata['filename']}. Please ensure the document contains readable text."
            
            return f"βœ… Successfully processed {self.document_metadata['filename']}\n" \
                   f"πŸ“„ Pages: {self.document_metadata['pages']}\n" \
                   f"πŸ“ Words extracted: {self.document_metadata['total_words']}\n" \
                   f"⏱️ Processing time: {processing_time:.1f}s"
                   
        except Exception as e:
            return f"❌ Error processing document: {str(e)}"
    
    def summarize_document(self) -> str:
        """
        Generate a concise summary of the document using T5 model
        
        Returns:
            Document summary or error message
        """
        if not self.extracted_text:
            return "No document has been processed yet. Please upload and process a document first."
        
        try:
            start_time = time.time()
            
            # Prepare text for summarization (limit to manage tokens)
            text_to_summarize = self.extracted_text[:2048]
            
            # Create prompt for T5
            prompt = f"Summarize the following document:\n\n{text_to_summarize}"
            
            # Tokenize input
            inputs = t5_tokenizer(
                prompt,
                return_tensors="pt",
                max_length=1024,
                truncation=True
            ).to(device)
            
            # Generate summary
            with torch.no_grad():
                summary_ids = t5_model.generate(
                    inputs.input_ids,
                    max_length=150,
                    min_length=30,
                    num_beams=4,
                    length_penalty=2.0,
                    early_stopping=True
                )
            
            # Decode summary
            summary = t5_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
            
            generation_time = time.time() - start_time
            
            return f"{summary}\n\n⏱️ Generated in {generation_time:.1f}s"
            
        except Exception as e:
            return f"Error generating summary: {str(e)}"
    
    def answer_question(self, question: str) -> str:
        """
        Answer questions about the document using T5 model
        
        Args:
            question: User's question about the document
            
        Returns:
            Answer to the question
        """
        if not self.extracted_text:
            return "Please upload and process a document first."
        
        if not question.strip():
            return "Please enter a question."
        
        try:
            start_time = time.time()
            
            # Prepare context and question
            context = self.extracted_text[:1536]  # Limit context
            
            # Format prompt for T5
            prompt = f"Answer the question based on the context.\n\nContext: {context}\n\nQuestion: {question}\n\nAnswer:"
            
            # Tokenize
            inputs = t5_tokenizer(
                prompt,
                return_tensors="pt",
                max_length=1024,
                truncation=True
            ).to(device)
            
            # Generate answer
            with torch.no_grad():
                answer_ids = t5_model.generate(
                    inputs.input_ids,
                    max_length=100,
                    min_length=5,
                    num_beams=3,
                    temperature=0.7,
                    do_sample=True,
                    top_p=0.9
                )
            
            # Decode answer
            answer = t5_tokenizer.decode(answer_ids[0], skip_special_tokens=True)
            
            generation_time = time.time() - start_time
            
            return f"{answer}\n\n⏱️ Generated in {generation_time:.1f}s"
            
        except Exception as e:
            return f"Error answering question: {str(e)}"
    
    def extract_key_information(self) -> Dict[str, List[str]]:
        """
        Extract key entities from the document using regex patterns
        
        Returns:
            Dictionary of extracted entities organized by type
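            (e.g. {'dates': [...], 'emails': [...]}; categories with no matches are dropped)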
        """
        if not self.extracted_text:
            return {"message": ["No document has been processed yet."]}
        
        try:
            entities = {
                'dates': [],
                'emails': [],
                'phone_numbers': [],
                'monetary_amounts': [],
                'percentages': [],
                'urls': []
            }
            
            # Date extraction patterns
            date_patterns = [
                r'\b\d{1,2}[/-]\d{1,2}[/-]\d{2,4}\b',
                r'\b\d{4}[/-]\d{1,2}[/-]\d{1,2}\b',
                r'\b(?:January|February|March|April|May|June|July|August|September|October|November|December|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+\d{1,2},?\s+\d{4}\b',
                r'\b\d{1,2}\s+(?:January|February|March|April|May|June|July|August|September|October|November|December|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+\d{4}\b'
            ]
            
            for pattern in date_patterns:
                matches = re.findall(pattern, self.extracted_text, re.IGNORECASE)
                entities['dates'].extend(matches)
            
            # Email extraction
            email_pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b'
            entities['emails'] = re.findall(email_pattern, self.extracted_text)
            
            # Phone number extraction (various formats)
            phone_patterns = [
                r'\b\+?1?\s*\(?([0-9]{3})\)?[-.\s]?([0-9]{3})[-.\s]?([0-9]{4})\b',
                r'\b\d{3}[-.\s]\d{3}[-.\s]\d{4}\b'
            ]
            
            for pattern in phone_patterns:
                matches = re.findall(pattern, self.extracted_text)
                for match in matches:
                    # Patterns with capture groups yield tuples of groups
                    if isinstance(match, tuple):
                        entities['phone_numbers'].append('-'.join(match))
                    else:
                        entities['phone_numbers'].append(match)
            
            # Monetary amount extraction
            money_patterns = [
                r'\$\s*[\d,]+\.?\d*',
                r'USD\s*[\d,]+\.?\d*',
                r'\b\d{1,3}(?:,\d{3})*(?:\.\d{2})?\s*(?:dollars?|USD)\b'
            ]
            
            for pattern in money_patterns:
                matches = re.findall(pattern, self.extracted_text, re.IGNORECASE)
                entities['monetary_amounts'].extend(matches)
            
            # Percentage extraction
            percent_pattern = r'\b\d+\.?\d*\s*%'
            entities['percentages'] = re.findall(percent_pattern, self.extracted_text)
            
            # URL extraction
            url_pattern = r'https?://(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b(?:[-a-zA-Z0-9()@:%_\+.~#?&/=]*)'
            entities['urls'] = re.findall(url_pattern, self.extracted_text)
            
            # Clean up and deduplicate
            for key in entities:
                # Remove duplicates and limit to 10 items
                unique_items = list(dict.fromkeys(entities[key]))  # Preserves order
                entities[key] = unique_items[:10]
            
            # Remove empty categories
            entities = {k: v for k, v in entities.items() if v}
            
            if not entities:
                entities = {"info": ["No specific entities found. The document may need better quality or contain different types of information."]}
            
            return entities
            
        except Exception as e:
            return {"error": [f"Error extracting information: {str(e)}"]}

# Initialize global processor
processor = DocumentProcessor()

# Gradio interface handlers
def process_document_handler(file):
    """Handle document upload and processing"""
    if file is None:
        return "Please upload a document.", "", {}
    
    # Process the document
    status = processor.process_document(file)
    
    # Get text preview
    text_preview = (
        processor.extracted_text[:1000] + "..."
        if len(processor.extracted_text) > 1000
        else processor.extracted_text
    )
    
    # Extract key information
    key_info = processor.extract_key_information()
    
    return status, text_preview, key_info

def summarize_handler():
    """Handle document summarization request"""
    return processor.summarize_document()

def qa_handler(question):
    """Handle question answering request"""
    if not question:
        return "Please enter a question."
    return processor.answer_question(question)

def create_interface():
    """
    Create the Gradio interface for the document intelligence system
    """
    
    with gr.Blocks(title="Multi-Modal Document Intelligence System", theme=gr.themes.Soft()) as interface:
        # Header
        gr.Markdown("""
        # 🧠 Multi-Modal Document Intelligence System
        
        **Upload any document (PDF or image) and unlock its insights with AI!**
        
        This advanced system combines:
        - πŸ“„ **LayoutLMv3** for understanding document structure and layout
        - πŸ€– **Flan-T5** for intelligent summarization and question answering
        - πŸ” **OCR Technology** for accurate text extraction from any document
        
        ### ✨ Features
        - Upload PDFs or images (JPG, PNG, etc.)
        - Automatic text extraction with layout understanding
        - Intelligent document summarization
        - Natural language Q&A about your documents
        - Key information extraction (dates, emails, amounts, etc.)
        """)
        
        # Main interface layout
        with gr.Row():
            # Left column - Upload and processing
            with gr.Column(scale=1):
                file_input = gr.File(
                    label="πŸ“ Upload Document",
                    file_types=[".pdf", ".png", ".jpg", ".jpeg", ".bmp", ".tiff"],
                    type="filepath"
                )
                
                process_btn = gr.Button("πŸ”„ Process Document", variant="primary", size="lg")
                
                status_output = gr.Textbox(
                    label="πŸ“Š Processing Status",
                    lines=4,
                    interactive=False
                )
                
                gr.Markdown("### πŸ”‘ Key Information Extracted")
                key_info_output = gr.JSON(label="Extracted Entities", elem_id="key_info")
            
            # Right column - Results and interaction
            with gr.Column(scale=2):
                text_preview = gr.Textbox(
                    label="πŸ“„ Document Text Preview",
                    lines=10,
                    max_lines=15,
                    interactive=False
                )
                
                with gr.Tab("πŸ“ Summary"):
                    summary_btn = gr.Button("Generate Summary", variant="secondary")
                    summary_output = gr.Textbox(
                        label="Document Summary",
                        lines=8,
                        interactive=False
                    )
                
                with gr.Tab("❓ Q&A"):
                    question_input = gr.Textbox(
                        label="Ask a question about the document",
                        placeholder="e.g., What are the main points? What dates are mentioned? What is the total amount?",
                        lines=2
                    )
                    qa_btn = gr.Button("Get Answer", variant="secondary")
                    answer_output = gr.Textbox(
                        label="Answer",
                        lines=6,
                        interactive=False
                    )
        
        # Example questions
        gr.Markdown("### πŸ“š Example Questions")
        gr.Examples(
            examples=[
                "What is the main topic of this document?",
                "What dates are mentioned?",
                "What is the total amount due?",
                "Who are the key people mentioned?",
                "What are the main findings?",
                "Summarize the key points."
            ],
            inputs=question_input
        )
        
        # Footer with instructions
        gr.Markdown("""
        ---
        ### 🎯 How to Use
        1. **Upload** a PDF or image document
        2. **Process** the document to extract text
        3. **Review** the extracted text and key information
        4. **Generate** a summary or ask questions
        
        ### πŸ’‘ Tips for Best Results
        - Use clear, high-quality documents
        - For images, ensure good lighting and contrast
        - The system works with multiple languages
        - Processing time depends on document size and complexity
        
        ---
        πŸ‘¨β€πŸ’» **Created by Spencer Purdy** | Computer Science @ Auburn University  
        [GitHub](https://github.com/spencercpurdy) | [LinkedIn](https://linkedin.com/in/spencerpurdy) | [Hugging Face](https://huggingface.co/spencercpurdy)
        """)
        
        # Connect event handlers
        process_btn.click(
            fn=process_document_handler,
            inputs=file_input,
            outputs=[status_output, text_preview, key_info_output]
        )
        
        summary_btn.click(
            fn=summarize_handler,
            inputs=[],
            outputs=summary_output
        )
        
        qa_btn.click(
            fn=qa_handler,
            inputs=question_input,
            outputs=answer_output
        )
        
        # Allow Enter key to submit questions
        question_input.submit(
            fn=qa_handler,
            inputs=question_input,
            outputs=answer_output
        )
    
    return interface

# Main execution
if __name__ == "__main__":
    print("Starting Multi-Modal Document Intelligence System...")
    
    # Create and launch the interface
    interface = create_interface()
    
    # Launch with a temporary public share link (share=True); remove it for local-only use
    interface.launch(
        debug=True,
        share=True,
        server_name="0.0.0.0",
        server_port=7860
    )