File size: 5,677 Bytes
77daf9a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
# model_investigation.py - Run this to investigate your model
# Add this as a separate file to debug your model loading

from transformers import AutoConfig, AutoTokenizer
import logging
import traceback

# Configure root logging once at import time so INFO-level messages from
# libraries used below (e.g. transformers/huggingface_hub) are visible.
logging.basicConfig(level=logging.INFO)
# Module-level logger per convention; NOTE(review): not used below yet.
logger = logging.getLogger(__name__)

def investigate_model(model_name="Girinath11/aiml_code_debug_model"):
    """Inspect a Hugging Face model repo to find a working loading approach.

    Runs four diagnostic steps, printing findings to stdout:
      1. Download and dump the model config attributes.
      2. Load the tokenizer and show its special tokens.
      3. Try several Auto* classes / pipelines and recommend the first
         that loads successfully (with a quick generation smoke test).
      4. List the key files present in the repo.

    Purely diagnostic: requires network access, returns nothing, and
    never raises — all failures are reported on stdout.

    Args:
        model_name: Hugging Face Hub repo id of the model to inspect.
    """
    print(f"πŸ” Investigating model: {model_name}")
    print("=" * 60)

    # Pre-bind so the generation test below can safely check for a
    # missing tokenizer instead of hitting NameError when step 2 fails.
    tokenizer = None

    try:
        # 1. Check config
        print("πŸ“‹ STEP 1: Checking model configuration...")
        config = AutoConfig.from_pretrained(model_name)

        print(f"   βœ“ Config type: {type(config).__name__}")
        print(f"   βœ“ Model type: {getattr(config, 'model_type', 'Unknown')}")
        print(f"   βœ“ Architectures: {getattr(config, 'architectures', 'Unknown')}")

        # Print all non-callable, public config attributes.
        print("\n   πŸ“„ Full config attributes:")
        for attr in sorted(dir(config)):
            if not attr.startswith('_') and hasattr(config, attr):
                try:
                    value = getattr(config, attr)
                    if not callable(value):
                        print(f"      {attr}: {value}")
                except Exception:
                    # Some config properties raise on access; skip them.
                    pass

        # 2. Check tokenizer
        print("\nπŸ”€ STEP 2: Checking tokenizer...")
        try:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            print(f"   βœ“ Tokenizer type: {type(tokenizer).__name__}")
            print(f"   βœ“ Vocab size: {len(tokenizer)}")
            print(f"   βœ“ Special tokens:")
            print(f"      pad_token: {tokenizer.pad_token}")
            print(f"      eos_token: {tokenizer.eos_token}")
            print(f"      unk_token: {tokenizer.unk_token}")
            print(f"      bos_token: {getattr(tokenizer, 'bos_token', 'None')}")
        except Exception as e:
            print(f"   ❌ Tokenizer error: {e}")

        # 3. Try different loading approaches, most specific first.
        print("\nπŸ€– STEP 3: Testing model loading approaches...")

        from transformers import (
            AutoModel,
            AutoModelForSeq2SeqLM,
            AutoModelForCausalLM,
            pipeline
        )

        approaches = [
            ("AutoModel", lambda: AutoModel.from_pretrained(model_name, trust_remote_code=True)),
            ("AutoModelForCausalLM", lambda: AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)),
            ("AutoModelForSeq2SeqLM", lambda: AutoModelForSeq2SeqLM.from_pretrained(model_name, trust_remote_code=True)),
            ("Pipeline text-generation", lambda: pipeline("text-generation", model=model_name, trust_remote_code=True)),
            ("Pipeline text2text-generation", lambda: pipeline("text2text-generation", model=model_name, trust_remote_code=True)),
        ]

        for approach_name, approach_func in approaches:
            try:
                print(f"   πŸ”„ Trying {approach_name}...")
                model = approach_func()
                print(f"   βœ… SUCCESS with {approach_name}!")
                print(f"      Model type: {type(model)}")

                # Smoke-test generation when the object supports it.
                if hasattr(model, 'generate') or 'pipeline' in approach_name.lower():
                    print(f"      βœ“ Supports text generation")

                    try:
                        test_input = "def hello(): print('world')"
                        if 'pipeline' in approach_name.lower():
                            result = model(f"Fix this code: {test_input}", max_length=100)
                            print(f"      βœ“ Test generation successful: {str(result)[:100]}...")
                        elif tokenizer is not None:
                            inputs = tokenizer(f"Fix: {test_input}", return_tensors="pt", max_length=100, truncation=True)
                            outputs = model.generate(**inputs, max_new_tokens=50)
                            result = tokenizer.decode(outputs[0], skip_special_tokens=True)
                            print(f"      βœ“ Test generation successful: {result[:100]}...")
                        else:
                            # Step 2 failed, so we cannot drive .generate() directly.
                            print("      ⚠️ Skipping generation test: tokenizer unavailable")
                    except Exception as gen_error:
                        print(f"      ⚠️ Generation test failed: {str(gen_error)[:100]}...")

                # First working approach wins; stop probing.
                print(f"\nπŸŽ‰ RECOMMENDATION: Use {approach_name} for loading this model!")
                break

            except Exception as e:
                print(f"   ❌ {approach_name} failed: {str(e)[:100]}...")

        # 4. Model files inspection
        print("\nπŸ“ STEP 4: Model files information...")
        try:
            from huggingface_hub import list_repo_files
            files = list_repo_files(model_name)

            model_files = [f for f in files if f.endswith(('.bin', '.safetensors', '.json'))]
            print(f"   πŸ“„ Key model files found:")
            for file in sorted(model_files):
                print(f"      {file}")

        except Exception as e:
            print(f"   ⚠️ Could not list files: {e}")

    except Exception as main_error:
        print(f"\n❌ CRITICAL ERROR: {main_error}")
        print(f"Full traceback:\n{traceback.format_exc()}")

if __name__ == "__main__":
    # Run the diagnostic against the default model repo.
    investigate_model()

    separator = "=" * 60
    print(f"\n{separator}")
    print("🏁 Investigation complete!")
    print("Copy the successful loading approach to your model_wrapper.py")