File size: 2,229 Bytes
31b51e9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import gradio as gr
import torch
from huggingface_hub import HfApi, login
import os
import sys
import traceback

# Select the compute device: prefer CUDA when a GPU is visible, else CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using device: " + device)

# Log environment details to make platform debugging easier.
print("Gradio version: " + gr.__version__)
print("Python version: " + str(sys.version))

# Authenticate against the Hugging Face Hub when a token is supplied via
# the environment; warn (but keep running) when it is absent.
if hf_token := os.getenv("HUGGINGFACE_TOKEN"):
    login(token=hf_token)
    print("Successfully logged in to Hugging Face Hub")
else:
    print("Warning: HUGGINGFACE_TOKEN environment variable not set")

def load_model():
    """Load the goofyai/3d_render_style_xl model as a Gradio interface.

    Strategy: probe the Hub for the repo (diagnostics only), then try the
    modern ``gr.load()`` API, then fall back to the legacy
    ``gr.Interface.load()`` API.

    Returns:
        The loaded Gradio interface, or ``None`` when every attempt fails.
    """
    try:
        print("Attempting to load model from Hugging Face Hub...")
        # Probe the Hub first so a missing/renamed repo surfaces early;
        # failures here are logged but do not abort the load.
        hub = HfApi()
        try:
            info = hub.model_info("goofyai/3d_render_style_xl")
            print(f"Found model: {info.modelId}")
        except Exception as probe_err:
            print(f"Error checking model info: {str(probe_err)}")

        # Preferred path: the newer top-level loader.
        try:
            print("Using gr.load() method...")
            return gr.load("models/goofyai/3d_render_style_xl")
        except Exception as load_err:
            print(f"Error with gr.load(): {str(load_err)}")
            print(traceback.format_exc())

            # Legacy fallback for older Gradio releases.
            print("Trying Interface.load() as fallback...")
            return gr.Interface.load("models/goofyai/3d_render_style_xl")
    except Exception as fatal_err:
        # Last-resort guard: report and signal failure to the caller.
        print(f"Error loading model: {str(fatal_err)}")
        print(traceback.format_exc())
        return None

# Script entry: build the interface and serve it on port 7860.
try:
    interface = load_model()
    if not interface:
        print("Failed to load the interface")
    else:
        print("Model loaded successfully, launching interface...")
        # Bind to all interfaces so the app is reachable from outside
        # the container/Space; surface errors in the UI for debugging.
        interface.launch(
            share=False,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
        )
except Exception as launch_err:
    print(f"Error launching interface: {str(launch_err)}")
    print(traceback.format_exc())