|
import gradio as gr |
|
import torch |
|
from huggingface_hub import HfApi, login |
|
import os |
|
import sys |
|
import traceback |
|
|
|
|
|
# --- Environment setup -------------------------------------------------
# Prefer GPU when available; model inference runs on this device.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Log runtime versions up front so failure reports include them.
print(f"Gradio version: {gr.__version__}")
print(f"Python version: {sys.version}")

# Authenticate with the Hugging Face Hub when a token is provided.
# Login is best-effort: a bad or expired token must not prevent startup,
# since public models are still loadable anonymously.
hf_token = os.getenv("HUGGINGFACE_TOKEN")
if hf_token:
    try:
        login(token=hf_token)
        print("Successfully logged in to Hugging Face Hub")
    except Exception as e:
        print(f"Warning: Hugging Face login failed: {str(e)}")
else:
    print("Warning: HUGGINGFACE_TOKEN environment variable not set")
|
|
|
# Default Hub repository this app was built around; kept as a constant so
# the id is not repeated in multiple string literals.
DEFAULT_MODEL_ID = "goofyai/3d_render_style_xl"


def load_model(model_id: str = DEFAULT_MODEL_ID):
    """Load a Gradio interface for a Hugging Face-hosted model.

    Tries ``gr.load()`` first (the current API) and falls back to the
    legacy ``gr.Interface.load()`` for older Gradio installs.

    Args:
        model_id: Hub repository id of the model to load. Defaults to
            ``DEFAULT_MODEL_ID`` so existing callers are unaffected.

    Returns:
        A launchable Gradio interface, or ``None`` if every loading
        strategy failed (errors are printed, not raised).
    """
    try:
        print("Attempting to load model from Hugging Face Hub...")

        # Best-effort check that the repo exists/is reachable; a failure
        # here is informational only and does not abort loading.
        api = HfApi()
        try:
            model_info = api.model_info(model_id)
            # `modelId` was renamed to `id` in newer huggingface_hub
            # releases; accept either for compatibility.
            print(f"Found model: {getattr(model_info, 'id', None) or model_info.modelId}")
        except Exception as e:
            print(f"Error checking model info: {str(e)}")

        try:
            print("Using gr.load() method...")
            interface = gr.load(f"models/{model_id}")
            return interface
        except Exception as e:
            print(f"Error with gr.load(): {str(e)}")
            print(traceback.format_exc())

            # gr.Interface.load() is deprecated and removed in Gradio 4.x;
            # kept only as a fallback for old Gradio versions.
            print("Trying Interface.load() as fallback...")
            interface = gr.Interface.load(f"models/{model_id}")
            return interface
    except Exception as e:
        print(f"Error loading model: {str(e)}")
        print(traceback.format_exc())
        return None
|
|
|
|
|
# --- Application entry point -------------------------------------------
# Build the interface and serve it; any failure is reported rather than
# left as an unhandled traceback.
try:
    interface = load_model()
    if not interface:
        print("Failed to load the interface")
    else:
        print("Model loaded successfully, launching interface...")
        # Bind to all interfaces on the conventional Gradio port so the
        # app is reachable from outside a container.
        interface.launch(
            share=False,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
        )
except Exception as e:
    print(f"Error launching interface: {str(e)}")
    print(traceback.format_exc())