# dnm3d / app.py
# Uploaded by geyik1 (commit 31b51e9, verified) — raw file, 2.23 kB
import gradio as gr
import torch
from huggingface_hub import HfApi, login
import os
import sys
import traceback
# Select the compute device: prefer GPU when available, fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Log environment details to aid debugging in the Space logs.
print(f"Gradio version: {gr.__version__}")
print(f"Python version: {sys.version}")

# Authenticate against the Hugging Face Hub when a token is provided.
# A failed login (e.g. an expired or malformed token) must not crash the
# whole app at startup: public models can still be loaded anonymously.
hf_token = os.getenv("HUGGINGFACE_TOKEN")
if hf_token:
    try:
        login(token=hf_token)
        print("Successfully logged in to Hugging Face Hub")
    except Exception as login_err:
        print(f"Warning: Hugging Face login failed: {login_err}")
else:
    print("Warning: HUGGINGFACE_TOKEN environment variable not set")
def load_model(model_id="goofyai/3d_render_style_xl"):
    """Load a Gradio interface for *model_id* from the Hugging Face Hub.

    Tries the modern ``gr.load()`` API first and falls back to the legacy
    ``gr.Interface.load()`` for older Gradio versions.

    Args:
        model_id: Hub repository id of the model to wrap. Defaults to the
            3D-render-style SDXL LoRA this Space was built around.

    Returns:
        A Gradio interface object, or ``None`` if every load strategy failed.
    """
    try:
        print("Attempting to load model from Hugging Face Hub...")
        # Probe the Hub first so a missing or private repo is reported
        # clearly; failure here is non-fatal since the load attempts
        # below may still succeed.
        api = HfApi()
        try:
            model_info = api.model_info(model_id)
            print(f"Found model: {model_info.modelId}")
        except Exception as info_err:
            print(f"Error checking model info: {info_err}")
        try:
            print("Using gr.load() method...")
            return gr.load(f"models/{model_id}")
        except Exception as load_err:
            print(f"Error with gr.load(): {load_err}")
            print(traceback.format_exc())
            # Legacy fallback for Gradio versions that predate gr.load().
            print("Trying Interface.load() as fallback...")
            return gr.Interface.load(f"models/{model_id}")
    except Exception as err:
        # Any unrecoverable failure: log the full traceback and signal the
        # caller via None rather than raising.
        print(f"Error loading model: {err}")
        print(traceback.format_exc())
        return None
def main():
    """Load the model interface and serve it on port 7860.

    Any failure is logged with a full traceback instead of propagating,
    so the Space log always shows why startup failed.
    """
    try:
        interface = load_model()
        if interface:
            print("Model loaded successfully, launching interface...")
            # Bind to all interfaces so the Space's reverse proxy can
            # reach the server inside the container.
            interface.launch(
                share=False,
                server_name="0.0.0.0",
                server_port=7860,
                show_error=True,
            )
        else:
            print("Failed to load the interface")
    except Exception as e:
        print(f"Error launching interface: {e}")
        print(traceback.format_exc())


# Guard the launch so importing this module has no side effects; running
# the file as a script (as Hugging Face Spaces does) behaves as before.
if __name__ == "__main__":
    main()