File size: 5,772 Bytes
c09783c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
import gradio as gr
import torch
import os
import sys
from huggingface_hub import login
import base64
import io
from PIL import Image
import requests
import tempfile

# Select the GPU when available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Log environment details for debugging Space startup issues.
print(f"Gradio version: {gr.__version__}")
print(f"Python version: {sys.version}")

# Hugging Face API token: look for it as an environment variable first,
# then rely on the Hugging Face Secrets system.
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
if hf_token:
    print("Found HUGGINGFACE_TOKEN in environment variables")
    # Log in with the token
    login(token=hf_token)
    print("Logged in with Hugging Face token")
else:
    print("HUGGINGFACE_TOKEN not found in environment variables")
    # Hugging Face Spaces will load this variable automatically
    # if you added it as a secret via the Spaces UI.

def process_image(img_data):
    """Normalize *img_data* into a PIL.Image, or return None on failure.

    Accepts, in order of checking:
      * a PIL Image (returned unchanged),
      * an http(s) URL (fetched and decoded),
      * a base64 data URI ("data:image/...;base64,..."),
      * raw encoded bytes,
      * a numpy-like array (anything with a 2D+ ``.shape``).

    Any error is logged to stdout and swallowed; callers must handle a
    None return.
    """
    try:
        # Already a PIL Image -- nothing to do.
        if isinstance(img_data, Image.Image):
            return img_data

        # Remote URL: fetch and decode. The timeout prevents the request
        # from hanging forever (the original call had none), and
        # raise_for_status stops us from trying to decode an HTTP error
        # page as an image; both failures land in the except below.
        if isinstance(img_data, str) and img_data.startswith(('http://', 'https://')):
            response = requests.get(img_data, timeout=30)
            response.raise_for_status()
            return Image.open(io.BytesIO(response.content))

        # Base64 data URI: strip the "data:image/...;base64," prefix.
        if isinstance(img_data, str) and img_data.startswith('data:image'):
            img_bytes = base64.b64decode(img_data.split(',', 1)[1])
            return Image.open(io.BytesIO(img_bytes))

        # Raw encoded image bytes (PNG/JPEG/... container data).
        if isinstance(img_data, bytes):
            return Image.open(io.BytesIO(img_data))

        # Numpy-like array (H x W, optionally x C).
        if hasattr(img_data, 'shape') and len(img_data.shape) >= 2:
            return Image.fromarray(img_data)

        # Default fallback
        print(f"Unknown image format: {type(img_data)}")
        return None
    except Exception as e:
        print(f"Error processing image: {str(e)}")
        return None

def save_image(img, filename=None):
    """Persist *img* as a PNG file and return its path, or None on failure.

    When *filename* is not given, a path under the system temp directory
    is generated from the object's id. The input is first normalized via
    process_image(); RGBA images are flattened to RGB before saving.
    """
    try:
        if not filename:
            filename = os.path.join(
                tempfile.gettempdir(), f"generated_image_{id(img)}.png"
            )

        processed = process_image(img)
        if not processed:
            return None

        # RGBA can cause problems downstream; flatten to RGB first.
        if processed.mode == 'RGBA':
            processed = processed.convert('RGB')
        processed.save(filename, format="PNG")
        return filename
    except Exception as e:
        print(f"Error saving image: {str(e)}")
        return None

def generate_3d_render(prompt):
    """Generate a 3D render from the prompt.

    Calls the "goofyai/3d_render_style_xl" Space and returns either a
    saved PNG file path, the raw first result, None on an empty API
    response, or a (Turkish) error string when the API is unreachable.
    """
    try:
        # Attempt to use external API through Gradio
        try:
            print(f"Sending request to model with prompt: {prompt}")
            
            # We have a predefined model interface on HF Spaces;
            # call that model directly.
            # NOTE(review): current Gradio releases may not expose
            # `gradio.external.call_space` -- confirm against the pinned
            # Gradio version; the outer except turns any failure into a
            # text fallback.
            import gradio.external as ext
            result = ext.call_space(
                name="goofyai/3d_render_style_xl",
                fn_index=0,  # the main model function is usually at index 0
                inputs=[prompt],
                api_key=hf_token
            )
            
            # Process the results.
            if result and isinstance(result, list) and len(result) > 0:
                print("Received response from model API")
                # If the first element looks like image data, process it.
                if hasattr(result[0], 'shape') or isinstance(result[0], (str, bytes, Image.Image)):
                    img = process_image(result[0])
                    if img:
                        # Save the image as PNG (returns the saved file path).
                        saved_path = save_image(img)
                        if saved_path:
                            print(f"Image saved to {saved_path}")
                            return saved_path
                return result[0]  # if it cannot be processed, return the original result
            else:
                print("Empty or invalid response from model API")
                return None
        except Exception as e:
            print(f"Error calling external API: {str(e)}")
            # Fallback: return a simple text response (Turkish: "cannot
            # reach the model API").
            return f"Model API'sine erişilemiyor: {str(e)}"
    except Exception as e:
        print(f"Error in generate_3d_render: {str(e)}")
        return f"Hata: {str(e)}"

def load_model():
    """Build and return the Gradio Interface for the demo, or None on error."""
    try:
        print("Setting up 3D render model interface...")

        # Single text-prompt input, file-path image output.
        demo = gr.Interface(
            fn=generate_3d_render,
            inputs=gr.Textbox(label="Input", placeholder="Enter a prompt for 3D rendering"),
            outputs=gr.Image(label="Output", type="filepath"),
            title="3D Render Style XL",
            description="Enter a prompt to generate a 3D render in game-icon style",
        )
    except Exception as e:
        print(f"Error setting up interface: {str(e)}")
        return None
    return demo

# Create the interface and launch the server; any startup failure is
# logged rather than allowed to crash the Space process.
try:
    interface = load_model()
    if interface:
        print("Interface set up successfully, launching...")
        # Bind to all network interfaces on 7860, the standard
        # Hugging Face Spaces port.
        interface.launch(
            share=False,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True
        )
    else:
        print("Failed to set up the interface")
except Exception as e:
    print(f"Error launching interface: {str(e)}")