from fastapi import FastAPI, HTTPException, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
import torch
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import json
import base64
from io import BytesIO
import uvicorn

app = FastAPI(title="VerifAI GradCAM API Simple", description="Simplified API for AI image detection")

# CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class SimpleAIDetector:
    def __init__(self):
        self.device = torch.device('cpu')  # Use CPU to avoid GPU issues
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
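        # self.transform is not used by the heuristic in predict_simple below;
        # it is kept for when a real model replaces the simulated prediction.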

    def _preprocess_image(self, image):
        """Preprocess the image: accepts a data URL string, a file path, or a PIL image."""
        if isinstance(image, str):
            if image.startswith('data:image'):
                header, data = image.split(',', 1)
                image_data = base64.b64decode(data)
                image = Image.open(BytesIO(image_data))
            else:
                image = Image.open(image)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        return image

    def predict_simple(self, image):
        """Simple prediction without a complex model."""
        try:
            # Preprocessing
            processed_image = self._preprocess_image(image)

            # Simulated prediction (replace with your real model)
            # For now, the prediction is based on the color variance of the image
            img_array = np.array(processed_image)
            color_variance = np.var(img_array)

            # Simple logic: more variance = more likely to be real
            if color_variance > 1000:
                prediction = 0  # Real
                confidence = min(0.9, color_variance / 2000)
            else:
                prediction = 1  # AI-Generated
                confidence = min(0.9, 1 - color_variance / 2000)

            # Create a simple placeholder "saliency map" (a color gradient)
            height, width = img_array.shape[:2]
            gradient = np.zeros((height, width, 3), dtype=np.uint8)
            for i in range(height):
                for j in range(width):
                    gradient[i, j] = [int(255 * i / height), int(255 * j / width), 128]

            # Convert to base64
            pil_image = Image.fromarray(gradient)
            buffer = BytesIO()
            pil_image.save(buffer, format='PNG')
            cam_base64 = base64.b64encode(buffer.getvalue()).decode()

            result = {
                'prediction': prediction,
                'confidence': confidence,
                'class_probabilities': {
                    # The predicted class gets `confidence`; the other class gets the remainder
                    'Real': confidence if prediction == 0 else 1 - confidence,
                    'AI-Generated': confidence if prediction == 1 else 1 - confidence
                },
                'cam_image': f"data:image/png;base64,{cam_base64}",
                'status': 'success',
                'note': 'Simplified version for testing'
            }
            return result
        except Exception as e:
            return {'status': 'error', 'message': str(e)}

# Initialize the detector
detector = SimpleAIDetector()

# Note: the route paths on the decorators below are assumed; adjust them if your
# frontend expects different ones.
@app.get("/")
async def root():
    return {
        "message": "VerifAI GradCAM API Simple",
        "status": "running",
        "version": "1.0-simple"
    }

@app.get("/health")
async def health():
    return {"status": "healthy", "device": str(detector.device)}

@app.post("/predict")
async def predict_image(file: UploadFile = File(...)):
    """Endpoint for analysing an uploaded image."""
    try:
        # Read the image
        image_data = await file.read()
        image = Image.open(BytesIO(image_data))
        # Analyse it
        result = detector.predict_simple(image)
        return result
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/predict/base64")
async def predict_base64(data: dict):
    """Endpoint for analysing a base64-encoded image."""
    try:
        if 'image' not in data:
            raise HTTPException(status_code=400, detail="'image' field is required")
        image_b64 = data['image']
        # Analyse it
        result = detector.predict_simple(image_b64)
        return result
    except HTTPException:
        # Re-raise client errors unchanged instead of converting them to 500s
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
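
# Example client sketch (the /predict path and the test.jpg file name are
# placeholders, matching the assumed route above). Start the server, then run
# something like this from a separate script or REPL:
#
#   import requests
#
#   with open("test.jpg", "rb") as f:
#       resp = requests.post("http://localhost:7860/predict", files={"file": f})
#   print(resp.json())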