Upload app.py with huggingface_hub
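For context, the commit message indicates the file was pushed with the huggingface_hub client. A minimal sketch of such an upload is shown below; the repo id is a placeholder and a valid access token is assumed to be configured.

from huggingface_hub import HfApi

api = HfApi()  # assumes `huggingface-cli login` (or an HF_TOKEN env var) has already been set up
api.upload_file(
    path_or_fileobj="app.py",              # local file to push
    path_in_repo="app.py",                 # destination path inside the Space repo
    repo_id="your-username/your-space",    # placeholder: replace with the actual Space id
    repo_type="space",
    commit_message="Upload app.py with huggingface_hub",
)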
app.py
ADDED
@@ -0,0 +1,146 @@
from fastapi import FastAPI, HTTPException, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
import torch
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import json
import base64
from io import BytesIO
import uvicorn

app = FastAPI(title="VerifAI GradCAM API Simple", description="Simplified API for AI-generated image detection")

# CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class SimpleAIDetector:
    def __init__(self):
        self.device = torch.device('cpu')  # use the CPU to avoid GPU issues
        # Standard ImageNet preprocessing pipeline
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

    def _preprocess_image(self, image):
        """Preprocess the image."""
        if isinstance(image, str):
            if image.startswith('data:image'):
                # Data URI: strip the header and decode the base64 payload
                header, data = image.split(',', 1)
                image_data = base64.b64decode(data)
                image = Image.open(BytesIO(image_data))
            else:
                # Otherwise treat the string as a file path
                image = Image.open(image)

        if image.mode != 'RGB':
            image = image.convert('RGB')

        return image

    def predict_simple(self, image):
        """Simple prediction without a full model."""
        try:
            # Preprocessing
            processed_image = self._preprocess_image(image)

            # Simulated prediction (replace with your real model).
            # For now, the decision is based on the colour variance of the image.
            img_array = np.array(processed_image)
            color_variance = np.var(img_array)

            # Simple heuristic: higher variance means the image is more likely to be real
            if color_variance > 1000:
                prediction = 0  # Real
                confidence = min(0.9, color_variance / 2000)
            else:
                prediction = 1  # AI-Generated
                confidence = min(0.9, 1 - color_variance / 2000)

            # Build a simple placeholder "saliency map" (a colour gradient)
            height, width = img_array.shape[:2]
            gradient = np.zeros((height, width, 3), dtype=np.uint8)
            for i in range(height):
                for j in range(width):
                    gradient[i, j] = [int(255 * i / height), int(255 * j / width), 128]

            # Encode the map as a base64 PNG
            pil_image = Image.fromarray(gradient)
            buffer = BytesIO()
            pil_image.save(buffer, format='PNG')
            cam_base64 = base64.b64encode(buffer.getvalue()).decode()

            result = {
                'prediction': prediction,
                'confidence': confidence,
                'class_probabilities': {
                    # Complementary probabilities for the two classes
                    'Real': confidence if prediction == 0 else 1 - confidence,
                    'AI-Generated': confidence if prediction == 1 else 1 - confidence
                },
                'cam_image': f"data:image/png;base64,{cam_base64}",
                'status': 'success',
                'note': 'Simplified version for testing'
            }

            return result

        except Exception as e:
            return {'status': 'error', 'message': str(e)}

# Initialize the detector
detector = SimpleAIDetector()

@app.get("/")
async def root():
    return {
        "message": "VerifAI GradCAM API Simple",
        "status": "running",
        "version": "1.0-simple"
    }

@app.get("/health")
async def health():
    return {"status": "healthy", "device": str(detector.device)}

@app.post("/predict")
async def predict_image(file: UploadFile = File(...)):
    """Endpoint that analyses an uploaded image."""
    try:
        # Read the image
        image_data = await file.read()
        image = Image.open(BytesIO(image_data))

        # Analyse it
        result = detector.predict_simple(image)

        return result

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/predict-base64")
async def predict_base64(data: dict):
    """Endpoint that analyses a base64-encoded image (data URI)."""
    try:
        if 'image' not in data:
            raise HTTPException(status_code=400, detail="Field 'image' is required")

        image_b64 = data['image']

        # Analyse it
        result = detector.predict_simple(image_b64)

        return result

    except HTTPException:
        # Re-raise client errors (e.g. the 400 above) instead of turning them into a 500
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
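Once the Space is running, the two prediction endpoints can be exercised with a small client script. This is a sketch under the assumption that the server is reachable on localhost:7860 (the port used above) and that sample.jpg is any local test image; note that /predict-base64 expects a data URI, since plain strings without the data:image prefix are treated as file paths by the detector.

import base64
import requests

BASE_URL = "http://localhost:7860"  # assumption: the app runs locally on the port used above

# 1) Multipart upload to /predict
with open("sample.jpg", "rb") as f:
    resp = requests.post(f"{BASE_URL}/predict", files={"file": ("sample.jpg", f, "image/jpeg")})
print(resp.json())  # prediction, confidence, class_probabilities, cam_image, status, note

# 2) Data-URI payload to /predict-base64
with open("sample.jpg", "rb") as f:
    payload = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()
resp = requests.post(f"{BASE_URL}/predict-base64", json={"image": payload})
print(resp.json())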