mrfakename commited on
Commit
b97094d
·
verified ·
1 Parent(s): 8560ac0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -1565
app.py CHANGED
@@ -1,1572 +1,33 @@
1
- import os
2
- from huggingface_hub import HfApi, hf_hub_download
3
- from apscheduler.schedulers.background import BackgroundScheduler
4
- from concurrent.futures import ThreadPoolExecutor
5
- from datetime import datetime
6
- import threading # Added for locking
7
- from sqlalchemy import or_ # Added for vote counting query
8
-
9
# Capture current year/month once at import time.
# NOTE(review): these are not refreshed while the app runs — confirm callers expect that.
year = datetime.now().year
month = datetime.now().month

# Check if running in a Hugging Face Space
IS_SPACES = False
if os.getenv("SPACE_REPO_NAME"):
    print("Running in a Hugging Face Space 🤗")
    IS_SPACES = True

    # Setup database sync for HF Spaces: a fresh Space has no local SQLite
    # file, so pull the latest snapshot from the companion HF dataset.
    if not os.path.exists("instance/tts_arena.db"):
        os.makedirs("instance", exist_ok=True)
        try:
            print("Database not found, downloading from HF dataset...")
            hf_hub_download(
                repo_id="TTS-AGI/database-arena-v2",
                filename="tts_arena.db",
                repo_type="dataset",
                local_dir="instance",
                token=os.getenv("HF_TOKEN"),
            )
            print("Database downloaded successfully ✅")
        except Exception as e:
            # Best-effort: the app can still start with an empty database.
            print(f"Error downloading database from HF dataset: {str(e)} ⚠️")
33
-
34
- from flask import (
35
- Flask,
36
- render_template,
37
- g,
38
- request,
39
- jsonify,
40
- send_file,
41
- redirect,
42
- url_for,
43
- session,
44
- abort,
45
- )
46
- from flask_login import LoginManager, current_user
47
- from models import *
48
- from auth import auth, init_oauth, is_admin
49
- from admin import admin
50
- from security import is_vote_allowed, check_user_security_score, detect_coordinated_voting
51
- import os
52
- from dotenv import load_dotenv
53
- from flask_limiter import Limiter
54
- from flask_limiter.util import get_remote_address
55
- import uuid
56
- import tempfile
57
- import shutil
58
- from tts import predict_tts
59
- import random
60
- import json
61
- from datetime import datetime, timedelta
62
- from flask_migrate import Migrate
63
- import requests
64
- import functools
65
- import time # Added for potential retries
66
-
67
-
68
def get_client_ip():
    """Best-effort client IP, honoring common proxy/CDN headers.

    Preference order: X-Forwarded-For (first hop), X-Real-IP,
    CF-Connecting-IP (Cloudflare), then the raw socket address.
    """
    forwarded = request.headers.get('X-Forwarded-For')
    if forwarded:
        # X-Forwarded-For may hold a comma-separated hop chain;
        # the first entry is the originating client.
        return forwarded.split(',')[0].strip()

    real_ip = request.headers.get('X-Real-IP')
    if real_ip:
        return real_ip

    cf_ip = request.headers.get('CF-Connecting-IP')
    if cf_ip:
        return cf_ip

    return request.remote_addr
80
-
81
-
82
# Load environment variables from .env for local development only;
# HF Spaces injects configuration via real environment variables.
if not IS_SPACES:
    load_dotenv()  # Only load .env if not running in a Hugging Face Space
85
 
86
app = Flask(__name__)

# --- Core Flask / database configuration ---
# NOTE(review): the os.urandom fallback means sessions are invalidated on
# every restart unless SECRET_KEY is set — confirm that is intended.
app.config["SECRET_KEY"] = os.getenv("SECRET_KEY", os.urandom(24))
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv(
    "DATABASE_URI", "sqlite:///tts_arena.db"
)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SESSION_COOKIE_SECURE"] = True
app.config["SESSION_COOKIE_SAMESITE"] = (
    "None" if IS_SPACES else "Lax"
)  # HF Spaces uses iframes to load the app, so we need to set SAMESITE to None
app.config["PERMANENT_SESSION_LIFETIME"] = timedelta(days=30)  # Set to desired duration

# Force HTTPS when running in HuggingFace Spaces
if IS_SPACES:
    app.config["PREFERRED_URL_SCHEME"] = "https"

# Cloudflare Turnstile settings
app.config["TURNSTILE_ENABLED"] = (
    os.getenv("TURNSTILE_ENABLED", "False").lower() == "true"
)
app.config["TURNSTILE_SITE_KEY"] = os.getenv("TURNSTILE_SITE_KEY", "")
app.config["TURNSTILE_SECRET_KEY"] = os.getenv("TURNSTILE_SECRET_KEY", "")
app.config["TURNSTILE_VERIFY_URL"] = (
    "https://challenges.cloudflare.com/turnstile/v0/siteverify"
)

migrate = Migrate(app, db)

# Initialize extensions
db.init_app(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "auth.login"

# Initialize OAuth
init_oauth(app)

# Configure rate limits (in-memory storage: per-process, resets on restart)
limiter = Limiter(
    app=app,
    key_func=get_remote_address,
    default_limits=["2000 per day", "50 per minute"],
    storage_uri="memory://",
)

# TTS Cache Configuration - Read from environment
TTS_CACHE_SIZE = int(os.getenv("TTS_CACHE_SIZE", "10"))
CACHE_AUDIO_SUBDIR = "cache"
tts_cache = {}  # sentence -> {model_a, model_b, audio_a, audio_b, created_at}
tts_cache_lock = threading.Lock()  # guards every read/write of tts_cache
SMOOTHING_FACTOR_MODEL_SELECTION = 500  # For weighted random model selection
# Increased max_workers to 8 for concurrent generation/refill
cache_executor = ThreadPoolExecutor(max_workers=8, thread_name_prefix='CacheReplacer')
all_harvard_sentences = []  # Keep the full list available

# Create temp directories for generated audio
TEMP_AUDIO_DIR = os.path.join(tempfile.gettempdir(), "tts_arena_audio")
CACHE_AUDIO_DIR = os.path.join(TEMP_AUDIO_DIR, CACHE_AUDIO_SUBDIR)
os.makedirs(TEMP_AUDIO_DIR, exist_ok=True)
os.makedirs(CACHE_AUDIO_DIR, exist_ok=True)  # Ensure cache subdir exists


# Store active TTS sessions (session_id -> session dict; see generate_tts)
app.tts_sessions = {}
tts_sessions = app.tts_sessions

# Store active conversational sessions
app.conversational_sessions = {}
conversational_sessions = app.conversational_sessions

# Register blueprints
app.register_blueprint(auth, url_prefix="/auth")
app.register_blueprint(admin)
159
-
160
-
161
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: resolve the id stored in the session cookie
    # back to a User row (user_id arrives as a string).
    return User.query.get(int(user_id))
164
-
165
-
166
@app.before_request
def before_request():
    """Per-request setup: expose the current user/admin flag on `g`,
    force HTTPS behind the HF Spaces proxy, and enforce Cloudflare
    Turnstile verification (with expiry) when enabled."""
    g.user = current_user
    g.is_admin = is_admin(current_user)

    # Ensure HTTPS for HuggingFace Spaces environment
    if IS_SPACES and request.headers.get("X-Forwarded-Proto") == "http":
        url = request.url.replace("http://", "https://", 1)
        return redirect(url, code=301)

    # Check if Turnstile verification is required
    if app.config["TURNSTILE_ENABLED"]:
        # Exclude verification routes (and static assets) to avoid redirect loops
        excluded_routes = ["verify_turnstile", "turnstile_page", "static"]
        if request.endpoint not in excluded_routes:
            # Check if user is verified
            if not session.get("turnstile_verified"):
                # Save original URL for redirect after verification
                redirect_url = request.url
                # Force HTTPS in HuggingFace Spaces
                if IS_SPACES and redirect_url.startswith("http://"):
                    redirect_url = redirect_url.replace("http://", "https://", 1)

                # If it's an API request, return a JSON response
                if request.path.startswith("/api/"):
                    return jsonify({"error": "Turnstile verification required"}), 403
                # For regular requests, redirect to verification page
                return redirect(url_for("turnstile_page", redirect_url=redirect_url))
            else:
                # Check if verification has expired (default: 24 hours)
                verification_timeout = (
                    int(os.getenv("TURNSTILE_TIMEOUT_HOURS", "24")) * 3600
                )  # Convert hours to seconds
                verified_at = session.get("turnstile_verified_at", 0)
                current_time = datetime.utcnow().timestamp()

                if current_time - verified_at > verification_timeout:
                    # Verification expired, clear status and redirect to verification page
                    session.pop("turnstile_verified", None)
                    session.pop("turnstile_verified_at", None)

                    redirect_url = request.url
                    # Force HTTPS in HuggingFace Spaces
                    if IS_SPACES and redirect_url.startswith("http://"):
                        redirect_url = redirect_url.replace("http://", "https://", 1)

                    if request.path.startswith("/api/"):
                        return jsonify({"error": "Turnstile verification expired"}), 403
                    return redirect(
                        url_for("turnstile_page", redirect_url=redirect_url)
                    )
218
-
219
- @app.route("/turnstile", methods=["GET"])
220
- def turnstile_page():
221
- """Display Cloudflare Turnstile verification page"""
222
- redirect_url = request.args.get("redirect_url", url_for("arena", _external=True))
223
 
224
- # Force HTTPS in HuggingFace Spaces
225
- if IS_SPACES and redirect_url.startswith("http://"):
226
- redirect_url = redirect_url.replace("http://", "https://", 1)
227
-
228
- return render_template(
229
- "turnstile.html",
230
- turnstile_site_key=app.config["TURNSTILE_SITE_KEY"],
231
- redirect_url=redirect_url,
232
- )
233
-
234
-
235
- @app.route("/verify-turnstile", methods=["POST"])
236
- def verify_turnstile():
237
- """Verify Cloudflare Turnstile token"""
238
- token = request.form.get("cf-turnstile-response")
239
- redirect_url = request.form.get("redirect_url", url_for("arena", _external=True))
240
-
241
- # Force HTTPS in HuggingFace Spaces
242
- if IS_SPACES and redirect_url.startswith("http://"):
243
- redirect_url = redirect_url.replace("http://", "https://", 1)
244
-
245
- if not token:
246
- # If AJAX request, return JSON error
247
- if request.headers.get("X-Requested-With") == "XMLHttpRequest":
248
- return (
249
- jsonify({"success": False, "error": "Missing verification token"}),
250
- 400,
251
- )
252
- # Otherwise redirect back to turnstile page
253
- return redirect(url_for("turnstile_page", redirect_url=redirect_url))
254
-
255
- # Verify token with Cloudflare
256
- data = {
257
- "secret": app.config["TURNSTILE_SECRET_KEY"],
258
- "response": token,
259
- "remoteip": request.remote_addr,
260
- }
261
-
262
- try:
263
- response = requests.post(app.config["TURNSTILE_VERIFY_URL"], data=data)
264
- result = response.json()
265
-
266
- if result.get("success"):
267
- # Set verification status in session
268
- session["turnstile_verified"] = True
269
- session["turnstile_verified_at"] = datetime.utcnow().timestamp()
270
-
271
- # Determine response type based on request
272
- is_xhr = request.headers.get("X-Requested-With") == "XMLHttpRequest"
273
- accepts_json = "application/json" in request.headers.get("Accept", "")
274
-
275
- # If AJAX or JSON request, return success JSON
276
- if is_xhr or accepts_json:
277
- return jsonify({"success": True, "redirect": redirect_url})
278
-
279
- # For regular form submissions, redirect to the target URL
280
- return redirect(redirect_url)
281
- else:
282
- # Verification failed
283
- app.logger.warning(f"Turnstile verification failed: {result}")
284
-
285
- # If AJAX request, return JSON error
286
- if request.headers.get("X-Requested-With") == "XMLHttpRequest":
287
- return jsonify({"success": False, "error": "Verification failed"}), 403
288
-
289
- # Otherwise redirect back to turnstile page
290
- return redirect(url_for("turnstile_page", redirect_url=redirect_url))
291
-
292
- except Exception as e:
293
- app.logger.error(f"Turnstile verification error: {str(e)}")
294
-
295
- # If AJAX request, return JSON error
296
- if request.headers.get("X-Requested-With") == "XMLHttpRequest":
297
- return (
298
- jsonify(
299
- {"success": False, "error": "Server error during verification"}
300
- ),
301
- 500,
302
- )
303
-
304
- # Otherwise redirect back to turnstile page
305
- return redirect(url_for("turnstile_page", redirect_url=redirect_url))
306
-
307
- with open("sentences.txt", "r") as f, open("emotional_sentences.txt", "r") as f_emotional:
308
- # Store all sentences and clean them up
309
- all_harvard_sentences = [line.strip() for line in f.readlines() if line.strip()] + [line.strip() for line in f_emotional.readlines() if line.strip()]
310
- # Shuffle for initial random selection if needed, but main list remains ordered
311
- initial_sentences = random.sample(all_harvard_sentences, min(len(all_harvard_sentences), 500)) # Limit initial pass for template
312
 
313
  @app.route("/")
314
- def arena():
315
- # Pass a subset of sentences for the random button fallback
316
- return render_template("arena.html", harvard_sentences=json.dumps(initial_sentences))
317
-
318
-
319
- @app.route("/leaderboard")
320
- def leaderboard():
321
- tts_leaderboard = get_leaderboard_data(ModelType.TTS)
322
- conversational_leaderboard = get_leaderboard_data(ModelType.CONVERSATIONAL)
323
- top_voters = get_top_voters(10) # Get top 10 voters
324
-
325
- # Initialize personal leaderboard data
326
- tts_personal_leaderboard = None
327
- conversational_personal_leaderboard = None
328
- user_leaderboard_visibility = None
329
-
330
- # If user is logged in, get their personal leaderboard and visibility setting
331
- if current_user.is_authenticated:
332
- tts_personal_leaderboard = get_user_leaderboard(current_user.id, ModelType.TTS)
333
- conversational_personal_leaderboard = get_user_leaderboard(
334
- current_user.id, ModelType.CONVERSATIONAL
335
- )
336
- user_leaderboard_visibility = current_user.show_in_leaderboard
337
-
338
- # Get key dates for the timeline
339
- tts_key_dates = get_key_historical_dates(ModelType.TTS)
340
- conversational_key_dates = get_key_historical_dates(ModelType.CONVERSATIONAL)
341
-
342
- # Format dates for display in the dropdown
343
- formatted_tts_dates = [date.strftime("%B %Y") for date in tts_key_dates]
344
- formatted_conversational_dates = [
345
- date.strftime("%B %Y") for date in conversational_key_dates
346
- ]
347
-
348
- return render_template(
349
- "leaderboard.html",
350
- tts_leaderboard=tts_leaderboard,
351
- conversational_leaderboard=conversational_leaderboard,
352
- tts_personal_leaderboard=tts_personal_leaderboard,
353
- conversational_personal_leaderboard=conversational_personal_leaderboard,
354
- tts_key_dates=tts_key_dates,
355
- conversational_key_dates=conversational_key_dates,
356
- formatted_tts_dates=formatted_tts_dates,
357
- formatted_conversational_dates=formatted_conversational_dates,
358
- top_voters=top_voters,
359
- user_leaderboard_visibility=user_leaderboard_visibility
360
- )
361
-
362
-
363
- @app.route("/api/historical-leaderboard/<model_type>")
364
- def historical_leaderboard(model_type):
365
- """Get historical leaderboard data for a specific date"""
366
- if model_type not in [ModelType.TTS, ModelType.CONVERSATIONAL]:
367
- return jsonify({"error": "Invalid model type"}), 400
368
-
369
- # Get date from query parameter
370
- date_str = request.args.get("date")
371
- if not date_str:
372
- return jsonify({"error": "Date parameter is required"}), 400
373
-
374
- try:
375
- # Parse date from URL parameter (format: YYYY-MM-DD)
376
- target_date = datetime.strptime(date_str, "%Y-%m-%d")
377
-
378
- # Get historical leaderboard data
379
- leaderboard_data = get_historical_leaderboard_data(model_type, target_date)
380
-
381
- return jsonify(
382
- {"date": target_date.strftime("%B %d, %Y"), "leaderboard": leaderboard_data}
383
- )
384
- except ValueError:
385
- return jsonify({"error": "Invalid date format. Use YYYY-MM-DD"}), 400
386
-
387
-
388
- @app.route("/about")
389
- def about():
390
- return render_template("about.html")
391
-
392
-
393
# --- TTS Caching Functions ---

def generate_and_save_tts(text, model_id, output_dir):
    """Generate TTS audio for `text` with `model_id` and move the result
    into `output_dir` under a fresh UUID-based filename.

    Returns the destination path on success, or None on any failure
    (the intermediate file from predict_tts is cleaned up best-effort).
    """
    tmp_path = None
    try:
        app.logger.debug(f"[TTS Gen {model_id}] Starting generation for: '{text[:30]}...'")
        # predict_tts writes its own temporary file and hands back the location.
        tmp_path = predict_tts(text, model_id)
        app.logger.debug(f"[TTS Gen {model_id}] predict_tts returned: {tmp_path}")

        if not (tmp_path and os.path.exists(tmp_path)):
            app.logger.warning(f"[TTS Gen {model_id}] predict_tts failed or returned invalid path: {tmp_path}")
            raise ValueError("predict_tts did not return a valid path or file does not exist")

        final_path = os.path.join(output_dir, f"{uuid.uuid4()}.wav")
        app.logger.debug(f"[TTS Gen {model_id}] Moving {tmp_path} to {final_path}")
        # Relocate the generated file into the target (cache) directory.
        shutil.move(tmp_path, final_path)
        app.logger.debug(f"[TTS Gen {model_id}] Move successful. Returning {final_path}")
        return final_path

    except Exception as e:
        app.logger.error(f"Error generating/saving TTS for model {model_id} and text '{text[:30]}...': {str(e)}")
        # Best-effort cleanup of whatever predict_tts left behind.
        if tmp_path and os.path.exists(tmp_path):
            try:
                app.logger.debug(f"[TTS Gen {model_id}] Cleaning up temporary file {tmp_path} after error.")
                os.remove(tmp_path)
            except OSError:
                pass  # Ignore error if file couldn't be removed
        return None
426
-
427
-
428
def _generate_cache_entry_task(sentence):
    """Task function to generate audio for a sentence and add to cache.

    When `sentence` is falsy, a replacement sentence not currently cached
    is chosen at random. Two distinct active TTS models are selected and
    both clips are generated concurrently; the pair is only inserted if
    the sentence is still absent and the cache is below TTS_CACHE_SIZE,
    otherwise the freshly generated files are deleted.
    """
    # Wrap the entire task in an application context (runs on executor threads)
    with app.app_context():
        if not sentence:
            # Select a new sentence if not provided (for replacement)
            with tts_cache_lock:
                cached_keys = set(tts_cache.keys())
                available_sentences = [s for s in all_harvard_sentences if s not in cached_keys]
                if not available_sentences:
                    app.logger.warning("No more unique Harvard sentences available for caching.")
                    return
                sentence = random.choice(available_sentences)

        print(f"[Cache Task] Querying models for: '{sentence[:50]}...'")
        available_models = Model.query.filter_by(
            model_type=ModelType.TTS, is_active=True
        ).all()

        if len(available_models) < 2:
            app.logger.error("Not enough active TTS models to generate cache entry.")
            return

        try:
            models = get_weighted_random_models(available_models, 2, ModelType.TTS)
            model_a_id = models[0].id
            model_b_id = models[1].id

            # Generate audio concurrently using a local executor for clarity within the task
            with ThreadPoolExecutor(max_workers=2, thread_name_prefix='AudioGen') as audio_executor:
                future_a = audio_executor.submit(generate_and_save_tts, sentence, model_a_id, CACHE_AUDIO_DIR)
                future_b = audio_executor.submit(generate_and_save_tts, sentence, model_b_id, CACHE_AUDIO_DIR)

                # Bound the wait so a hung model can't pin this worker forever
                timeout_seconds = 120
                audio_a_path = future_a.result(timeout=timeout_seconds)
                audio_b_path = future_b.result(timeout=timeout_seconds)

            if audio_a_path and audio_b_path:
                with tts_cache_lock:
                    # Only add if the sentence isn't already back in the cache
                    # And ensure cache size doesn't exceed limit
                    if sentence not in tts_cache and len(tts_cache) < TTS_CACHE_SIZE:
                        tts_cache[sentence] = {
                            "model_a": model_a_id,
                            "model_b": model_b_id,
                            "audio_a": audio_a_path,
                            "audio_b": audio_b_path,
                            "created_at": datetime.utcnow(),
                        }
                        app.logger.info(f"Successfully cached entry for: '{sentence[:50]}...'")
                    elif sentence in tts_cache:
                        app.logger.warning(f"Sentence '{sentence[:50]}...' already re-cached. Discarding new generation.")
                        # Clean up the newly generated files if not added
                        if os.path.exists(audio_a_path): os.remove(audio_a_path)
                        if os.path.exists(audio_b_path): os.remove(audio_b_path)
                    else:  # Cache is full
                        app.logger.warning(f"Cache is full ({len(tts_cache)} entries). Discarding new generation for '{sentence[:50]}...'.")
                        # Clean up the newly generated files if not added
                        if os.path.exists(audio_a_path): os.remove(audio_a_path)
                        if os.path.exists(audio_b_path): os.remove(audio_b_path)

            else:
                app.logger.error(f"Failed to generate one or both audio files for cache: '{sentence[:50]}...'")
                # Clean up whichever file might have been created
                if audio_a_path and os.path.exists(audio_a_path): os.remove(audio_a_path)
                if audio_b_path and os.path.exists(audio_b_path): os.remove(audio_b_path)

        except Exception as e:
            # Log the exception within the app context
            app.logger.error(f"Exception in _generate_cache_entry_task for '{sentence[:50]}...': {str(e)}", exc_info=True)
499
-
500
-
501
def initialize_tts_cache():
    """Selects initial sentences and starts generation tasks.

    Picks up to TTS_CACHE_SIZE random sentences from the loaded pool and
    submits one _generate_cache_entry_task per sentence to cache_executor.
    """
    # Bug fix: the docstring above previously appeared *after* this print
    # statement, making it a discarded string expression rather than the
    # function's docstring.
    print("Initializing TTS cache")
    with app.app_context():  # Ensure access to models
        if not all_harvard_sentences:
            app.logger.error("Harvard sentences not loaded. Cannot initialize cache.")
            return

        initial_selection = random.sample(all_harvard_sentences, min(len(all_harvard_sentences), TTS_CACHE_SIZE))
        app.logger.info(f"Initializing TTS cache with {len(initial_selection)} sentences...")

        for sentence in initial_selection:
            # Use the main cache_executor for initial population too
            cache_executor.submit(_generate_cache_entry_task, sentence)
        app.logger.info("Submitted initial cache generation tasks.")

# --- End TTS Caching Functions ---
518
-
519
-
520
- @app.route("/api/tts/generate", methods=["POST"])
521
- @limiter.limit("10 per minute") # Keep limit, cached responses are still requests
522
- def generate_tts():
523
- # If verification not setup, handle it first
524
- if app.config["TURNSTILE_ENABLED"] and not session.get("turnstile_verified"):
525
- return jsonify({"error": "Turnstile verification required"}), 403
526
-
527
- # Require user to be logged in to generate audio
528
- if not current_user.is_authenticated:
529
- return jsonify({"error": "You must be logged in to generate audio"}), 401
530
-
531
- data = request.json
532
- text = data.get("text", "").strip() # Ensure text is stripped
533
-
534
- if not text or len(text) > 1000:
535
- return jsonify({"error": "Invalid or too long text"}), 400
536
-
537
- # --- Cache Check ---
538
- cache_hit = False
539
- session_data_from_cache = None
540
- with tts_cache_lock:
541
- if text in tts_cache:
542
- cache_hit = True
543
- cached_entry = tts_cache.pop(text) # Remove from cache immediately
544
- app.logger.info(f"TTS Cache HIT for: '{text[:50]}...'")
545
-
546
- # Prepare session data using cached info
547
- session_id = str(uuid.uuid4())
548
- session_data_from_cache = {
549
- "model_a": cached_entry["model_a"],
550
- "model_b": cached_entry["model_b"],
551
- "audio_a": cached_entry["audio_a"], # Paths are now from cache_dir
552
- "audio_b": cached_entry["audio_b"],
553
- "text": text,
554
- "created_at": datetime.utcnow(),
555
- "expires_at": datetime.utcnow() + timedelta(minutes=30),
556
- "voted": False,
557
- "cache_hit": True,
558
- }
559
- app.tts_sessions[session_id] = session_data_from_cache
560
-
561
- # --- Trigger background tasks to refill the cache ---
562
- # Calculate how many slots need refilling
563
- current_cache_size = len(tts_cache) # Size *before* adding potentially new items
564
- needed_refills = TTS_CACHE_SIZE - current_cache_size
565
- # Limit concurrent refills to 8 or the actual need
566
- refills_to_submit = min(needed_refills, 8)
567
-
568
- if refills_to_submit > 0:
569
- app.logger.info(f"Cache hit: Submitting {refills_to_submit} background task(s) to refill cache (current size: {current_cache_size}, target: {TTS_CACHE_SIZE}).")
570
- for _ in range(refills_to_submit):
571
- # Pass None to signal replacement selection within the task
572
- cache_executor.submit(_generate_cache_entry_task, None)
573
- else:
574
- app.logger.info(f"Cache hit: Cache is already full or at target size ({current_cache_size}/{TTS_CACHE_SIZE}). No refill tasks submitted.")
575
- # --- End Refill Trigger ---
576
-
577
- if cache_hit and session_data_from_cache:
578
- # Return response using cached data
579
- # Note: The files are now managed by the session lifecycle (cleanup_session)
580
- return jsonify(
581
- {
582
- "session_id": session_id,
583
- "audio_a": f"/api/tts/audio/{session_id}/a",
584
- "audio_b": f"/api/tts/audio/{session_id}/b",
585
- "expires_in": 1800, # 30 minutes in seconds
586
- "cache_hit": True,
587
- }
588
- )
589
- # --- End Cache Check ---
590
-
591
- # --- Cache Miss: Generate on the fly ---
592
- app.logger.info(f"TTS Cache MISS for: '{text[:50]}...'. Generating on the fly.")
593
- available_models = Model.query.filter_by(
594
- model_type=ModelType.TTS, is_active=True
595
- ).all()
596
- if len(available_models) < 2:
597
- return jsonify({"error": "Not enough TTS models available"}), 500
598
-
599
- selected_models = get_weighted_random_models(available_models, 2, ModelType.TTS)
600
-
601
- try:
602
- audio_files = []
603
- model_ids = []
604
-
605
- # Function to process a single model (generate directly to TEMP_AUDIO_DIR, not cache subdir)
606
- def process_model_on_the_fly(model):
607
- # Generate and save directly to the main temp dir
608
- # Assume predict_tts handles saving temporary files
609
- temp_audio_path = predict_tts(text, model.id)
610
- if not temp_audio_path or not os.path.exists(temp_audio_path):
611
- raise ValueError(f"predict_tts failed for model {model.id}")
612
-
613
- # Create a unique name in the main TEMP_AUDIO_DIR for the session
614
- file_uuid = str(uuid.uuid4())
615
- dest_path = os.path.join(TEMP_AUDIO_DIR, f"{file_uuid}.wav")
616
- shutil.move(temp_audio_path, dest_path) # Move from predict_tts's temp location
617
-
618
- return {"model_id": model.id, "audio_path": dest_path}
619
-
620
-
621
- # Use ThreadPoolExecutor to process models concurrently
622
- with ThreadPoolExecutor(max_workers=2) as executor:
623
- results = list(executor.map(process_model_on_the_fly, selected_models))
624
-
625
- # Extract results
626
- for result in results:
627
- model_ids.append(result["model_id"])
628
- audio_files.append(result["audio_path"])
629
-
630
- # Create session
631
- session_id = str(uuid.uuid4())
632
- app.tts_sessions[session_id] = {
633
- "model_a": model_ids[0],
634
- "model_b": model_ids[1],
635
- "audio_a": audio_files[0], # Paths are now from TEMP_AUDIO_DIR directly
636
- "audio_b": audio_files[1],
637
- "text": text,
638
- "created_at": datetime.utcnow(),
639
- "expires_at": datetime.utcnow() + timedelta(minutes=30),
640
- "voted": False,
641
- "cache_hit": False,
642
- }
643
-
644
- # Return audio file paths and session
645
- return jsonify(
646
- {
647
- "session_id": session_id,
648
- "audio_a": f"/api/tts/audio/{session_id}/a",
649
- "audio_b": f"/api/tts/audio/{session_id}/b",
650
- "expires_in": 1800,
651
- "cache_hit": False,
652
- }
653
- )
654
-
655
- except Exception as e:
656
- app.logger.error(f"TTS on-the-fly generation error: {str(e)}", exc_info=True)
657
- # Cleanup any files potentially created during the failed attempt
658
- if 'results' in locals():
659
- for res in results:
660
- if 'audio_path' in res and os.path.exists(res['audio_path']):
661
- try:
662
- os.remove(res['audio_path'])
663
- except OSError:
664
- pass
665
- return jsonify({"error": "Failed to generate TTS"}), 500
666
- # --- End Cache Miss ---
667
-
668
-
669
- @app.route("/api/tts/audio/<session_id>/<model_key>")
670
- def get_audio(session_id, model_key):
671
- # If verification not setup, handle it first
672
- if app.config["TURNSTILE_ENABLED"] and not session.get("turnstile_verified"):
673
- return jsonify({"error": "Turnstile verification required"}), 403
674
-
675
- if session_id not in app.tts_sessions:
676
- return jsonify({"error": "Invalid or expired session"}), 404
677
-
678
- session_data = app.tts_sessions[session_id]
679
-
680
- # Check if session expired
681
- if datetime.utcnow() > session_data["expires_at"]:
682
- cleanup_session(session_id)
683
- return jsonify({"error": "Session expired"}), 410
684
-
685
- if model_key == "a":
686
- audio_path = session_data["audio_a"]
687
- elif model_key == "b":
688
- audio_path = session_data["audio_b"]
689
- else:
690
- return jsonify({"error": "Invalid model key"}), 400
691
-
692
- # Check if file exists
693
- if not os.path.exists(audio_path):
694
- return jsonify({"error": "Audio file not found"}), 404
695
-
696
- return send_file(audio_path, mimetype="audio/wav")
697
-
698
-
699
- @app.route("/api/tts/vote", methods=["POST"])
700
- @limiter.limit("30 per minute")
701
- def submit_vote():
702
- # If verification not setup, handle it first
703
- if app.config["TURNSTILE_ENABLED"] and not session.get("turnstile_verified"):
704
- return jsonify({"error": "Turnstile verification required"}), 403
705
-
706
- # Require user to be logged in to vote
707
- if not current_user.is_authenticated:
708
- return jsonify({"error": "You must be logged in to vote"}), 401
709
-
710
- # Security checks for vote manipulation prevention
711
- client_ip = get_client_ip()
712
- vote_allowed, security_reason, security_score = is_vote_allowed(current_user.id, client_ip)
713
-
714
- if not vote_allowed:
715
- app.logger.warning(f"Vote blocked for user {current_user.username} (ID: {current_user.id}): {security_reason} (Score: {security_score})")
716
- return jsonify({"error": f"Vote not allowed: {security_reason}"}), 403
717
-
718
- data = request.json
719
- session_id = data.get("session_id")
720
- chosen_model_key = data.get("chosen_model") # "a" or "b"
721
-
722
- if not session_id or session_id not in app.tts_sessions:
723
- return jsonify({"error": "Invalid or expired session"}), 404
724
-
725
- if not chosen_model_key or chosen_model_key not in ["a", "b"]:
726
- return jsonify({"error": "Invalid chosen model"}), 400
727
-
728
- session_data = app.tts_sessions[session_id]
729
-
730
- # Check if session expired
731
- if datetime.utcnow() > session_data["expires_at"]:
732
- cleanup_session(session_id)
733
- return jsonify({"error": "Session expired"}), 410
734
-
735
- # Check if already voted
736
- if session_data["voted"]:
737
- return jsonify({"error": "Vote already submitted for this session"}), 400
738
-
739
- # Get model IDs and audio paths
740
- chosen_id = (
741
- session_data["model_a"] if chosen_model_key == "a" else session_data["model_b"]
742
- )
743
- rejected_id = (
744
- session_data["model_b"] if chosen_model_key == "a" else session_data["model_a"]
745
- )
746
- chosen_audio_path = (
747
- session_data["audio_a"] if chosen_model_key == "a" else session_data["audio_b"]
748
- )
749
- rejected_audio_path = (
750
- session_data["audio_b"] if chosen_model_key == "a" else session_data["audio_a"]
751
- )
752
-
753
- # Calculate session duration and gather analytics data
754
- vote_time = datetime.utcnow()
755
- session_duration = (vote_time - session_data["created_at"]).total_seconds()
756
- client_ip = get_client_ip()
757
- user_agent = request.headers.get('User-Agent')
758
- cache_hit = session_data.get("cache_hit", False)
759
-
760
- # Record vote in database with analytics data
761
- vote, error = record_vote(
762
- current_user.id,
763
- session_data["text"],
764
- chosen_id,
765
- rejected_id,
766
- ModelType.TTS,
767
- session_duration=session_duration,
768
- ip_address=client_ip,
769
- user_agent=user_agent,
770
- generation_date=session_data["created_at"],
771
- cache_hit=cache_hit
772
- )
773
-
774
- if error:
775
- return jsonify({"error": error}), 500
776
-
777
- # --- Save preference data ---
778
- try:
779
- vote_uuid = str(uuid.uuid4())
780
- vote_dir = os.path.join("./votes", vote_uuid)
781
- os.makedirs(vote_dir, exist_ok=True)
782
-
783
- # Copy audio files
784
- shutil.copy(chosen_audio_path, os.path.join(vote_dir, "chosen.wav"))
785
- shutil.copy(rejected_audio_path, os.path.join(vote_dir, "rejected.wav"))
786
-
787
- # Create metadata
788
- chosen_model_obj = Model.query.get(chosen_id)
789
- rejected_model_obj = Model.query.get(rejected_id)
790
- metadata = {
791
- "text": session_data["text"],
792
- "chosen_model": chosen_model_obj.name if chosen_model_obj else "Unknown",
793
- "chosen_model_id": chosen_model_obj.id if chosen_model_obj else "Unknown",
794
- "rejected_model": rejected_model_obj.name if rejected_model_obj else "Unknown",
795
- "rejected_model_id": rejected_model_obj.id if rejected_model_obj else "Unknown",
796
- "session_id": session_id,
797
- "timestamp": datetime.utcnow().isoformat(),
798
- "username": current_user.username,
799
- "model_type": "TTS"
800
- }
801
- with open(os.path.join(vote_dir, "metadata.json"), "w") as f:
802
- json.dump(metadata, f, indent=2)
803
-
804
- except Exception as e:
805
- app.logger.error(f"Error saving preference data for vote {session_id}: {str(e)}")
806
- # Continue even if saving preference data fails, vote is already recorded
807
-
808
- # Mark session as voted
809
- session_data["voted"] = True
810
-
811
- # Check for coordinated voting campaigns (async to not slow down response)
812
- try:
813
- from threading import Thread
814
- campaign_check_thread = Thread(target=check_for_coordinated_campaigns)
815
- campaign_check_thread.daemon = True
816
- campaign_check_thread.start()
817
- except Exception as e:
818
- app.logger.error(f"Error starting coordinated campaign check thread: {str(e)}")
819
-
820
- # Return updated models (use previously fetched objects)
821
- return jsonify(
822
- {
823
- "success": True,
824
- "chosen_model": {"id": chosen_id, "name": chosen_model_obj.name if chosen_model_obj else "Unknown"},
825
- "rejected_model": {
826
- "id": rejected_id,
827
- "name": rejected_model_obj.name if rejected_model_obj else "Unknown",
828
- },
829
- "names": {
830
- "a": (
831
- chosen_model_obj.name if chosen_model_key == "a" else rejected_model_obj.name
832
- if chosen_model_obj and rejected_model_obj else "Unknown"
833
- ),
834
- "b": (
835
- rejected_model_obj.name if chosen_model_key == "a" else chosen_model_obj.name
836
- if chosen_model_obj and rejected_model_obj else "Unknown"
837
- ),
838
- },
839
- }
840
- )
841
-
842
-
843
- def cleanup_session(session_id):
844
- """Remove session and its audio files"""
845
- if session_id in app.tts_sessions:
846
- session = app.tts_sessions[session_id]
847
-
848
- # Remove audio files
849
- for audio_file in [session["audio_a"], session["audio_b"]]:
850
- if os.path.exists(audio_file):
851
- try:
852
- os.remove(audio_file)
853
- except Exception as e:
854
- app.logger.error(f"Error removing audio file: {str(e)}")
855
-
856
- # Remove session
857
- del app.tts_sessions[session_id]
858
-
859
-
860
- @app.route("/api/conversational/generate", methods=["POST"])
861
- @limiter.limit("5 per minute")
862
- def generate_podcast():
863
- # If verification not setup, handle it first
864
- if app.config["TURNSTILE_ENABLED"] and not session.get("turnstile_verified"):
865
- return jsonify({"error": "Turnstile verification required"}), 403
866
-
867
- # Require user to be logged in to generate audio
868
- if not current_user.is_authenticated:
869
- return jsonify({"error": "You must be logged in to generate audio"}), 401
870
-
871
- data = request.json
872
- script = data.get("script")
873
-
874
- if not script or not isinstance(script, list) or len(script) < 2:
875
- return jsonify({"error": "Invalid script format or too short"}), 400
876
-
877
- # Validate script format
878
- for line in script:
879
- if not isinstance(line, dict) or "text" not in line or "speaker_id" not in line:
880
- return (
881
- jsonify(
882
- {
883
- "error": "Invalid script line format. Each line must have text and speaker_id"
884
- }
885
- ),
886
- 400,
887
- )
888
- if (
889
- not line["text"]
890
- or not isinstance(line["speaker_id"], int)
891
- or line["speaker_id"] not in [0, 1]
892
- ):
893
- return (
894
- jsonify({"error": "Invalid script content. Speaker ID must be 0 or 1"}),
895
- 400,
896
- )
897
-
898
- # Get two conversational models (currently only CSM and PlayDialog)
899
- available_models = Model.query.filter_by(
900
- model_type=ModelType.CONVERSATIONAL, is_active=True
901
- ).all()
902
-
903
- if len(available_models) < 2:
904
- return jsonify({"error": "Not enough conversational models available"}), 500
905
-
906
- selected_models = get_weighted_random_models(available_models, 2, ModelType.CONVERSATIONAL)
907
-
908
- try:
909
- # Generate audio for both models concurrently
910
- audio_files = []
911
- model_ids = []
912
-
913
- # Function to process a single model
914
- def process_model(model):
915
- # Call conversational TTS service
916
- audio_content = predict_tts(script, model.id)
917
-
918
- # Save to temp file with unique name
919
- file_uuid = str(uuid.uuid4())
920
- dest_path = os.path.join(TEMP_AUDIO_DIR, f"{file_uuid}.wav")
921
-
922
- with open(dest_path, "wb") as f:
923
- f.write(audio_content)
924
-
925
- return {"model_id": model.id, "audio_path": dest_path}
926
-
927
- # Use ThreadPoolExecutor to process models concurrently
928
- with ThreadPoolExecutor(max_workers=2) as executor:
929
- results = list(executor.map(process_model, selected_models))
930
-
931
- # Extract results
932
- for result in results:
933
- model_ids.append(result["model_id"])
934
- audio_files.append(result["audio_path"])
935
-
936
- # Create session
937
- session_id = str(uuid.uuid4())
938
- script_text = " ".join([line["text"] for line in script])
939
- app.conversational_sessions[session_id] = {
940
- "model_a": model_ids[0],
941
- "model_b": model_ids[1],
942
- "audio_a": audio_files[0],
943
- "audio_b": audio_files[1],
944
- "text": script_text[:1000], # Limit text length
945
- "created_at": datetime.utcnow(),
946
- "expires_at": datetime.utcnow() + timedelta(minutes=30),
947
- "voted": False,
948
- "script": script,
949
- "cache_hit": False, # Conversational is always generated on-demand
950
- }
951
-
952
- # Return audio file paths and session
953
- return jsonify(
954
- {
955
- "session_id": session_id,
956
- "audio_a": f"/api/conversational/audio/{session_id}/a",
957
- "audio_b": f"/api/conversational/audio/{session_id}/b",
958
- "expires_in": 1800, # 30 minutes in seconds
959
- }
960
- )
961
-
962
- except Exception as e:
963
- app.logger.error(f"Conversational generation error: {str(e)}")
964
- return jsonify({"error": f"Failed to generate podcast: {str(e)}"}), 500
965
-
966
-
967
- @app.route("/api/conversational/audio/<session_id>/<model_key>")
968
- def get_podcast_audio(session_id, model_key):
969
- # If verification not setup, handle it first
970
- if app.config["TURNSTILE_ENABLED"] and not session.get("turnstile_verified"):
971
- return jsonify({"error": "Turnstile verification required"}), 403
972
-
973
- if session_id not in app.conversational_sessions:
974
- return jsonify({"error": "Invalid or expired session"}), 404
975
-
976
- session_data = app.conversational_sessions[session_id]
977
-
978
- # Check if session expired
979
- if datetime.utcnow() > session_data["expires_at"]:
980
- cleanup_conversational_session(session_id)
981
- return jsonify({"error": "Session expired"}), 410
982
-
983
- if model_key == "a":
984
- audio_path = session_data["audio_a"]
985
- elif model_key == "b":
986
- audio_path = session_data["audio_b"]
987
- else:
988
- return jsonify({"error": "Invalid model key"}), 400
989
-
990
- # Check if file exists
991
- if not os.path.exists(audio_path):
992
- return jsonify({"error": "Audio file not found"}), 404
993
-
994
- return send_file(audio_path, mimetype="audio/wav")
995
-
996
-
997
- @app.route("/api/conversational/vote", methods=["POST"])
998
- @limiter.limit("30 per minute")
999
- def submit_podcast_vote():
1000
- # If verification not setup, handle it first
1001
- if app.config["TURNSTILE_ENABLED"] and not session.get("turnstile_verified"):
1002
- return jsonify({"error": "Turnstile verification required"}), 403
1003
-
1004
- # Require user to be logged in to vote
1005
- if not current_user.is_authenticated:
1006
- return jsonify({"error": "You must be logged in to vote"}), 401
1007
-
1008
- # Security checks for vote manipulation prevention
1009
- client_ip = get_client_ip()
1010
- vote_allowed, security_reason, security_score = is_vote_allowed(current_user.id, client_ip)
1011
-
1012
- if not vote_allowed:
1013
- app.logger.warning(f"Conversational vote blocked for user {current_user.username} (ID: {current_user.id}): {security_reason} (Score: {security_score})")
1014
- return jsonify({"error": f"Vote not allowed: {security_reason}"}), 403
1015
-
1016
- data = request.json
1017
- session_id = data.get("session_id")
1018
- chosen_model_key = data.get("chosen_model") # "a" or "b"
1019
-
1020
- if not session_id or session_id not in app.conversational_sessions:
1021
- return jsonify({"error": "Invalid or expired session"}), 404
1022
-
1023
- if not chosen_model_key or chosen_model_key not in ["a", "b"]:
1024
- return jsonify({"error": "Invalid chosen model"}), 400
1025
-
1026
- session_data = app.conversational_sessions[session_id]
1027
-
1028
- # Check if session expired
1029
- if datetime.utcnow() > session_data["expires_at"]:
1030
- cleanup_conversational_session(session_id)
1031
- return jsonify({"error": "Session expired"}), 410
1032
-
1033
- # Check if already voted
1034
- if session_data["voted"]:
1035
- return jsonify({"error": "Vote already submitted for this session"}), 400
1036
-
1037
- # Get model IDs and audio paths
1038
- chosen_id = (
1039
- session_data["model_a"] if chosen_model_key == "a" else session_data["model_b"]
1040
- )
1041
- rejected_id = (
1042
- session_data["model_b"] if chosen_model_key == "a" else session_data["model_a"]
1043
- )
1044
- chosen_audio_path = (
1045
- session_data["audio_a"] if chosen_model_key == "a" else session_data["audio_b"]
1046
- )
1047
- rejected_audio_path = (
1048
- session_data["audio_b"] if chosen_model_key == "a" else session_data["audio_a"]
1049
- )
1050
-
1051
- # Calculate session duration and gather analytics data
1052
- vote_time = datetime.utcnow()
1053
- session_duration = (vote_time - session_data["created_at"]).total_seconds()
1054
- client_ip = get_client_ip()
1055
- user_agent = request.headers.get('User-Agent')
1056
- cache_hit = session_data.get("cache_hit", False)
1057
-
1058
- # Record vote in database with analytics data
1059
- vote, error = record_vote(
1060
- current_user.id,
1061
- session_data["text"],
1062
- chosen_id,
1063
- rejected_id,
1064
- ModelType.CONVERSATIONAL,
1065
- session_duration=session_duration,
1066
- ip_address=client_ip,
1067
- user_agent=user_agent,
1068
- generation_date=session_data["created_at"],
1069
- cache_hit=cache_hit
1070
- )
1071
-
1072
- if error:
1073
- return jsonify({"error": error}), 500
1074
-
1075
- # --- Save preference data ---\
1076
- try:
1077
- vote_uuid = str(uuid.uuid4())
1078
- vote_dir = os.path.join("./votes", vote_uuid)
1079
- os.makedirs(vote_dir, exist_ok=True)
1080
-
1081
- # Copy audio files
1082
- shutil.copy(chosen_audio_path, os.path.join(vote_dir, "chosen.wav"))
1083
- shutil.copy(rejected_audio_path, os.path.join(vote_dir, "rejected.wav"))
1084
-
1085
- # Create metadata
1086
- chosen_model_obj = Model.query.get(chosen_id)
1087
- rejected_model_obj = Model.query.get(rejected_id)
1088
- metadata = {
1089
- "script": session_data["script"], # Save the full script
1090
- "chosen_model": chosen_model_obj.name if chosen_model_obj else "Unknown",
1091
- "chosen_model_id": chosen_model_obj.id if chosen_model_obj else "Unknown",
1092
- "rejected_model": rejected_model_obj.name if rejected_model_obj else "Unknown",
1093
- "rejected_model_id": rejected_model_obj.id if rejected_model_obj else "Unknown",
1094
- "session_id": session_id,
1095
- "timestamp": datetime.utcnow().isoformat(),
1096
- "username": current_user.username,
1097
- "model_type": "CONVERSATIONAL"
1098
- }
1099
- with open(os.path.join(vote_dir, "metadata.json"), "w") as f:
1100
- json.dump(metadata, f, indent=2)
1101
-
1102
- except Exception as e:
1103
- app.logger.error(f"Error saving preference data for conversational vote {session_id}: {str(e)}")
1104
- # Continue even if saving preference data fails, vote is already recorded
1105
-
1106
- # Mark session as voted
1107
- session_data["voted"] = True
1108
-
1109
- # Check for coordinated voting campaigns (async to not slow down response)
1110
- try:
1111
- from threading import Thread
1112
- campaign_check_thread = Thread(target=check_for_coordinated_campaigns)
1113
- campaign_check_thread.daemon = True
1114
- campaign_check_thread.start()
1115
- except Exception as e:
1116
- app.logger.error(f"Error starting coordinated campaign check thread: {str(e)}")
1117
-
1118
- # Return updated models (use previously fetched objects)
1119
- return jsonify(
1120
- {
1121
- "success": True,
1122
- "chosen_model": {"id": chosen_id, "name": chosen_model_obj.name if chosen_model_obj else "Unknown"},
1123
- "rejected_model": {
1124
- "id": rejected_id,
1125
- "name": rejected_model_obj.name if rejected_model_obj else "Unknown",
1126
- },
1127
- "names": {
1128
- "a": Model.query.get(session_data["model_a"]).name,
1129
- "b": Model.query.get(session_data["model_b"]).name,
1130
- },
1131
- }
1132
- )
1133
-
1134
-
1135
- def cleanup_conversational_session(session_id):
1136
- """Remove conversational session and its audio files"""
1137
- if session_id in app.conversational_sessions:
1138
- session = app.conversational_sessions[session_id]
1139
-
1140
- # Remove audio files
1141
- for audio_file in [session["audio_a"], session["audio_b"]]:
1142
- if os.path.exists(audio_file):
1143
- try:
1144
- os.remove(audio_file)
1145
- except Exception as e:
1146
- app.logger.error(
1147
- f"Error removing conversational audio file: {str(e)}"
1148
- )
1149
-
1150
- # Remove session
1151
- del app.conversational_sessions[session_id]
1152
-
1153
-
1154
- # Schedule periodic cleanup
1155
- def setup_cleanup():
1156
- def cleanup_expired_sessions():
1157
- with app.app_context(): # Ensure app context for logging
1158
- current_time = datetime.utcnow()
1159
- # Cleanup TTS sessions
1160
- expired_tts_sessions = [
1161
- sid
1162
- for sid, session_data in app.tts_sessions.items()
1163
- if current_time > session_data["expires_at"]
1164
- ]
1165
- for sid in expired_tts_sessions:
1166
- cleanup_session(sid)
1167
-
1168
- # Cleanup conversational sessions
1169
- expired_conv_sessions = [
1170
- sid
1171
- for sid, session_data in app.conversational_sessions.items()
1172
- if current_time > session_data["expires_at"]
1173
- ]
1174
- for sid in expired_conv_sessions:
1175
- cleanup_conversational_session(sid)
1176
- app.logger.info(f"Cleaned up {len(expired_tts_sessions)} TTS and {len(expired_conv_sessions)} conversational sessions.")
1177
-
1178
- # Also cleanup potentially expired cache entries (e.g., > 1 hour old)
1179
- # This prevents stale cache entries if generation is slow or failing
1180
- # cleanup_stale_cache_entries()
1181
-
1182
- # Run cleanup every 15 minutes
1183
- scheduler = BackgroundScheduler(daemon=True) # Run scheduler as daemon thread
1184
- scheduler.add_job(cleanup_expired_sessions, "interval", minutes=15)
1185
- scheduler.start()
1186
- print("Cleanup scheduler started") # Use print for startup messages
1187
-
1188
-
1189
- # Schedule periodic tasks (database sync and preference upload)
1190
- def setup_periodic_tasks():
1191
- """Setup periodic database synchronization and preference data upload for Spaces"""
1192
- if not IS_SPACES:
1193
- return
1194
-
1195
- db_path = app.config["SQLALCHEMY_DATABASE_URI"].replace("sqlite:///", "instance/") # Get relative path
1196
- preferences_repo_id = "TTS-AGI/arena-v2-preferences"
1197
- database_repo_id = "TTS-AGI/database-arena-v2"
1198
- votes_dir = "./votes"
1199
-
1200
- def sync_database():
1201
- """Uploads the database to HF dataset"""
1202
- with app.app_context(): # Ensure app context for logging
1203
- try:
1204
- if not os.path.exists(db_path):
1205
- app.logger.warning(f"Database file not found at {db_path}, skipping sync.")
1206
- return
1207
-
1208
- api = HfApi(token=os.getenv("HF_TOKEN"))
1209
- api.upload_file(
1210
- path_or_fileobj=db_path,
1211
- path_in_repo="tts_arena.db",
1212
- repo_id=database_repo_id,
1213
- repo_type="dataset",
1214
- )
1215
- app.logger.info(f"Database uploaded to {database_repo_id} at {datetime.utcnow()}")
1216
- except Exception as e:
1217
- app.logger.error(f"Error uploading database to {database_repo_id}: {str(e)}")
1218
-
1219
- def sync_preferences_data():
1220
- """Zips and uploads preference data folders in batches to HF dataset"""
1221
- with app.app_context(): # Ensure app context for logging
1222
- if not os.path.isdir(votes_dir):
1223
- return # Don't log every 5 mins if dir doesn't exist yet
1224
-
1225
- temp_batch_dir = None # Initialize to manage cleanup
1226
- temp_individual_zip_dir = None # Initialize for individual zips
1227
- local_batch_zip_path = None # Initialize for batch zip path
1228
-
1229
- try:
1230
- api = HfApi(token=os.getenv("HF_TOKEN"))
1231
- vote_uuids = [d for d in os.listdir(votes_dir) if os.path.isdir(os.path.join(votes_dir, d))]
1232
-
1233
- if not vote_uuids:
1234
- return # No data to process
1235
-
1236
- app.logger.info(f"Found {len(vote_uuids)} vote directories to process.")
1237
-
1238
- # Create temporary directories
1239
- temp_batch_dir = tempfile.mkdtemp(prefix="hf_batch_")
1240
- temp_individual_zip_dir = tempfile.mkdtemp(prefix="hf_indiv_zips_")
1241
- app.logger.debug(f"Created temp directories: {temp_batch_dir}, {temp_individual_zip_dir}")
1242
-
1243
- processed_vote_dirs = []
1244
- individual_zips_in_batch = []
1245
-
1246
- # 1. Create individual zips and move them to the batch directory
1247
- for vote_uuid in vote_uuids:
1248
- dir_path = os.path.join(votes_dir, vote_uuid)
1249
- individual_zip_base_path = os.path.join(temp_individual_zip_dir, vote_uuid)
1250
- individual_zip_path = f"{individual_zip_base_path}.zip"
1251
-
1252
- try:
1253
- shutil.make_archive(individual_zip_base_path, 'zip', dir_path)
1254
- app.logger.debug(f"Created individual zip: {individual_zip_path}")
1255
-
1256
- # Move the created zip into the batch directory
1257
- final_individual_zip_path = os.path.join(temp_batch_dir, f"{vote_uuid}.zip")
1258
- shutil.move(individual_zip_path, final_individual_zip_path)
1259
- app.logger.debug(f"Moved individual zip to batch dir: {final_individual_zip_path}")
1260
-
1261
- processed_vote_dirs.append(dir_path) # Mark original dir for later cleanup
1262
- individual_zips_in_batch.append(final_individual_zip_path)
1263
-
1264
- except Exception as zip_err:
1265
- app.logger.error(f"Error creating or moving zip for {vote_uuid}: {str(zip_err)}")
1266
- # Clean up partial zip if it exists
1267
- if os.path.exists(individual_zip_path):
1268
- try:
1269
- os.remove(individual_zip_path)
1270
- except OSError:
1271
- pass
1272
- # Continue processing other votes
1273
-
1274
- # Clean up the temporary dir used for creating individual zips
1275
- shutil.rmtree(temp_individual_zip_dir)
1276
- temp_individual_zip_dir = None # Mark as cleaned
1277
- app.logger.debug("Cleaned up temporary individual zip directory.")
1278
-
1279
- if not individual_zips_in_batch:
1280
- app.logger.warning("No individual zips were successfully created for batching.")
1281
- # Clean up batch dir if it's empty or only contains failed attempts
1282
- if temp_batch_dir and os.path.exists(temp_batch_dir):
1283
- shutil.rmtree(temp_batch_dir)
1284
- temp_batch_dir = None
1285
- return
1286
-
1287
- # 2. Create the batch zip file
1288
- batch_timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
1289
- batch_uuid_short = str(uuid.uuid4())[:8]
1290
- batch_zip_filename = f"{batch_timestamp}_batch_{batch_uuid_short}.zip"
1291
- # Create batch zip in a standard temp location first
1292
- local_batch_zip_base = os.path.join(tempfile.gettempdir(), batch_zip_filename.replace('.zip', ''))
1293
- local_batch_zip_path = f"{local_batch_zip_base}.zip"
1294
-
1295
- app.logger.info(f"Creating batch zip: {local_batch_zip_path} with {len(individual_zips_in_batch)} individual zips.")
1296
- shutil.make_archive(local_batch_zip_base, 'zip', temp_batch_dir)
1297
- app.logger.info(f"Batch zip created successfully: {local_batch_zip_path}")
1298
-
1299
- # 3. Upload the batch zip file
1300
- hf_repo_path = f"votes/{year}/{month}/{batch_zip_filename}"
1301
- app.logger.info(f"Uploading batch zip to HF Hub: {preferences_repo_id}/{hf_repo_path}")
1302
-
1303
- api.upload_file(
1304
- path_or_fileobj=local_batch_zip_path,
1305
- path_in_repo=hf_repo_path,
1306
- repo_id=preferences_repo_id,
1307
- repo_type="dataset",
1308
- commit_message=f"Add batch preference data {batch_zip_filename} ({len(individual_zips_in_batch)} votes)"
1309
- )
1310
- app.logger.info(f"Successfully uploaded batch {batch_zip_filename} to {preferences_repo_id}")
1311
-
1312
- # 4. Cleanup after successful upload
1313
- app.logger.info("Cleaning up local files after successful upload.")
1314
- # Remove original vote directories that were successfully zipped and uploaded
1315
- for dir_path in processed_vote_dirs:
1316
- try:
1317
- shutil.rmtree(dir_path)
1318
- app.logger.debug(f"Removed original vote directory: {dir_path}")
1319
- except OSError as e:
1320
- app.logger.error(f"Error removing processed vote directory {dir_path}: {str(e)}")
1321
-
1322
- # Remove the temporary batch directory (containing the individual zips)
1323
- shutil.rmtree(temp_batch_dir)
1324
- temp_batch_dir = None
1325
- app.logger.debug("Removed temporary batch directory.")
1326
-
1327
- # Remove the local batch zip file
1328
- os.remove(local_batch_zip_path)
1329
- local_batch_zip_path = None
1330
- app.logger.debug("Removed local batch zip file.")
1331
-
1332
- app.logger.info(f"Finished preference data sync. Uploaded batch {batch_zip_filename}.")
1333
-
1334
- except Exception as e:
1335
- app.logger.error(f"Error during preference data batch sync: {str(e)}", exc_info=True)
1336
- # If upload failed, the local batch zip might exist, clean it up.
1337
- if local_batch_zip_path and os.path.exists(local_batch_zip_path):
1338
- try:
1339
- os.remove(local_batch_zip_path)
1340
- app.logger.debug("Cleaned up local batch zip after failed upload.")
1341
- except OSError as clean_err:
1342
- app.logger.error(f"Error cleaning up batch zip after failed upload: {clean_err}")
1343
- # Do NOT remove temp_batch_dir if it exists; its contents will be retried next time.
1344
- # Do NOT remove original vote directories if upload failed.
1345
-
1346
- finally:
1347
- # Final cleanup for temporary directories in case of unexpected exits
1348
- if temp_individual_zip_dir and os.path.exists(temp_individual_zip_dir):
1349
- try:
1350
- shutil.rmtree(temp_individual_zip_dir)
1351
- except Exception as final_clean_err:
1352
- app.logger.error(f"Error in final cleanup (indiv zips): {final_clean_err}")
1353
- # Only clean up batch dir in finally block if it *wasn't* kept intentionally after upload failure
1354
- if temp_batch_dir and os.path.exists(temp_batch_dir):
1355
- # Check if an upload attempt happened and failed
1356
- upload_failed = 'e' in locals() and isinstance(e, Exception) # Crude check if exception occurred
1357
- if not upload_failed: # If no upload error or upload succeeded, clean up
1358
- try:
1359
- shutil.rmtree(temp_batch_dir)
1360
- except Exception as final_clean_err:
1361
- app.logger.error(f"Error in final cleanup (batch dir): {final_clean_err}")
1362
- else:
1363
- app.logger.warning("Keeping temporary batch directory due to upload failure for next attempt.")
1364
-
1365
-
1366
- # Schedule periodic tasks
1367
- scheduler = BackgroundScheduler()
1368
- # Sync database less frequently if needed, e.g., every 15 minutes
1369
- scheduler.add_job(sync_database, "interval", minutes=15, id="sync_db_job")
1370
- # Sync preferences more frequently
1371
- scheduler.add_job(sync_preferences_data, "interval", minutes=5, id="sync_pref_job")
1372
- scheduler.start()
1373
- print("Periodic tasks scheduler started (DB sync and Preferences upload)") # Use print for startup
1374
-
1375
-
1376
- @app.cli.command("init-db")
1377
- def init_db():
1378
- """Initialize the database."""
1379
- with app.app_context():
1380
- db.create_all()
1381
- print("Database initialized!")
1382
-
1383
-
1384
- @app.route("/api/toggle-leaderboard-visibility", methods=["POST"])
1385
- def toggle_leaderboard_visibility():
1386
- """Toggle whether the current user appears in the top voters leaderboard"""
1387
- if not current_user.is_authenticated:
1388
- return jsonify({"error": "You must be logged in to change this setting"}), 401
1389
-
1390
- new_status = toggle_user_leaderboard_visibility(current_user.id)
1391
- if new_status is None:
1392
- return jsonify({"error": "User not found"}), 404
1393
-
1394
- return jsonify({
1395
- "success": True,
1396
- "visible": new_status,
1397
- "message": "You are now visible in the voters leaderboard" if new_status else "You are now hidden from the voters leaderboard"
1398
- })
1399
-
1400
-
1401
- @app.route("/api/tts/cached-sentences")
1402
- def get_cached_sentences():
1403
- """Returns a list of sentences currently available in the TTS cache."""
1404
- with tts_cache_lock:
1405
- cached_keys = list(tts_cache.keys())
1406
- return jsonify(cached_keys)
1407
-
1408
-
1409
- def get_weighted_random_models(
1410
- applicable_models: list[Model], num_to_select: int, model_type: ModelType
1411
- ) -> list[Model]:
1412
- """
1413
- Selects a specified number of models randomly from a list of applicable_models,
1414
- weighting models with fewer votes higher. A smoothing factor is used to ensure
1415
- the preference is slight and to prevent models with zero votes from being
1416
- overwhelmingly favored. Models are selected without replacement.
1417
-
1418
- Assumes len(applicable_models) >= num_to_select, which should be checked by the caller.
1419
- """
1420
- model_votes_counts = {}
1421
- for model in applicable_models:
1422
- votes = (
1423
- Vote.query.filter(Vote.model_type == model_type)
1424
- .filter(or_(Vote.model_chosen == model.id, Vote.model_rejected == model.id))
1425
- .count()
1426
- )
1427
- model_votes_counts[model.id] = votes
1428
-
1429
- weights = [
1430
- 1.0 / (model_votes_counts[model.id] + SMOOTHING_FACTOR_MODEL_SELECTION)
1431
- for model in applicable_models
1432
- ]
1433
-
1434
- selected_models_list = []
1435
- # Create copies to modify during selection process
1436
- current_candidates = list(applicable_models)
1437
- current_weights = list(weights)
1438
-
1439
- # Assumes num_to_select is positive and less than or equal to len(current_candidates)
1440
- # Callers should ensure this (e.g., len(available_models) >= 2).
1441
- for _ in range(num_to_select):
1442
- if not current_candidates: # Safety break
1443
- app.logger.warning("Not enough candidates left for weighted selection.")
1444
- break
1445
-
1446
- chosen_model = random.choices(current_candidates, weights=current_weights, k=1)[0]
1447
- selected_models_list.append(chosen_model)
1448
-
1449
- try:
1450
- idx_to_remove = current_candidates.index(chosen_model)
1451
- current_candidates.pop(idx_to_remove)
1452
- current_weights.pop(idx_to_remove)
1453
- except ValueError:
1454
- # This should ideally not happen if chosen_model came from current_candidates.
1455
- app.logger.error(f"Error removing model {chosen_model.id} from weighted selection candidates.")
1456
- break # Avoid potential issues
1457
-
1458
- return selected_models_list
1459
-
1460
-
1461
- def check_for_coordinated_campaigns():
1462
- """Check all active models for potential coordinated voting campaigns"""
1463
- try:
1464
- from security import detect_coordinated_voting
1465
- from models import Model, ModelType
1466
-
1467
- # Check TTS models
1468
- tts_models = Model.query.filter_by(model_type=ModelType.TTS, is_active=True).all()
1469
- for model in tts_models:
1470
- try:
1471
- detect_coordinated_voting(model.id)
1472
- except Exception as e:
1473
- app.logger.error(f"Error checking coordinated voting for TTS model {model.id}: {str(e)}")
1474
-
1475
- # Check conversational models
1476
- conv_models = Model.query.filter_by(model_type=ModelType.CONVERSATIONAL, is_active=True).all()
1477
- for model in conv_models:
1478
- try:
1479
- detect_coordinated_voting(model.id)
1480
- except Exception as e:
1481
- app.logger.error(f"Error checking coordinated voting for conversational model {model.id}: {str(e)}")
1482
-
1483
- except Exception as e:
1484
- app.logger.error(f"Error in coordinated campaign check: {str(e)}")
1485
-
1486
 
1487
  if __name__ == "__main__":
1488
- with app.app_context():
1489
- # Ensure ./instance and ./votes directories exist
1490
- os.makedirs("instance", exist_ok=True)
1491
- os.makedirs("./votes", exist_ok=True) # Create votes directory if it doesn't exist
1492
- os.makedirs(CACHE_AUDIO_DIR, exist_ok=True) # Ensure cache audio dir exists
1493
-
1494
- # Clean up old cache audio files on startup
1495
- try:
1496
- app.logger.info(f"Clearing old cache audio files from {CACHE_AUDIO_DIR}")
1497
- for filename in os.listdir(CACHE_AUDIO_DIR):
1498
- file_path = os.path.join(CACHE_AUDIO_DIR, filename)
1499
- try:
1500
- if os.path.isfile(file_path) or os.path.islink(file_path):
1501
- os.unlink(file_path)
1502
- elif os.path.isdir(file_path):
1503
- shutil.rmtree(file_path)
1504
- except Exception as e:
1505
- app.logger.error(f'Failed to delete {file_path}. Reason: {e}')
1506
- except Exception as e:
1507
- app.logger.error(f"Error clearing cache directory {CACHE_AUDIO_DIR}: {e}")
1508
-
1509
-
1510
- # Download database if it doesn't exist (only on initial space start)
1511
- if IS_SPACES and not os.path.exists(app.config["SQLALCHEMY_DATABASE_URI"].replace("sqlite:///", "")):
1512
- try:
1513
- print("Database not found, downloading from HF dataset...")
1514
- hf_hub_download(
1515
- repo_id="TTS-AGI/database-arena-v2",
1516
- filename="tts_arena.db",
1517
- repo_type="dataset",
1518
- local_dir="instance", # download to instance/
1519
- token=os.getenv("HF_TOKEN"),
1520
- )
1521
- print("Database downloaded successfully ✅")
1522
- except Exception as e:
1523
- print(f"Error downloading database from HF dataset: {str(e)} ⚠️")
1524
-
1525
-
1526
- db.create_all() # Create tables if they don't exist
1527
- insert_initial_models()
1528
- # Setup background tasks
1529
- initialize_tts_cache() # Start populating the cache
1530
- setup_cleanup()
1531
- setup_periodic_tasks() # Renamed function call
1532
-
1533
- # Configure Flask to recognize HTTPS when behind a reverse proxy
1534
- from werkzeug.middleware.proxy_fix import ProxyFix
1535
-
1536
- # Apply ProxyFix middleware to handle reverse proxy headers
1537
- # This ensures Flask generates correct URLs with https scheme
1538
- # X-Forwarded-Proto header will be used to detect the original protocol
1539
- app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)
1540
-
1541
- # Force Flask to prefer HTTPS for generated URLs
1542
- app.config["PREFERRED_URL_SCHEME"] = "https"
1543
-
1544
- from waitress import serve
1545
-
1546
- # Configuration for 2 vCPUs:
1547
- # - threads: typically 4-8 threads per CPU core is a good balance
1548
- # - connection_limit: maximum concurrent connections
1549
- # - channel_timeout: prevent hanging connections
1550
- threads = 12 # 6 threads per vCPU is a good balance for mixed IO/CPU workloads
1551
-
1552
- if IS_SPACES:
1553
- serve(
1554
- app,
1555
- host="0.0.0.0",
1556
- port=int(os.environ.get("PORT", 7860)),
1557
- threads=threads,
1558
- connection_limit=100,
1559
- channel_timeout=30,
1560
- url_scheme='https'
1561
- )
1562
- else:
1563
- print(f"Starting Waitress server with {threads} threads")
1564
- serve(
1565
- app,
1566
- host="0.0.0.0",
1567
- port=5000,
1568
- threads=threads,
1569
- connection_limit=100,
1570
- channel_timeout=30,
1571
- url_scheme='https' # Keep https for local dev if using proxy/tunnel
1572
- )
 
1
+ from flask import Flask, render_template_string
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
# Flask application instance; the maintenance route below is registered on it.
app = Flask(__name__)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
# Static maintenance page shown while the TTS Arena is offline.
# Self-contained HTML styled via the Tailwind CDN; contains no Jinja
# placeholders, so it renders the same regardless of template context.
HTML = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Maintenance</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<script src="https://cdn.tailwindcss.com"></script>
</head>
<body class="bg-gray-100 flex items-center justify-center h-screen">
<div class="bg-white p-8 rounded-2xl shadow-lg text-center max-w-md">
<svg class="mx-auto mb-4 w-16 h-16 text-yellow-500" fill="none" stroke="currentColor" stroke-width="1.5"
viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round"
d="M12 9v2m0 4h.01M4.93 4.93a10 10 0 0114.14 0 10 10 0 010 14.14 10 10 0 01-14.14 0 10 10 0 010-14.14z"/>
</svg>
<h1 class="text-2xl font-bold text-gray-800 mb-2">We'll be back soon!</h1>
<p class="text-gray-600">The TTS Arena is temporarily undergoing maintenance.<br>Thank you for your patience.</p>
</div>
</body>
</html>
"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
@app.route("/")
def maintenance():
    """Serve the static maintenance page for the site root.

    Returns:
        A ``(body, status, headers)`` tuple: the maintenance HTML with
        HTTP 503 (Service Unavailable) and a ``Retry-After`` hint, so
        clients, proxies, and crawlers treat the outage as temporary
        instead of caching or indexing the maintenance page as the
        site's real content.
    """
    # HTML is a static string with no Jinja placeholders, so rendering it
    # through render_template_string would be pointless per-request
    # template compilation; return it directly.
    return HTML, 503, {"Retry-After": "3600"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
if __name__ == "__main__":
    # Bind all interfaces so the app is reachable inside a container.
    # Honor a PORT override from the environment (standard for PaaS /
    # Hugging Face Spaces), defaulting to 7860, the usual Spaces port.
    import os

    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 7860)))