devcool20 committed on
Commit
8cb6e61
·
verified ·
1 Parent(s): d29d40b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -9
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
  import sys
3
  from flask import Flask, request, jsonify
4
- from flask_cors import CORS
5
  import numpy as np
6
  import json
7
  import google.api_core.exceptions
@@ -18,9 +18,12 @@ load_dotenv()
18
 
19
  app = Flask(__name__)
20
 
21
- # IMPORTANT: Configure CORS to allow requests from your Vercel frontend
22
- # Replace 'https://sales-doc.vercel.app' with your actual Vercel URL.
23
- CORS(app, resources={r"/*": {"origins": "https://sales-doc.vercel.app"}})
 
 
 
24
 
25
  # --- Global Model Instances ---
26
  sales_agent = None
@@ -58,9 +61,6 @@ except ImportError as e:
58
  print("Debug Point: Attempting to instantiate sales.Agent (core RL model).")
59
  if sales is not None:
60
  try:
61
- # --- Relying on Dockerfile to make /.deepmost writable ---
62
- # NO local_model_path argument here. deepmost expects to write to /.deepmost by default
63
- # and doesn't take local_model_path in this constructor.
64
  sales_agent = sales.Agent(
65
  model_path="https://huggingface.co/DeepMostInnovations/sales-conversion-model-reinf-learning/resolve/main/sales_conversion_model.zip",
66
  auto_download=True,
@@ -87,7 +87,6 @@ print("\nDebug Point: Attempting to initialize Gemini 1.5 Flash model.")
87
  if GEMINI_API_KEY:
88
  try:
89
  gemini_model = genai.GenerativeModel('gemini-1.5-flash-latest')
90
- # Small test call to ensure connectivity
91
  test_response = gemini_model.generate_content("Hello.", generation_config=GenerationConfig(max_output_tokens=10))
92
  print(f"Debug Point: Gemini 1.5 Flash test response: {test_response.text[:50]}...")
93
  print("Debug Point: Gemini LLM (1.5 Flash) initialized successfully.")
@@ -215,7 +214,7 @@ def get_llm_advice():
215
  print(f"ERROR: JSON parsing error for overall advice: {json_e}. Raw string: {raw_json_string}")
216
  return jsonify({"points": ["Error parsing LLM JSON advice. This happens with incomplete LLM responses (e.g., due to API rate limits or max tokens). Please try a shorter conversation or wait a moment. Raw response starts with: " + raw_json_string[:100] + "..."]})
217
  except Exception as parse_e:
218
- print(f"ERROR: General error during JSON parsing attempt for chat_llm (Gemini): {parse_e}. Raw string: {json_response}")
219
  return jsonify({"points": ["General error with LLM JSON parsing. Raw response starts with: " + raw_json_string[:100] + "..."]})
220
 
221
  except google.api_core.exceptions.ResourceExhausted as quota_e:
 
1
  import os
2
  import sys
3
  from flask import Flask, request, jsonify
4
+ from flask_cors import CORS # Import CORS (ensure this line is there!)
5
  import numpy as np
6
  import json
7
  import google.api_core.exceptions
 
18
 
19
  app = Flask(__name__)
20
 
21
+ # --- CRITICAL FIX: CORS Configuration ---
22
+ # Ensure this exact URL matches your Vercel frontend URL, WITHOUT a trailing slash.
23
+ # If you have multiple origins, you can provide a list: ["https://sales-doc.vercel.app", "http://localhost:3000"]
24
+ # For production, avoid "*" (wildcard) as it's a security risk.
25
+ cors_origins = "https://sales-doc.vercel.app" # Your exact Vercel frontend URL
26
+ CORS(app, resources={r"/*": {"origins": cors_origins, "allow_headers": ["Content-Type", "Authorization"]}})
27
 
28
  # --- Global Model Instances ---
29
  sales_agent = None
 
61
  print("Debug Point: Attempting to instantiate sales.Agent (core RL model).")
62
  if sales is not None:
63
  try:
 
 
 
64
  sales_agent = sales.Agent(
65
  model_path="https://huggingface.co/DeepMostInnovations/sales-conversion-model-reinf-learning/resolve/main/sales_conversion_model.zip",
66
  auto_download=True,
 
87
  if GEMINI_API_KEY:
88
  try:
89
  gemini_model = genai.GenerativeModel('gemini-1.5-flash-latest')
 
90
  test_response = gemini_model.generate_content("Hello.", generation_config=GenerationConfig(max_output_tokens=10))
91
  print(f"Debug Point: Gemini 1.5 Flash test response: {test_response.text[:50]}...")
92
  print("Debug Point: Gemini LLM (1.5 Flash) initialized successfully.")
 
214
  print(f"ERROR: JSON parsing error for overall advice: {json_e}. Raw string: {raw_json_string}")
215
  return jsonify({"points": ["Error parsing LLM JSON advice. This happens with incomplete LLM responses (e.g., due to API rate limits or max tokens). Please try a shorter conversation or wait a moment. Raw response starts with: " + raw_json_string[:100] + "..."]})
216
  except Exception as parse_e:
217
+ print(f"ERROR: General error during JSON parsing attempt for chat_llm (Gemini): {parse_e}. Raw string: {raw_json_string}")
218
  return jsonify({"points": ["General error with LLM JSON parsing. Raw response starts with: " + raw_json_string[:100] + "..."]})
219
 
220
  except google.api_core.exceptions.ResourceExhausted as quota_e: