Johnny committed on
Commit
8f771eb
·
1 Parent(s): 786acf0

added retry logic to config query, fixed api variables, fixed score and summary functions

Browse files
.gitignore CHANGED
@@ -15,8 +15,12 @@ venv/
15
  # Ignore all files in a specific folder
16
  build/
17
 
 
18
  # Do not ignore a specific file inside an ignored directory
19
  !build/keep-me.txt
20
 
21
  # ignore cache files
22
- __pycache__/
 
 
 
 
15
  # Ignore all files in a specific folder
16
  build/
17
 
18
+
19
  # Do not ignore a specific file inside an ignored directory
20
  !build/keep-me.txt
21
 
22
  # ignore cache files
23
+ __pycache__/
24
+ .pytest_cache/
25
+ # Ignore all files with the .tmp extension
26
+ *.tmp
__pycache__/config.cpython-311.pyc DELETED
Binary file (1.44 kB)
 
__pycache__/database.cpython-311.pyc DELETED
Binary file (1.31 kB)
 
__pycache__/model.cpython-311.pyc DELETED
Binary file (720 Bytes)
 
__pycache__/utils.cpython-311.pyc DELETED
Binary file (4.97 kB)
 
config.py CHANGED
@@ -2,6 +2,7 @@ import os
2
  from dotenv import load_dotenv
3
  from supabase import create_client
4
  import requests
 
5
 
6
  # Load environment variables from .env file
7
  load_dotenv()
@@ -13,74 +14,54 @@ if not SUPABASE_KEY:
13
  raise ValueError("SUPABASE_KEY is not set in the environment variables.")
14
  supabase = create_client(SUPABASE_URL, SUPABASE_KEY)
15
 
 
16
  HF_MODELS = {
17
  "gemma": "https://api-inference.huggingface.co/models/google/gemma-7b",
18
  "bart": "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
19
  }
20
 
21
- # Hugging Face API Config
22
- #HF_API_URL = "https://router.huggingface.co/hf-inference/models/google/gemma-7b"
23
  HF_API_TOKEN = os.getenv("HF_API_TOKEN")
24
- HF_HEADERS = {"Authorization": f"Bearer HF_API_TOKEN"}
25
-
26
- # Ensure the API key is loaded
27
  if not HF_API_TOKEN:
28
  raise ValueError("Missing Hugging Face API key. Check your .env file.")
29
 
30
- #
31
- def query(payload, model="gemma"):
 
 
32
  """
33
- Sends a request to the selected Hugging Face model API.
34
 
35
  :param payload: The input data for inference.
36
- :param model: Choose either 'gemma' (for google/gemma-7b) or 'bart' (for facebook/bart-large-cnn).
37
- :return: The model's response in JSON format, or None if the request fails.
 
 
38
  """
39
  if model not in HF_MODELS:
40
  raise ValueError("Invalid model name. Choose 'gemma' or 'bart'.")
41
 
42
- api_url = f"https://api-inference.huggingface.co/models/{HF_MODELS[model]}"
43
-
44
- try:
45
- response = requests.post(api_url, headers=HF_HEADERS, json=payload)
46
-
47
- if response.status_code == 401:
48
- print(f"Error querying Hugging Face model '{model}': 401 Unauthorized. Check API key.")
49
- return None # Handle authentication failure
50
-
51
- response.raise_for_status() # Raise an error for failed requests (e.g., 500 errors)
52
-
53
- return response.json() # Return the parsed JSON response
54
 
55
- except requests.exceptions.RequestException as e:
56
- print(f"Error querying Hugging Face model '{model}': {e}")
57
- return None # Return None if API call fails
58
-
59
- # Bart query
60
- def query(payload, model="bart"):
61
- """
62
- Sends a request to the selected Hugging Face model API.
63
-
64
- :param payload: The input data for inference.
65
- :param model: Choose either 'gemma' (for google/gemma-7b) or 'bart' (for facebook/bart-large-cnn).
66
- :return: The model's response in JSON format, or None if the request fails.
67
- """
68
- if model not in HF_MODELS:
69
- raise ValueError("Invalid model name. Choose 'gemma' or 'bart'.")
70
 
71
- api_url = f"https://api-inference.huggingface.co/models/{HF_MODELS[model]}"
 
 
72
 
73
- try:
74
- response = requests.post(api_url, headers=HF_HEADERS, json=payload)
 
 
75
 
76
- if response.status_code == 401:
77
- print(f"Error querying Hugging Face model '{model}': 401 Unauthorized. Check API key.")
78
- return None # Handle authentication failure
79
 
80
- response.raise_for_status() # Raise an error for failed requests (e.g., 500 errors)
81
 
82
- return response.json() # Return the parsed JSON response
 
 
83
 
84
- except requests.exceptions.RequestException as e:
85
- print(f"Error querying Hugging Face model '{model}': {e}")
86
- return None # Return None if API call fails
 
2
  from dotenv import load_dotenv
3
  from supabase import create_client
4
  import requests
5
+ import time
6
 
7
  # Load environment variables from .env file
8
  load_dotenv()
 
14
  raise ValueError("SUPABASE_KEY is not set in the environment variables.")
15
  supabase = create_client(SUPABASE_URL, SUPABASE_KEY)
16
 
17
+ # Hugging Face API Config
18
  HF_MODELS = {
19
  "gemma": "https://api-inference.huggingface.co/models/google/gemma-7b",
20
  "bart": "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
21
  }
22
 
 
 
23
  HF_API_TOKEN = os.getenv("HF_API_TOKEN")
 
 
 
24
  if not HF_API_TOKEN:
25
  raise ValueError("Missing Hugging Face API key. Check your .env file.")
26
 
27
+ # Correct API Headers
28
+ HF_HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
29
+
30
+ def query(payload, model="gemma", retries=3, delay=5):
31
  """
32
+ Sends a request to the Hugging Face API with retries.
33
 
34
  :param payload: The input data for inference.
35
+ :param model: The model name ('gemma' or 'bart').
36
+ :param retries: Number of times to retry if the request fails.
37
+ :param delay: Delay in seconds before retrying.
38
+ :return: The model's response in JSON format, or None if all retries fail.
39
  """
40
  if model not in HF_MODELS:
41
  raise ValueError("Invalid model name. Choose 'gemma' or 'bart'.")
42
 
43
+ api_url = HF_MODELS[model] # Correct model URL
 
 
 
 
 
 
 
 
 
 
 
44
 
45
+ for attempt in range(retries):
46
+ try:
47
+ response = requests.post(api_url, headers=HF_HEADERS, json=payload)
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
+ if response.status_code == 401:
50
+ print(f"Error querying Hugging Face model '{model}': 401 Unauthorized. Check API key.")
51
+ return None # API key issue
52
 
53
+ if response.status_code == 500:
54
+ print(f"Server error (500) on attempt {attempt + 1}. Retrying in {delay} seconds...")
55
+ time.sleep(delay) # Wait before retrying
56
+ continue # Retry the request
57
 
58
+ response.raise_for_status() # Raise an error for failed requests (except 500)
 
 
59
 
60
+ return response.json() # Return the parsed JSON response
61
 
62
+ except requests.exceptions.RequestException as e:
63
+ print(f"Error querying Hugging Face model '{model}': {e}")
64
+ time.sleep(delay) # Wait before retrying
65
 
66
+ print("All retry attempts failed.")
67
+ return None # Return None if all retries fail
 
utils.py CHANGED
@@ -43,26 +43,36 @@ def extract_email(resume_text):
43
  match = re.search(r"[\w\.-]+@[\w\.-]+", resume_text)
44
  return match.group(0) if match else None
45
 
46
- # Test on why score 0 is returned even though resume matches key words
47
- # score_candidate function will use HuggingFace gemini model
48
  def score_candidate(resume_text, job_description):
49
  """
50
  Scores the candidate's resume based on the job description using the Hugging Face API.
51
-
52
  :param resume_text: The extracted resume text.
53
  :param job_description: The job description for comparison.
54
  :return: A numerical score (default 0 if scoring fails).
55
  """
56
  payload = {"inputs": f"Resume: {resume_text}\nJob Description: {job_description}"}
57
- response_gemma = query(payload, model="gemma") # Use Google Gemma Model for scoring
58
 
59
  if response_gemma is None:
60
- return 0 # Return 0 if API call fails
 
 
 
 
 
 
 
61
 
62
  try:
63
- return float(response_gemma.get("score", 0)) # Ensure score is always a float
64
- except (TypeError, ValueError):
65
- return 0 # Return 0 if score parsing fails
 
 
 
 
 
66
 
67
  # summarize_resume function will use HuggingFace BART model
68
  def summarize_resume(resume_text):
 
43
  match = re.search(r"[\w\.-]+@[\w\.-]+", resume_text)
44
  return match.group(0) if match else None
45
 
 
 
46
  def score_candidate(resume_text, job_description):
47
  """
48
  Scores the candidate's resume based on the job description using the Hugging Face API.
49
+
50
  :param resume_text: The extracted resume text.
51
  :param job_description: The job description for comparison.
52
  :return: A numerical score (default 0 if scoring fails).
53
  """
54
  payload = {"inputs": f"Resume: {resume_text}\nJob Description: {job_description}"}
55
+ response_gemma = query(payload, model="gemma") # Call Hugging Face API
56
 
57
  if response_gemma is None:
58
+ print("API response is None")
59
+ return 0
60
+
61
+ print("API Response:", response_gemma) # Debugging
62
+
63
+ # Handle list response
64
+ if isinstance(response_gemma, list) and len(response_gemma) > 0:
65
+ response_gemma = response_gemma[0] # Extract first item if response is a list
66
 
67
  try:
68
+ if isinstance(response_gemma, dict) and "score" in response_gemma:
69
+ return float(response_gemma["score"])
70
+ else:
71
+ print("Unexpected API response format:", response_gemma) # Debugging
72
+ return 0 # Default if score is missing
73
+ except (TypeError, ValueError) as e:
74
+ print(f"Error parsing score: {e}")
75
+ return 0
76
 
77
  # summarize_resume function will use HuggingFace BART model
78
  def summarize_resume(resume_text):