Spaces:
Running
Running
Johnny
committed on
Commit
·
19ea0c5
1
Parent(s):
56325dc
mvp app tested, configured postgreSQL Supabase
Browse files- .gitignore +4 -1
- __pycache__/config.cpython-311.pyc +0 -0
- __pycache__/utils.cpython-311.pyc +0 -0
- config.py +13 -2
- main.py +8 -1
- utils.py +35 -18
.gitignore
CHANGED
@@ -16,4 +16,7 @@ venv/
|
|
16 |
build/
|
17 |
|
18 |
# Do not ignore a specific file inside an ignored directory
|
19 |
-
!build/keep-me.txt
|
|
|
|
|
|
|
|
16 |
build/
|
17 |
|
18 |
# Do not ignore a specific file inside an ignored directory
|
19 |
+
!build/keep-me.txt
|
20 |
+
|
21 |
+
# ignore cache files
|
22 |
+
__pycache__/
|
__pycache__/config.cpython-311.pyc
CHANGED
Binary files a/__pycache__/config.cpython-311.pyc and b/__pycache__/config.cpython-311.pyc differ
|
|
__pycache__/utils.cpython-311.pyc
CHANGED
Binary files a/__pycache__/utils.cpython-311.pyc and b/__pycache__/utils.cpython-311.pyc differ
|
|
config.py
CHANGED
@@ -13,6 +13,17 @@ if not SUPABASE_KEY:
|
|
13 |
supabase = create_client(SUPABASE_URL, SUPABASE_KEY)
|
14 |
|
15 |
# Hugging Face API Config
|
16 |
-
HF_API_URL = "https://
|
17 |
HF_API_TOKEN = os.getenv("HF_API_TOKEN")
|
18 |
-
HF_HEADERS = {"Authorization": f"Bearer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
supabase = create_client(SUPABASE_URL, SUPABASE_KEY)
|
14 |
|
15 |
# Hugging Face API Config
HF_API_URL = "https://router.huggingface.co/hf-inference/models/google/gemma-7b"
HF_API_TOKEN = os.getenv("HF_API_TOKEN")
# BUG FIX: the original f-string had no braces — f"Bearer HF_API_TOKEN" sends the
# literal text "HF_API_TOKEN" as the credential, so every request is rejected.
# Interpolate the actual token value read from the environment above.
HF_HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}

def query(payload):
    """Send *payload* to the Hugging Face inference endpoint and return its JSON.

    Returns the decoded JSON body on HTTP 200; on any other status code the
    error is printed for debugging and None is returned.
    """
    import requests
    response = requests.post(HF_API_URL, headers=HF_HEADERS, json=payload)

    # Success path first: hand back the decoded body untouched.
    if response.status_code == 200:
        return response.json()

    print(f"Error: {response.status_code}, {response.text}")  # Debugging
    return None
main.py
CHANGED
@@ -1,5 +1,12 @@
|
|
1 |
import streamlit as st
|
2 |
-
from utils import process_resumes, generate_pdf_report
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
def main():
|
5 |
st.title("AI Candidate Screening App")
|
|
|
1 |
import streamlit as st
|
2 |
+
from utils import process_resumes, generate_pdf_report, store_in_supabase, score_candidate, extract_email, parse_resume
|
3 |
+
from config import supabase
|
4 |
+
from config import HF_API_TOKEN, HF_API_URL, HF_HEADERS
|
5 |
+
import fitz # PyMuPDF
|
6 |
+
from io import BytesIO
|
7 |
+
from dotenv import load_dotenv
|
8 |
+
import os
|
9 |
+
import requests
|
10 |
|
11 |
def main():
|
12 |
st.title("AI Candidate Screening App")
|
utils.py
CHANGED
@@ -4,7 +4,10 @@ import json
|
|
4 |
import re
|
5 |
from io import BytesIO
|
6 |
import supabase
|
7 |
-
from config import SUPABASE_URL, SUPABASE_KEY, HF_API_TOKEN, HF_API_URL, HF_HEADERS
|
|
|
|
|
|
|
8 |
|
9 |
def parse_resume(pdf_file):
|
10 |
"""Extracts text from a resume PDF."""
|
@@ -20,8 +23,8 @@ def extract_email(resume_text):
|
|
20 |
def score_candidate(resume_text, job_description):
|
21 |
"""Sends resume and job description to Hugging Face for scoring."""
|
22 |
payload = {"inputs": f"Resume: {resume_text}\nJob Description: {job_description}"}
|
23 |
-
response = requests.post(HF_API_URL, headers=HF_HEADERS,
|
24 |
-
|
25 |
# Debugging: Print response
|
26 |
if response.status_code != 200:
|
27 |
print(f"Error: {response.status_code}, {response.text}") # Log any errors
|
@@ -33,16 +36,18 @@ def score_candidate(resume_text, job_description):
|
|
33 |
print("Failed to decode JSON response:", response.text) # Debugging output
|
34 |
return 0 # Return default score if JSON decoding fails
|
35 |
|
36 |
-
def store_in_supabase(resume_text, score, candidate_name):
|
37 |
-
"""Stores
|
38 |
-
email = extract_email(resume_text)
|
39 |
data = {
|
40 |
-
"name": candidate_name,
|
41 |
-
"resume": resume_text,
|
42 |
-
"score": score,
|
43 |
-
"email": email
|
|
|
44 |
}
|
45 |
-
|
|
|
|
|
46 |
|
47 |
def generate_pdf_report(shortlisted_candidates):
|
48 |
"""Generates a PDF summary of shortlisted candidates."""
|
@@ -50,7 +55,11 @@ def generate_pdf_report(shortlisted_candidates):
|
|
50 |
doc = fitz.open()
|
51 |
for candidate in shortlisted_candidates:
|
52 |
page = doc.new_page()
|
53 |
-
|
|
|
|
|
|
|
|
|
54 |
doc.save(pdf)
|
55 |
pdf.seek(0)
|
56 |
return pdf
|
@@ -62,11 +71,19 @@ def process_resumes(uploaded_files, job_description):
|
|
62 |
resume_text = parse_resume(pdf_file)
|
63 |
score = score_candidate(resume_text, job_description)
|
64 |
email = extract_email(resume_text)
|
|
|
|
|
|
|
|
|
65 |
candidates.append({
|
66 |
-
"name": pdf_file.name,
|
67 |
-
"resume": resume_text,
|
68 |
-
"score": score,
|
69 |
-
"email": email
|
|
|
70 |
})
|
71 |
-
|
72 |
-
|
|
|
|
|
|
|
|
4 |
import re
|
5 |
from io import BytesIO
|
6 |
import supabase
|
7 |
+
from config import SUPABASE_URL, SUPABASE_KEY, HF_API_TOKEN, HF_API_URL, HF_HEADERS, supabase
|
8 |
+
#from config import supabase
|
9 |
+
|
10 |
+
# These functions will be called in the main.py file
|
11 |
|
12 |
def parse_resume(pdf_file):
|
13 |
"""Extracts text from a resume PDF."""
|
|
|
23 |
def score_candidate(resume_text, job_description):
|
24 |
"""Sends resume and job description to Hugging Face for scoring."""
|
25 |
payload = {"inputs": f"Resume: {resume_text}\nJob Description: {job_description}"}
|
26 |
+
response = requests.post(HF_API_URL, headers=HF_HEADERS, json=payload)
|
27 |
+
|
28 |
# Debugging: Print response
|
29 |
if response.status_code != 200:
|
30 |
print(f"Error: {response.status_code}, {response.text}") # Log any errors
|
|
|
36 |
print("Failed to decode JSON response:", response.text) # Debugging output
|
37 |
return 0 # Return default score if JSON decoding fails
|
38 |
|
def store_in_supabase(resume_text, score, candidate_name, email, summary):
    """Persist one candidate's screening results into the Supabase `candidates` table."""
    record = {
        "name": candidate_name,
        "resume": resume_text,
        "score": score,
        "email": email,
        "summary": summary,  # Store summary in Supabase
    }

    # Insert via the module-level Supabase client imported from config.
    result = supabase.table("candidates").insert(record).execute()
    print("Inserted into Supabase:", result)  # Debugging output
51 |
|
52 |
def generate_pdf_report(shortlisted_candidates):
|
53 |
"""Generates a PDF summary of shortlisted candidates."""
|
|
|
55 |
doc = fitz.open()
|
56 |
for candidate in shortlisted_candidates:
|
57 |
page = doc.new_page()
|
58 |
+
summary = candidate.get("summary", "No summary available") # Avoid KeyError
|
59 |
+
page.insert_text(
|
60 |
+
(50, 50),
|
61 |
+
f"Candidate: {candidate['name']}\nEmail: {candidate['email']}\nScore: {candidate['score']}\nSummary: {summary}"
|
62 |
+
)
|
63 |
doc.save(pdf)
|
64 |
pdf.seek(0)
|
65 |
return pdf
|
|
|
71 |
resume_text = parse_resume(pdf_file)
|
72 |
score = score_candidate(resume_text, job_description)
|
73 |
email = extract_email(resume_text)
|
74 |
+
|
75 |
+
# Generate summary (replace with actual summarization logic later)
|
76 |
+
summary = f"{pdf_file.name} has a score of {score} for this job."
|
77 |
+
|
78 |
candidates.append({
|
79 |
+
"name": pdf_file.name,
|
80 |
+
"resume": resume_text,
|
81 |
+
"score": score,
|
82 |
+
"email": email,
|
83 |
+
"summary": summary
|
84 |
})
|
85 |
+
|
86 |
+
# Store all details including summary in Supabase
|
87 |
+
store_in_supabase(resume_text, score, pdf_file.name, email, summary)
|
88 |
+
|
89 |
+
return sorted(candidates, key=lambda x: x["score"], reverse=True)[:5] # Return top 5 candidates
|