# career_rep/eu_act_project/eu-ai-act.py
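# Gradio chat agent that answers questions about the EU AI Act, grounded in the
# full Act text extracted from a local PDF. It exposes two function-calling tools
# (lead capture and unknown-question logging via Pushover) and gates each reply
# through a Gemini-based evaluator that can trigger a retry.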
from dotenv import load_dotenv
from openai import OpenAI
from pypdf import PdfReader
import json
import os
import requests
import gradio as gr
load_dotenv(override=True)
openai = OpenAI()
pushover_user = os.getenv("PUSHOVER_USER")
pushover_token = os.getenv("PUSHOVER_TOKEN_EU")
pushover_url = "https://api.pushover.net/1/messages.json"
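# Send a push notification via the Pushover REST API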
def push(message):
    print(f"Push: {message}")
    payload = {"user": pushover_user, "token": pushover_token, "message": message}
    requests.post(pushover_url, data=payload)
def record_user_details(email, name="Name not provided", notes="not provided"):
    push(f"Recording interest from {name} with email {email} and notes {notes}")
    return {"recorded": "ok"}

def record_unknown_question(question):
    push(f"Recording {question} asked that I couldn't answer")
    return {"recorded": "ok"}
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user"
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it"
            },
            "notes": {
                "type": "string",
                "description": "Any additional information about the conversation that's worth recording to give context"
            }
        },
        "required": ["email"],
        "additionalProperties": False
    }
}
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered"
            }
        },
        "required": ["question"],
        "additionalProperties": False
    }
}
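# Register both functions as tools for the chat completions API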
tools = [{"type": "function", "function": record_user_details_json},
         {"type": "function", "function": record_unknown_question_json}]
def handle_tool_calls(tool_calls):
    results = []
    for tool_call in tool_calls:
        tool_name = tool_call.function.name
        arguments = json.loads(tool_call.function.arguments)
        print(f"Tool called: {tool_name}", flush=True)
        # Look up the tool function by name in this module's globals and call it
        tool = globals().get(tool_name)
        result = tool(**arguments) if tool else {}
        results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
    return results
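# Extract the full text of the EU AI Act from the bundled PDF to ground the agent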
reader = PdfReader("EU_AI_ACT.pdf")
euact = ""
for page in reader.pages:
    text = page.extract_text()
    if text:
        euact += text
system_prompt = f"You are acting as an expert assistant on the EU Artificial Intelligence Act (EU AI Act). \
You are helping users understand the EU AI Act, including its key principles, obligations, risk classifications, and compliance requirements. \
Your role is to explain how the Act applies to different types of businesses, sectors, and AI use cases, based on the official documentation provided under the name 'euact'. \
You must provide accurate, clear, and actionable guidance, making complex legal and technical language easier for users to understand. \
Always remain professional, informative, and approachable—your tone should be that of a helpful advisor assisting a business owner, compliance officer, or curious professional. \
If you cannot answer a specific question using the provided 'euact' documentation, record it using your record_unknown_question tool. \
If the user appears interested in deeper support or guidance, encourage them to share their email and record it using your record_user_details tool for follow-up."
system_prompt += f"\n\n## EU AI Act Documentation:\n{euact}\n\n"
system_prompt += f"With this context, please assist the user, always staying in character as a knowledgeable and helpful guide to the EU AI Act."
def chat(message, history):
    messages = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": message}]
    done = False
    while not done:
        # This is the call to the LLM - see that we pass in the tools json
        response = openai.chat.completions.create(model="gpt-4.1-mini", messages=messages, tools=tools)
        finish_reason = response.choices[0].finish_reason
        # If the LLM wants to call a tool, we do that!
        if finish_reason == "tool_calls":
            message = response.choices[0].message
            tool_calls = message.tool_calls
            results = handle_tool_calls(tool_calls)
            messages.append(message)
            messages.extend(results)
        else:
            done = True
    return response.choices[0].message.content
# Create a Pydantic model for the Evaluation
from pydantic import BaseModel
class Evaluation(BaseModel):
    is_acceptable: bool
    feedback: str
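# The evaluator judges each reply against the same Act text the agent was given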
evaluator_system_prompt = f"You are an evaluator that decides whether a response to a question is acceptable. \
You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. \
The Agent is playing the role of an expert on the EU Artificial Intelligence Act. \
The Agent has been instructed to be professional and engaging. \
The Agent has been provided with context on the EU Artificial Intelligence Act in the form of the official Act texts. Here's the information:"
evaluator_system_prompt += f"\n\n## EU Act Texts:\n{euact}\n\n"
evaluator_system_prompt += f"With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback."
def evaluator_user_prompt(reply, message, history):
    user_prompt = f"Here's the conversation between the User and the Agent: \n\n{history}\n\n"
    user_prompt += f"Here's the latest message from the User: \n\n{message}\n\n"
    user_prompt += f"Here's the latest response from the Agent: \n\n{reply}\n\n"
    user_prompt += "Please evaluate the response, replying with whether it is acceptable and your feedback."
    return user_prompt
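# Use Gemini as an independent evaluator via its OpenAI-compatible endpoint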
gemini = OpenAI(
    api_key=os.getenv("GOOGLE_API_KEY"),
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)
def evaluate(reply, message, history) -> Evaluation:
    messages = [{"role": "system", "content": evaluator_system_prompt}] + [{"role": "user", "content": evaluator_user_prompt(reply, message, history)}]
    response = gemini.beta.chat.completions.parse(model="gemini-2.0-flash", messages=messages, response_format=Evaluation)
    return response.choices[0].message.parsed
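# Quick smoke test: one question to the base model, without tools or evaluation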
messages = [{"role": "system", "content": system_prompt}] + [{"role": "user", "content": "what is high risk AI"}]
response = openai.chat.completions.create(model="gpt-4.1-mini", messages=messages)
reply = response.choices[0].message.content
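# Regenerate a rejected reply, feeding the evaluator's feedback back into the system prompt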
def rerun(reply, message, history, feedback):
    updated_system_prompt = system_prompt + "\n\n## Previous answer rejected\nYou just tried to reply, but the quality control rejected your reply\n"
    updated_system_prompt += f"## Your attempted answer:\n{reply}\n\n"
    updated_system_prompt += f"## Reason for rejection:\n{feedback}\n\n"
    messages = [{"role": "system", "content": updated_system_prompt}] + history + [{"role": "user", "content": message}]
    response = openai.chat.completions.create(model="gpt-4o-mini", messages=messages)
    return response.choices[0].message.content
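# Final chat entry point used by Gradio: generate a reply, have Gemini evaluate it,
# and retry once with feedback if rejected. This definition supersedes the
# tool-calling chat() defined above.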
def chat(message, history):
    messages = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": message}]
    response = openai.chat.completions.create(model="gpt-4.1-mini", messages=messages)
    reply = response.choices[0].message.content
    evaluation = evaluate(reply, message, history)
    if evaluation.is_acceptable:
        print("Passed evaluation - returning reply")
    else:
        print("Failed evaluation - retrying")
        print(evaluation.feedback)
        reply = rerun(reply, message, history, evaluation.feedback)
    return reply
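# Launch the Gradio chat UI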
gr.ChatInterface(chat, type="messages").launch()