Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- .gitattributes +1 -0
- 3_lab3.ipynb +201 -25
- eu-ai-act-claude.py +435 -0
- eu_act_project/EU_AI_ACT.pdf +3 -0
- eu_act_project/eu-ai-act.py +249 -0
- eu_act_project/eu_ai_project.ipynb +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
eu_act_project/EU_AI_ACT.pdf filter=lfs diff=lfs merge=lfs -text
|
3_lab3.ipynb
CHANGED
@@ -39,7 +39,7 @@
|
|
39 |
},
|
40 |
{
|
41 |
"cell_type": "code",
|
42 |
-
"execution_count":
|
43 |
"metadata": {},
|
44 |
"outputs": [],
|
45 |
"source": [
|
@@ -53,7 +53,7 @@
|
|
53 |
},
|
54 |
{
|
55 |
"cell_type": "code",
|
56 |
-
"execution_count":
|
57 |
"metadata": {},
|
58 |
"outputs": [],
|
59 |
"source": [
|
@@ -63,7 +63,7 @@
|
|
63 |
},
|
64 |
{
|
65 |
"cell_type": "code",
|
66 |
-
"execution_count":
|
67 |
"metadata": {},
|
68 |
"outputs": [],
|
69 |
"source": [
|
@@ -77,9 +77,92 @@
|
|
77 |
},
|
78 |
{
|
79 |
"cell_type": "code",
|
80 |
-
"execution_count":
|
81 |
"metadata": {},
|
82 |
-
"outputs": [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
83 |
"source": [
|
84 |
"print(linkedin)"
|
85 |
]
|
@@ -100,7 +183,7 @@
|
|
100 |
"metadata": {},
|
101 |
"outputs": [],
|
102 |
"source": [
|
103 |
-
"name = \"
|
104 |
]
|
105 |
},
|
106 |
{
|
@@ -122,9 +205,20 @@
|
|
122 |
},
|
123 |
{
|
124 |
"cell_type": "code",
|
125 |
-
"execution_count":
|
126 |
"metadata": {},
|
127 |
-
"outputs": [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
128 |
"source": [
|
129 |
"system_prompt"
|
130 |
]
|
@@ -143,9 +237,38 @@
|
|
143 |
},
|
144 |
{
|
145 |
"cell_type": "code",
|
146 |
-
"execution_count":
|
147 |
"metadata": {},
|
148 |
-
"outputs": [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
149 |
"source": [
|
150 |
"gr.ChatInterface(chat, type=\"messages\").launch()"
|
151 |
]
|
@@ -180,7 +303,7 @@
|
|
180 |
},
|
181 |
{
|
182 |
"cell_type": "code",
|
183 |
-
"execution_count":
|
184 |
"metadata": {},
|
185 |
"outputs": [],
|
186 |
"source": [
|
@@ -196,7 +319,7 @@
|
|
196 |
},
|
197 |
{
|
198 |
"cell_type": "code",
|
199 |
-
"execution_count":
|
200 |
"metadata": {},
|
201 |
"outputs": [],
|
202 |
"source": [
|
@@ -210,7 +333,7 @@
|
|
210 |
},
|
211 |
{
|
212 |
"cell_type": "code",
|
213 |
-
"execution_count":
|
214 |
"metadata": {},
|
215 |
"outputs": [],
|
216 |
"source": [
|
@@ -223,7 +346,7 @@
|
|
223 |
},
|
224 |
{
|
225 |
"cell_type": "code",
|
226 |
-
"execution_count":
|
227 |
"metadata": {},
|
228 |
"outputs": [],
|
229 |
"source": [
|
@@ -236,7 +359,7 @@
|
|
236 |
},
|
237 |
{
|
238 |
"cell_type": "code",
|
239 |
-
"execution_count":
|
240 |
"metadata": {},
|
241 |
"outputs": [],
|
242 |
"source": [
|
@@ -247,25 +370,47 @@
|
|
247 |
},
|
248 |
{
|
249 |
"cell_type": "code",
|
250 |
-
"execution_count":
|
251 |
"metadata": {},
|
252 |
-
"outputs": [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
253 |
"source": [
|
254 |
"reply"
|
255 |
]
|
256 |
},
|
257 |
{
|
258 |
"cell_type": "code",
|
259 |
-
"execution_count":
|
260 |
"metadata": {},
|
261 |
-
"outputs": [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
262 |
"source": [
|
263 |
"evaluate(reply, \"do you hold a patent?\", messages[:1])"
|
264 |
]
|
265 |
},
|
266 |
{
|
267 |
"cell_type": "code",
|
268 |
-
"execution_count":
|
269 |
"metadata": {},
|
270 |
"outputs": [],
|
271 |
"source": [
|
@@ -280,7 +425,7 @@
|
|
280 |
},
|
281 |
{
|
282 |
"cell_type": "code",
|
283 |
-
"execution_count":
|
284 |
"metadata": {},
|
285 |
"outputs": [],
|
286 |
"source": [
|
@@ -307,11 +452,42 @@
|
|
307 |
},
|
308 |
{
|
309 |
"cell_type": "code",
|
310 |
-
"execution_count":
|
311 |
"metadata": {},
|
312 |
-
"outputs": [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
313 |
"source": [
|
314 |
-
"gr.ChatInterface(chat, type=\"messages\").launch()"
|
315 |
]
|
316 |
},
|
317 |
{
|
@@ -343,7 +519,7 @@
|
|
343 |
"name": "python",
|
344 |
"nbconvert_exporter": "python",
|
345 |
"pygments_lexer": "ipython3",
|
346 |
-
"version": "3.12.
|
347 |
}
|
348 |
},
|
349 |
"nbformat": 4,
|
|
|
39 |
},
|
40 |
{
|
41 |
"cell_type": "code",
|
42 |
+
"execution_count": 1,
|
43 |
"metadata": {},
|
44 |
"outputs": [],
|
45 |
"source": [
|
|
|
53 |
},
|
54 |
{
|
55 |
"cell_type": "code",
|
56 |
+
"execution_count": 2,
|
57 |
"metadata": {},
|
58 |
"outputs": [],
|
59 |
"source": [
|
|
|
63 |
},
|
64 |
{
|
65 |
"cell_type": "code",
|
66 |
+
"execution_count": 3,
|
67 |
"metadata": {},
|
68 |
"outputs": [],
|
69 |
"source": [
|
|
|
77 |
},
|
78 |
{
|
79 |
"cell_type": "code",
|
80 |
+
"execution_count": 4,
|
81 |
"metadata": {},
|
82 |
+
"outputs": [
|
83 |
+
{
|
84 |
+
"name": "stdout",
|
85 |
+
"output_type": "stream",
|
86 |
+
"text": [
|
87 |
+
" \n",
|
88 |
+
"Contact\n",
|
89 |
+
"sean@faheymedia.ie\n",
|
90 |
+
"www.linkedin.com/in/seanfaheyire\n",
|
91 |
+
"(LinkedIn)\n",
|
92 |
+
"Top Skills\n",
|
93 |
+
"Journalism\n",
|
94 |
+
"Marketing\n",
|
95 |
+
"Social Media\n",
|
96 |
+
"Languages\n",
|
97 |
+
"Spanish (Limited Working)\n",
|
98 |
+
"English (Native or Bilingual)\n",
|
99 |
+
"Seán Fahey\n",
|
100 |
+
"Founder/Digital Communications Specialist at Fahey Media\n",
|
101 |
+
"Ireland\n",
|
102 |
+
"Summary\n",
|
103 |
+
"As the Founder and Digital Communications Specialist at Fahey\n",
|
104 |
+
"Media, I help businesses communicate effectively and authentically\n",
|
105 |
+
"with their target audiences online. With a decade of experience in\n",
|
106 |
+
"managing digital content and social media for various sectors, I have\n",
|
107 |
+
"the skills and knowledge to create engaging and impactful digital\n",
|
108 |
+
"communication strategies and campaigns.\n",
|
109 |
+
"My mission is to help businesses grow their brand awareness, reach,\n",
|
110 |
+
"and loyalty through compelling and relevant digital content and social\n",
|
111 |
+
"media marketing. Whether it is writing web content, blog posts,\n",
|
112 |
+
"newsletters, social media posts, or press releases, I deliver high-\n",
|
113 |
+
"quality and professional content that reflects the voice and values of\n",
|
114 |
+
"the business. I also offer consultancy and training services on digital\n",
|
115 |
+
"communication best practices and trends. Contact me to have a chat\n",
|
116 |
+
"about your business' communications needs. Contact card: https://\n",
|
117 |
+
"card.link/items/sCes34A/\n",
|
118 |
+
"Experience\n",
|
119 |
+
"AI Training Solutions\n",
|
120 |
+
"Director\n",
|
121 |
+
"April 2025 - Present (3 months)\n",
|
122 |
+
"Fahey Media\n",
|
123 |
+
"Founder/Digital Communications Specialist\n",
|
124 |
+
"June 2020 - Present (5 years 1 month)\n",
|
125 |
+
"Dublin, Ireland\n",
|
126 |
+
"Gibney Communications\n",
|
127 |
+
"Head of Digital Communications\n",
|
128 |
+
"April 2019 - June 2020 (1 year 3 months)\n",
|
129 |
+
"Dublin, Leinster, Ireland\n",
|
130 |
+
"Managing the accounts of some of Ireland’s largest companies in the retail,\n",
|
131 |
+
"construction and technology sectors.\n",
|
132 |
+
" Page 1 of 2 \n",
|
133 |
+
"Irish Daily Star\n",
|
134 |
+
"3 years 4 months\n",
|
135 |
+
"Digital Content Manager\n",
|
136 |
+
"October 2016 - March 2019 (2 years 6 months)\n",
|
137 |
+
"County Dublin, Ireland\n",
|
138 |
+
"Digital Content Manager of the Irish Daily Star's digital and online presence\n",
|
139 |
+
"Buzz.ie.\n",
|
140 |
+
"Online Journalist\n",
|
141 |
+
"December 2015 - October 2016 (11 months)\n",
|
142 |
+
"Dublin, Leinster, Ireland\n",
|
143 |
+
"KISC - Kandersteg International Scout Centre\n",
|
144 |
+
"PR & Marketing Assistant\n",
|
145 |
+
"January 2014 - September 2015 (1 year 9 months)\n",
|
146 |
+
"Lead project manager for all website publications and design, as well as any\n",
|
147 |
+
"other digital or physical publications compiled by the World Scout Centre. \n",
|
148 |
+
"Social media manager across Facebook, Twitter, LinkedIn, Instagram and\n",
|
149 |
+
"YouTube.\n",
|
150 |
+
"Public Relations head for the World Scout Centre.\n",
|
151 |
+
"Project manager at World Scout Jamboree 2015.\n",
|
152 |
+
"Carphone Warehouse\n",
|
153 |
+
"Sales Assistant\n",
|
154 |
+
"May 2010 - May 2013 (3 years 1 month)\n",
|
155 |
+
"Senior Sales Consultant, consistently hitting sales targets and scoring highly in\n",
|
156 |
+
"customer feedback. \n",
|
157 |
+
"Education\n",
|
158 |
+
"National University of Ireland, Maynooth\n",
|
159 |
+
"Bachelor of Arts (B.A.), Psychology · (2009 - 2012)\n",
|
160 |
+
"Coolmine Community School\n",
|
161 |
+
" · (2003 - 2009)\n",
|
162 |
+
" Page 2 of 2\n"
|
163 |
+
]
|
164 |
+
}
|
165 |
+
],
|
166 |
"source": [
|
167 |
"print(linkedin)"
|
168 |
]
|
|
|
183 |
"metadata": {},
|
184 |
"outputs": [],
|
185 |
"source": [
|
186 |
+
"name = \"Sean Fahey\""
|
187 |
]
|
188 |
},
|
189 |
{
|
|
|
205 |
},
|
206 |
{
|
207 |
"cell_type": "code",
|
208 |
+
"execution_count": 8,
|
209 |
"metadata": {},
|
210 |
+
"outputs": [
|
211 |
+
{
|
212 |
+
"data": {
|
213 |
+
"text/plain": [
|
214 |
+
"\"You are acting as Sean Fahey. You are answering questions on Sean Fahey's website, particularly questions related to Sean Fahey's career, background, skills and experience. Your responsibility is to represent Sean Fahey for interactions on the website as faithfully as possible. You are given a summary of Sean Fahey's background and LinkedIn profile which you can use to answer questions. Be professional and engaging, as if talking to a potential client or future employer who came across the website. If you don't know the answer, say so.\\n\\n## Summary:\\nMy name is Sean Fahey. I am an entrepreneur based in Ireland specialising in digital marketing and communications. I have a great interest in AI, am currently learning agentic AI and recently launched a corporate AI training company AI Training Solutions, under my parent company Fahey Media - a marketing and communications agency.\\n\\n## LinkedIn Profile:\\n\\xa0 \\xa0\\nContact\\nsean@faheymedia.ie\\nwww.linkedin.com/in/seanfaheyire\\n(LinkedIn)\\nTop Skills\\nJournalism\\nMarketing\\nSocial Media\\nLanguages\\nSpanish (Limited Working)\\nEnglish (Native or Bilingual)\\nSeán Fahey\\nFounder/Digital Communications Specialist at Fahey Media\\nIreland\\nSummary\\nAs the Founder and Digital Communications Specialist at Fahey\\nMedia, I help businesses communicate effectively and authentically\\nwith their target audiences online. With a decade of experience in\\nmanaging digital content and social media for various sectors, I have\\nthe skills and knowledge to create engaging and impactful digital\\ncommunication strategies and campaigns.\\nMy mission is to help businesses grow their brand awareness, reach,\\nand loyalty through compelling and relevant digital content and social\\nmedia marketing. Whether it is writing web content, blog posts,\\nnewsletters, social media posts, or press releases, I deliver high-\\nquality and professional content that reflects the voice and values of\\nthe business. 
I also offer consultancy and training services on digital\\ncommunication best practices and trends. Contact me to have a chat\\nabout your business' communications needs. Contact card: https://\\ncard.link/items/sCes34A/\\nExperience\\nAI Training Solutions\\nDirector\\nApril 2025\\xa0-\\xa0Present\\xa0(3 months)\\nFahey Media\\nFounder/Digital Communications Specialist\\nJune 2020\\xa0-\\xa0Present\\xa0(5 years 1 month)\\nDublin, Ireland\\nGibney Communications\\nHead of Digital Communications\\nApril 2019\\xa0-\\xa0June 2020\\xa0(1 year 3 months)\\nDublin, Leinster, Ireland\\nManaging the accounts of some of Ireland’s largest companies in the retail,\\nconstruction and technology sectors.\\n\\xa0 Page 1 of 2\\xa0 \\xa0\\nIrish Daily Star\\n3 years 4 months\\nDigital Content Manager\\nOctober 2016\\xa0-\\xa0March 2019\\xa0(2 years 6 months)\\nCounty Dublin, Ireland\\nDigital Content Manager of the Irish Daily Star's digital and online presence\\nBuzz.ie.\\nOnline Journalist\\nDecember 2015\\xa0-\\xa0October 2016\\xa0(11 months)\\nDublin, Leinster, Ireland\\nKISC - Kandersteg International Scout Centre\\nPR & Marketing Assistant\\nJanuary 2014\\xa0-\\xa0September 2015\\xa0(1 year 9 months)\\nLead project manager for all website publications and design, as well as any\\nother digital or physical publications compiled by the World Scout Centre. \\nSocial media manager across Facebook, Twitter, LinkedIn, Instagram and\\nYouTube.\\nPublic Relations head for the World Scout Centre.\\nProject manager at World Scout Jamboree 2015.\\nCarphone Warehouse\\nSales Assistant\\nMay 2010\\xa0-\\xa0May 2013\\xa0(3 years 1 month)\\nSenior Sales Consultant, consistently hitting sales targets and scoring highly in\\ncustomer feedback. 
\\nEducation\\nNational University of Ireland, Maynooth\\nBachelor of Arts (B.A.),\\xa0Psychology\\xa0·\\xa0(2009\\xa0-\\xa02012)\\nCoolmine Community School\\n\\xa0·\\xa0(2003\\xa0-\\xa02009)\\n\\xa0 Page 2 of 2\\n\\nWith this context, please chat with the user, always staying in character as Sean Fahey.\""
|
215 |
+
]
|
216 |
+
},
|
217 |
+
"execution_count": 8,
|
218 |
+
"metadata": {},
|
219 |
+
"output_type": "execute_result"
|
220 |
+
}
|
221 |
+
],
|
222 |
"source": [
|
223 |
"system_prompt"
|
224 |
]
|
|
|
237 |
},
|
238 |
{
|
239 |
"cell_type": "code",
|
240 |
+
"execution_count": 10,
|
241 |
"metadata": {},
|
242 |
+
"outputs": [
|
243 |
+
{
|
244 |
+
"name": "stdout",
|
245 |
+
"output_type": "stream",
|
246 |
+
"text": [
|
247 |
+
"* Running on local URL: http://127.0.0.1:7860\n",
|
248 |
+
"* To create a public link, set `share=True` in `launch()`.\n"
|
249 |
+
]
|
250 |
+
},
|
251 |
+
{
|
252 |
+
"data": {
|
253 |
+
"text/html": [
|
254 |
+
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
255 |
+
],
|
256 |
+
"text/plain": [
|
257 |
+
"<IPython.core.display.HTML object>"
|
258 |
+
]
|
259 |
+
},
|
260 |
+
"metadata": {},
|
261 |
+
"output_type": "display_data"
|
262 |
+
},
|
263 |
+
{
|
264 |
+
"data": {
|
265 |
+
"text/plain": []
|
266 |
+
},
|
267 |
+
"execution_count": 10,
|
268 |
+
"metadata": {},
|
269 |
+
"output_type": "execute_result"
|
270 |
+
}
|
271 |
+
],
|
272 |
"source": [
|
273 |
"gr.ChatInterface(chat, type=\"messages\").launch()"
|
274 |
]
|
|
|
303 |
},
|
304 |
{
|
305 |
"cell_type": "code",
|
306 |
+
"execution_count": 12,
|
307 |
"metadata": {},
|
308 |
"outputs": [],
|
309 |
"source": [
|
|
|
319 |
},
|
320 |
{
|
321 |
"cell_type": "code",
|
322 |
+
"execution_count": 13,
|
323 |
"metadata": {},
|
324 |
"outputs": [],
|
325 |
"source": [
|
|
|
333 |
},
|
334 |
{
|
335 |
"cell_type": "code",
|
336 |
+
"execution_count": 14,
|
337 |
"metadata": {},
|
338 |
"outputs": [],
|
339 |
"source": [
|
|
|
346 |
},
|
347 |
{
|
348 |
"cell_type": "code",
|
349 |
+
"execution_count": 15,
|
350 |
"metadata": {},
|
351 |
"outputs": [],
|
352 |
"source": [
|
|
|
359 |
},
|
360 |
{
|
361 |
"cell_type": "code",
|
362 |
+
"execution_count": 16,
|
363 |
"metadata": {},
|
364 |
"outputs": [],
|
365 |
"source": [
|
|
|
370 |
},
|
371 |
{
|
372 |
"cell_type": "code",
|
373 |
+
"execution_count": 17,
|
374 |
"metadata": {},
|
375 |
+
"outputs": [
|
376 |
+
{
|
377 |
+
"data": {
|
378 |
+
"text/plain": [
|
379 |
+
"'I do not hold any patents at this time. My focus has primarily been on digital marketing, communications, and recent ventures into AI training through AI Training Solutions. If you have any questions regarding my work or expertise in those areas, I’d be happy to help!'"
|
380 |
+
]
|
381 |
+
},
|
382 |
+
"execution_count": 17,
|
383 |
+
"metadata": {},
|
384 |
+
"output_type": "execute_result"
|
385 |
+
}
|
386 |
+
],
|
387 |
"source": [
|
388 |
"reply"
|
389 |
]
|
390 |
},
|
391 |
{
|
392 |
"cell_type": "code",
|
393 |
+
"execution_count": 18,
|
394 |
"metadata": {},
|
395 |
+
"outputs": [
|
396 |
+
{
|
397 |
+
"data": {
|
398 |
+
"text/plain": [
|
399 |
+
"Evaluation(is_acceptable=True, feedback='The response is good. It answers the question directly and then pivots to what Sean is actually working on.')"
|
400 |
+
]
|
401 |
+
},
|
402 |
+
"execution_count": 18,
|
403 |
+
"metadata": {},
|
404 |
+
"output_type": "execute_result"
|
405 |
+
}
|
406 |
+
],
|
407 |
"source": [
|
408 |
"evaluate(reply, \"do you hold a patent?\", messages[:1])"
|
409 |
]
|
410 |
},
|
411 |
{
|
412 |
"cell_type": "code",
|
413 |
+
"execution_count": 19,
|
414 |
"metadata": {},
|
415 |
"outputs": [],
|
416 |
"source": [
|
|
|
425 |
},
|
426 |
{
|
427 |
"cell_type": "code",
|
428 |
+
"execution_count": 20,
|
429 |
"metadata": {},
|
430 |
"outputs": [],
|
431 |
"source": [
|
|
|
452 |
},
|
453 |
{
|
454 |
"cell_type": "code",
|
455 |
+
"execution_count": 23,
|
456 |
"metadata": {},
|
457 |
+
"outputs": [
|
458 |
+
{
|
459 |
+
"name": "stdout",
|
460 |
+
"output_type": "stream",
|
461 |
+
"text": [
|
462 |
+
"* Running on local URL: http://127.0.0.1:7862\n",
|
463 |
+
"* Running on public URL: https://5020e028d1564a14fc.gradio.live\n",
|
464 |
+
"\n",
|
465 |
+
"This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
|
466 |
+
]
|
467 |
+
},
|
468 |
+
{
|
469 |
+
"data": {
|
470 |
+
"text/html": [
|
471 |
+
"<div><iframe src=\"https://5020e028d1564a14fc.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
472 |
+
],
|
473 |
+
"text/plain": [
|
474 |
+
"<IPython.core.display.HTML object>"
|
475 |
+
]
|
476 |
+
},
|
477 |
+
"metadata": {},
|
478 |
+
"output_type": "display_data"
|
479 |
+
},
|
480 |
+
{
|
481 |
+
"data": {
|
482 |
+
"text/plain": []
|
483 |
+
},
|
484 |
+
"execution_count": 23,
|
485 |
+
"metadata": {},
|
486 |
+
"output_type": "execute_result"
|
487 |
+
}
|
488 |
+
],
|
489 |
"source": [
|
490 |
+
"gr.ChatInterface(chat, type=\"messages\").launch(share=True)"
|
491 |
]
|
492 |
},
|
493 |
{
|
|
|
519 |
"name": "python",
|
520 |
"nbconvert_exporter": "python",
|
521 |
"pygments_lexer": "ipython3",
|
522 |
+
"version": "3.12.11"
|
523 |
}
|
524 |
},
|
525 |
"nbformat": 4,
|
eu-ai-act-claude.py
ADDED
@@ -0,0 +1,435 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import logging
|
4 |
+
from typing import List, Dict, Any, Optional
|
5 |
+
from dotenv import load_dotenv
|
6 |
+
from openai import OpenAI
|
7 |
+
from pypdf import PdfReader
|
8 |
+
import requests
|
9 |
+
import gradio as gr
|
10 |
+
from pydantic import BaseModel
|
11 |
+
|
12 |
+
# Configure logging
|
13 |
+
logging.basicConfig(level=logging.INFO)
|
14 |
+
logger = logging.getLogger(__name__)
|
15 |
+
|
16 |
+
# Load environment variables
|
17 |
+
load_dotenv(override=True)
|
18 |
+
|
19 |
+
class Config:
    """Central application configuration.

    Reads credentials from environment variables, validates that every
    required one is present, and constructs the API clients.  Raises
    ValueError at construction time when anything required is missing.
    """

    def __init__(self):
        # Clients are populated by _initialize_clients() below.
        self.openai_client = None
        self.gemini_client = None
        self.pushover_user = os.getenv("PUSHOVER_USER")
        self.pushover_token = os.getenv("PUSHOVER_TOKEN_EU")
        self.google_api_key = os.getenv("GOOGLE_API_KEY")
        self.pushover_url = "https://api.pushover.net/1/messages.json"
        self.pdf_path = "EU_AI_ACT.pdf"

        self._validate_config()
        self._initialize_clients()

    def _validate_config(self):
        """Raise ValueError naming every required env var that is unset/empty."""
        checks = {
            "OPENAI_API_KEY": os.getenv("OPENAI_API_KEY"),
            "GOOGLE_API_KEY": self.google_api_key,
            "PUSHOVER_USER": self.pushover_user,
            "PUSHOVER_TOKEN_EU": self.pushover_token,
        }
        missing = [name for name, value in checks.items() if not value]
        if missing:
            raise ValueError(f"Missing required environment variables: {missing}")

    def _initialize_clients(self):
        """Create the OpenAI client and a Gemini client via its OpenAI-compatible endpoint."""
        try:
            self.openai_client = OpenAI()
            self.gemini_client = OpenAI(
                api_key=self.google_api_key,
                base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
            )
        except Exception as e:
            logger.error(f"Failed to initialize API clients: {e}")
            raise
|
57 |
+
|
58 |
+
class PushNotificationService:
    """Send push notifications through the Pushover REST API."""

    def __init__(self, config: Config):
        # Supplies the Pushover credentials and endpoint URL.
        self.config = config

    def send_notification(self, message: str) -> bool:
        """POST *message* to Pushover.

        Returns True on success.  Any failure is logged and reported as
        False so that notification problems never crash the caller.
        """
        try:
            logger.info(f"Sending notification: {message}")
            data = {
                "user": self.config.pushover_user,
                "token": self.config.pushover_token,
                "message": message,
            }
            resp = requests.post(self.config.pushover_url, data=data, timeout=10)
            resp.raise_for_status()
        except Exception as e:
            logger.error(f"Failed to send notification: {e}")
            return False
        return True
|
78 |
+
|
79 |
+
class DocumentLoader:
    """Load the configured PDF and expose its text as one string."""

    def __init__(self, config: Config):
        self.config = config
        self.document_content = ""
        self._load_document()

    def _load_document(self):
        """Extract text from every page of ``config.pdf_path``.

        Pages that fail to extract are logged and skipped.  If loading
        fails entirely, the loader degrades to a short fallback string
        instead of raising, so the application can still start.
        """
        try:
            if not os.path.exists(self.config.pdf_path):
                raise FileNotFoundError(f"PDF file not found: {self.config.pdf_path}")

            reader = PdfReader(self.config.pdf_path)
            pages = []
            for page_num, page in enumerate(reader.pages):
                try:
                    extracted = page.extract_text()
                except Exception as e:
                    logger.warning(f"Failed to extract text from page {page_num}: {e}")
                    continue
                if extracted:
                    pages.append(extracted)

            self.document_content = "\n".join(pages)
            logger.info(f"Successfully loaded document with {len(self.document_content)} characters")
        except Exception as e:
            logger.error(f"Failed to load document: {e}")
            # Fallback keeps downstream prompt construction working.
            self.document_content = "Document loading failed. Operating with limited information."
|
110 |
+
|
111 |
+
class ToolHandler:
    """Execute LLM tool calls and expose their OpenAI function schemas."""

    def __init__(self, notification_service: PushNotificationService):
        self.notification_service = notification_service

    def record_user_details(self, email: str, name: str = "Name not provided",
                            notes: str = "No additional notes") -> Dict[str, str]:
        """Push a notification recording a user's contact details."""
        try:
            note = f"Recording interest from {name} with email {email} and notes: {notes}"
            sent = self.notification_service.send_notification(note)
        except Exception as e:
            logger.error(f"Failed to record user details: {e}")
            return {"status": "error", "message": str(e)}
        return {"status": "success" if sent else "notification_failed", "recorded": "ok"}

    def record_unknown_question(self, question: str) -> Dict[str, str]:
        """Push a notification recording a question the bot could not answer."""
        try:
            sent = self.notification_service.send_notification(f"Unanswered question: {question}")
        except Exception as e:
            logger.error(f"Failed to record unknown question: {e}")
            return {"status": "error", "message": str(e)}
        return {"status": "success" if sent else "notification_failed", "recorded": "ok"}

    @staticmethod
    def _function_schema(name, description, properties, required):
        # Wraps one function description in the OpenAI tool-schema envelope.
        return {
            "type": "function",
            "function": {
                "name": name,
                "description": description,
                "parameters": {
                    "type": "object",
                    "properties": properties,
                    "required": required,
                    "additionalProperties": False
                }
            }
        }

    def get_tools_schema(self) -> List[Dict[str, Any]]:
        """Return the tool schemas advertised to the OpenAI chat API."""
        return [
            self._function_schema(
                "record_user_details",
                "Record user contact information when they express interest in follow-up",
                {
                    "email": {
                        "type": "string",
                        "description": "The user's email address"
                    },
                    "name": {
                        "type": "string",
                        "description": "The user's name if provided"
                    },
                    "notes": {
                        "type": "string",
                        "description": "Additional context about the conversation"
                    },
                },
                ["email"],
            ),
            self._function_schema(
                "record_unknown_question",
                "Record questions that couldn't be answered from the documentation",
                {
                    "question": {
                        "type": "string",
                        "description": "The question that couldn't be answered"
                    },
                },
                ["question"],
            ),
        ]

    def handle_tool_calls(self, tool_calls) -> List[Dict[str, Any]]:
        """Run each requested tool and return the tool-role reply messages."""
        dispatch = {
            "record_user_details": self.record_user_details,
            "record_unknown_question": self.record_unknown_question,
        }
        replies = []
        for call in tool_calls:
            try:
                fn_name = call.function.name
                kwargs = json.loads(call.function.arguments)
                logger.info(f"Executing tool: {fn_name}")

                handler = dispatch.get(fn_name)
                if handler is None:
                    outcome = {"status": "error", "message": f"Unknown tool: {fn_name}"}
                else:
                    outcome = handler(**kwargs)

                replies.append({
                    "role": "tool",
                    "content": json.dumps(outcome),
                    "tool_call_id": call.id
                })
            except Exception as e:
                logger.error(f"Tool call failed: {e}")
                replies.append({
                    "role": "tool",
                    "content": json.dumps({"status": "error", "message": str(e)}),
                    "tool_call_id": call.id
                })
        return replies
|
215 |
+
|
216 |
+
class Evaluation(BaseModel):
    """Structured verdict produced by the evaluator LLM.

    Returned by ``ResponseEvaluator.evaluate_response`` via OpenAI's
    structured-output parsing (``response_format=Evaluation``).
    """
    # True when the chatbot reply meets the quality bar.
    is_acceptable: bool
    # Reviewer feedback; fed back into a retry when is_acceptable is False.
    feedback: str
|
220 |
+
|
221 |
+
class ResponseEvaluator:
    """Judge chatbot replies for quality using a second LLM (Gemini)."""

    def __init__(self, config: Config, document_content: str):
        self.config = config
        self.document_content = document_content
        # Built once; the evaluator prompt embeds a documentation excerpt.
        self.system_prompt = self._build_evaluator_prompt()

    def _build_evaluator_prompt(self) -> str:
        """Return the evaluator system prompt with a 5000-char doc excerpt."""
        return f"""You are an evaluator for an EU AI Act expert chatbot.

Your task is to determine if the chatbot's response is acceptable quality based on:
1. Accuracy relative to the provided EU AI Act documentation
2. Clarity and helpfulness for the user
3. Professional and engaging tone
4. Appropriate use of the documentation context

The chatbot has access to this EU AI Act documentation:

{self.document_content[:5000]}...

Evaluate whether the response is acceptable and provide constructive feedback."""

    def evaluate_response(self, reply: str, message: str, history: List[Dict[str, str]]) -> Optional[Evaluation]:
        """Evaluate *reply*; return an Evaluation, or None if the call fails."""
        conversation = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": self._build_user_prompt(reply, message, history)},
        ]
        try:
            parsed = self.config.gemini_client.beta.chat.completions.parse(
                model="gemini-2.0-flash",
                messages=conversation,
                response_format=Evaluation,
                timeout=30,
            )
        except Exception as e:
            logger.error(f"Evaluation failed: {e}")
            return None
        return parsed.choices[0].message.parsed

    def _build_user_prompt(self, reply: str, message: str, history: List[Dict[str, str]]) -> str:
        """Format the last five turns plus the exchange under evaluation."""
        recent = "\n".join(f"{turn['role']}: {turn['content']}" for turn in history[-5:])
        return f"""
Conversation history (last 5 messages):
{recent}

Latest user message: {message}
Chatbot response: {reply}

Please evaluate if this response is acceptable and provide feedback.
"""
|
276 |
+
|
277 |
+
class EUAIActChatbot:
    """Main chatbot: wires together config, document loading, tools and evaluation.

    Generates answers about the EU AI Act with tool support, grades each draft
    with the evaluator, and regenerates up to ``max_retries`` times when a
    draft is rejected.
    """

    def __init__(self):
        self.config = Config()
        self.notification_service = PushNotificationService(self.config)
        self.document_loader = DocumentLoader(self.config)
        self.tool_handler = ToolHandler(self.notification_service)
        self.evaluator = ResponseEvaluator(self.config, self.document_loader.document_content)
        self.system_prompt = self._build_system_prompt()
        # Maximum number of regeneration attempts after a failed evaluation.
        self.max_retries = 2

    def _build_system_prompt(self) -> str:
        """Build the main system prompt, embedding the full Act documentation."""
        return f"""You are an expert assistant specializing in the EU Artificial Intelligence Act (EU AI Act).

Your role is to help users understand:
- Key principles and obligations under the EU AI Act
- Risk classifications for AI systems
- Compliance requirements for businesses
- How the Act applies to different sectors and use cases

Guidelines:
- Provide accurate, clear, and actionable guidance based on the official documentation
- Make complex legal language accessible to business owners and compliance officers
- Maintain a professional, informative, and approachable tone
- If you cannot answer a question from the provided documentation, use the record_unknown_question tool
- If users show interest in deeper support, encourage them to share contact details using record_user_details

## EU AI Act Documentation:
{self.document_loader.document_content}

Use this documentation to provide accurate, helpful responses about the EU AI Act."""

    def generate_response(self, message: str, history: List[Dict[str, str]]) -> str:
        """Generate a reply for `message`, gated by the evaluator with retries.

        Never raises: on any failure a user-facing apology string is returned.
        """
        messages = [{"role": "system", "content": self.system_prompt}]
        messages.extend(history)
        messages.append({"role": "user", "content": message})

        try:
            response = self._call_openai_with_tools(messages)

            # BUGFIX: the original tested `self.max_retries > 0` (always true,
            # never decremented) so at most ONE retry ever ran and the retried
            # answer was never re-evaluated. Now `max_retries` is honored and
            # each regenerated answer is graded again.
            for _ in range(self.max_retries):
                evaluation = self.evaluator.evaluate_response(response, message, history)
                if evaluation is None or evaluation.is_acceptable:
                    break
                logger.info("Response failed evaluation, retrying...")
                response = self._retry_with_feedback(messages, response, evaluation.feedback)

            return response

        except Exception as e:
            logger.error(f"Failed to generate response: {e}")
            return "I'm sorry, I'm experiencing technical difficulties. Please try again later."

    def _call_openai_with_tools(self, messages: List[Dict[str, str]]) -> str:
        """Run the completion loop, executing tool calls until the model answers.

        Tool round-trips are capped at 5 to prevent an infinite tool-call loop.
        """
        tools = self.tool_handler.get_tools_schema()
        max_iterations = 5

        for _ in range(max_iterations):
            response = self.config.openai_client.chat.completions.create(
                model="gpt-4o-mini",
                messages=messages,
                tools=tools,
                timeout=60,
            )
            choice = response.choices[0]

            if choice.finish_reason != "tool_calls":
                return choice.message.content

            # Model requested tools: execute them and feed the results back.
            tool_results = self.tool_handler.handle_tool_calls(choice.message.tool_calls)
            messages.append(choice.message)
            messages.extend(tool_results)

        return "I apologize, but I encountered an issue processing your request. Please try rephrasing your question."

    def _retry_with_feedback(self, original_messages: List[Dict[str, str]],
                             failed_response: str, feedback: str) -> str:
        """Regenerate once using the evaluator's feedback.

        Falls back to the rejected reply if the retry call itself fails.
        """
        try:
            retry_prompt = f"""Your previous response was not acceptable. Here's what needs improvement:

Previous response: {failed_response}
Feedback: {feedback}

Please provide a better response addressing these concerns."""

            messages = original_messages + [{"role": "user", "content": retry_prompt}]

            response = self.config.openai_client.chat.completions.create(
                model="gpt-4o-mini",
                messages=messages,
                timeout=60,
            )
            return response.choices[0].message.content

        except Exception as e:
            logger.error(f"Retry failed: {e}")
            return failed_response  # Return original if retry fails
|
385 |
+
|
386 |
+
def create_gradio_interface():
    """Create and configure the Gradio chat interface.

    Raises whatever EUAIActChatbot construction raises (e.g. missing API
    keys), after logging the failure.
    """
    try:
        chatbot = EUAIActChatbot()

        def chat_wrapper(message: str, history: List[List[str]]) -> str:
            """Convert Gradio pair-format history to OpenAI message dicts and delegate."""
            formatted_history = []
            for user_msg, assistant_msg in history:  # no index needed (was a pointless enumerate)
                formatted_history.append({"role": "user", "content": user_msg})
                if assistant_msg:  # Only add if assistant responded
                    formatted_history.append({"role": "assistant", "content": assistant_msg})

            return chatbot.generate_response(message, formatted_history)

        # BUGFIX: retry_btn / undo_btn / clear_btn were removed from
        # gr.ChatInterface in Gradio 4.x (and expected str/None, not bool, in
        # 3.x), so passing them raised a TypeError at startup. The default
        # interface provides retry/undo/clear affordances without them.
        interface = gr.ChatInterface(
            fn=chat_wrapper,
            title="EU AI Act Expert Assistant",
            description="Ask questions about the EU Artificial Intelligence Act. I can help you understand compliance requirements, risk classifications, and how the Act applies to your business.",
            examples=[
                "What are the main risk categories in the EU AI Act?",
                "How does the EU AI Act affect my e-commerce business?",
                "What are the compliance requirements for high-risk AI systems?",
                "Can you explain the prohibited AI practices?"
            ],
        )

        return interface

    except Exception as e:
        logger.error(f"Failed to create interface: {e}")
        raise
|
422 |
+
|
423 |
+
if __name__ == "__main__":
    # Entry point: build the UI and serve it on all interfaces, port 7860.
    try:
        app = create_gradio_interface()
        app.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,
            debug=False,
        )
    except Exception as e:
        logger.error(f"Failed to launch application: {e}")
        print(f"Error: {e}")
        print("Please check your configuration and try again.")
|
eu_act_project/EU_AI_ACT.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:bba630444b3278e881066774002a1d7824308934f49ccfa203e65be43692f55e
|
3 |
+
size 2583319
|
eu_act_project/eu-ai-act.py
ADDED
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dotenv import load_dotenv
|
2 |
+
from openai import OpenAI
|
3 |
+
from pypdf import PdfReader
|
4 |
+
import json
|
5 |
+
import os
|
6 |
+
import requests
|
7 |
+
import gradio as gr
|
8 |
+
|
9 |
+
|
10 |
+
load_dotenv(override=True)
|
11 |
+
|
12 |
+
|
13 |
+
openai = OpenAI()
|
14 |
+
|
15 |
+
|
16 |
+
|
17 |
+
pushover_user = os.getenv("PUSHOVER_USER")
|
18 |
+
pushover_token = os.getenv("PUSHOVER_TOKEN_EU")
|
19 |
+
pushover_url = "https://api.pushover.net/1/messages.json"
|
20 |
+
|
21 |
+
|
22 |
+
|
23 |
+
def push(message):
    """Send `message` as a Pushover notification (best-effort).

    BUGFIX: the original `requests.post` had no timeout (could hang the chat
    turn indefinitely) and any network error propagated and crashed the
    caller. Notifications are side channels, so failures are logged and
    swallowed instead.
    """
    print(f"Push: {message}")
    payload = {"user": pushover_user, "token": pushover_token, "message": message}
    try:
        requests.post(pushover_url, data=payload, timeout=10)
    except requests.RequestException as e:
        # Never let a notification failure break the conversation flow.
        print(f"Push failed: {e}", flush=True)
|
27 |
+
|
28 |
+
|
29 |
+
|
30 |
+
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Tool target: notify via Pushover that a visitor left contact details."""
    details = f"Recording interest from {name} with email {email} and notes {notes}"
    push(details)
    return {"recorded": "ok"}
|
33 |
+
|
34 |
+
|
35 |
+
|
36 |
+
def record_unknown_question(question):
    """Tool target: log a question the assistant could not answer from the Act text."""
    note = f"Recording {question} asked that I couldn't answer"
    push(note)
    return {"recorded": "ok"}
|
39 |
+
|
40 |
+
|
41 |
+
|
42 |
+
# OpenAI function-calling schema for record_user_details: only "email" is
# mandatory; "name" and "notes" are optional extras.
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user",
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it",
            },
            "notes": {
                "type": "string",
                "description": "Any additional information about the conversation that's worth recording to give context",
            },
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}
|
66 |
+
|
67 |
+
|
68 |
+
|
69 |
+
# OpenAI function-calling schema for record_unknown_question: one required
# string argument, the unanswered question itself.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered",
            },
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
|
84 |
+
|
85 |
+
|
86 |
+
|
87 |
+
# Tool schemas advertised to the model, wrapped in OpenAI "function" tool format.
tools = [
    {"type": "function", "function": schema}
    for schema in (record_user_details_json, record_unknown_question_json)
]
|
89 |
+
|
90 |
+
|
91 |
+
|
92 |
+
|
93 |
+
|
94 |
+
|
95 |
+
def handle_tool_calls(tool_calls):
    """Execute each requested tool and return OpenAI-format tool-result messages.

    Dispatches by name against module-level functions; an unknown tool name
    yields an empty result dict rather than raising.
    """
    results = []
    for call in tool_calls:
        tool_name = call.function.name
        kwargs = json.loads(call.function.arguments)
        print(f"Tool called: {tool_name}", flush=True)
        # Look the callable up among the module's globals (push/record_* live there).
        fn = globals().get(tool_name)
        outcome = fn(**kwargs) if fn else {}
        results.append(
            {"role": "tool", "content": json.dumps(outcome), "tool_call_id": call.id}
        )
    return results
|
105 |
+
|
106 |
+
|
107 |
+
|
108 |
+
# Extract the full text of the EU AI Act PDF into a single string,
# skipping pages whose extraction yields nothing.
reader = PdfReader("EU_AI_ACT.pdf")
page_texts = (page.extract_text() for page in reader.pages)
euact = "".join(text for text in page_texts if text)
|
114 |
+
|
115 |
+
|
116 |
+
|
117 |
+
|
118 |
+
# Persona + grounding for the main assistant: the instruction text followed by
# the full Act documentation extracted above.
system_prompt = (
    "You are acting as an expert assistant on the EU Artificial Intelligence Act (EU AI Act). "
    "You are helping users understand the EU AI Act, including its key principles, obligations, risk classifications, and compliance requirements. "
    "Your role is to explain how the Act applies to different types of businesses, sectors, and AI use cases, based on the official documentation provided under the name 'euact'. "
    "You must provide accurate, clear, and actionable guidance, making complex legal and technical language easier for users to understand. "
    "Always remain professional, informative, and approachable—your tone should be that of a helpful advisor assisting a business owner, compliance officer, or curious professional. "
    "If you cannot answer a specific question using the provided 'euact' documentation, record it using your record_unknown_question tool. "
    "If the user appears interested in deeper support or guidance, encourage them to share their email and record it using your record_user_details tool for follow-up."
    f"\n\n## EU AI Act Documentation:\n{euact}\n\n"
    "With this context, please assist the user, always staying in character as a knowledgeable and helpful guide to the EU AI Act."
)
|
128 |
+
|
129 |
+
|
130 |
+
|
131 |
+
|
132 |
+
def chat(message, history):
    """Answer one user turn, executing any tool calls the model requests.

    NOTE(review): this definition is shadowed by a later `chat` in this file;
    this tool-aware version is effectively dead code as written.
    """
    messages = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": message}]
    while True:
        # Call the LLM, advertising the tool schemas.
        response = openai.chat.completions.create(model="gpt-4.1-mini", messages=messages, tools=tools)
        choice = response.choices[0]
        if choice.finish_reason != "tool_calls":
            # Final answer produced — stop looping.
            return choice.message.content
        # The model asked for tools: run them and loop with the results appended.
        assistant_msg = choice.message
        messages.append(assistant_msg)
        messages.extend(handle_tool_calls(assistant_msg.tool_calls))
|
154 |
+
|
155 |
+
# %%
# Structured verdict returned by the evaluator LLM.

from pydantic import BaseModel


class Evaluation(BaseModel):
    """Evaluator verdict: pass/fail flag plus free-text feedback."""
    is_acceptable: bool  # True when the reply passes quality control
    feedback: str  # reviewer feedback explaining the verdict
|
163 |
+
|
164 |
+
|
165 |
+
|
166 |
+
|
167 |
+
# Persona + grounding for the Gemini-based response evaluator.
# NOTE(review): "context on the EU Artificial Intelligence in the form" looks
# like it's missing the word "Act" — left as-is since it is a runtime prompt.
evaluator_system_prompt = (
    "You are an evaluator that decides whether a response to a question is acceptable. "
    "You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. "
    "The Agent is playing the role of an expert on the EU Artificial Intelligence Act. "
    "The Agent has been instructed to be professional and engaging. "
    "The Agent has been provided with context on the EU Artificial Intelligence in the form of the official act texts. Here's the information:"
    f"\n\n## EU Act Texts:\n{euact}\n\n"
    "With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback."
)
|
175 |
+
|
176 |
+
|
177 |
+
|
178 |
+
|
179 |
+
def evaluator_user_prompt(reply, message, history):
    """Assemble the evaluator's user prompt from the conversation pieces."""
    sections = [
        f"Here's the conversation between the User and the Agent: \n\n{history}\n\n",
        f"Here's the latest message from the User: \n\n{message}\n\n",
        f"Here's the latest response from the Agent: \n\n{reply}\n\n",
        "Please evaluate the response, replying with whether it is acceptable and your feedback.",
    ]
    return "".join(sections)
|
185 |
+
|
186 |
+
|
187 |
+
|
188 |
+
|
189 |
+
import os  # NOTE(review): redundant — os is already imported at the top of the file.

# Gemini, reached through its OpenAI-compatible endpoint.
gemini = OpenAI(
    api_key=os.getenv("GOOGLE_API_KEY"),
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)
|
194 |
+
|
195 |
+
|
196 |
+
|
197 |
+
|
198 |
+
def evaluate(reply, message, history) -> Evaluation:
    """Ask Gemini to grade the Agent's reply, parsed into an Evaluation."""
    convo = [
        {"role": "system", "content": evaluator_system_prompt},
        {"role": "user", "content": evaluator_user_prompt(reply, message, history)},
    ]
    result = gemini.beta.chat.completions.parse(
        model="gemini-2.0-flash", messages=convo, response_format=Evaluation
    )
    return result.choices[0].message.parsed
|
203 |
+
|
204 |
+
|
205 |
+
|
206 |
+
|
207 |
+
# NOTE(review): module-level smoke test — this issues a real OpenAI call at
# import time; presumably leftover notebook experimentation. Confirm intended.
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": "what is high risk AI"},
]
response = openai.chat.completions.create(model="gpt-4.1-mini", messages=messages)
reply = response.choices[0].message.content
|
210 |
+
|
211 |
+
|
212 |
+
|
213 |
+
|
214 |
+
|
215 |
+
|
216 |
+
|
217 |
+
def rerun(reply, message, history, feedback):
    """Regenerate the reply after quality control rejected it, citing the feedback.

    NOTE(review): uses gpt-4o-mini while chat() uses gpt-4.1-mini — confirm
    whether the model mismatch is intentional.
    """
    retry_system = (
        system_prompt
        + "\n\n## Previous answer rejected\nYou just tried to reply, but the quality control rejected your reply\n"
        + f"## Your attempted answer:\n{reply}\n\n"
        + f"## Reason for rejection:\n{feedback}\n\n"
    )
    messages = [{"role": "system", "content": retry_system}] + history + [{"role": "user", "content": message}]
    response = openai.chat.completions.create(model="gpt-4o-mini", messages=messages)
    return response.choices[0].message.content
|
224 |
+
|
225 |
+
|
226 |
+
|
227 |
+
|
228 |
+
def chat(message, history):
    """Answer one turn, then gate the reply through the Gemini evaluator.

    On a failed evaluation the reply is regenerated once via rerun() with the
    evaluator's feedback attached.
    """
    convo = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": message}]
    completion = openai.chat.completions.create(model="gpt-4.1-mini", messages=convo)
    reply = completion.choices[0].message.content

    verdict = evaluate(reply, message, history)

    if verdict.is_acceptable:
        print("Passed evaluation - returning reply")
    else:
        print("Failed evaluation - retrying")
        print(verdict.feedback)
        reply = rerun(reply, message, history, verdict.feedback)
    return reply
|
243 |
+
|
244 |
+
|
245 |
+
|
246 |
+
|
247 |
+
# Serve the evaluated chat() through Gradio, using OpenAI-style "messages" history format.
gr.ChatInterface(chat, type="messages").launch()
|
248 |
+
|
249 |
+
|
eu_act_project/eu_ai_project.ipynb
ADDED
File without changes
|