# Rewriting the app into a Gradio-compatible format
import zipfile
import os
# Directory for Gradio version
gradio_dir = "/mnt/data/exam-ai-gradio"
os.makedirs(gradio_dir, exist_ok=True)
# Gradio-based app.py content
gradio_app_code = '''import gradio as gr
from transformers import pipeline
from googlesearch import search
import requests
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
# Initialize models
summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
embed_model = SentenceTransformer("all-MiniLM-L6-v2")
def summarize_text(text, max_len=512):
    if not text.strip():
        return "No content to summarize."
    try:
        summary = summarizer(text, max_length=max_len, min_length=100, do_sample=False)
        return summary[0]["summary_text"]
    except Exception as e:
        return f"Summarization error: {e}"
def search_links(query, max_results=5):
    try:
        return list(search(query, num_results=max_results))
    except Exception:
        return []
def fetch_page_content(url):
    try:
        res = requests.get(url, timeout=10)
        soup = BeautifulSoup(res.text, "html.parser")
        return soup.get_text(separator=" ", strip=True)
    except Exception:
        return ""
def embed_chunks(chunks):
    return embed_model.encode(chunks)
def create_faiss_index(chunks):
    embeddings = embed_chunks(chunks)
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(np.array(embeddings))
    return index, embeddings
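# IndexFlatL2 performs exact (brute-force) L2 search; SentenceTransformer.encode
# returns float32 NumPy arrays, which is the dtype FAISS expects, so no extra
# conversion is needed beyond np.array().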
def generate_notes(query):
    query_phrases = [
        f"{query} exam syllabus 2025",
        f"{query} exam dates",
        f"{query} preparation resources",
        f"{query} important topics"
    ]
    chunks = []
    docs = []
    for phrase in query_phrases:
        urls = search_links(phrase, max_results=3)
        for url in urls:
            content = fetch_page_content(url)
            if len(content.strip()) > 200:
                docs.append(content)
    for doc in docs:
        chunks.extend([doc[i:i+300] for i in range(0, len(doc), 300)])
    if not chunks:
        return "⚠️ No content could be retrieved. Please try again with a different query."
    index, _ = create_faiss_index(chunks)
    prompt = f"important topics and notes for {query} exam"
    query_vec = embed_chunks([prompt])[0].reshape(1, -1)
    # k must not exceed the number of indexed chunks, otherwise FAISS pads results with -1 indices
    k = min(15, len(chunks))
    D, I = index.search(query_vec, k)
    selected = [chunks[i] for i in I[0]]
    # Deduplicate while preserving retrieval order so the most relevant chunks are kept
    unique_chunks = list(dict.fromkeys(c.strip() for c in selected if len(c.strip()) > 200))
    combined = "\\n\\n".join(unique_chunks[:10])
    notes = summarize_text(combined)
    return notes
iface = gr.Interface(
    fn=generate_notes,
    inputs=gr.Textbox(lines=1, placeholder="Enter exam name (e.g., AAI ATC)", label="Exam Name"),
    outputs=gr.Textbox(lines=15, label="AI-Generated Important Topic Notes"),
    title="AI Exam Assistant",
    description="Enter your exam name and get summarized notes with syllabus, dates, topics and resources."
)
if __name__ == "__main__":
    iface.launch()
'''
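# A minimal local smoke test of the generated app (a sketch, assuming the packages
# from requirements.txt are installed): run `python app.py` inside the output
# directory; Gradio serves the interface on http://127.0.0.1:7860 by default.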
# Gradio-specific requirements
gradio_requirements = '''gradio
transformers
torch
sentence-transformers
faiss-cpu
googlesearch-python
beautifulsoup4
requests
'''
# Save files
with open(os.path.join(gradio_dir, "app.py"), "w") as f:
f.write(gradio_app_code)
with open(os.path.join(gradio_dir, "requirements.txt"), "w") as f:
f.write(gradio_requirements)
# Create zip
zip_path = "/mnt/data/exam-ai-gradio.zip"
with zipfile.ZipFile(zip_path, "w") as zipf:
    for root, _, files in os.walk(gradio_dir):
        for file in files:
            full_path = os.path.join(root, file)
            arcname = os.path.relpath(full_path, gradio_dir)
            zipf.write(full_path, arcname=arcname)
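# Optional sanity check (a sketch, assuming gradio_dir holds only the two files
# written above): confirm the archive contains exactly what a Gradio Space needs.
with zipfile.ZipFile(zip_path) as zf:
    assert sorted(zf.namelist()) == ["app.py", "requirements.txt"]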
zip_path
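# The resulting archive can be uploaded to a Hugging Face Space created with the
# Gradio SDK; Spaces install requirements.txt automatically and run app.py as the
# entry point.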