import gradio as gr
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from sentence_transformers import SentenceTransformer, util
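
# Models are loaded once at startup: a T5 question generator, a sentence-embedding
# model for plagiarism checks, and Whisper for speech recognition.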
qg_model_name = "iarfmoose/t5-base-question-generator"
tokenizer_qg = AutoTokenizer.from_pretrained(qg_model_name)
model_qg = AutoModelForSeq2SeqLM.from_pretrained(qg_model_name)

model_plag = SentenceTransformer('all-MiniLM-L6-v2')

asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")
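

# Quiz generator: samples question candidates from the T5 question-generation
# model and returns them as a numbered list.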
def generate_mcqs(text, num_questions=3):
    # Gradio sliders can deliver floats; generation needs an integer count.
    num_questions = int(num_questions)
    input_text = f"generate questions: {text.strip()}"
    input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)

    outputs = model_qg.generate(
        input_ids=input_ids,
        max_length=256,
        num_return_sequences=num_questions,
        do_sample=True,
        top_k=50,
        top_p=0.95
    )

    questions = [tokenizer_qg.decode(out, skip_special_tokens=True).strip() for out in outputs]
    return "\n".join([f"{i+1}. {q}" for i, q in enumerate(questions)])
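

# Weakness analyzer: expects a CSV with "Topic" and "Score" columns and reports
# the mean score per topic, weakest topics first.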
def analyze_weakness(csv_file):
    df = pd.read_csv(csv_file.name)
    summary = df.groupby("Topic")["Score"].mean().sort_values()
    return summary.to_string()
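

# Teaching assistant: placeholder reply until a local LLM is integrated.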
def chatbot_response(message, history):
    return "This is a placeholder response for now. (LLM not integrated)"
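

# Speech question solver: transcribe the uploaded audio with Whisper, then feed
# the transcript to the question-generation model for a response.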
def speech_answer(audio_file_path):
    transcription = asr(audio_file_path)["text"]
    input_text = f"generate questions: {transcription.strip()}"
    input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
    outputs = model_qg.generate(input_ids, max_length=256, num_return_sequences=1)
    response = tokenizer_qg.decode(outputs[0], skip_special_tokens=True)
    return f"Transcript: {transcription.strip()}\n\nAnswer: {response.strip()}"
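

# Summarizer: BART-large-CNN summarization pipeline.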
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")


def summarize_text(text):
    result = summarizer(text, max_length=120, min_length=40, do_sample=False)
    return result[0]["summary_text"]
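

# Engagement predictor: flags risk of disengagement when the average "TimeSpent"
# value falls below a fixed threshold (10).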
def predict_engagement(file):
    df = pd.read_csv(file.name)
    avg_time = df["TimeSpent"].mean()
    return "✅ Engaged student" if avg_time >= 10 else "⚠️ Risk of disengagement"
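

# Badge generator: awards a badge tier based on the average "Score" in the CSV.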
def generate_badge(file):
    df = pd.read_csv(file.name)
    avg_score = df["Score"].mean()
    if avg_score >= 80:
        return "🏅 Gold Badge"
    elif avg_score >= 50:
        return "🥈 Silver Badge"
    else:
        return "🥉 Bronze Badge"
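

# Translator: mock implementation; no translation model is wired in yet.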
def translate_text(text, target_lang):
    return f"(Translated to {target_lang}) - This is a mock translation."
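

# Plagiarism checker: cosine similarity of sentence embeddings; scores above
# 0.8 are flagged as possible plagiarism.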
def check_plagiarism(text1, text2):
    emb1 = model_plag.encode(text1, convert_to_tensor=True)
    emb2 = model_plag.encode(text2, convert_to_tensor=True)
    score = util.cos_sim(emb1, emb2).item()
    return f"Similarity Score: {score:.2f} - {'⚠️ Possible Plagiarism' if score > 0.8 else '✅ Looks Original'}"
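

# Gradio UI: one tab per tool.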
with gr.Blocks() as demo:
    gr.Markdown("# Smart LMS Suite (Offline)")

    with gr.Tab("Quiz Generator"):
        quiz_text = gr.Textbox(label="Input Content", lines=6, placeholder="Paste a paragraph here...")
        quiz_slider = gr.Slider(1, 10, value=3, label="Number of Questions")
        quiz_btn = gr.Button("Generate Quiz")
        quiz_output = gr.Textbox(label="Generated Questions", lines=10)
        quiz_btn.click(fn=generate_mcqs, inputs=[quiz_text, quiz_slider], outputs=quiz_output)

    with gr.Tab("Weakness Analyzer"):
        weak_file = gr.File(label="Upload CSV with Topic & Score columns")
        weak_btn = gr.Button("Analyze")
        weak_out = gr.Textbox(label="Analysis")
        weak_btn.click(fn=analyze_weakness, inputs=weak_file, outputs=weak_out)

    with gr.Tab("Teaching Assistant"):
        gr.ChatInterface(fn=chatbot_response)

    with gr.Tab("Speech Q Solver"):
        audio_in = gr.Audio(label="Upload Audio", type="filepath")
        audio_btn = gr.Button("Transcribe + Generate Answer")
        audio_out = gr.Textbox(label="Answer")
        audio_btn.click(fn=speech_answer, inputs=audio_in, outputs=audio_out)

    with gr.Tab("Summarizer"):
        sum_text = gr.Textbox(lines=5, label="Paste Text")
        sum_btn = gr.Button("Summarize")
        sum_out = gr.Textbox(label="Summary")
        sum_btn.click(fn=summarize_text, inputs=sum_text, outputs=sum_out)

    with gr.Tab("Engagement Predictor"):
        eng_file = gr.File(label="Upload CSV with TimeSpent column")
        eng_btn = gr.Button("Predict")
        eng_out = gr.Textbox()
        eng_btn.click(fn=predict_engagement, inputs=eng_file, outputs=eng_out)

    with gr.Tab("Badge Generator"):
        badge_file = gr.File(label="Upload CSV with Score column")
        badge_btn = gr.Button("Get Badge")
        badge_out = gr.Textbox()
        badge_btn.click(fn=generate_badge, inputs=badge_file, outputs=badge_out)

    with gr.Tab("Translator"):
        trans_in = gr.Textbox(label="Enter Text")
        trans_lang = gr.Textbox(label="Target Language")
        trans_btn = gr.Button("Translate")
        trans_out = gr.Textbox()
        trans_btn.click(fn=translate_text, inputs=[trans_in, trans_lang], outputs=trans_out)

    with gr.Tab("Plagiarism Checker"):
        text1 = gr.Textbox(label="Text 1", lines=3)
        text2 = gr.Textbox(label="Text 2", lines=3)
        plag_btn = gr.Button("Check Similarity")
        plag_out = gr.Textbox()
        plag_btn.click(fn=check_plagiarism, inputs=[text1, text2], outputs=plag_out)
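

# Start the Gradio interface.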
demo.launch()