# Earlier experiment (kept for reference): calling a chat model directly through
# HuggingFaceEndpoint + ChatHuggingFace from the langchain_huggingface package.
# import os
# from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# from langchain_core.messages import HumanMessage, SystemMessage
# os.environ["HF_TOKEN"] = os.getenv('Ayush')
# os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv('Ayush')
# llama_model = HuggingFaceEndpoint(repo_id="meta-llama/Llama-3.2-3B-Instruct", provider="nebius",
#                                   temperature=0.6, max_new_tokens=70, task="conversational")
# model_d = ChatHuggingFace(llm=llama_model)  # ChatHuggingFace only needs the wrapped llm;
#                                             # repeating repo_id/provider here was redundant
# message = [SystemMessage(content="Answer like you are a hardcore pc gamer"),
#            HumanMessage(content="Give me name of top 10 pc games of all time with description")]
# result = model_d.invoke(message)
# print(result.content)
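# ---- Active Streamlit app ----
# The app below swaps to the langchain_community wrappers (HuggingFaceHub +
# ChatHuggingFace). Note that newer LangChain releases deprecate these in
# favour of the langchain_huggingface package used in the experiment above.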
import os
import streamlit as st
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms import HuggingFaceHub
from langchain_core.messages import HumanMessage, SystemMessage
from fpdf import FPDF
# Set the Hugging Face token from the environment (secret stored under the key "Ayush")
hf_token = os.getenv("Ayush")
if hf_token:  # guard: assigning None to os.environ would raise a TypeError
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token
# Topic-wise base prompts and models
topic_config = {
    "Python": {
        "prompt": "Answer like a senior Python developer and coding mentor.",
        "model": "meta-llama/Llama-3.2-3B-Instruct"
    },
    "SQL": {
        "prompt": "Answer like a senior SQL engineer with industry experience.",
        "model": "google/gemma-3-27b-it"
    },
    "Power BI": {
        "prompt": "Answer like a Power BI expert helping a beginner.",
        "model": "mistralai/Mistral-7B-Instruct-v0.1"
    },
    "Statistics": {
        "prompt": "Answer like a statistics professor explaining key concepts to a student.",
        "model": "deepseek-ai/DeepSeek-R1"
    },
    "Machine Learning": {
        "prompt": "Answer like an ML mentor guiding a junior data scientist.",
        "model": "google/gemma-3-27b-it"
    },
    "Deep Learning": {
        "prompt": "Answer like a deep learning researcher with real-world insights.",
        "model": "meta-llama/Llama-3.2-3B-Instruct"
    },
    "Generative AI": {
        "prompt": "Answer like an expert in LLMs and Generative AI research.",
        "model": "deepseek-ai/DeepSeek-R1"
    }
}
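# NOTE: the repo ids above must be served by the Hugging Face Inference API for
# HuggingFaceHub to reach them; very large ones (e.g. DeepSeek-R1) may not be
# available on the free tier, so smaller instruct models are a safer fallback.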
# Experience level adjustments to prompt
experience_prompts = {
    "Beginner": "Explain with simple language and clear examples for a beginner.",
    "Intermediate": "Provide a detailed answer suitable for an intermediate learner.",
    "Expert": "Give an in-depth and advanced explanation suitable for an expert."
}
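# The selected experience string is appended to the merged topic prompts at query
# time, so one system message carries both the persona and the depth instruction.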
# Streamlit app setup
st.set_page_config(page_title="Data Science Mentor", page_icon="🧠")
st.title("🧠 Data Science Mentor App")
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
# Multi-select topics
selected_topics = st.multiselect("Select one or more topics:", list(topic_config.keys()), default=["Python"])
# Select experience level
experience_level = st.selectbox("Select mentor experience level:", list(experience_prompts.keys()))
question = st.text_area("Ask your question here:")
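# On "Get Answer": merge the base prompts of all selected topics, append the
# experience-level instruction, and send everything as a single system message.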
if st.button("Get Answer"):
if not selected_topics:
st.warning("Please select at least one topic.")
elif not question.strip():
st.warning("Please enter your question.")
else:
# Combine prompts from selected topics + experience level
combined_prompt = ""
models_used = set()
for topic in selected_topics:
base_prompt = topic_config[topic]["prompt"]
combined_prompt += f"{base_prompt} "
models_used.add(topic_config[topic]["model"])
combined_prompt += experience_prompts[experience_level]
# Choose the first model from selected topics (or could do more advanced merging)
chosen_model = list(models_used)[0]
# Load model
llm = HuggingFaceHub(
repo_id=chosen_model,
model_kwargs={"temperature": 0.6, "max_new_tokens": 150}
)
chat_model = ChatHuggingFace(llm=llm)
messages = [
SystemMessage(content=combined_prompt),
HumanMessage(content=question)
]
with st.spinner("Mentor is typing..."):
response = chat_model.invoke(messages)
st.markdown("### π§ Mentor's Response:")
st.markdown(response.content)
# Save chat
st.session_state.chat_history.append((selected_topics, experience_level, question, response.content))
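# Each exchange is stored as a (topics, level, question, answer) tuple so the
# history view and the PDF export below can replay the full conversation.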
# Display chat history
if st.session_state.chat_history:
    st.markdown("---")
    st.subheader("💬 Chat History")
    for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
        st.markdown(f"**{i}. Topics:** {', '.join(topics)} | **Mentor Level:** {exp}")
        st.markdown(f"**You:** {q}")
        st.markdown(f"**Mentor:** {a}")
        st.markdown("---")
# Download PDF
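# FPDF (the classic pyfpdf API used here) only supports latin-1 with the built-in
# Arial font, so the chat text is sanitized before writing: characters outside
# latin-1 (e.g. emoji) are replaced rather than crashing pdf.output().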
if st.button("π Download PDF of this chat"):
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size=12)
pdf.cell(200, 10, txt="Data Science Mentor Chat History", ln=True, align="C")
pdf.ln(10)
for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
pdf.multi_cell(0, 10, f"{i}. Topics: {', '.join(topics)} | Mentor Level: {exp}\nYou: {q}\nMentor: {a}\n\n")
pdf_path = "/tmp/mentor_chat.pdf"
pdf.output(pdf_path)
with open(pdf_path, "rb") as f:
st.download_button("π₯ Click to Download PDF", f, file_name="mentor_chat.pdf", mime="application/pdf")