import os
import keyfile
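# keyfile is assumed to be a local module (keyfile.py) kept out of version control,
# along the lines of:
#   GOOGLEKEY = "your-google-api-key"
# Swap in your own secret management (e.g. st.secrets) if preferred.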
import warnings
import streamlit as st
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.schema import HumanMessage, SystemMessage, AIMessage
# Ignore warnings
warnings.filterwarnings("ignore")
# Streamlit settings
st.set_page_config(page_title="Magical Healer")
st.header("Welcome, What help do you need?")
# Initialize session state for messages
if "sessionMessages" not in st.session_state:
    st.session_state["sessionMessages"] = [
        SystemMessage(content="You are a medieval magical healer known for your peculiar sarcasm")
    ]
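# Streamlit reruns this script on every interaction, but st.session_state persists
# across reruns, so the conversation history survives each button click.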
# Set Google API key
os.environ["GOOGLE_API_KEY"] = keyfile.GOOGLEKEY
# Initialize the model
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0.7,
    convert_system_message_to_human=True
)
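# convert_system_message_to_human folds the SystemMessage above into the first user
# turn, since Gemini models have not accepted a separate system role.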
# Response function
def load_answer(question):
    # Add user question to the message history
    st.session_state.sessionMessages.append(HumanMessage(content=question))
    # Get AI's response
    assistant_answer = llm.invoke(st.session_state.sessionMessages)
    # Append AI's answer to the session messages
    if isinstance(assistant_answer, AIMessage):
        st.session_state.sessionMessages.append(assistant_answer)
        return assistant_answer.content
    else:
        st.session_state.sessionMessages.append(AIMessage(content=assistant_answer))
        return assistant_answer
# Capture user input
def get_text():
    input_text = st.text_input("You: ", key="input")
    return str(input_text)
# Main implementation
user_input = get_text()
submit = st.button("Generate")
if submit and user_input:
    response = load_answer(user_input)
    st.subheader("Answer:")
    st.write(response)
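# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py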