KrishPawargi committed on
Commit
c43f083
·
verified ·
1 Parent(s): c04b93b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -6
app.py CHANGED
@@ -5,9 +5,10 @@ import os
5
  from dotenv import load_dotenv
6
  from datetime import datetime
7
  import zipfile
8
- from langchain.llms import HuggingFaceHub
9
  from langchain.chains import ConversationChain
10
  from langchain.memory import ConversationBufferMemory
 
 
11
 
12
 
13
  # 🔓 Extract .streamlit folder if zipped
@@ -20,11 +21,17 @@ load_dotenv()
20
  HF_API_TOKEN = os.getenv("HF_API_TOKEN")
21
 
22
  # ✅ Initialize LangChain LLM
23
- llm = HuggingFaceHub(
24
- repo_id="mistralai/Mistral-7B-Instruct-v0.1",
25
- model_kwargs={"temperature": 0.7, "max_new_tokens": 2048},
26
- huggingfacehub_api_token=HF_API_TOKEN
27
- )
 
 
 
 
 
 
28
 
29
  # ✅ Setup LangChain memory and conversation
30
  if "memory" not in st.session_state:
 
5
  from dotenv import load_dotenv
6
  from datetime import datetime
7
  import zipfile
 
8
  from langchain.chains import ConversationChain
9
  from langchain.memory import ConversationBufferMemory
10
+ from langchain.llms import HuggingFacePipeline
11
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
12
 
13
 
14
  # 🔓 Extract .streamlit folder if zipped
 
21
  HF_API_TOKEN = os.getenv("HF_API_TOKEN")
22
 
23
  # ✅ Initialize LangChain LLM
24
+ model_id = "mistralai/Mistral-7B-Instruct-v0.1"
25
+
26
+ # Load tokenizer and model locally
27
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
28
+ model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
29
+
30
+ # Create Hugging Face pipeline
31
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=2048, temperature=0.7)
32
+
33
+ # LangChain wrapper
34
+ llm = HuggingFacePipeline(pipeline=pipe)
35
 
36
  # ✅ Setup LangChain memory and conversation
37
  if "memory" not in st.session_state: