Update app.py
app.py CHANGED
@@ -4,16 +4,15 @@ from langchain.chains import ConversationalRetrievalChain
 from langchain.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
 from langchain.text_splitter import CharacterTextSplitter
 from langchain.vectorstores import Chroma
-import gradio as gr
-from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 from sentence_transformers import SentenceTransformer
-import
+from transformers import pipeline
+import gradio as gr

-# sqlite
+# Workaround for sqlite in HuggingFace Spaces
 __import__('pysqlite3')
 sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')

 # Load documents
 docs = []
 for f in os.listdir("multiple_docs"):
     if f.endswith(".pdf"):
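Review note on the sqlite shim above: chromadb requires sqlite3 >= 3.35.0, and the stock Python image on Spaces can ship an older build, so the pysqlite3 swap replaces the stdlib module before Chroma first opens a database. A minimal guarded variant of the same trick, assuming pysqlite3-binary is listed in requirements.txt (the guard is my addition, not part of this commit):

import sys

try:
    # pysqlite3-binary bundles a recent SQLite; prefer it when present.
    __import__('pysqlite3')
    sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
except ImportError:
    # Locally, the built-in sqlite3 is usually new enough; keep it.
    pass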
@@ -26,43 +25,37 @@ for f in os.listdir("multiple_docs"):
         loader = TextLoader(os.path.join("multiple_docs", f))
         docs.extend(loader.load())

-# Split
+# Split into chunks
 splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
 docs = splitter.split_documents(docs)

-#
+# Compute embeddings
 embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
 texts = [doc.page_content for doc in docs]
 metadatas = [{"id": i} for i in range(len(texts))]
 embeddings = embedding_model.encode(texts)

-#
+# Save in Chroma vectorstore
 vectorstore = Chroma(persist_directory="./db")
 vectorstore.add_texts(texts=texts, metadatas=metadatas, embeddings=embeddings)
 vectorstore.persist()

-
-#
-# tokenizer = AutoTokenizer.from_pretrained(model_name)
-# model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
-
-model_name = "google/flan-t5-large"
-model = AutoModelForCausalLM.from_pretrained(model_name)
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-
-
-def generate(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-    outputs = model.generate(**inputs, max_new_tokens=512)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
+# Load free LLM with pipeline
+model_name = "google/flan-t5-large"  # small enough for CPU
+generator = pipeline("text2text-generation", model=model_name, device=-1)  # -1 = CPU
+
+# Wrap the pipeline for langchain
 class HuggingFaceLLMWrapper:
+    def __init__(self, generator):
+        self.generator = generator
+
     def __call__(self, prompt, **kwargs):
-        ...
+        result = self.generator(prompt, max_new_tokens=512, num_return_sequences=1)
+        return result[0]['generated_text']

-llm = HuggingFaceLLMWrapper()
+llm = HuggingFaceLLMWrapper(generator)

-#
+# Create the conversational chain
 chain = ConversationalRetrievalChain.from_llm(
     llm,
     retriever=vectorstore.as_retriever(search_kwargs={'k': 6}),
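Review note on the model swap in this hunk: google/flan-t5-large is an encoder-decoder model, so the removed AutoModelForCausalLM path was the wrong loader for it (transformers rejects T5 configs for that AutoModel class); the text2text-generation pipeline matches the architecture, and device=-1 keeps it on CPU. The wrapper appears to rely on the older LangChain behavior of invoking the LLM as a plain callable that takes a prompt string. A standalone smoke test of the wrapper, separate from the chain (the prompt text is illustrative only):

from transformers import pipeline

generator = pipeline("text2text-generation", model="google/flan-t5-large", device=-1)
llm = HuggingFaceLLMWrapper(generator)
# The pipeline returns a list of dicts keyed by 'generated_text',
# which __call__ unwraps to a plain string.
print(llm("Answer in one sentence: what does a retriever add to an LLM?"))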
@@ -70,16 +63,21 @@ chain = ConversationalRetrievalChain.from_llm(
     verbose=False
 )

+# Gradio UI
 chat_history = []

 with gr.Blocks() as demo:
-    chatbot = gr.Chatbot(
-        ...
+    chatbot = gr.Chatbot(
+        [("", "Hello, I'm Thierry Decae's chatbot. Ask me about my experience, skills, eligibility, etc.")],
+        avatar_images=["./multiple_docs/Guest.jpg", "./multiple_docs/Thierry Picture.jpg"]
+    )
     msg = gr.Textbox()
     clear = gr.Button("Clear")

     def user(query, chat_history):
+        # convert chat history to tuples
         chat_history_tuples = [(m[0], m[1]) for m in chat_history]
+        # get answer
         result = chain({"question": query, "chat_history": chat_history_tuples})
         chat_history.append((query, result["answer"]))
         return gr.update(value=""), chat_history
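The diff stops at line 85, inside the gr.Blocks() context, so the event wiring for msg and clear is unchanged and not shown. For review context, a sketch of what that wiring plausibly looks like given user()'s signature (these exact lines are an assumption, not part of the commit):

    # Submitting the textbox calls user(); it clears the box and
    # returns the updated history to the Chatbot component.
    msg.submit(user, [msg, chatbot], [msg, chatbot])
    # The Clear button resets the Chatbot display.
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()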
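One last deployment note: the final import list implies the Space's requirements.txt needs roughly the following packages (inferred from the imports, not shown in this commit; pysqlite3-binary is the pip distribution that provides the pysqlite3 module, and pypdf and docx2txt back PyPDFLoader and Docx2txtLoader):

langchain
chromadb
sentence-transformers
transformers
torch
gradio
pysqlite3-binary
pypdf
docx2txt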