Spaces:
Runtime error
Runtime error
Update models/langOpen.py
Browse files. Updated prompt. Changed temperature to 0.01. Added compression retriever.
- models/langOpen.py +11 -6
models/langOpen.py
CHANGED
@@ -9,18 +9,17 @@ from langchain.embeddings import OpenAIEmbeddings
|
|
9 |
from langchain.prompts import PromptTemplate
|
10 |
from langchain_pinecone import PineconeVectorStore
|
11 |
|
12 |
-
prompt_template = """
|
13 |
-
|
|
|
14 |
Context: {context}
|
15 |
Topic: {topic}
|
16 |
|
17 |
Use the following example format for your answer:
|
18 |
-
[FORMAT]
|
19 |
Answer:
|
20 |
The answer to the user question.
|
21 |
Reference:
|
22 |
The list of references to the specific sections of the documents that support your answer.
|
23 |
-
[END_FORMAT]
|
24 |
"""
|
25 |
|
26 |
|
@@ -30,7 +29,7 @@ PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "t
|
|
30 |
class LangOpen:
|
31 |
def __init__(self, model_name: str) -> None:
|
32 |
self.index = self.initialize_index("langOpen")
|
33 |
-
self.llm = ChatOpenAI(temperature=0.
|
34 |
self.chain = LLMChain(llm=self.llm, prompt=PROMPT)
|
35 |
|
36 |
def initialize_index(self, index_name):
|
@@ -43,7 +42,13 @@ class LangOpen:
|
|
43 |
def get_response(self, query_str):
|
44 |
print("query_str: ", query_str)
|
45 |
print("model_name: ", self.llm.model_name)
|
46 |
-
docs = self.index.similarity_search(query_str, k=4)
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
inputs = [{"context": doc.page_content, "topic": query_str} for doc in docs]
|
48 |
result = self.chain.apply(inputs)[0]["text"]
|
49 |
return result
|
|
|
9 |
from langchain.prompts import PromptTemplate
|
10 |
from langchain_pinecone import PineconeVectorStore
|
11 |
|
12 |
+
prompt_template = """You are an expert on California Drinking Water Regulations.
|
13 |
+
Answer the question solely using relevant regulations in the given context. DO NOT USE ANY OTHER SOURCES.
|
14 |
+
If the given context does not contain the relevant information, say so.
|
15 |
Context: {context}
|
16 |
Topic: {topic}
|
17 |
|
18 |
Use the following example format for your answer:
|
|
|
19 |
Answer:
|
20 |
The answer to the user question.
|
21 |
Reference:
|
22 |
The list of references to the specific sections of the documents that support your answer.
|
|
|
23 |
"""
|
24 |
|
25 |
|
|
|
29 |
class LangOpen:
    def __init__(self, model_name: str) -> None:
        """Wire up the vector index, the chat LLM, and the prompt chain.

        model_name: OpenAI chat model identifier passed straight to ChatOpenAI.
        """
        # Pinecone-backed vector store (index name is hard-coded to "langOpen").
        self.index = self.initialize_index("langOpen")
        # Near-zero temperature keeps regulatory answers close to deterministic.
        self.llm = ChatOpenAI(temperature=0.01, model=model_name)
        self.chain = LLMChain(llm=self.llm, prompt=PROMPT)
|
34 |
|
35 |
def initialize_index(self, index_name):
|
|
|
42 |
def get_response(self, query_str):
|
43 |
print("query_str: ", query_str)
|
44 |
print("model_name: ", self.llm.model_name)
|
45 |
+
#docs = self.index.similarity_search(query_str, k=4)
|
46 |
+
vectorstore_retriever = self.index.as_retriever(search_type="similarity", search_kwargs={"k": 10})
|
47 |
+
compressor = CohereRerank()
|
48 |
+
compression_retriever = ContextualCompressionRetriever(
|
49 |
+
base_compressor=compressor, base_retriever=vectorstore_retriever
|
50 |
+
)
|
51 |
+
docs = compression_retriever.get_relevant_documents(query_str)
|
52 |
inputs = [{"context": doc.page_content, "topic": query_str} for doc in docs]
|
53 |
result = self.chain.apply(inputs)[0]["text"]
|
54 |
return result
|