Aleksandr Maiorov committed on
Commit
dc23095
·
1 Parent(s): bfcae7e

- добавлен llama-index

Files changed (2) hide show
  1. app.py +5 -5
  2. data/result.json +0 -0
app.py CHANGED
@@ -30,9 +30,9 @@ set_global_tokenizer(
30
  AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct").encode
31
  )
32
 
33
- embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-mpnet-base-v2")
34
-
35
- documents = SimpleDirectoryReader("./data/").load_data()
36
 
37
  def messages_to_prompt(messages):
38
  messages = [{"role": m.role.value, "content": m.content} for m in messages]
@@ -74,9 +74,9 @@ llm = LlamaCPP(
74
  )
75
 
76
  memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
77
- index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
78
  chat_engine = index.as_chat_engine(
79
- chat_mode="condense_plus_context",
80
  memory=memory,
81
  llm=llm,
82
  context_propt=(
 
30
  AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct").encode
31
  )
32
 
33
+ # embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-mpnet-base-v2")
34
+ #
35
+ # documents = SimpleDirectoryReader("./data/").load_data()
36
 
37
  def messages_to_prompt(messages):
38
  messages = [{"role": m.role.value, "content": m.content} for m in messages]
 
74
  )
75
 
76
  memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
77
+ index = VectorStoreIndex
78
  chat_engine = index.as_chat_engine(
79
+ chat_mode="beast",
80
  memory=memory,
81
  llm=llm,
82
  context_propt=(
data/result.json DELETED
The diff for this file is too large to render. See raw diff