selcukwashere committed on
Commit
c3ce070
·
1 Parent(s): 71ce14e

First app commit

Browse files
.env.example ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # HuggingFace API Token (required for LLM access)
2
+ # Get your token from: https://huggingface.co/settings/tokens
3
+ HF_TOKEN=your_huggingface_token_here
.gitattributes CHANGED
@@ -1,35 +1,4 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
  *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  *.pkl filter=lfs diff=lfs merge=lfs -text
2
+ faiss_index/** filter=lfs diff=lfs merge=lfs -text
3
+ *.index filter=lfs diff=lfs merge=lfs -text
4
+ *.faiss filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ env/
8
+ venv/
9
+ ENV/
10
+ build/
11
+ develop-eggs/
12
+ dist/
13
+ downloads/
14
+ eggs/
15
+ .eggs/
16
+ lib/
17
+ lib64/
18
+ parts/
19
+ sdist/
20
+ var/
21
+ wheels/
22
+ *.egg-info/
23
+ .installed.cfg
24
+ *.egg
25
+
26
+ # Environment
27
+ .env
28
+ .env.local
29
+
30
+ # IDE
31
+ .vscode/
32
+ .idea/
33
+ *.swp
34
+ *.swo
35
+ *~
36
+
37
+ # OS
38
+ .DS_Store
39
+ Thumbs.db
40
+
41
+ # Gradio
42
+ flagged/
43
+
44
+ # Model cache
45
+ .cache/
46
+
README.md CHANGED
@@ -8,7 +8,5 @@ sdk_version: 5.49.1
8
  app_file: app.py
9
  pinned: false
10
  license: mit
11
- short_description: Demo space for NoterLLM, the RAG system for Turkish notaries
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
8
  app_file: app.py
9
  pinned: false
10
  license: mit
11
+ short_description: AI-powered Turkish Notary Law Assistant using RAG
12
+ ---
 
 
app.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from llm_rag_setup import query_rag
3
+
4
+ custom_css = """
5
+ .container {
6
+ max-width: 1200px;
7
+ margin: auto;
8
+ }
9
+ .source-box {
10
+ background-color: #f0f0f0;
11
+ padding: 10px;
12
+ border-radius: 5px;
13
+ margin: 5px 0;
14
+ border-left: 3px solid #2196F3;
15
+ }
16
+ .footer {
17
+ text-align: center;
18
+ margin-top: 20px;
19
+ color: #666;
20
+ }
21
+ """
22
+
23
+
24
def format_sources(source_documents):
    """Render up to three retrieved documents as HTML "source box" snippets.

    Each box shows a title derived from the document metadata (law article
    vs. TNB circular) and a preview of the first 200 characters of the page
    content.  All dynamic text is passed through ``html.escape`` so that
    document content cannot inject markup into the chat UI.

    Args:
        source_documents: list of langchain ``Document``-like objects with
            ``metadata`` (dict) and ``page_content`` (str) attributes; may
            be empty or None.

    Returns:
        An HTML string, or the Turkish "no sources found" message when the
        list is empty.
    """
    import html  # local import keeps this block self-contained

    if not source_documents:
        return "Kaynak bulunamadı."

    sources_html = ""
    for i, doc in enumerate(source_documents[:3], 1):
        metadata = doc.metadata
        source_type = metadata.get("source_type", "genelge")

        if source_type == "kanun":
            # Law article: "Noterlik Kanunu - Madde N (title)" plus section.
            madde_no = metadata.get("madde_no", "N/A")
            madde_baslik = metadata.get("madde_baslik", "")
            title = f"📜 Noterlik Kanunu - Madde {madde_no}"
            if madde_baslik:
                title += f" ({madde_baslik})"
            kisim = metadata.get("kisim", "")
            content = f"{kisim}\n\n{doc.page_content[:200]}..."
        else:
            # TNB circular: "Genelge N - Madde M" plus circular title.
            genelge_no = metadata.get("genelge_no", "N/A")
            madde_no = metadata.get("madde_no", "N/A")
            title = f"📋 Genelge {genelge_no} - Madde {madde_no}"
            genelge_baslik = metadata.get("genelge_baslik", "N/A")
            content = f"{genelge_baslik}\n\n{doc.page_content[:200]}..."

        # Escape user/document-derived text so it renders as plain text.
        sources_html += f"""
        <div class="source-box">
            <strong>{i}. {html.escape(title)}</strong><br>
            <small>{html.escape(content)}</small>
        </div>
        """

    return sources_html
56
+
57
+
58
def chat_with_rag(message, history):
    """Handle one chat turn: run the RAG pipeline and update the history.

    Args:
        message: the user's question (str).
        history: list of (user, assistant) tuples used by gr.Chatbot.

    Returns:
        ("", history) — the empty string clears the input textbox.

    Fix vs. the original: the handler used to append a "starting up"
    placeholder row and overwrite it afterwards.  Since this is not a
    generator the placeholder was never rendered, and when ``query_rag``
    raised, the stale placeholder row remained *and* a second error row
    was appended.  The history is now updated exactly once per turn.
    """
    # Ignore empty / whitespace-only submissions.
    if not message.strip():
        return "", history

    try:
        result = query_rag(message)
        if result is None:
            answer = "❌ Sistem başlatılamadı veya veri eksik. Lütfen sunucu günlüklerini kontrol edin."
        else:
            answer = result.get("result", "(Cevap alınamadı)")

        # Append the source references, when any documents were retrieved.
        sources_html = ""
        if result and "source_documents" in result and result["source_documents"]:
            sources_html = (
                "<br><br><strong>📚 Kaynaklar:</strong><br>"
                + format_sources(result["source_documents"])
            )

        history.append((message, answer + sources_html))
        return "", history

    except Exception as e:
        # Surface the error in the chat instead of crashing the UI.
        history.append((message, f"❌ Hata oluştu: {str(e)}"))
        return "", history
91
+
92
+
93
def clear_chat():
    """Reset the UI: return an empty chat history and a cleared textbox."""
    fresh_history, fresh_box = [], []
    return fresh_history, fresh_box
95
+
96
+
97
# ---------------------------------------------------------------------------
# Gradio UI: chat column on the left, example questions and model info on the
# right.  All user-facing text is Turkish (Turkish notary-law assistant).
# ---------------------------------------------------------------------------

# Example prompts offered in the sidebar.
examples = [
    "Araç satış işlemlerinde hangi belgeler gereklidir?",
    "Noterlik işlemlerinde harç ve karar pulu nasıl hesaplanır?",
    "Vekaletname düzenlenirken dikkat edilmesi gereken hususlar nelerdir?",
    "Gayrimenkul satış vaadi sözleşmesi nedir?",
]


with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    # Header: title, data sources and the legal disclaimer.
    gr.Markdown(
        """
    # ⚖️ NoterLLM - Türk Noter Hukuku Asistanı

    Noterlik Kanunu ve Türkiye Noterler Birliği genelgelerine dayalı AI destekli soru-cevap sistemi.

    **Kaynaklar:**
    - 📜 Noterlik Kanunu (1512) - 213 madde
    - 📋 TNB Genelgeleri - 125+ genelge

    *Bu sistem genelgelere dayalı bilgi sağlar ancak resmi hukuki danışmanlık yerine geçmez.*
    """
    )

    with gr.Row():
        # Left column: chat transcript, question box, send & clear buttons.
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(
                label="Sohbet Geçmişi",
                height=500,
                show_label=True,
                avatar_images=("👤", "⚖️"),
                bubble_full_width=False,
            )

            with gr.Row():
                msg = gr.Textbox(
                    label="Sorunuz",
                    placeholder="Noterlik hukuku ile ilgili sorunuzu yazın...",
                    show_label=False,
                    scale=9,
                    container=False,
                )
                submit_btn = gr.Button("Gönder", variant="primary", scale=1)

            clear_btn = gr.Button("🗑️ Sohbeti Temizle", size="sm")

        # Right column: example prompts and model information.
        with gr.Column(scale=1):
            gr.Markdown("### 💡 Örnek Sorular")
            gr.Examples(
                examples=examples,
                inputs=msg,
                label="Aşağıdaki sorulardan birini seçebilirsiniz:",
            )

            gr.Markdown(
                """
            ### ℹ️ Bilgi

            **Model:** Mistral-7B-Instruct-v0.2
            **Embedding:** multilingual-e5-base
            **Retrieval:** FAISS + BM25 (Hybrid)

            **Özellikler:**
            - 🔍 Semantic & Keyword Search
            - 📚 Kaynak Referansları
            - 🇹🇷 Türkçe Optimizasyonu
            """
            )

    # The "Gönder" button and pressing Enter in the textbox run the same
    # handler — wire both triggers in one pass.
    for _trigger in (submit_btn.click, msg.submit):
        _trigger(
            fn=chat_with_rag,
            inputs=[msg, chatbot],
            outputs=[msg, chatbot],
        )

    clear_btn.click(
        fn=clear_chat,
        inputs=None,
        outputs=[msg, chatbot],
    )

    # Footer.
    gr.Markdown(
        """
    <div class="footer">
    <p>Powered by HuggingFace 🤗 | Built with Gradio</p>
    </div>
    """
    )

demo.launch()
bm25_retriever.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee3d727748117c15f270eee4c7dc5b7be2df510e3be48fa172ac2b0db6aa391e
3
+ size 16329768
faiss_index/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:075350f184b5ee60e6aab7a1a020a330554134bf2e5d9823e7d16b7b3b9baf9c
3
+ size 13283373
faiss_index/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fed25d8c490d29c0c3795b175c93d8bb7ba157b16af7c147969fbb5ecb0a329
3
+ size 9677628
llm_rag_setup.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.schema import Document
2
+ import json
3
+ import os
4
+ import pickle
5
+ from langchain_community.vectorstores import FAISS
6
+ from langchain_huggingface import HuggingFaceEmbeddings
7
+ from langchain_community.retrievers import BM25Retriever
8
+ from langchain.retrievers import EnsembleRetriever
9
+ from langchain_huggingface import HuggingFaceEndpoint
10
+ from langchain.chains import RetrievalQA
11
+ from langchain.prompts import PromptTemplate
12
+ from typing import Optional
13
+
14
+ _qa_chain: Optional[RetrievalQA] = None
15
+ _initialized = False
16
+
17
+
18
# System prompt for the QA chain; instructs the model (in Turkish) to answer
# from the Notary Law and TNB circulars and to cite its sources.
_TURKISH_LEGAL_PROMPT = """Sen Türk Noter Hukuku konusunda uzman bir yapay zeka asistanısın. Görevin, Noterlik Kanunu ve Türkiye Noterler Birliği genelgelerinden yararlanarak kullanıcının sorusunu doğru ve eksiksiz yanıtlamaktır.

BAĞLAM BİLGİLERİ (Kanun ve Genelgelerden):
{context}

KULLANICI SORUSU: {question}

YANITLAMA STRATEJİSİ:
1. **KAYNAK ÖNCELİĞİ**:
   - Noterlik Kanunu → Temel yasal çerçeve ve genel kurallar
   - TNB Genelgeleri → Kanunun uygulanmasına ilişkin özel düzenlemeler ve açıklamalar
   - Her iki kaynağı da kontrol et ve ilgili olanları kullan

2. **HİBRİT YANITLAMA**:
   - Kanun maddeleri varsa bunları temel al
   - Genelgelerdeki uygulama detayları varsa ekle
   - Kaynak belirtmeyi unutma!

3. **KAYNAK BELİRTME**:
   - Kanundan alınan bilgi → "Noterlik Kanunu Madde X'e göre..."
   - Genelgelerden alınan bilgi → "Genelge X, Madde Y'ye göre..."
   - Genel bilgi → "Genel olarak..." veya "Türk Hukuku'nda..."

4. **KALİTE KURALLARI**:
   - Yanıtını net, anlaşılır ve yapılandırılmış şekilde sun
   - Hukuki terminolojiyi doğru kullan
   - Kesin olmadığın konularda varsayımda bulunma
   - Hem kanunu hem genelgeleri kaynak olarak kullanabilirsin

YANITINIZ:"""


def _load_json_documents(path, default_source_type=None):
    """Load one RAG corpus JSON file into langchain Documents.

    Args:
        path: JSON file of {"content": ..., "metadata": {...}} records.
        default_source_type: when given, filled into metadata["source_type"]
            for records that lack it (e.g. "genelge" for circular chunks).

    Returns:
        A list of Documents; empty (with a warning printed) when the file
        is missing.
    """
    docs = []
    try:
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except FileNotFoundError:
        print(f"⚠️ {path} not found. Please upload data files.")
        return docs

    print(f"✅ Loaded {len(data)} chunks from {path}")
    for item in data:
        metadata = item.setdefault("metadata", {})
        if default_source_type is not None and "source_type" not in metadata:
            metadata["source_type"] = default_source_type
        docs.append(
            Document(page_content=item.get("content", ""), metadata=metadata)
        )
    return docs


def _build_vector_store(documents, embedding_model):
    """Load the FAISS index from disk, or build it from *documents* and persist it."""
    faiss_index_path = "faiss_index"
    if os.path.exists(faiss_index_path):
        print(f"✅ Found existing FAISS index at {faiss_index_path} — loading...")
        try:
            vector_db = FAISS.load_local(
                faiss_index_path, embedding_model, allow_dangerous_deserialization=True
            )
            print("✅ FAISS index loaded successfully!")
            return vector_db
        except Exception as e:
            # Fall through and rebuild from scratch.
            print(f"❌ Failed to load FAISS index: {e}")

    print("🔄 Creating new FAISS index (this may take a few minutes)...")
    vector_db = FAISS.from_documents(documents, embedding_model)
    vector_db.save_local(faiss_index_path)
    print(f"✅ FAISS index created and saved to {faiss_index_path}")
    return vector_db


def _load_or_build_bm25(documents):
    """Load the pickled BM25 retriever, or build it from *documents* and persist it.

    NOTE(security): ``pickle.load`` executes arbitrary code on load — only
    ever ship a ``bm25_retriever.pkl`` you created yourself.
    """
    bm25_path = "bm25_retriever.pkl"
    if os.path.exists(bm25_path):
        print(f"✅ Loading existing BM25 index from {bm25_path}...")
        with open(bm25_path, "rb") as f:
            bm25_retriever = pickle.load(f)
        print("✅ BM25 index loaded successfully!")
        return bm25_retriever

    print("🔄 Creating new BM25 index...")
    bm25_retriever = BM25Retriever.from_documents(documents)
    bm25_retriever.k = 5  # top-5 keyword hits, matching the vector retriever
    with open(bm25_path, "wb") as f:
        pickle.dump(bm25_retriever, f)
    print(f"✅ BM25 index created and saved to {bm25_path}")
    return bm25_retriever


def init_rag():
    """Initialise the global RAG pipeline (idempotent).

    Loads the law/circular JSON corpora, builds or loads the FAISS and BM25
    indexes, combines them into a 50/50 ensemble retriever, and wires the
    Mistral-7B endpoint into a RetrievalQA chain stored in the module-global
    ``_qa_chain``.  Sets ``_initialized`` on success; on any failure it
    returns early with the flag left False so the next call retries.
    """
    global _qa_chain, _initialized

    if _initialized:
        return

    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        print(
            "⚠️ HF_TOKEN not found in environment variables. Set it in Spaces secrets or .env file"
        )

    # Circular chunks default their source_type; law chunks keep metadata as-is.
    documents = _load_json_documents(
        "tnb_genelgeler_rag.json", default_source_type="genelge"
    )
    documents += _load_json_documents("noterlik_kanunu_rag.json")

    if not documents:
        print("❌ No documents loaded. Please prepare data files first.")
        _initialized = False
        return

    print(f"📚 Total documents loaded: {len(documents)}")

    print("🔄 Initializing embedding model (multilingual-e5-base)...")
    embedding_model = HuggingFaceEmbeddings(
        model_name="intfloat/multilingual-e5-base", encode_kwargs={"batch_size": 32}
    )
    print("✅ Embedding model initialized")

    vector_db = _build_vector_store(documents, embedding_model)
    bm25_retriever = _load_or_build_bm25(documents)

    vector_retriever = vector_db.as_retriever(search_kwargs={"k": 5})
    ensemble_retriever = EnsembleRetriever(
        retrievers=[bm25_retriever, vector_retriever], weights=[0.5, 0.5]
    )

    print("🔄 Initializing HuggingFace LLM (Mistral-7B-Instruct)...")
    try:
        llm = HuggingFaceEndpoint(
            repo_id="mistralai/Mistral-7B-Instruct-v0.2",
            huggingfacehub_api_token=hf_token,
            temperature=0.3,
            max_new_tokens=1024,
            top_p=0.95,
            repetition_penalty=1.1,
        )
        print("✅ HuggingFace LLM initialized (Mistral-7B-Instruct-v0.2)")
    except Exception as e:
        print(f"❌ Failed to initialize LLM: {e}")
        print(f"   HF_TOKEN is {'set' if hf_token else 'NOT set'}")
        _initialized = False
        return

    prompt_template = PromptTemplate(
        template=_TURKISH_LEGAL_PROMPT, input_variables=["context", "question"]
    )

    _qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=ensemble_retriever,
        chain_type="stuff",
        chain_type_kwargs={"prompt": prompt_template, "document_separator": "\n---\n"},
        return_source_documents=True,
        verbose=False,
    )

    if _qa_chain is None:
        print("❌ QA Chain creation failed silently")
        return

    print("✅ RAG system initialized successfully!\n")
    _initialized = True
+
189
+
190
def query_rag(question: str):
    """Answer *question* through the global RetrievalQA chain.

    Lazily initialises the RAG system on first use via ``init_rag``.

    Args:
        question: the user's question in natural language.

    Returns:
        The chain's result dict (notably the keys "result" and
        "source_documents"), or None when initialisation failed or the
        query itself raised.

    Fix vs. the original: three leftover ``DEBUG:`` prints that ran on
    every single query have been removed.
    """
    global _qa_chain, _initialized

    if not _initialized:
        init_rag()

    if not _initialized or _qa_chain is None:
        print("❌ RAG system is not properly initialized. Chain or data missing.")
        return None

    try:
        return _qa_chain.invoke({"query": question})
    except Exception as e:
        # Log the full traceback; the caller only needs the None sentinel.
        print(f"❌ Error querying RAG: {e}")
        import traceback
        traceback.print_exc()
        return None
noterlik_kanunu_rag.json ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Core Framework
2
+ gradio==5.49.1
3
+
4
+ # LangChain
5
+ langchain==0.3.0
6
+ langchain-community==0.3.0
7
+ langchain-huggingface==0.1.0
8
+
9
+ # HuggingFace
10
+ huggingface-hub>=0.30.0,<1.0
11
+
12
+ # Vector Store & Embeddings
13
+ faiss-cpu==1.7.4
14
+ sentence-transformers==2.2.2
15
+
16
+ # BM25 Retriever
17
+ rank-bm25==0.2.2
18
+
tnb_genelgeler_rag.json ADDED
The diff for this file is too large to render. See raw diff