# Issurance_Agent_Rag / embedder.py
# Author: Rivalcoder (commit 7acce36, "Update Prompt")
import os

# Configure the Hugging Face cache BEFORE importing sentence_transformers:
# transformers reads HF_HOME / TRANSFORMERS_CACHE at import time, so setting
# them after the import can leave downloads in the default location.
cache_dir = os.path.join(os.getcwd(), ".cache")
os.makedirs(cache_dir, exist_ok=True)
os.environ['HF_HOME'] = cache_dir
os.environ['TRANSFORMERS_CACHE'] = cache_dir  # legacy name, kept for older transformers versions

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

# Module-level singleton for the lazily loaded embedding model.
_model = None
def preload_model(model_name="all-MiniLM-L6-v2"):
    """Load the SentenceTransformer model once and cache it as a singleton.

    Parameters
    ----------
    model_name : str
        Model id to load. A bare name that fails to resolve is retried
        with the ``sentence-transformers/`` organization prefix.

    Returns
    -------
    SentenceTransformer
        The shared, ready-to-use model instance.

    Raises
    ------
    Exception
        Whatever the underlying loader raises if both the primary and the
        fallback load fail.
    """
    global _model
    if _model is not None:
        return _model
    print("Preloading sentence transformer model...")
    try:
        _model = SentenceTransformer(model_name, cache_folder=cache_dir)
    except Exception as e:
        print(f"Primary model load failed: {e}")
        # Don't double the org prefix when the caller already supplied it —
        # retrying the same id would just mask the original error.
        if model_name.startswith("sentence-transformers/"):
            raise
        fallback_name = "sentence-transformers/" + model_name
        print(f"Trying fallback: {fallback_name}")
        _model = SentenceTransformer(fallback_name, cache_folder=cache_dir)
    print("✅ Model ready.")
    return _model
def get_model():
    """Return the shared embedding model, loading it on first call."""
    model = preload_model()
    return model
def build_faiss_index(chunks, batch_size=128, show_progress_bar=False):
    """Embed text chunks and store them in a flat L2 FAISS index.

    Parameters
    ----------
    chunks : sequence of str
        Texts to embed; must be non-empty.
    batch_size : int
        Encoding batch size (larger is faster, uses more memory).
    show_progress_bar : bool
        Forwarded to ``SentenceTransformer.encode``.

    Returns
    -------
    tuple[faiss.IndexFlatL2, sequence of str]
        The populated index and the original chunks (row i of the index
        corresponds to ``chunks[i]``).

    Raises
    ------
    ValueError
        If ``chunks`` is empty (would otherwise fail opaquely on
        ``embeddings.shape[1]``).
    """
    if not chunks:
        raise ValueError("chunks must be a non-empty sequence of texts")
    model = get_model()
    # Encode using batching for speed.
    embeddings = model.encode(
        chunks,
        batch_size=batch_size,
        show_progress_bar=show_progress_bar,
        convert_to_numpy=True,
        normalize_embeddings=True  # unit vectors: L2 distance is monotone in cosine similarity
    )
    # FAISS requires contiguous float32 input; enforce it explicitly.
    embeddings = np.ascontiguousarray(embeddings, dtype="float32")
    dim = embeddings.shape[1]
    index = faiss.IndexFlatL2(dim)
    index.add(embeddings)
    return index, chunks