Update src/RAGSample.py
src/RAGSample.py  +25 -17  CHANGED
@@ -19,6 +19,8 @@ from typing import Optional, List
 import re
 import torch
 import subprocess
+# Load tokenizer and model separately to configure properly
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # OPTION 1: Use Hugging Face Pipeline (Recommended for HF Spaces)
 from transformers import pipeline
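The added import brings in AutoTokenizer and AutoModelForCausalLM so the checkpoint can be configured before it reaches pipeline(). A small sketch of that loading pattern, assuming the accelerate package is installed (device_map="auto" depends on it), with a fallback if it is not; the fallback itself is an assumption, not part of the commit:

import torch
from transformers import AutoModelForCausalLM

# device_map="auto" requires the accelerate package; fall back to a plain
# default-device, default-dtype load if it is unavailable (fp16 on a
# CPU-only Space can also be slow or unsupported).
try:
    model = AutoModelForCausalLM.from_pretrained(
        "microsoft/BioGPT",
        device_map="auto",
        torch_dtype=torch.float16,
    )
except (ImportError, ValueError):
    model = AutoModelForCausalLM.from_pretrained("microsoft/BioGPT")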
@@ -367,28 +369,34 @@ Answer:
 """,
     input_variables=["question", "documents"],
 )
+
+tokenizer = AutoTokenizer.from_pretrained("microsoft/BioGPT")
+model = AutoModelForCausalLM.from_pretrained(
+    "microsoft/BioGPT",
+    device_map="auto",
+    torch_dtype=torch.float16
+)
+
+# Fix the tokenizer configuration
+if tokenizer.pad_token is None:
+    tokenizer.pad_token = tokenizer.eos_token
 
 # Initialize a local Hugging Face model
 hf_pipeline = pipeline(
-    # "text-generation",
-    # model="microsoft/BioGPT",
-    # tokenizer="microsoft/BioGPT",
-    # max_new_tokens=100,  # Reduced for stability
-    # max_length=1024,  # BioGPT's context length
-    # temperature=0.2,  # Lower for more focused responses
-    # device_map="auto",
-    # torch_dtype=torch.float16,
-    # return_full_text=False,
-    # truncation=True,
-    # do_sample=True,
-    # pad_token_id=1,
-    # eos_token_id=2,
     "text-generation",
-    model=
-    tokenizer=
-    max_new_tokens=
+    model=model,
+    tokenizer=tokenizer,
+    max_new_tokens=100,  # Reduced for stability
+    max_length=1024,  # BioGPT's context length
+    temperature=0.2,  # Lower for more focused responses
     device_map="auto",
-    torch_dtype=torch.float16
+    torch_dtype=torch.float16,
+    return_full_text=False,
+    truncation=True,
+    do_sample=True,
+    pad_token_id=1,
+    eos_token_id=2,
+    "text-generation"
)
 
 # Wrap it in LangChain
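As committed, the pipeline() call ends with a second positional "text-generation" after the keyword arguments, which Python rejects at parse time (SyntaxError: positional argument follows keyword argument). A minimal corrected sketch of the resulting call, assuming that trailing argument was unintended; max_length is also dropped here because transformers warns when it is combined with max_new_tokens, and device_map/torch_dtype are omitted since they already took effect in from_pretrained() above, so repeating them is at best redundant:

from transformers import pipeline  # model and tokenizer as loaded in the hunk above

hf_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=100,   # reduced for stability
    temperature=0.2,      # lower for more focused responses
    return_full_text=False,
    truncation=True,
    do_sample=True,
    pad_token_id=1,       # BioGPT's pad token id
    eos_token_id=2,       # BioGPT's eos token id
)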
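The diff stops at the "# Wrap it in LangChain" comment. A minimal sketch of that step, assuming the langchain-huggingface package (the same wrapper also ships in langchain_community.llms) and a PromptTemplate bound to a variable named prompt; both the package choice and the variable name are assumptions, since neither appears in the hunk:

from langchain_huggingface import HuggingFacePipeline

# Expose the transformers pipeline as a LangChain-compatible LLM.
llm = HuggingFacePipeline(pipeline=hf_pipeline)

# Hypothetical usage with the prompt's declared input variables.
chain = prompt | llm
answer = chain.invoke({"question": "What does BioGPT generate?", "documents": "..."})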