Update src/RAGSample.py
src/RAGSample.py  CHANGED  (+10 -8)
@@ -18,6 +18,7 @@ import pandas as pd
 from typing import Optional, List
 import re
 import torch
+import subprocess
 
 # OPTION 1: Use Hugging Face Pipeline (Recommended for HF Spaces)
 from transformers import pipeline
@@ -387,17 +388,18 @@ Answer:
 
 # Initialize a local Hugging Face model
 hf_pipeline = pipeline(
-    "text-generation",
-    model="
-    tokenizer="
-    max_new_tokens=100,
-    max_length=
-    temperature=0.
-
+    "text-generation",
+    model="microsoft/BioGPT",
+    tokenizer="microsoft/BioGPT",
+    max_new_tokens=100,  # Reduced
+    max_length=400,  # Reduced
+    temperature=0.2,
+    device_map="cpu",  # Force CPU if GPU memory is limited
     return_full_text=False,
     truncation=True,
     do_sample=True,
-
+    pad_token_id=1,
+    eos_token_id=2,
 )
 
 # Wrap it in LangChain
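
For reference, the changed hunk assembles into the runnable sketch below. The pipeline() arguments are copied from the diff; the LangChain wrapper at the end is an assumption prompted by the trailing "# Wrap it in LangChain" comment, and the HuggingFacePipeline import path shown is the one used by the langchain-huggingface package (older LangChain releases expose the same class under langchain_community.llms). The prompt string is illustrative only.

# Runnable sketch of the updated block. Pipeline arguments are taken
# from the diff; the wrapper and prompt below are illustrative.
from transformers import pipeline
from langchain_huggingface import HuggingFacePipeline

# Initialize a local Hugging Face model
hf_pipeline = pipeline(
    "text-generation",
    model="microsoft/BioGPT",
    tokenizer="microsoft/BioGPT",
    max_new_tokens=100,      # cap on generated tokens (reduced in this commit)
    max_length=400,          # overall prompt + output token budget (reduced)
    temperature=0.2,         # low temperature for focused answers
    device_map="cpu",        # force CPU if GPU memory is limited
    return_full_text=False,  # return only the generated continuation
    truncation=True,
    do_sample=True,
    pad_token_id=1,          # ids as pinned in the commit
    eos_token_id=2,
)

# Wrap it in LangChain so the pipeline can serve as the LLM in a RAG chain
llm = HuggingFacePipeline(pipeline=hf_pipeline)
print(llm.invoke("Diabetes is a chronic disease characterized by"))

Note that max_new_tokens and max_length overlap in purpose; recent transformers versions warn when both are set and let max_new_tokens take precedence. Pinning pad_token_id and eos_token_id explicitly keeps generation from depending on the tokenizer's defaults for microsoft/BioGPT.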