brendon-ai committed
Commit 37df21e · verified · 1 Parent(s): 03fbbd4

Update src/RAGSample.py

Files changed (1)
  1. src/RAGSample.py +0 -25
src/RAGSample.py CHANGED
@@ -348,29 +348,6 @@ def setup_retriever(use_kaggle_data: bool = False, kaggle_dataset: Optional[str]
     # # Create a chain combining the prompt template and LLM
     # return prompt | llm | StrOutputParser()
 
-def initialize_biogpt():
-    try:
-        hf_pipeline = pipeline(
-            "text-generation",
-            model="microsoft/BioGPT",
-            tokenizer="microsoft/BioGPT",
-            max_new_tokens=100,
-            # Remove or comment out max_length to avoid truncation
-            # max_length=1024,
-            temperature=0.2,
-            device_map="auto",
-            torch_dtype=torch.float16,
-            return_full_text=False,
-            truncation=False,  # Explicitly disable truncation
-            do_sample=True,
-            pad_token_id=1,
-            eos_token_id=2,
-        )
-        print("BioGPT loaded successfully!")
-        return hf_pipeline
-    except Exception as e:
-        print(f"Error loading BioGPT: {e}")
-        return None
 
 def setup_rag_chain() -> Runnable:
     """Sets up the RAG chain with a prompt template and an LLM."""
@@ -392,8 +369,6 @@ Answer:
     )
 
     # Initialize a local Hugging Face model
-    hf_pipeline = pipeline(
-    # Initialize a local Hugging Face model
     hf_pipeline = pipeline(
         "text-generation",
         model="microsoft/BioGPT",
 