brendon-ai committed
Commit 665d3e9 · verified · 1 Parent(s): cc2745c

Update app.py

Files changed (1):
  1. app.py +150 -52

app.py CHANGED
@@ -1,98 +1,195 @@
  from fastapi import FastAPI, HTTPException, status, APIRouter, Request
  from pydantic import BaseModel, ValidationError
- from transformers import AutoTokenizer, AutoModelForMaskedLM
  import torch
  import logging

  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)

  app = FastAPI(
-     title="NeuroBERT-Tiny Masked Language Model API",
-     description="An API to perform Masked Language Modeling using the boltuix/NeuroBERT-Tiny model.",
      version="1.0.0"
  )

  api_router = APIRouter()

  try:
-     logger.info("Loading tokenizer and model for boltuix/NeuroBERT-Tiny...")
-     tokenizer = AutoTokenizer.from_pretrained("boltuix/NeuroBERT-Tiny")
-     model = AutoModelForMaskedLM.from_pretrained("boltuix/NeuroBERT-Tiny")
-     model.eval()
      logger.info("Model loaded successfully.")
  except Exception as e:
-     logger.exception("Failed to load model or tokenizer during startup!")
      raise RuntimeError(f"Could not load model: {e}")

  class InferenceRequest(BaseModel):
      text: str

  class PredictionResult(BaseModel):
-     sequence: str
-     score: float
-     token: int
-     token_str: str

  @api_router.post(
-     "/predict", # Prediction endpoint
      response_model=list[PredictionResult],
-     summary="Predicts masked tokens in a given text",
-     description="Accepts a text string with '[MASK]' tokens and returns top 5 predictions for each masked position."
  )
  async def predict_masked_lm(request: InferenceRequest):
-     try:
-         text = request.text
-         logger.info(f"Received prediction request for text: '{text}'")
-
-         inputs = tokenizer(text, return_tensors="pt")
-         with torch.no_grad():
-             outputs = model(**inputs)
-
-         logits = outputs.logits
-         masked_token_id = tokenizer.convert_tokens_to_ids("[MASK]")
-
-         masked_token_indices = torch.where(inputs["input_ids"] == masked_token_id)[1]

-         if not masked_token_indices.numel():
-             logger.warning("No [MASK] token found in the input text. Returning 400 Bad Request.")
-             raise HTTPException(
-                 status_code=status.HTTP_400_BAD_REQUEST,
-                 detail="Input text must contain at least one '[MASK]' token."
-             )

          results = []
-         for masked_index in masked_token_indices:
-             top_5_logits = torch.topk(logits[0, masked_index], 5).values
-             top_5_tokens = torch.topk(logits[0, masked_index], 5).indices
-
-             for i in range(5):
-                 score = torch.nn.functional.softmax(logits[0, masked_index], dim=-1)[top_5_tokens[i]].item()
-                 predicted_token_id = top_5_tokens[i].item()
-                 predicted_token_str = tokenizer.decode(predicted_token_id)

-                 temp_input_ids = inputs["input_ids"].clone()
-                 temp_input_ids[0, masked_index] = predicted_token_id
-                 full_sequence = tokenizer.decode(temp_input_ids[0], skip_special_tokens=True)

                  results.append(PredictionResult(
                      sequence=full_sequence,
-                     score=score,
-                     token=predicted_token_id,
-                     token_str=predicted_token_str
                  ))

-         logger.info(f"Successfully processed request. Returning {len(results)} predictions.")
-         return results

-     except ValidationError as e: # This is line 88
          logger.error(f"Validation error for request: {e.errors()}")
          raise HTTPException(
              status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
              detail=e.errors()
          )
      except HTTPException:
-         raise
      except Exception as e:
          logger.exception(f"An unexpected error occurred during prediction: {e}")
          raise HTTPException(
@@ -107,7 +204,7 @@ async def predict_masked_lm(request: InferenceRequest):
  )
  async def health_check():
      logger.info("Health check endpoint accessed.")
-     return {"message": "NeuroBERT-Tiny API is running!"}

  app.include_router(api_router)

@@ -119,3 +216,4 @@ async def catch_all(request: Request, path_name: str):
  if __name__ == "__main__":
      import uvicorn
      uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info")

  from fastapi import FastAPI, HTTPException, status, APIRouter, Request
  from pydantic import BaseModel, ValidationError
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
  import torch
  import logging
+ import asyncio # For running synchronous model inference in a separate thread

  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)

  app = FastAPI(
+     title="Masked Language Model API (via TinyLlama)",
+     description="An API to perform Masked Language Modeling using a locally hosted TinyLlama model.",
      version="1.0.0"
  )

  api_router = APIRouter()

+ # --- TinyLlama Model Configuration ---
+ # Using TinyLlama-1.1B-Chat-v1.0, which is a small, Llama-like model suitable for local inference.
+ MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ # -----------------------------------
+
+ # Load model and tokenizer globally to avoid reloading on each request.
+ # This block runs once when the FastAPI application starts.
  try:
+     logger.info(f"Loading tokenizer and model for {MODEL_NAME}...")
+     # Load tokenizer and model for Causal LM (text generation)
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+     # Using torch_dtype=torch.bfloat16 for potential memory/speed benefits if a GPU is available
+     # and to fit within common memory limits. Also using device_map="auto" to load efficiently.
+     model = AutoModelForCausalLM.from_pretrained(
+         MODEL_NAME,
+         torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
+         device_map="auto"
+     )
+     model.eval() # Set model to evaluation mode
+
+     # Create a text generation pipeline.
+     # We will adjust this pipeline's behavior in predict_masked_lm
+     # to simulate masked LM functionality by prompting the LLM.
+     text_generator = pipeline(
+         "text-generation",
+         model=model,
+         tokenizer=tokenizer,
+         # Ensure pad_token_id is set if the tokenizer does not have one, to avoid warnings/errors
+         pad_token_id=tokenizer.eos_token_id if tokenizer.pad_token_id is None else tokenizer.pad_token_id
+     )
      logger.info("Model loaded successfully.")
  except Exception as e:
+     logger.exception(f"Failed to load model or tokenizer for {MODEL_NAME} during startup!")
      raise RuntimeError(f"Could not load model: {e}")

  class InferenceRequest(BaseModel):
+     """
+     Request model for the /predict endpoint.
+     Expects a single string field 'text' containing the sentence with [MASK] tokens.
+     """
      text: str

  class PredictionResult(BaseModel):
+     """
+     Response model for individual predictions from the /predict endpoint.
+     Simplified to focus on the sequence and score, abstracting token details.
+     """
+     sequence: str # The full sequence with the predicted token filled in
+     score: float # Confidence score of the prediction (approximated for generative LLMs)
+
+ async def run_inference_blocking(generator_pipeline, prompt, num_return_sequences=5):
+     """
+     Runs the synchronous model inference in a separate thread to avoid blocking FastAPI's event loop.
+     """
+     return generator_pipeline(
+         prompt,
+         max_new_tokens=10, # Generate a small number of tokens for the mask
+         num_return_sequences=num_return_sequences,
+         do_sample=True, # Enable sampling for varied predictions
+         temperature=0.7, # Control randomness
+         top_k=50, # Consider top K tokens for sampling
+         top_p=0.95, # Consider tokens up to a certain cumulative probability
+         # The stop_sequence ensures it doesn't generate too much beyond the expected word
+         stop_sequence=[" ", ".", ",", "!", "?", "\n"] # Stop after generating a word/punctuation
+     )
+

  @api_router.post(
+     "/predict", # Prediction endpoint remains /predict
      response_model=list[PredictionResult],
+     summary="Predicts masked tokens in a given text using a local TinyLlama model",
+     description="Accepts a text string with '[MASK]' tokens and returns up to 5 single-word predictions for each masked position using a local generative AI model. Output is simplified to sequences and scores."
  )
  async def predict_masked_lm(request: InferenceRequest):
+     """
+     Predicts the most likely tokens for [MASK] positions in the input text using the TinyLlama model.
+     Returns a list of top predictions for each masked token, including the full sequence and an approximated score.
+     """
+     text = request.text
+     logger.info(f"Received prediction request for text: '{text}'")
+
+     if "[MASK]" not in text:
+         logger.warning("No [MASK] token found in the input text. Returning 400 Bad Request.")
+         raise HTTPException(
+             status_code=status.HTTP_400_BAD_REQUEST,
+             detail="Input text must contain at least one '[MASK]' token."
+         )
+
+     # Find the position of the first [MASK] token to correctly prompt the LLM
+     # and to insert predictions back into the original text
+     mask_start_index = text.find("[MASK]")
+     if mask_start_index == -1: # Should already be caught above, but kept as a safeguard
+         raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="No '[MASK]' token found in input.")
+
+     # Craft a prompt that encourages the LLM to fill the mask.
+     # The prompt guides the generative LLM to act like a fill-mask model.
+     # Example: "The quick brown fox jumps over the [MASK] dog. The word that should replace [MASK] is:"
+     # We remove "[MASK]" from the prompt for the generative model, and then
+     # prepend a guiding phrase and append the text after the mask.
+
+     # Split text around the first [MASK]
+     parts = text.split("[MASK]", 1)
+     if len(parts) < 2: # Should not happen if [MASK] is found
+         raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Error processing mask position.")
+
+     pre_mask_text = parts[0].strip()
+     post_mask_text = parts[1].strip()
+
+     # Construct the prompt to guide TinyLlama, e.g.
+     # "Fill in the blank: 'The quick brown fox jumps over the ______ dog.' Best options are:"
+     prompt = f"Complete the missing word in the following sentence. Give 5 single-word options. Sentence: '{pre_mask_text} ____ {post_mask_text}' Options:"

+     try:
+         # Run inference in a separate thread to not block the main event loop.
+         # The model's output will be a list of dicts, e.g., [{"generated_text": "Prompt + predicted word"}]
+         raw_predictions = await run_inference_blocking(text_generator, prompt)
+
          results = []
+         seen_words = set() # To ensure unique predictions
+
+         for i, pred_item in enumerate(raw_predictions):
+             generated_text = pred_item.get("generated_text", "")
+
+             # Extract only the predicted word from the generated text.
+             # This is heuristic and might need fine-tuning based on actual model output:
+             # we look for text that comes *after* our prompt and try to extract the first word.
+             if prompt in generated_text:
+                 completion_text = generated_text.split(prompt, 1)[-1].strip()
+                 # Try to extract the first word if it contains spaces
+                 predicted_word = completion_text.split(' ', 1)[0].strip().replace('.', '').replace(',', '')
+                 # Filter out numbers, common filler words, or very short non-alpha words
+                 if not predicted_word.isalpha() or len(predicted_word) < 2:
+                     continue
+
+                 # Further refine by splitting on common word separators, taking the first valid word
+                 valid_words = [w for w in predicted_word.split() if w.isalpha() and len(w) > 1]
+                 if not valid_words: continue
+                 predicted_word = valid_words[0].lower() # Normalize to lowercase
+
+                 # Ensure unique predictions
+                 if predicted_word in seen_words:
+                     continue
+                 seen_words.add(predicted_word)
+
+                 # Construct the full sequence with the predicted word
+                 full_sequence = text.replace("[MASK]", predicted_word, 1)

+                 # Approximate score (generative LLMs don't give scores directly for words)
+                 mock_score = 0.95 - (i * 0.01) # Slightly decrease confidence for lower ranks

                  results.append(PredictionResult(
                      sequence=full_sequence,
+                     score=mock_score
                  ))

+                 if len(results) >= 5: # Stop after getting 5 valid results
+                     break

+         if not results:
+             logger.warning("No valid predictions could be formatted from LLM response.")
+             raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Could not extract predictions from TinyLlama output.")
+
+         logger.info(f"Successfully processed request via TinyLlama. Returning {len(results)} predictions.")
+         return results
+
+     except ValidationError as e:
          logger.error(f"Validation error for request: {e.errors()}")
          raise HTTPException(
              status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
              detail=e.errors()
          )
      except HTTPException:
+         raise # Re-raise custom HTTPExceptions
      except Exception as e:
          logger.exception(f"An unexpected error occurred during prediction: {e}")
          raise HTTPException(

  )
  async def health_check():
      logger.info("Health check endpoint accessed.")
+     return {"message": "Masked Language Model API (via TinyLlama) is running!"}

  app.include_router(api_router)

  if __name__ == "__main__":
      import uvicorn
      uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info")
+
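
For reference, a minimal client-side sketch of how the updated /predict endpoint might be exercised once the app is running on port 7860. The example sentence, the use of the requests library, and the printed fields are illustrative assumptions; the scores returned are the approximated values the handler assigns, not model probabilities.

# Illustrative client sketch (assumes the API is reachable at localhost:7860
# and that the `requests` package is installed).
import requests

payload = {"text": "The quick brown fox jumps over the [MASK] dog."}  # hypothetical input

# Internally the handler builds a prompt roughly like:
# "Complete the missing word in the following sentence. Give 5 single-word options.
#  Sentence: 'The quick brown fox jumps over the ____ dog.' Options:"
resp = requests.post("http://localhost:7860/predict", json=payload, timeout=120)
resp.raise_for_status()

# Each item mirrors PredictionResult: a filled-in sequence plus an approximated score.
for item in resp.json():
    print(f"{item['score']:.2f}  {item['sequence']}")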
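
Note that run_inference_blocking calls the text-generation pipeline directly rather than awaiting anything. If truly off-loop execution is desired, one possible pattern is sketched below; it assumes Python 3.9+ (for asyncio.to_thread) and the text_generator pipeline defined above, and the generation arguments shown are illustrative rather than the exact committed ones.

# Sketch only: run the synchronous pipeline call in a worker thread so the
# FastAPI event loop stays responsive. Not the committed implementation.
import asyncio

async def run_inference_in_thread(generator_pipeline, prompt, num_return_sequences=5):
    return await asyncio.to_thread(
        generator_pipeline,
        prompt,
        max_new_tokens=10,
        num_return_sequences=num_return_sequences,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )

# Inside the endpoint this could replace the existing call:
# raw_predictions = await run_inference_in_thread(text_generator, prompt)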