import json
import torch
from pathlib import Path
from typing import Dict, Any
from src.app.model_utils.factory import ModelFactory
class ModelManager:
    """
    Manages model loading and inference operations.

    Loads ``config.json`` and ``vocab.json`` from ``model_dir`` at
    construction time, and lazily creates and caches models on first
    request via :class:`ModelFactory`.

    Args:
        model_dir: Directory containing model artifacts
            (``config.json``, ``vocab.json``, ``best_model.pth``).
    """
    def __init__(self, model_dir: str = "../pretrained") -> None:
        self.model_dir = Path(model_dir)
        # Cache of instantiated models, keyed by model_type string.
        self.loaded_models: Dict[str, Any] = {}
        self._load_model_artifacts()
    def _load_model_artifacts(self) -> None:
        """
        Load model configuration and vocabulary from disk.

        Raises:
            FileNotFoundError: If ``config.json`` or ``vocab.json``
                is missing from ``model_dir``.
            json.JSONDecodeError: If either file is not valid JSON.
        """
        # Read with explicit UTF-8 so behavior does not depend on the
        # platform's default locale encoding (JSON is UTF-8 by spec).
        self.config = json.loads(
            (self.model_dir / "config.json").read_text(encoding="utf-8")
        )
        self.vocab = json.loads(
            (self.model_dir / "vocab.json").read_text(encoding="utf-8")
        )
        # Fixed binary sentiment label mapping used by callers.
        self.idx_to_label = {0: "Negative", 1: "Positive"}
    def get_model(self) -> torch.nn.Module:
        """
        Get the loaded model (cached for performance).

        Returns:
            The PyTorch model for ``config["model_type"]``. NOTE(review):
            presumably ModelFactory returns it in evaluation mode with the
            ``best_model.pth`` weights applied — confirm against the factory.
        """
        model_type = self.config["model_type"]
        if model_type not in self.loaded_models:
            # First request for this model type: build once and cache.
            self.loaded_models[model_type] = ModelFactory.create_model(
                model_type=model_type,
                model_params=self.config["model_params"],
                state_dict_path=self.model_dir / "best_model.pth",
            )
        return self.loaded_models[model_type]
    def get_vocab(self) -> Dict[str, int]:
        """
        Get the token-to-index vocabulary mapping.
        """
        return self.vocab
    def get_config(self) -> Dict[str, Any]:
        """
        Get the model configuration dictionary.
        """
        return self.config