Add HF token support to embedding API
embedding_api.py  CHANGED  (+8 -2)
@@ -17,6 +17,11 @@ import uvicorn
 from colpali_engine.models import ColPali, ColPaliProcessor
 from colpali_engine.utils.torch_utils import get_torch_device
 
+# Set HF token if available
+hf_token = os.environ.get("HUGGING_FACE_TOKEN") or os.environ.get("HF_TOKEN")
+if hf_token:
+    os.environ["HF_TOKEN"] = hf_token
+
 # Setup logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -54,9 +59,10 @@ def load_model():
         model = ColPali.from_pretrained(
             model_name,
             torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
-            device_map=device
+            device_map=device,
+            token=hf_token
         ).eval()
-        processor = ColPaliProcessor.from_pretrained(model_name)
+        processor = ColPaliProcessor.from_pretrained(model_name, token=hf_token)
         logger.info("ColPali model loaded successfully")
     except Exception as e:
         logger.error(f"Error loading model: {e}")