Update app.py
app.py CHANGED

@@ -15,7 +15,7 @@ from io import BytesIO
 
 from PIL import Image as PILIMAGE
 
-from transformers import CLIPProcessor, CLIPModel, CLIPTokenizer
+from transformers import CLIPProcessor, CLIPModel, CLIPTokenizer, CLIPConfig
 from sentence_transformers import SentenceTransformer, util
 
 
@@ -23,6 +23,7 @@ from sentence_transformers import SentenceTransformer, util
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Define model
+config = CLIPConfig.from_pretrained("openai/clip-vit-base-patch32")
 model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", config=config.vision_config).to(device)
 processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
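
The commit imports CLIPConfig and defines config before the existing CLIPModel.from_pretrained(...) call, so the config.vision_config reference on the following line resolves instead of raising a NameError. Below is a minimal sketch of how the loaded model, processor, and tokenizer are typically used to embed an image and a text query; it assumes model loads as a full CLIPModel, and the image path and query string are placeholders, not taken from this Space's code.

import torch
from PIL import Image as PILIMAGE

# Hypothetical inputs for illustration only.
image = PILIMAGE.open("example.jpg")
texts = ["a photo of a cat"]

# The processor prepares pixel values; the tokenizer prepares the text batch.
image_inputs = processor(images=image, return_tensors="pt").to(device)
text_inputs = tokenizer(texts, padding=True, return_tensors="pt").to(device)

with torch.no_grad():
    # Project both modalities into CLIP's shared embedding space.
    image_features = model.get_image_features(**image_inputs)
    text_features = model.get_text_features(**text_inputs)

# Cosine similarity between the image embedding and the text query.
similarity = torch.nn.functional.cosine_similarity(image_features, text_features)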