rodrigomasini committed
Commit 27b1194 · verified · 1 Parent(s): 3f57afb

Update app.py

Files changed (1):
  1. app.py (+1 -10)
app.py CHANGED
@@ -1,10 +1,3 @@
-#Acknowledgments:
-#This project is inspired by:
-#1. https://github.com/haltakov/natural-language-image-search by Vladimir Haltakov
-#2. OpenAI's CLIP
-
-
-
 #Importing all the necessary libraries
 import torch
 import requests
@@ -18,8 +11,6 @@ from PIL import Image as PILIMAGE
 from transformers import CLIPProcessor, CLIPModel, CLIPTokenizer, CLIPConfig
 from sentence_transformers import SentenceTransformer, util
 
-
-
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Define model
@@ -35,7 +26,6 @@ photo_ids = pd.read_csv("./photo_ids.csv")
 photo_ids = list(photo_ids['photo_id'])
 
 
-
 def encode_text(text):
     with torch.no_grad():
         # Encode and normalize the description using CLIP
@@ -44,6 +34,7 @@ def encode_text(text):
         text_encoded = model.get_text_features(**inputs).detach().numpy()
     return text_encoded
 
+
 def encode_image(image):
     image = PILIMAGE.fromarray(image.astype('uint8'), 'RGB')
     with torch.no_grad():
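
Note: the hunks above only show diff context, so the model/processor setup and the lines that build `inputs` are not visible here. Below is a minimal, self-contained sketch of how the two helpers typically fit together; the checkpoint name, the `processor` calls, and the extra `.cpu()` before `.numpy()` are illustrative assumptions, not lines from this commit.

#Minimal sketch (assumptions marked), not the literal contents of app.py
import torch
from PIL import Image as PILIMAGE
from transformers import CLIPProcessor, CLIPModel

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed checkpoint; the actual model name is defined elsewhere in app.py
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def encode_text(text):
    with torch.no_grad():
        # Tokenize the description and encode it with CLIP's text tower
        inputs = processor(text=[text], return_tensors="pt", padding=True).to(device)
        text_encoded = model.get_text_features(**inputs).detach().cpu().numpy()
    return text_encoded

def encode_image(image):
    # The app hands images over as NumPy arrays, hence the conversion to PIL
    image = PILIMAGE.fromarray(image.astype("uint8"), "RGB")
    with torch.no_grad():
        inputs = processor(images=image, return_tensors="pt").to(device)
        image_encoded = model.get_image_features(**inputs).detach().cpu().numpy()
    return image_encoded

With the base checkpoint assumed above, both helpers return a (1, 512) NumPy array that can be compared against precomputed photo embeddings.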
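
The diff also shows photo IDs being read from ./photo_ids.csv and `util` imported from sentence_transformers, which points to a similarity lookup against precomputed photo features. A hedged sketch of such a lookup follows; the features file name, its .npy format, and the helper name find_best_matches are assumptions, not part of this commit.

#Hedged sketch of the lookup step; file name and helper name are assumptions
import numpy as np
import pandas as pd
from sentence_transformers import util

# Precomputed CLIP features for every photo (assumed file name and format)
photo_features = np.load("./features.npy").astype("float32")

photo_ids = pd.read_csv("./photo_ids.csv")
photo_ids = list(photo_ids['photo_id'])

def find_best_matches(query_features, k=3):
    # Cosine similarity between the query embedding (text or image) and all photos
    similarities = util.cos_sim(query_features.astype("float32"), photo_features)[0]
    top_k = similarities.argsort(descending=True)[:k]
    return [photo_ids[int(i)] for i in top_k]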