Update README.md

The `GME` models support three types of input: **text**, **image**, and **image-text pair**.

|[`gme-Qwen2-VL-7B`](https://huggingface.co/Alibaba-NLP/gme-Qwen2-VL-7B-Instruct) | 8.29B | 32768 | 3584 | 67.48 | 69.73 | 67.44 |

## Usage

**Use with sentence_transformers**

```python
from sentence_transformers import SentenceTransformer

t2i_prompt = 'Find an image that matches the given text.'
texts = [
    "The Tesla Cybertruck is a battery electric pickup truck built by Tesla, Inc. since 2023.",
    "Alibaba office.",
]
images = [
    'https://upload.wikimedia.org/wikipedia/commons/e/e9/Tesla_Cybertruck_damaged_window.jpg',
    'https://upload.wikimedia.org/wikipedia/commons/e/e0/TaobaoCity_Alibaba_Xixi_Park.jpg',
]

gme_st = SentenceTransformer("Alibaba-NLP/gme-Qwen2-VL-2B-Instruct")

# Single-modal embedding: plain strings are embedded as text,
# dicts with an `image` key as images.
e_text = gme_st.encode(texts, convert_to_tensor=True)
e_image = gme_st.encode([dict(image=i) for i in images], convert_to_tensor=True)
print('Single-modal', (e_text @ e_image.T).tolist())
## Single-modal [[0.356201171875, 0.06536865234375], [0.041717529296875, 0.37890625]]

# How to set the embedding instruction: pass it as `prompt` per input.
e_query = gme_st.encode([dict(text=t, prompt=t2i_prompt) for t in texts], convert_to_tensor=True)
# If no prompt is given, the default instruction is used.
e_corpus = gme_st.encode([dict(image=i) for i in images], convert_to_tensor=True)
print('Single-modal with instruction', (e_query @ e_corpus.T).tolist())
## Single-modal with instruction [[0.425537109375, 0.1158447265625], [0.049835205078125, 0.413818359375]]

# Fused-modal embedding: a dict with both `text` and `image` embeds the pair jointly.
e_fused = gme_st.encode([dict(text=t, image=i) for t, i in zip(texts, images)], convert_to_tensor=True)
print('Fused-modal', (e_fused @ e_fused.T).tolist())
## Fused-modal [[0.99951171875, 0.0556640625], [0.0556640625, 0.99951171875]]
```
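
The scores above are raw dot products; they behave like cosine similarities because the embeddings come back close to unit norm (note the ~1.0 diagonal of the fused-modal matrix). As a minimal retrieval sketch on top of this (our addition, not part of the model card), `sentence_transformers.util.cos_sim` makes the normalization explicit and ranks the corpus per query:

```python
# Sketch only: reuses the `e_query`/`e_corpus` tensors from the snippet above.
from sentence_transformers import util

scores = util.cos_sim(e_query, e_corpus)   # (num_texts, num_images), cosine similarity
best = scores.argmax(dim=1)                # best-matching image per text query
for text, idx in zip(texts, best.tolist()):
    print(f'{text[:40]!r} -> {images[idx]}')
```
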
**Use with custom code**

```python
# You can find the script gme_inference.py at https://huggingface.co/Alibaba-NLP/gme-Qwen2-VL-2B-Instruct/blob/main/gme_inference.py
from gme_inference import GmeQwen2VL

t2i_prompt = 'Find an image that matches the given text.'
texts = [
    "The Tesla Cybertruck is a battery electric pickup truck built by Tesla, Inc. since 2023.",
    "Alibaba office.",
]
images = [
    'https://upload.wikimedia.org/wikipedia/commons/e/e9/Tesla_Cybertruck_damaged_window.jpg',
    'https://upload.wikimedia.org/wikipedia/commons/e/e0/TaobaoCity_Alibaba_Xixi_Park.jpg',
]

gme = GmeQwen2VL("Alibaba-NLP/gme-Qwen2-VL-2B-Instruct")

# Single-modal embedding
e_text = gme.get_text_embeddings(texts=texts)
e_image = gme.get_image_embeddings(images=images)
print('Single-modal', (e_text @ e_image.T).tolist())
## [[0.359619140625, 0.0655517578125], [0.04180908203125, 0.374755859375]]

# How to set the embedding instruction
e_query = gme.get_text_embeddings(texts=texts, instruction=t2i_prompt)
# With is_query=False, the default instruction is always used.
e_corpus = gme.get_image_embeddings(images=images, is_query=False)
print('Single-modal with instruction', (e_query @ e_corpus.T).tolist())
## [[0.429931640625, 0.11505126953125], [0.049835205078125, 0.409423828125]]

# Fused-modal embedding
e_fused = gme.get_fused_embeddings(texts=texts, images=images)
print('Fused-modal', (e_fused @ e_fused.T).tolist())
## [[1.0, 0.05511474609375], [0.05511474609375, 1.0]]
```
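
For a quick end-to-end check, these calls compose naturally into text-to-image retrieval. The sketch below is our illustration, not part of the script: it assumes `GmeQwen2VL` returns `torch.Tensor` embeddings (consistent with the `@` and `.tolist()` calls above) and uses a hypothetical query string.

```python
import torch

# Hypothetical free-form query; reuses `gme`, `t2i_prompt`, and `images` from above.
queries = ['electric pickup truck']
q_emb = gme.get_text_embeddings(texts=queries, instruction=t2i_prompt)
doc_emb = gme.get_image_embeddings(images=images, is_query=False)

scores = q_emb @ doc_emb.T                 # (num_queries, num_images)
top = torch.topk(scores, k=1, dim=1)       # best image per query
for q, idx, val in zip(queries, top.indices[:, 0].tolist(), top.values[:, 0].tolist()):
    print(f'{q!r} -> {images[idx]} (score={val:.3f})')
```
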
## Evaluation