import torch
from imagebind import data
from imagebind.models import imagebind_model
from imagebind.models.imagebind_model import ModalityType
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print("Loading model")
model = imagebind_model.imagebind_huge(pretrained=True)
model.eval()
model.to(device)


def embed(mode, inputs):
    # Forward pass for a single modality; the model returns a dict keyed by
    # modality, and we take the first (and only) embedding in the batch.
    # The parameter is named `inputs` to avoid shadowing the `imagebind.data` module.
    with torch.no_grad():
        return model({mode: inputs})[mode][0]


def embed_text(text):
    return embed(ModalityType.TEXT, data.load_and_transform_text([text], device))


def embed_audio(audio_path):
    return embed(
        ModalityType.AUDIO, data.load_and_transform_audio_data([audio_path], device)
    )


def embed_image(image_path):
    return embed(
        ModalityType.VISION, data.load_and_transform_vision_data([image_path], device)
    )


def embed_video(video_path):
    return embed(
        ModalityType.VISION, data.load_and_transform_video_data([video_path], device)
    )