#+begin_src python :tangle ~/code/chromaexample/embeddings.py
import torch
from PIL import Image
import torchvision.transforms as transforms
import clip

# Load the CLIP ViT-B/32 model, using the GPU when available
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

def get_embeddings(path, model, preprocess):
    # Open the image, apply CLIP's preprocessing, and add a batch dimension
    image = Image.open(path)
    pp = preprocess(image).unsqueeze(0).to(device)
    # Encode without tracking gradients, since we only need the features
    with torch.no_grad():
        features = model.encode_image(pp)
    return features

def get_cosine_difference(a, b):
    # Cosine similarity between two embedding tensors
    return torch.nn.functional.cosine_similarity(a, b)

path_a = './images/a.jpg'
path_b = './images/b.jpg'
path_c = './images/c.jpg'
# Compute the embedding for the first image
e_a = get_embeddings(path_a, model, preprocess)