import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM
from sklearn.metrics.pairwise import cosine_similarity
def simcse_similar(model, tokenizer, text_a, text_b):
    """Return the cosine similarity of two texts' SimCSE sentence embeddings.

    The sentence embedding is the [CLS] (position 0) vector of the model's
    last hidden layer, obtained via ``output_hidden_states=True``.

    Args:
        model: HuggingFace masked-LM model that accepts
            ``output_hidden_states=True`` and returns ``.hidden_states``.
        tokenizer: the tokenizer matching ``model``.
        text_a: first input string.
        text_b: second input string.

    Returns:
        float: cosine similarity in [-1, 1].
    """
    inputs_source = tokenizer(text_a, return_tensors="pt")
    inputs_target = tokenizer(text_b, return_tensors="pt")
    # Run the forward passes under no_grad: the original only wrapped the
    # similarity computation, so the hidden states were produced with
    # requires_grad=True (wasting memory, and crashing sklearn's internal
    # numpy conversion with "Can't call numpy() on Tensor that requires grad").
    with torch.no_grad():
        outputs_source = model(**inputs_source, output_hidden_states=True)
        outputs_target = model(**inputs_target, output_hidden_states=True)
        # [CLS] token embedding from the last hidden layer.
        source_embedding = outputs_source.hidden_states[-1][:, 0, :].squeeze()
        target_embedding = outputs_target.hidden_states[-1][:, 0, :].squeeze()
        # Compute the cosine in torch directly — same value as the sklearn
        # call, without the tensor->numpy round trip. (Also fixes the
        # 'silimarity_score' typo.)
        similarity_score = torch.nn.functional.cosine_similarity(
            source_embedding.reshape(1, -1), target_embedding.reshape(1, -1)
        ).item()
    return similarity_score


if __name__ == '__main__':
    model = AutoModelForMaskedLM.from_pretrained('../Erlangshen-SimCSE-110M-Chinese')
    tokenizer = AutoTokenizer.from_pretrained('../Erlangshen-SimCSE-110M-Chinese')
    text_a = '城市缩写'
    text_b = '呼叫线体ID'
    similarScore = simcse_similar(model, tokenizer, text_a, text_b)
    print(similarScore)