|
---
license: mit
datasets:
- M-AI-C/quran-en-tafssirs
language:
- en
---
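The snippet below embeds queries and passages with [intfloat/e5-small](https://huggingface.co/intfloat/e5-small), average-pools the last hidden states into sentence embeddings, and scores query-passage similarity.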
|
```python
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel


def average_pool(last_hidden_states: Tensor,
                 attention_mask: Tensor) -> Tensor:
    # Zero out padded positions, then mean-pool over the sequence dimension.
    last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
    return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]


# Each input text should start with "query: " or "passage: ".
# For tasks other than retrieval, you can simply use the "query: " prefix.
input_texts = ['query: Who is the prophet known for patience?',
               'query: Who is Moses?',
               'passage: passage 1',
               'passage: passage 2']

tokenizer = AutoTokenizer.from_pretrained('intfloat/e5-small')
model = AutoModel.from_pretrained('intfloat/e5-small')

# Tokenize the input texts
batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt')

outputs = model(**batch_dict)
embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])

# (Optionally) normalize embeddings so the dot products below are cosine similarities
embeddings = F.normalize(embeddings, p=2, dim=1)
scores = (embeddings[:2] @ embeddings[2:].T) * 100
print(scores.tolist())
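
# A minimal follow-up sketch: rank the passages for each query.
# `scores` has shape (num_queries, num_passages); argmax picks the best match.
best = scores.argmax(dim=1)
for query, idx in zip(input_texts[:2], best.tolist()):
    print(query, '->', input_texts[2:][idx])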
|
``` |
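
If the checkpoint ships a sentence-transformers config (the upstream [intfloat/e5-small](https://huggingface.co/intfloat/e5-small) repository does), the same embeddings can be produced more compactly. A minimal sketch, assuming `sentence-transformers` is installed:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('intfloat/e5-small')
# encode() returns one vector per input; normalizing makes dot products cosine similarities.
embeddings = model.encode(input_texts, normalize_embeddings=True)
```

The "query: " / "passage: " prefixes are still required here; they are part of how E5 models were trained, not an artifact of the tokenizer.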