# coding=utf-8
# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for ESM.""" | |
import os | |
from typing import List, Optional, Union | |
from transformers.tokenization_utils import PreTrainedTokenizer | |
from transformers.tokenization_utils_base import AddedToken | |
from transformers.utils import logging | |
logger = logging.get_logger(__name__) | |
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer.
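
    Example (an illustrative sketch, assuming the `facebook/esm2_t6_8M_UR50D`
    checkpoint referenced in `PRETRAINED_VOCAB_FILES_MAP` above is available):

    ```python
    >>> from transformers import EsmTokenizer

    >>> tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    >>> tokenizer.tokenize("MKT")
    ['M', 'K', 'T']
    ```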
""" | |
vocab_files_names = VOCAB_FILES_NAMES | |
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP | |
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES | |
model_input_names = ["input_ids", "attention_mask"] | |

    def __init__(self, vocab_file, **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = {ind: tok for ind, tok in enumerate(self.all_tokens)}
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
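        # These special tokens are already entries in the ESM vocab file, so
        # the assignments below only register which existing vocabulary items
        # play each special role; no new ids are created for them.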
        self.unk_token = "<unk>"
        self.cls_token = "<cls>"
        self.pad_token = "<pad>"
        self.mask_token = "<mask>"
        self.eos_token = "<eos>"
        # Register every vocab entry as unsplittable so that multi-character
        # tokens such as "<mask>" survive tokenization intact.
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # The no-split trie built in __init__ already isolates every known
        # token (including single residues), so a plain whitespace split is
        # enough for whatever text remains.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        # `with_added_tokens` is accepted for API compatibility but has no
        # effect: only the base vocabulary loaded from `vocab_file` is counted.
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
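        # Output format:
        #   single sequence: <cls> X <eos>
        #   pair of sequences: <cls> A <eos> B <eos>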
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.eos_token_id]
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary, so <eos> doubles as the separator
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of ids of the first sequence.
            token_ids_1 (`List[int]`, *optional*):
                List of ids of the second sequence.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
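
        Example (illustrative; the mask depends only on the sequence lengths):

        ```python
        >>> tokenizer.get_special_tokens_mask([5, 6, 7])
        [1, 0, 0, 0, 1]
        ```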
""" | |
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]  # <cls> + token_ids_0 + <eos>
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]  # token_ids_1 + closing <eos>
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        # Always add new tokens as "special" so they are registered as
        # no-split tokens and never broken apart during tokenization.
        return super()._add_tokens(new_tokens, special_tokens=True)
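

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the module's public API).
# It assumes a local `vocab.txt` with one token per line, e.g. the ESM-2
# vocabulary served at the URLs in PRETRAINED_VOCAB_FILES_MAP above, and a
# transformers version contemporary with this file.
#
#     tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#     encoding = tokenizer("MKTAYIAKQR")
#     # encoding["input_ids"] begins with the <cls> id, ends with the <eos>
#     # id, and carries one id per residue in between.
# ---------------------------------------------------------------------------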