{
  "added_tokens_decoder": {
    "27": {
      "content": "[UNK]",
      "lstrip": true,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "28": {
      "content": "[PAD]",
      "lstrip": true,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "29": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "30": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "do_lower_case": false,
  "eos_token": "</s>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "[PAD]",
  "replace_word_delimiter_char": " ",
  "target_lang": null,
  "tokenizer_class": "Wav2Vec2CTCTokenizer",
  "unk_token": "[UNK]",
  "vocab_dict": {
    "[PAD]": 28,
    "[UNK]": 27,
    "a": 1,
    "b": 2,
    "c": 3,
    "d": 4,
    "e": 5,
    "f": 6,
    "g": 7,
    "h": 8,
    "i": 9,
    "j": 10,
    "k": 11,
    "l": 12,
    "m": 13,
    "n": 14,
    "o": 15,
    "p": 16,
    "q": 17,
    "r": 18,
    "s": 19,
    "t": 20,
    "u": 21,
    "v": 22,
    "w": 23,
    "x": 24,
    "y": 25,
    "z": 26,
    "|": 0
  },
  "word_delimiter_token": "|"
}