HamzaSidhu786 committed on
Commit
4bb2f4f
1 Parent(s): 3c78167

Upload tokenizer

Files changed (3)
  1. special_tokens_map.json +2 -2
  2. tokenizer_config.json +17 -19
  3. vocab.json +21 -56
special_tokens_map.json CHANGED
@@ -1,6 +1,6 @@
 {
   "bos_token": "<s>",
   "eos_token": "</s>",
-  "pad_token": "<pad>",
-  "unk_token": "<unk>"
+  "pad_token": "[PAD]",
+  "unk_token": "[UNK]"
 }
tokenizer_config.json CHANGED
@@ -1,49 +1,47 @@
 {
   "added_tokens_decoder": {
-    "0": {
-      "content": "<pad>",
+    "19": {
+      "content": "[UNK]",
       "lstrip": true,
       "normalized": false,
       "rstrip": true,
       "single_word": false,
       "special": false
     },
-    "1": {
-      "content": "<s>",
+    "20": {
+      "content": "[PAD]",
       "lstrip": true,
       "normalized": false,
       "rstrip": true,
       "single_word": false,
       "special": false
     },
-    "2": {
-      "content": "</s>",
-      "lstrip": true,
+    "21": {
+      "content": "<s>",
+      "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
-      "special": false
+      "special": true
     },
-    "3": {
-      "content": "<unk>",
-      "lstrip": true,
+    "22": {
+      "content": "</s>",
+      "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
-      "special": false
+      "special": true
     }
   },
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
   "do_lower_case": false,
-  "do_normalize": true,
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<pad>",
+  "pad_token": "[PAD]",
   "replace_word_delimiter_char": " ",
-  "return_attention_mask": false,
   "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",
-  "unk_token": "<unk>",
-  "word_delimiter_token": "/"
+  "unk_token": "[UNK]",
+  "word_delimiter_token": "|"
 }
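A minimal sketch, not part of the commit, of how this config is consumed: loading the uploaded files with transformers' Wav2Vec2CTCTokenizer and checking that the renamed special tokens resolve to the IDs declared in added_tokens_decoder. The directory "./tokenizer" is a hypothetical local path holding the three files above.

from transformers import Wav2Vec2CTCTokenizer

# Hypothetical local directory containing the three uploaded files.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./tokenizer")

# The renamed special tokens resolve to the new vocabulary IDs.
print(tokenizer.pad_token, tokenizer.pad_token_id)   # [PAD] 20
print(tokenizer.unk_token, tokenizer.unk_token_id)   # [UNK] 19
print(tokenizer.word_delimiter_token)                # |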
vocab.json CHANGED
@@ -1,58 +1,23 @@
 {
-  "$": 24,
-  "&": 8,
-  "'": 5,
-  "*": 20,
-  "/": 4,
-  "<": 9,
-  "</s>": 2,
-  "<pad>": 0,
-  "<s>": 1,
-  "<unk>": 3,
-  ">": 7,
-  "A": 11,
-  "D": 26,
-  "E": 29,
-  "F": 42,
-  "G": 55,
-  "H": 17,
-  "J": 53,
-  "K": 44,
-  "N": 43,
-  "P": 52,
-  "S": 25,
-  "T": 27,
-  "V": 54,
-  "Y": 40,
-  "Z": 28,
-  "_": 31,
-  "`": 50,
-  "a": 45,
-  "b": 12,
-  "d": 19,
-  "f": 32,
-  "g": 30,
-  "h": 38,
-  "i": 47,
-  "j": 16,
-  "k": 34,
-  "l": 35,
-  "m": 36,
-  "n": 37,
-  "o": 49,
-  "p": 13,
-  "q": 33,
-  "r": 21,
-  "s": 23,
-  "t": 14,
-  "u": 46,
-  "v": 15,
-  "w": 39,
-  "x": 18,
-  "y": 41,
-  "z": 22,
-  "{": 51,
-  "|": 6,
-  "}": 10,
-  "~": 48
+  "[PAD]": 20,
+  "[UNK]": 19,
+  "|": 0,
+  "ا": 16,
+  "ب": 2,
+  "ح": 7,
+  "د": 17,
+  "ر": 1,
+  "س": 13,
+  "ع": 4,
+  "ل": 3,
+  "م": 15,
+  "ن": 6,
+  "ه": 5,
+  "ي": 14,
+  "َ": 9,
+  "ُ": 18,
+  "ِ": 12,
+  "ّ": 8,
+  "ْ": 10,
+  "ٰ": 11
 }
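The new vocabulary keeps 12 Arabic letters, 6 diacritics, the "|" word delimiter (ID 0), and the two renamed specials. A hedged round-trip sketch under the same assumed "./tokenizer" path: Wav2Vec2CTCTokenizer splits text into characters and swaps spaces for the delimiter, so in-vocabulary Arabic text should encode and decode cleanly, while any character outside these 21 entries maps to [UNK] (ID 19).

from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./tokenizer")  # hypothetical path, as above

# Character-level encoding; the space becomes the "|" delimiter (ID 0).
ids = tokenizer("مرحبا يا")["input_ids"]
print(ids)  # expected: [15, 1, 7, 2, 16, 0, 14, 16]

# CTC-style decoding maps the delimiter back to a space. Note that decode()
# collapses adjacent duplicate IDs by default (CTC grouping), so text with
# doubled letters would need group_tokens=False to round-trip exactly.
print(tokenizer.decode(ids))  # مرحبا يا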