danielhanchen committed
Commit 561b775
1 Parent(s): 28ea972

Upload tokenizer

Files changed (3):
  1. special_tokens_map.json +11 -20
  2. tokenizer.json +0 -9
  3. tokenizer_config.json +7 -29
special_tokens_map.json CHANGED
@@ -1,37 +1,28 @@
 {
   "additional_special_tokens": [
-    "<|endoftext|>",
     "<|im_start|>",
-    "<|im_end|>",
-    "<repo_name>",
-    "<reponame>",
-    "<file_sep>",
-    "<filename>",
-    "<gh_stars>",
-    "<issue_start>",
-    "<issue_comment>",
-    "<issue_closed>",
-    "<jupyter_start>",
-    "<jupyter_text>",
-    "<jupyter_code>",
-    "<jupyter_output>",
-    "<jupyter_script>",
-    "<empty_output>"
+    "<|im_end|>"
   ],
   "bos_token": {
-    "content": "<|endoftext|>",
+    "content": "<|im_start|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "<|endoftext|>",
+    "content": "<|im_end|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<|PAD_TOKEN|>",
-  "unk_token": "�"
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
 }
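This change trims the special-token map down to the ChatML pair and points bos/eos/pad/unk at tokens already present in the base vocabulary. A minimal sketch of checking the new mapping with the transformers library follows; the repository id is a placeholder, not taken from this commit page.

# Sketch: verify the remapped special tokens after this commit.
# The repo id is a placeholder -- substitute the actual model repository.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<user>/<model-repo>", revision="561b775")

# Expected values per the updated special_tokens_map.json:
assert tokenizer.bos_token == "<|im_start|>"
assert tokenizer.eos_token == "<|im_end|>"
assert tokenizer.pad_token == "<|endoftext|>"
assert tokenizer.unk_token == "<|endoftext|>"
print(tokenizer.additional_special_tokens)  # ['<|im_start|>', '<|im_end|>']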
tokenizer.json CHANGED
@@ -155,15 +155,6 @@
       "rstrip": false,
       "normalized": false,
       "special": true
-    },
-    {
-      "id": 49152,
-      "content": "<|PAD_TOKEN|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
     }
   ],
   "normalizer": null,
tokenizer_config.json CHANGED
@@ -136,42 +136,20 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "49152": {
-      "content": "<|PAD_TOKEN|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "additional_special_tokens": [
-    "<|endoftext|>",
     "<|im_start|>",
-    "<|im_end|>",
-    "<repo_name>",
-    "<reponame>",
-    "<file_sep>",
-    "<filename>",
-    "<gh_stars>",
-    "<issue_start>",
-    "<issue_comment>",
-    "<issue_closed>",
-    "<jupyter_start>",
-    "<jupyter_text>",
-    "<jupyter_code>",
-    "<jupyter_output>",
-    "<jupyter_script>",
-    "<empty_output>"
+    "<|im_end|>"
   ],
-  "bos_token": "<|endoftext|>",
+  "bos_token": "<|im_start|>",
+  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "<|endoftext|>",
-  "model_max_length": 8192,
-  "pad_token": "<|PAD_TOKEN|>",
+  "eos_token": "<|im_end|>",
+  "model_max_length": 2048,
+  "pad_token": "<|endoftext|>",
   "padding_side": "left",
   "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": "�",
+  "unk_token": "<|endoftext|>",
   "vocab_size": 49152
 }
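With the new chat_template, the default SmolLM system prompt, <|im_end|> as eos, and model_max_length of 2048, prompts can be rendered via apply_chat_template. A short sketch, again with a placeholder repo id:

# Sketch: render a ChatML prompt with the template added in this commit.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<user>/<model-repo>")  # placeholder repo id

messages = [{"role": "user", "content": "What is the capital of France?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Expected output shape (the template injects the default system prompt
# because the first message is not a system message):
# <|im_start|>system
# You are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>
# <|im_start|>user
# What is the capital of France?<|im_end|>
# <|im_start|>assistant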