huseinzol05 committed
Commit 582037b
1 Parent(s): 65c43de

Upload tokenizer
Files changed (3)
  1. special_tokens_map.json +14 -7
  2. tokenizer.json +0 -0
  3. tokenizer_config.json +62 -18
special_tokens_map.json CHANGED
@@ -1,23 +1,30 @@
 {
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "eos_token": {
     "content": "</s>",
-    "lstrip": true,
+    "lstrip": false,
     "normalized": false,
-    "rstrip": true,
+    "rstrip": false,
     "single_word": false
   },
   "pad_token": {
-    "content": "</s>",
-    "lstrip": true,
+    "content": "<unk>",
+    "lstrip": false,
     "normalized": false,
-    "rstrip": true,
+    "rstrip": false,
     "single_word": false
   },
   "unk_token": {
     "content": "<unk>",
-    "lstrip": true,
+    "lstrip": false,
     "normalized": false,
-    "rstrip": true,
+    "rstrip": false,
     "single_word": false
   }
 }
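In effect, this change adds `<s>` as a BOS token, repoints the padding token from `</s>` to `<unk>`, and turns off `lstrip`/`rstrip` on every special token so surrounding whitespace is no longer stripped. A minimal sketch of how to verify the loaded values, using a placeholder repo id rather than the real repository:

```python
from transformers import AutoTokenizer

# "user/repo" is a placeholder; substitute the actual model repository.
tokenizer = AutoTokenizer.from_pretrained("user/repo")

# After this commit the map defines four entries:
print(tokenizer.bos_token)  # <s>   (new in this commit)
print(tokenizer.eos_token)  # </s>
print(tokenizer.pad_token)  # <unk> (was </s> before this commit)
print(tokenizer.unk_token)  # <unk>
```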
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,34 +1,78 @@
 {
+  "add_bos_token": false,
+  "add_eos_token": false,
   "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
     "1": {
-      "content": "</s>",
-      "lstrip": true,
+      "content": "<s>",
+      "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
     "2": {
-      "content": "<unk>",
-      "lstrip": true,
+      "content": "</s>",
+      "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "32000": {
+      "content": "<image>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32001": {
+      "content": "</image>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32002": {
+      "content": "<audio>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32003": {
+      "content": "</audio>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
     }
   },
-  "additional_special_tokens": [],
-  "clean_up_tokenization_spaces": true,
-  "do_lower_case": true,
+  "bos_token": "<s>",
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
-  "model_input_names": [
-    "input_ids"
-  ],
-  "model_max_length": 64,
-  "pad_token": "</s>",
-  "processor_class": "SiglipProcessor",
+  "legacy": false,
+  "max_length": 2048,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<unk>",
+  "padding_side": "right",
   "sp_model_kwargs": {},
-  "tokenizer_class": "SiglipTokenizer",
-  "trust_remote_code": false,
-  "unk_token": "<unk>"
+  "stride": 0,
+  "tokenizer_class": "LlamaTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
 }
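Taken together, the config moves from a `SiglipTokenizer` (64-token max length, lowercasing) to a `LlamaTokenizer` with four multimodal marker tokens (`<image>`, `</image>`, `<audio>`, `</audio>` at ids 32000-32003) and a Mistral-style `[INST] ... [/INST]` chat template. A minimal usage sketch, again assuming a placeholder repo id:

```python
from transformers import AutoTokenizer

# "user/repo" is a placeholder; substitute the actual model repository.
tokenizer = AutoTokenizer.from_pretrained("user/repo")

# The template requires roles to alternate user/assistant, starting with user.
messages = [
    {"role": "user", "content": "hello"},
    {"role": "assistant", "content": "hi there"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
# <s>[INST] hello [/INST]hi there</s>

# The multimodal markers resolve to the single added ids, not subword pieces.
print(tokenizer.convert_tokens_to_ids(["<image>", "</image>", "<audio>", "</audio>"]))
# [32000, 32001, 32002, 32003]
```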