m-ric (HF staff) committed
Commit: 7d36ecb
Parent: 123ce7a

Upload processor

preprocessor_config.json ADDED
@@ -0,0 +1,21 @@
+{
+  "_transform": null,
+  "auto_map": {
+    "AutoImageProcessor": "rhymes-ai/Aria--vision_processor.AriaVisionProcessor",
+    "AutoProcessor": "rhymes-ai/Aria--processing_aria.AriaProcessor"
+  },
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "AriaVisionProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "max_image_size": 980,
+  "min_image_size": 336,
+  "processor_class": "AriaProcessor"
+}
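This config is picked up automatically when the processor is loaded from the Hub. A minimal sketch of that, assuming a placeholder repository id ("your-org/your-aria-repo" stands in for wherever this file lives) and that remote code is trusted, since auto_map points at custom classes defined in the repo:

# Minimal sketch: loading the uploaded processor config from the Hub.
# "your-org/your-aria-repo" is a placeholder, not the real repo id.
# trust_remote_code=True is needed because auto_map references custom
# classes (AriaVisionProcessor / AriaProcessor) shipped with the repo.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "your-org/your-aria-repo",
    trust_remote_code=True,
)
# Per this config, images are normalized with mean/std of 0.5 per channel
# and resized between min_image_size (336) and max_image_size (980).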
special_tokens_map.json ADDED
@@ -0,0 +1,10 @@
+{
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
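The special-tokens map reuses the unknown token as the padding token. A short sketch of how that surfaces once the tokenizer is loaded (repository id is again a placeholder):

# Sketch: pad_token falls back to "<unk>" as declared in
# special_tokens_map.json; the repo id below is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "your-org/your-aria-repo", trust_remote_code=True
)
print(tokenizer.pad_token)  # "<unk>"
print(tokenizer.unk_token)  # "<unk>"
print(tokenizer.pad_token_id == tokenizer.unk_token_id)  # True (both map to id 0)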
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e429a008ed1045d14464933311e0b3258575980efc9db4e61f368e399c719d2a
+size 1696299
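tokenizer.model is stored via Git LFS, so the diff only shows the pointer. A hedged sketch for checking a downloaded copy against the pointer's SHA-256 and size (the local path is an assumption about where a clone places the file):

# Sketch: verify a local tokenizer.model against the LFS pointer above.
# Assumes the repo was cloned with LFS objects pulled; path is a placeholder.
import hashlib
import os

path = "tokenizer.model"
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
print(digest == "e429a008ed1045d14464933311e0b3258575980efc9db4e61f368e399c719d2a")
print(os.path.getsize(path) == 1696299)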
tokenizer_config.json ADDED
@@ -0,0 +1,27 @@
+{
+  "add_bos_token": false,
+  "add_eos_token": false,
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": null,
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}{% elif message['content'] is iterable %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<fim_prefix><|img|><fim_suffix>{% endif %}{% endfor %}{% endif %}<|im_end|>\n{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": null,
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<unk>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
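The chat_template above wraps each turn in <|im_start|>/<|im_end|> and replaces image items with <fim_prefix><|img|><fim_suffix>. A minimal sketch of rendering it, assuming the placeholder repository id and a message structure with the type fields the template expects:

# Sketch: render the chat template defined in tokenizer_config.json.
# "your-org/your-aria-repo" is a placeholder, not the real repo id.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "your-org/your-aria-repo", trust_remote_code=True
)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},  # rendered as <fim_prefix><|img|><fim_suffix>
            {"type": "text", "text": "Describe this image."},
        ],
    },
]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)
print(prompt)
# <|im_start|>user
# <fim_prefix><|img|><fim_suffix>Describe this image.<|im_end|>
# <|im_start|>assistant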