davidna22 committed on
Commit 36a04e2
1 Parent(s): f306aea

Upload 7 files

Files changed (5)
  1. config.json +2 -2
  2. merges.txt +1 -1
  3. pytorch_model.bin +1 -1
  4. tokenizer.json +1 -0
  5. tokenizer_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "models/text-failed-classifier",
+  "_name_or_path": "../models/text-failed-classifier",
   "architectures": [
     "RobertaForSequenceClassification"
   ],
@@ -29,7 +29,7 @@
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.27.2",
+  "transformers_version": "4.28.1",
   "type_vocab_size": 1,
   "use_cache": true,
   "vocab_size": 50265
merges.txt CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2 - Trained by `huggingface/tokenizers`
+#version: 0.2
 Ġ t
 Ġ a
 h e
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3a45fd1a11ec52eb04abac4041618ee02cca426d2f53a9801054238fd0bb6da0
+oid sha256:5cafa7cb036e2062fd00d3a783c5f8d9f44c1cc20ec8308a76ea1f0a4e0c455b
 size 498660333
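
For pytorch_model.bin only the Git LFS pointer changes: the re-saved weights get a new sha256 oid while the byte size stays identical. To confirm that a locally downloaded copy matches this commit, a minimal standard-library sketch (the local file path is an assumption):

import hashlib

# Expected values copied from the updated LFS pointer above.
EXPECTED_OID = "5cafa7cb036e2062fd00d3a783c5f8d9f44c1cc20ec8308a76ea1f0a4e0c455b"
EXPECTED_SIZE = 498660333

def verify_lfs_blob(path="pytorch_model.bin"):
    # Hash the file in 1 MiB chunks so the checkpoint never has to fit in memory.
    sha = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

print(verify_lfs_blob())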
tokenizer.json CHANGED
@@ -82,6 +82,7 @@
   "continuing_subword_prefix": "",
   "end_of_word_suffix": "",
   "fuse_unk": false,
+  "byte_fallback": false,
   "vocab": {
     "<s>": 0,
     "<pad>": 1,
tokenizer_config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "add_prefix_space": false,
   "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
   "eos_token": "</s>",
   "errors": "replace",
@@ -8,7 +9,6 @@
   "model_max_length": 512,
   "pad_token": "<pad>",
   "sep_token": "</s>",
-  "special_tokens_map_file": null,
   "tokenizer_class": "RobertaTokenizer",
   "trim_offsets": true,
   "unk_token": "<unk>"