AptaArkana committed
Commit f2af8f6
1 Parent(s): e5c7aa0

Training in progress, step 500

config.json CHANGED
@@ -1,45 +1,37 @@
 {
-  "_name_or_path": "hanifnoerr/Fine-tuned-Indonesian-Sentiment-Classifier",
-  "_num_labels": 5,
+  "_name_or_path": "lxyuan/distilbert-base-multilingual-cased-sentiments-student",
+  "activation": "gelu",
   "architectures": [
-    "BertForSequenceClassification"
+    "DistilBertForSequenceClassification"
   ],
-  "attention_probs_dropout_prob": 0.1,
-  "classifier_dropout": null,
-  "directionality": "bidi",
-  "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
   "id2label": {
     "0": "negative",
     "1": "netral",
     "2": "positive"
   },
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
   "label2id": {
     "negative": 0,
     "netral": 1,
     "positive": 2
   },
-  "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
   "max_split_size_mb": 100,
-  "model_type": "bert",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 12,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
   "output_past": true,
   "pad_token_id": 0,
-  "pooler_fc_size": 768,
-  "pooler_num_attention_heads": 12,
-  "pooler_num_fc_layers": 3,
-  "pooler_size_per_head": 128,
-  "pooler_type": "first_token_transform",
-  "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
   "torch_dtype": "float32",
   "transformers_version": "4.37.2",
-  "type_vocab_size": 2,
-  "use_cache": true,
-  "vocab_size": 50000
+  "vocab_size": 119547
 }
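The config swap above re-bases the classifier from the Indonesian BERT checkpoint onto the multilingual DistilBERT student model, keeping the same three labels ("netral" is the Indonesian spelling of "neutral"). A minimal sketch of how such a config comes about with transformers; the base model id is taken from the diff, the output directory is illustrative:

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Base checkpoint named in the new config; label maps mirror the
# id2label/label2id blocks above.
base = "lxyuan/distilbert-base-multilingual-cased-sentiments-student"
id2label = {0: "negative", 1: "netral", 2: "positive"}

model = AutoModelForSequenceClassification.from_pretrained(
    base,
    num_labels=3,
    id2label=id2label,
    label2id={v: k for k, v in id2label.items()},
)
tokenizer = AutoTokenizer.from_pretrained(base)

# Saving writes a config.json matching the "+" side of this diff.
model.save_pretrained("checkpoint-500")  # hypothetical output dir
tokenizer.save_pretrained("checkpoint-500")
```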
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e28a903c11a002ea3e79ad40713937266ce341a03940344baa02db0776c8e866
-size 497798148
+oid sha256:c3a7ee76cb83474d5a82243fca04cc6560ae7e50d28df6c6b06a9d390dc1c79f
+size 541320452
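The weight file grows by about 43.5 MB even though DistilBERT has half the layers of the old BERT model, because the 119,547-token multilingual embedding matrix dwarfs the previous 50,000-token Indonesian one. A back-of-the-envelope check using only the config values in this diff and standard DistilBERT shapes (the small remainder is the safetensors header):

```python
# Parameter count for DistilBERT-base with the multilingual vocab;
# float32 means 4 bytes per weight.
vocab, dim, hidden_dim, n_layers, max_pos, labels = 119547, 768, 3072, 6, 512, 3

embeddings = (vocab + max_pos) * dim + 2 * dim        # token + position + LayerNorm
per_layer = (
    4 * (dim * dim + dim)               # q/k/v/out projections
    + (dim * hidden_dim + hidden_dim)   # FFN up
    + (hidden_dim * dim + dim)          # FFN down
    + 2 * 2 * dim                       # two LayerNorms
)
head = (dim * dim + dim) + (dim * labels + labels)    # pre_classifier + 3-way head

params = embeddings + n_layers * per_layer + head
print(params)      # ~135.3M parameters
print(params * 4)  # ~541,307,916 bytes; the actual file adds a small header
```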
runs/Feb22_05-56-09_c8d13364ede2/events.out.tfevents.1708581370.c8d13364ede2.1075.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:629451d95e089d7c1976a95448d13c1939441ce9f56ba8aa7b86562785bcee1d
+size 4653
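The added event file is the TensorBoard log the Trainer writes for this run. A minimal sketch of inspecting it locally, assuming the tensorboard package is installed (the exact scalar tag names depend on the Trainer version):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at the run directory from this commit.
ea = EventAccumulator("runs/Feb22_05-56-09_c8d13364ede2")
ea.Reload()

# List what was logged (the Trainer records scalars such as the loss).
scalar_tags = ea.Tags()["scalars"]
print(scalar_tags)
for event in ea.Scalars(scalar_tags[0]):
    print(event.step, event.value)
```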
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -8,7 +8,7 @@
     "single_word": false,
     "special": true
   },
-  "1": {
+  "100": {
     "content": "[UNK]",
     "lstrip": false,
     "normalized": false,
@@ -16,7 +16,7 @@
     "single_word": false,
     "special": true
   },
-  "2": {
+  "101": {
     "content": "[CLS]",
     "lstrip": false,
     "normalized": false,
@@ -24,7 +24,7 @@
     "single_word": false,
     "special": true
   },
-  "3": {
+  "102": {
     "content": "[SEP]",
     "lstrip": false,
     "normalized": false,
@@ -32,7 +32,7 @@
     "single_word": false,
     "special": true
   },
-  "4": {
+  "103": {
     "content": "[MASK]",
     "lstrip": false,
     "normalized": false,
@@ -44,15 +44,21 @@
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
-  "do_lower_case": true,
+  "do_lower_case": false,
   "mask_token": "[MASK]",
   "max_length": 512,
-  "model_max_length": 1000000000000000019884624838656,
+  "model_max_length": 512,
   "never_split": null,
+  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "sep_token": "[SEP]",
+  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
-  "tokenizer_class": "BertTokenizer",
+  "tokenizer_class": "DistilBertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "[UNK]"
 }
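Three tokenizer changes matter here: the special tokens move from vocabulary slots 1-4 to the multilingual BERT slots 100-103, casing is now preserved (do_lower_case: false), and model_max_length becomes a real 512 limit instead of 1000000000000000019884624838656, which is transformers' VERY_LARGE_INTEGER sentinel (int(1e30)) written when no limit is configured. A quick sanity check, assuming the base checkpoint from the config diff:

```python
from transformers import AutoTokenizer

# Base checkpoint id from the config diff; the repo itself would
# behave the same once this commit is pushed.
tok = AutoTokenizer.from_pretrained(
    "lxyuan/distilbert-base-multilingual-cased-sentiments-student"
)

# Multilingual BERT vocabularies place the special tokens at 100-103,
# matching the remapped IDs in this diff.
print(tok.unk_token_id, tok.cls_token_id, tok.sep_token_id, tok.mask_token_id)
# expected: 100 101 102 103

# model_max_length is now a real limit instead of the old sentinel.
print(tok.model_max_length)  # 512
```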
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f13d40545425ad0dcbdeec10e83a7c0ddca63fbf4c94e88e68694fd7b9c435d1
+oid sha256:11bf066db97bce61d10cca02c8a5961d0917904ef5b22283d3f686b48d3816f6
 size 4664
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff