HachiML committed
Commit 361f64b
1 parent: 751205c

End of training

.gitattributes CHANGED
@@ -1,35 +1,4 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ modeling_bit_llama.py filter=lfs diff=lfs merge=lfs -text
+ model.safetensors filter=lfs diff=lfs merge=lfs -text
+ tokenizer.model filter=lfs diff=lfs merge=lfs -text
+ training_args.bin filter=lfs diff=lfs merge=lfs -text
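The patterns above are Git LFS tracking rules: matching files are stored in the repo as small pointer files, with the real blobs kept in LFS. This commit swaps the Hub's default pattern list for the four artifacts the repo actually ships. As a minimal sketch (assuming the repo id is HachiML/myBit-Llama2-jp-127M-test-12, inferred from the model name in README.md below), one of these LFS-tracked files can be resolved with huggingface_hub:

```python
# Minimal sketch: fetch an LFS-tracked artifact; the Hub resolves the
# pointer to the underlying blob transparently.
# Assumption: repo id inferred from the model name in README.md.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="HachiML/myBit-Llama2-jp-127M-test-12",
    filename="model.safetensors",  # one of the four patterns tracked above
)
print(path)  # local cache path to the resolved file
```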
 
README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: myBit-Llama2-jp-127M-test-12
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # myBit-Llama2-jp-127M-test-12
+
+ This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 6.3033
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.00024
+ - train_batch_size: 96
+ - eval_batch_size: 96
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: polynomial
+ - lr_scheduler_warmup_steps: 50
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 10.0642 | 0.07 | 20 | 9.4022 |
+ | 9.0973 | 0.15 | 40 | 8.6900 |
+ | 8.3576 | 0.22 | 60 | 7.9690 |
+ | 7.7499 | 0.29 | 80 | 7.4678 |
+ | 7.3343 | 0.36 | 100 | 7.1266 |
+ | 7.0503 | 0.44 | 120 | 6.9132 |
+ | 6.9035 | 0.51 | 140 | 6.8141 |
+ | 6.8471 | 0.58 | 160 | 6.7611 |
+ | 6.8217 | 0.65 | 180 | 6.7305 |
+ | 6.7783 | 0.73 | 200 | 6.6345 |
+ | 6.6382 | 0.8 | 220 | 6.4734 |
+ | 6.5006 | 0.87 | 240 | 6.3855 |
+ | 6.4282 | 0.95 | 260 | 6.3033 |
+
+
+ ### Framework versions
+
+ - Transformers 4.38.2
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
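The hyperparameters in the model card above map one-to-one onto `transformers.TrainingArguments`. A hedged sketch of how this run could be reconstructed; `model`, `train_ds`, and `eval_ds` are hypothetical placeholders (not objects from this repo), and the 20-step eval/logging cadence is inferred from the results table:

```python
# Sketch only: the README's hyperparameters expressed as TrainingArguments.
# `model`, `train_ds`, and `eval_ds` are hypothetical placeholders.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="myBit-Llama2-jp-127M-test-12",
    learning_rate=2.4e-4,            # 0.00024
    per_device_train_batch_size=96,
    per_device_eval_batch_size=96,
    seed=42,
    adam_beta1=0.9,                  # Adam betas=(0.9, 0.999), eps=1e-08
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="polynomial",
    warmup_steps=50,
    num_train_epochs=1,
    evaluation_strategy="steps",     # assumed from the 20-step eval table
    eval_steps=20,
    logging_steps=20,
)

trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
trainer.train()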
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.38.2"
+ }
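This file pins the token ids that `generate()` falls back to when none are passed explicitly (`bos_token_id=1`, `eos_token_id=2`). A sketch of loading and sampling, assuming the repo id from the README and that config.json maps the `bit_llama` architecture to the custom code via `auto_map` (hence `trust_remote_code=True`):

```python
# Sketch: load tokenizer + model and generate; generation_config.json
# supplies bos_token_id=1 / eos_token_id=2 automatically.
# Assumptions: repo id inferred from the README; config.json wires up
# the custom "bit_llama" code for trust_remote_code loading.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "HachiML/myBit-Llama2-jp-127M-test-12"
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)

inputs = tok("こんにちは", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=32)
print(tok.decode(out[0], skip_special_tokens=True))
```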
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c187ca85d77b1bb27088e82769e70d0ea6a53e16eebccd97138ac3932835a875
+ oid sha256:1442b89267cb6b4fc19a27bd580c9fe45f12e07bb48e7f7747edaa8da6cf47a9
  size 511344824
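Only the pointer's `oid` (the SHA-256 of the weight blob) changes; the byte size is identical, so the end-of-training weights replaced the previous checkpoint like-for-like. A small sketch for verifying a downloaded file against the pointer:

```python
# Sketch: verify a downloaded model.safetensors against the sha256
# recorded as `oid` in the LFS pointer above.
import hashlib

def file_sha256(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

expected = "1442b89267cb6b4fc19a27bd580c9fe45f12e07bb48e7f7747edaa8da6cf47a9"
assert file_sha256("model.safetensors") == expected
```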
modeling_bit_llama.py ADDED
@@ -0,0 +1,77 @@
+ from typing import Optional
+ from transformers.models.llama.modeling_llama import (
+     LlamaConfig,
+     LlamaModel,
+     LlamaForCausalLM,
+     LlamaAttention,
+     LlamaFlashAttention2,
+     LlamaSdpaAttention,
+     LlamaMLP,
+     LlamaDecoderLayer,
+ )
+ from mybitnet.bitnet import BitLinear
+ from torch import nn
+
+ class BitLlamaConfig(LlamaConfig):
+     """LlamaConfig extended with a `bits` width consumed by BitLinear layers."""
+     model_type = "bit_llama"
+
+     def __init__(self, bits=8, **kwargs):
+         super().__init__(**kwargs)
+         self.bits = bits
+
+ class BitLlamaMLP(LlamaMLP):
+     """LlamaMLP with all three projections swapped for BitLinear."""
+     def __init__(self, config):
+         super().__init__(config)
+         self.gate_proj = BitLinear(self.hidden_size, self.intermediate_size, bias=False, bits=config.bits, flg_before_linear=True)
+         self.up_proj = BitLinear(self.hidden_size, self.intermediate_size, bias=False, bits=config.bits, flg_before_linear=True)
+         self.down_proj = BitLinear(self.intermediate_size, self.hidden_size, bias=False, bits=config.bits, flg_before_linear=False)
+
+ class BitLlamaAttention(LlamaAttention):
+     """Eager attention with q/k/v/o projections swapped for BitLinear."""
+     def __init__(self, config: BitLlamaConfig, layer_idx: Optional[int] = None):
+         super().__init__(config, layer_idx)  # forward layer_idx; the base class needs it for KV-cache indexing
+         self.q_proj = BitLinear(self.hidden_size, self.num_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
+         self.k_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
+         self.v_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
+         self.o_proj = BitLinear(self.hidden_size, self.hidden_size, bias=False, bits=config.bits, flg_before_linear=True)
+
+ class BitLlamaFlashAttention2(LlamaFlashAttention2):
+     def __init__(self, config: BitLlamaConfig, layer_idx: Optional[int] = None):
+         super().__init__(config, layer_idx)
+         self.q_proj = BitLinear(self.hidden_size, self.num_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
+         self.k_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
+         self.v_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
+         self.o_proj = BitLinear(self.hidden_size, self.hidden_size, bias=False, bits=config.bits, flg_before_linear=True)
+
+ class BitLlamaSdpaAttention(LlamaSdpaAttention):
+     def __init__(self, config: BitLlamaConfig, layer_idx: Optional[int] = None):
+         super().__init__(config, layer_idx)
+         self.q_proj = BitLinear(self.hidden_size, self.num_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
+         self.k_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
+         self.v_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
+         self.o_proj = BitLinear(self.hidden_size, self.hidden_size, bias=False, bits=config.bits, flg_before_linear=True)
+
+ # Mirrors LLAMA_ATTENTION_CLASSES, keyed by config._attn_implementation.
+ BITLLAMA_ATTENTION_CLASSES = {
+     "eager": BitLlamaAttention,
+     "flash_attention_2": BitLlamaFlashAttention2,
+     "sdpa": BitLlamaSdpaAttention,
+ }
+
+ class BitLlamaDecoderLayer(LlamaDecoderLayer):
+     def __init__(self, config: BitLlamaConfig, layer_idx: int):
+         super().__init__(config, layer_idx)
+         self.self_attn = BITLLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
+         self.mlp = BitLlamaMLP(config)
+
+ class BitLlamaModel(LlamaModel):
+     def __init__(self, config: BitLlamaConfig):
+         super().__init__(config)
+         # Rebuild the decoder stack with BitLinear-based layers.
+         self.layers = nn.ModuleList(
+             [BitLlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+         )
+
+ class BitLlamaForCausalLM(LlamaForCausalLM):
+     def __init__(self, config: BitLlamaConfig):
+         super().__init__(config)
+         self.model = BitLlamaModel(config)
+         self.lm_head = BitLinear(config.hidden_size, config.vocab_size, bias=False, bits=config.bits, flg_before_linear=True)
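Because `BitLlamaConfig` declares `model_type = "bit_llama"`, these classes can be wired into the Auto* loaders when working from a local checkout; on the Hub the same wiring is normally done through an `auto_map` entry in config.json. A sketch, assuming `mybitnet` is installed and the file above is importable:

```python
# Sketch: register the custom classes so the Auto* loaders resolve
# model_type "bit_llama" from a local checkout of this repo.
# Assumes mybitnet is installed and modeling_bit_llama.py is on sys.path.
from transformers import AutoConfig, AutoModelForCausalLM
from modeling_bit_llama import BitLlamaConfig, BitLlamaForCausalLM

AutoConfig.register("bit_llama", BitLlamaConfig)
AutoModelForCausalLM.register(BitLlamaConfig, BitLlamaForCausalLM)

model = AutoModelForCausalLM.from_pretrained("./myBit-Llama2-jp-127M-test-12")
```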