lole25 committed on
Commit 814158f
1 Parent(s): 5fcca16

Model save
README.md CHANGED
@@ -2,14 +2,12 @@
  license: mit
  library_name: peft
  tags:
- - alignment-handbook
- - generated_from_trainer
  - trl
  - sft
  - generated_from_trainer
  base_model: microsoft/phi-2
  datasets:
- - HuggingFaceH4/ultrachat_200k
+ - generator
  model-index:
  - name: phi-2-sft-lora-chat
    results: []
@@ -20,9 +18,9 @@ should probably proofread and complete it, then remove this comment. -->
 
  # phi-2-sft-lora-chat
 
- This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on the HuggingFaceH4/ultrachat_200k dataset.
+ This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on the generator dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.2272
+ - Loss: 1.2234
 
  ## Model description
 
@@ -53,13 +51,13 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_ratio: 0.1
- - num_epochs: 0.02
+ - num_epochs: 1.0
 
  ### Training results
 
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:-----:|:----:|:---------------:|
- | 1.2627 | 0.02 | 111 | 1.2272 |
+ | 1.2513 | 1.0 | 5520 | 1.2234 |
 
 
  ### Framework versions
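The updated card describes a PEFT (LoRA) adapter trained on top of microsoft/phi-2. A minimal loading sketch follows; the adapter repo id is an assumption based on the committer name and the model name in the card, and is not confirmed by this commit:

```python
# Minimal sketch, not an official usage snippet. The adapter repo id below is an
# assumption (committer namespace + model name from the card).
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "microsoft/phi-2"                # base_model from the card
adapter_id = "lole25/phi-2-sft-lora-chat"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)  # loads adapter_model.safetensors

prompt = "Explain LoRA fine-tuning in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```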
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:caf01da28ac318fbad4608a3f01733fe18dfc50affbbad55a912855b5fecadab
+ oid sha256:8f0671cfaa4dfb46dcf468d952b75d2f83e8b139a51bae719f4ae4df95aae3c2
  size 335579632
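The entry above is a Git LFS pointer: the oid is the SHA-256 of the actual weight file and size is its byte count. A small sketch for checking a downloaded copy against the new pointer (the local path is an assumption):

```python
# Minimal sketch: verify a local adapter_model.safetensors against the sha256 oid
# and size recorded in the LFS pointer from this commit.
import hashlib
import os

EXPECTED_OID = "8f0671cfaa4dfb46dcf468d952b75d2f83e8b139a51bae719f4ae4df95aae3c2"
EXPECTED_SIZE = 335579632
path = "adapter_model.safetensors"  # assumed local path

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED_OID, "checksum mismatch"
print("pointer matches:", h.hexdigest())
```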
all_results.json CHANGED
@@ -1,13 +1,13 @@
  {
- "epoch": 0.02,
- "eval_loss": 1.2271578311920166,
- "eval_runtime": 1802.0046,
+ "epoch": 1.0,
+ "eval_loss": 1.2233707904815674,
+ "eval_runtime": 1795.3542,
  "eval_samples": 23110,
- "eval_samples_per_second": 16.275,
- "eval_steps_per_second": 1.356,
- "train_loss": 1.2791897563246992,
- "train_runtime": 2444.0756,
+ "eval_samples_per_second": 16.335,
+ "eval_steps_per_second": 1.361,
+ "train_loss": 1.269419441966043,
+ "train_runtime": 32984.8618,
  "train_samples": 207865,
- "train_samples_per_second": 2.168,
- "train_steps_per_second": 0.045
+ "train_samples_per_second": 8.033,
+ "train_steps_per_second": 0.167
  }
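all_results.json merges the training and evaluation summaries that also appear in the separate files below. A quick sketch for reading it back, assuming the file sits in the repo root:

```python
# Minimal sketch: print the merged run summary from all_results.json.
import json

with open("all_results.json") as f:
    results = json.load(f)

for key in sorted(results):
    print(f"{key}: {results[key]}")
```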
eval_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 0.02,
- "eval_loss": 1.2271578311920166,
- "eval_runtime": 1802.0046,
+ "epoch": 1.0,
+ "eval_loss": 1.2233707904815674,
+ "eval_runtime": 1795.3542,
  "eval_samples": 23110,
- "eval_samples_per_second": 16.275,
- "eval_steps_per_second": 1.356
+ "eval_samples_per_second": 16.335,
+ "eval_steps_per_second": 1.361
  }
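Since eval_loss is the mean token-level cross-entropy (in nats) reported by the Trainer, a common derived figure is perplexity, exp(eval_loss) ≈ 3.40 for the new value of 1.2234. A small sketch:

```python
# Minimal sketch: perplexity implied by eval_loss, assuming it is mean cross-entropy in nats.
import json
import math

with open("eval_results.json") as f:
    eval_loss = json.load(f)["eval_loss"]

print(f"eval_loss = {eval_loss:.4f}, perplexity ≈ {math.exp(eval_loss):.2f}")  # ≈ 3.40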
runs/May19_00-28-02_gpu4-119-5/events.out.tfevents.1716042533.gpu4-119-5.2683548.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:902ad1ce605ebe107d529854b3626c62387a7e556bd920837f10f4801d867579
- size 174268
+ oid sha256:5d2120054a4336ce958b841f9bc8edf8205d067fe70fe40323b378486b5ecd28
+ size 175521
runs/May19_00-28-02_gpu4-119-5/events.out.tfevents.1716077313.gpu4-119-5.2683548.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db4c20ea532d3ca1d938b820504b2400e44f7e0955ac2bbfbd6e738322c33009
+ size 359
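The tfevents files under runs/ are TensorBoard logs written during training. One way to inspect them programmatically is TensorBoard's EventAccumulator; the scalar tag names below ("train/loss", "eval/loss") are the usual Trainer tags and are an assumption here:

```python
# Minimal sketch: read scalar curves from the TensorBoard logs in runs/.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

run_dir = "runs/May19_00-28-02_gpu4-119-5"   # directory from this commit
acc = EventAccumulator(run_dir)
acc.Reload()

print("available scalar tags:", acc.Tags()["scalars"])
for tag in ("train/loss", "eval/loss"):      # assumed Trainer tag names
    if tag in acc.Tags()["scalars"]:
        for event in acc.Scalars(tag)[:5]:   # first few logged points
            print(tag, event.step, round(event.value, 4))
```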
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 0.02,
- "train_loss": 1.2791897563246992,
- "train_runtime": 2444.0756,
+ "epoch": 1.0,
+ "train_loss": 1.269419441966043,
+ "train_runtime": 32984.8618,
  "train_samples": 207865,
- "train_samples_per_second": 2.168,
- "train_steps_per_second": 0.045
+ "train_samples_per_second": 8.033,
+ "train_steps_per_second": 0.167
  }
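The totals above (1.0 epochs, roughly 33,000 seconds of training) come from a TRL SFT run whose scheduler, warmup, and epoch settings are listed in the README diff. Below is a rough reconstruction sketch, not the author's actual script: the learning rate, batch sizes, LoRA rank, and sequence length are placeholders, the dataset and split come from the old card (HuggingFaceH4/ultrachat_200k), and the text-flattening helper is hypothetical; packing via SFTTrainer is likely why the new card reports a generic "generator" dataset.

```python
# Rough sketch only; values not shown in the diff are placeholders.
from datasets import load_dataset
from peft import LoraConfig
from transformers import TrainingArguments
from trl import SFTTrainer

# Old card listed this dataset; "train_sft" is its SFT split name.
dataset = load_dataset("HuggingFaceH4/ultrachat_200k", split="train_sft")

def to_text(example):
    # Hypothetical flattening of ultrachat's "messages" column into plain text;
    # the real run presumably applied a proper chat template instead.
    return "\n".join(f"{m['role']}: {m['content']}" for m in example["messages"])

dataset = dataset.map(lambda ex: {"text": to_text(ex)})

peft_config = LoraConfig(              # rank/alpha/dropout are placeholders
    r=64, lora_alpha=16, lora_dropout=0.1, task_type="CAUSAL_LM"
)

args = TrainingArguments(
    output_dir="phi-2-sft-lora-chat",
    num_train_epochs=1.0,              # from the updated card
    lr_scheduler_type="cosine",        # from the card
    warmup_ratio=0.1,                  # from the card
    learning_rate=2e-4,                # placeholder; not shown in the diff
    per_device_train_batch_size=4,     # placeholder
    gradient_accumulation_steps=4,     # placeholder
    # Adam betas=(0.9, 0.999) and eps=1e-8 are the defaults, matching the card.
)

trainer = SFTTrainer(
    model="microsoft/phi-2",           # base_model from the card
    args=args,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",
    packing=True,                      # packing likely explains the "generator" dataset name
    max_seq_length=2048,               # placeholder
)
trainer.train()
```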
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff