Commit c3c71a0 by Guytron (parent: 95cbac3)

Upload RCDS1.yaml


YAML config file for the RosettaCode DataSet.
Auto-created using Sonnet 3.5; untested as of July 17, 2024.

Files changed (1): RCDS1.yaml  +69 -0
RCDS1.yaml ADDED
@@ -0,0 +1,69 @@
+ base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ model_type: LlamaForCausalLM
+ tokenizer_type: LlamaTokenizer
+ is_llama_derived_model: true
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ datasets:
+   - path: Guytron/RosettaCodeDataSet1
+     type: json # Assuming the dataset is in JSON format
+ dataset_prepared_path:
+ val_set_size: 0.05
+ output_dir: ./qlora-out-rosetta
+
+ adapter: qlora # LoRA adapters trained on a 4-bit quantized base model
+ lora_model_dir:
+
+ sequence_len: 2048 # Increased to accommodate potentially longer code samples
+ sample_packing: true # Pack multiple short samples into each sequence
+ pad_to_sequence_len: true
+
+ lora_r: 32
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_modules:
+ lora_target_linear: true # Apply LoRA to all linear layers
+ lora_fan_in_fan_out:
+
+ wandb_project: rosetta-code-training
+ wandb_entity:
+ wandb_watch:
+ wandb_name: rosetta-code-run-1
+ wandb_log_model:
+
+ mlflow_experiment_name: rosetta-code-experiment
+
+ gradient_accumulation_steps: 4 # Increased to handle larger dataset
+ micro_batch_size: 2 # Adjusted based on your GPU memory
+ num_epochs: 3
+ max_steps: -1 # Set to -1 to train on the entire dataset
+ optimizer: paged_adamw_32bit
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+
+ train_on_inputs: false # Compute loss only on completions, not prompts
+ group_by_length: true # Changed to true for efficiency with varying length samples
+ bf16: false
+ fp16: true
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 10
+ xformers_attention:
+ flash_attention: false
+
+ warmup_steps: 100 # Increased for a larger dataset
+ evals_per_epoch: 1
+ saves_per_epoch: 1
+ debug:
+ deepspeed:
+ weight_decay: 0.01 # Added some weight decay for regularization
+ fsdp:
+ fsdp_config:
+ special_tokens:
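
The keys above match Axolotl's config schema, so (untested, as the commit message notes) a run could presumably be launched with Axolotl's standard CLI:

    accelerate launch -m axolotl.cli.train RCDS1.yaml

One caveat before running it: in Axolotl, the `type:` field under `datasets:` normally names a prompt format (e.g. `alpaca` or `completion`), while the file format is given separately by `ds_type: json`, so the `type: json # Assuming the dataset is in JSON format` line may need adjusting.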