transmogrifier committed
Commit 2fb1f92
1 Parent(s): 8366f69

Upload cfg.yaml

Files changed (1)
cfg.yaml +115 -0
cfg.yaml ADDED
@@ -0,0 +1,115 @@
+architecture:
+  backbone_dtype: int8
+  force_embedding_gradients: true
+  gradient_checkpointing: true
+  intermediate_dropout: 0.0
+  pretrained: true
+  pretrained_weights: /media/akshay/datasets/largeModels/llms/h2o/h2o-llmstudio/output/user/economic-ferret.1/checkpoint.pth
+augmentation:
+  random_parent_probability: 0.5
+  skip_parent_probability: 0.0
+  token_mask_probability: 0.0
+dataset:
+  add_eos_token_to_answer: true
+  add_eos_token_to_prompt: true
+  add_eos_token_to_system: true
+  answer_column: response
+  chatbot_author: H2O.ai
+  chatbot_name: h2oGPT
+  data_sample: 1.0
+  data_sample_choice:
+  - Train
+  - Validation
+  limit_chained_samples: false
+  mask_prompt_labels: true
+  parent_id_column: None
+  personalize: false
+  prompt_column:
+  - instruction
+  system_column: None
+  text_answer_separator: <|answer|>
+  text_prompt_start: <|prompt|>
+  text_system_start: <|system|>
+  train_dataframe: /media/akshay/datasets/largeModels/llms/h2o/h2o-llmstudio/data/user/PR-singleQA-July13/singleQA.csv
+  validation_dataframe: None
+  validation_size: 0.01
+  validation_strategy: automatic
+environment:
+  compile_model: false
+  find_unused_parameters: false
+  gpus:
+  - '0'
+  huggingface_branch: main
+  mixed_precision: true
+  number_of_workers: 8
+  seed: -1
+  trust_remote_code: true
+  use_fsdp: false
+experiment_name: economic-ferret.1.1
+llm_backbone: tiiuae/falcon-7b
+logging:
+  logger: None
+  neptune_project: ''
+  number_of_texts: 10
+output_directory: /media/akshay/datasets/largeModels/llms/h2o/h2o-llmstudio/output/user/economic-ferret.1.1/
+prediction:
+  batch_size_inference: 0
+  do_sample: false
+  max_length_inference: 256
+  metric: BLEU
+  metric_gpt_model: gpt-3.5-turbo-0301
+  min_length_inference: 2
+  num_beams: 1
+  num_history: 4
+  repetition_penalty: 1.2
+  stop_tokens: ''
+  temperature: 0.3
+  top_k: 0
+  top_p: 1.0
+problem_type: text_causal_language_modeling
+tokenizer:
+  add_prefix_space: false
+  add_prompt_answer_tokens: false
+  max_length: 1760
+  max_length_answer: 512
+  max_length_prompt: 1024
+  padding_quantile: 1.0
+  use_fast: true
+training:
+  adaptive_kl_control: true
+  advantages_gamma: 0.99
+  advantages_lambda: 0.95
+  batch_size: 2
+  differential_learning_rate: 1.0e-05
+  differential_learning_rate_layers: []
+  drop_last_batch: true
+  epochs: 3
+  evaluate_before_training: true
+  evaluation_epochs: 1.0
+  grad_accumulation: 4
+  gradient_clip: 0.9
+  initial_kl_coefficient: 0.2
+  kl_horizon: 10000
+  kl_target: 6.0
+  learning_rate: 0.0001
+  lora: true
+  lora_alpha: 16
+  lora_dropout: 0.05
+  lora_r: 8
+  lora_target_modules: query_key_value, dense, dense_h_to_4h, dense_4h_to_h
+  loss_function: TokenAveragedCrossEntropy
+  offload_reward_model: false
+  optimizer: AdamW
+  ppo_batch_size: 1
+  ppo_clip_policy: 0.2
+  ppo_clip_value: 0.2
+  ppo_epochs: 4
+  ppo_generate_temperature: 1.0
+  reward_model: OpenAssistant/reward-model-deberta-v3-large-v2
+  save_best_checkpoint: false
+  scaling_factor_value_loss: 0.1
+  schedule: Cosine
+  train_validation_data: true
+  use_rlhf: false
+  warmup_epochs: 0.0
+  weight_decay: 0.0
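
The file is plain YAML, so it can be inspected directly. A minimal sketch, assuming PyYAML is installed and cfg.yaml has been downloaded to the working directory; the derived number just multiplies training.batch_size by training.grad_accumulation (2 x 4 = 8 samples per optimizer step per GPU):

import yaml

with open("cfg.yaml") as f:
    cfg = yaml.safe_load(f)

train = cfg["training"]
# One optimizer step covers batch_size * grad_accumulation samples per GPU:
# 2 * 4 = 8 with the values above.
print(cfg["llm_backbone"])                               # tiiuae/falcon-7b
print(train["batch_size"] * train["grad_accumulation"])  # 8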
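The training section enables LoRA (lora: true) with r=8, alpha=16, and dropout 0.05 over the four Falcon linear layers named in lora_target_modules. H2O LLM Studio wires this up internally; the following is only a hedged approximation of the same setup using the Hugging Face peft library (the from_pretrained flags mirror architecture.backbone_dtype: int8 and environment.trust_remote_code: true; 8-bit loading additionally assumes bitsandbytes is installed):

from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b",      # llm_backbone
    trust_remote_code=True,  # environment.trust_remote_code
    load_in_8bit=True,       # architecture.backbone_dtype: int8
)

lora_config = LoraConfig(
    r=8,                # lora_r
    lora_alpha=16,      # lora_alpha
    lora_dropout=0.05,  # lora_dropout
    target_modules=[    # lora_target_modules, comma-separated in the YAML
        "query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h",
    ],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the adapter weights train

Note that use_rlhf is false, so the PPO and reward-model keys (initial_kl_coefficient, kl_*, ppo_*, reward_model, and related entries) are carried along in the config but unused in this run.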
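On the inference side, do_sample: false together with num_beams: 1 means greedy decoding, so the temperature, top_k, and top_p values in the prediction block have no effect. A sketch of equivalent generation with transformers, continuing from the peft-wrapped model in the previous snippet; the prompt template is an assumption pieced together from text_prompt_start, text_answer_separator, and add_eos_token_to_prompt (Falcon's EOS token is <|endoftext|>):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b")
prompt = "<|prompt|>How does gradient checkpointing work?<|endoftext|><|answer|>"
inputs = tokenizer(prompt, return_tensors="pt")

outputs = model.generate(
    **inputs,
    min_new_tokens=2,        # min_length_inference
    max_new_tokens=256,      # max_length_inference
    do_sample=False,         # greedy: sampling knobs are ignored
    num_beams=1,             # num_beams
    repetition_penalty=1.2,  # repetition_penalty
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))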