Create README.md
README.md
ADDED
import transformers

# Batch-size schedule: a global batch of 128 is reached by accumulating
# gradients over 32 micro-batches of 4 (128 // 4 = 32).
BATCH_SIZE = 128
MICRO_BATCH_SIZE = 4
GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE
LEARNING_RATE = 3e-4
TRAIN_STEPS = 300
OUTPUT_DIR = "llygmaV2-13B"

# LoRA hyperparameters: rank-8 adapters on the attention query and value
# projections, scaled by alpha=16, with no adapter dropout.
LORA_R = 8
LORA_ALPHA = 16
LORA_DROPOUT = 0
LORA_TARGET_MODULES = [
    "q_proj",
    "v_proj",
]

training_arguments = transformers.TrainingArguments(
    output_dir=OUTPUT_DIR,
    per_device_train_batch_size=MICRO_BATCH_SIZE,
    gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
    warmup_steps=100,
    max_steps=TRAIN_STEPS,
    learning_rate=LEARNING_RATE,
    fp16=True,                   # mixed-precision training
    logging_steps=10,
    optim="adamw_torch",
    evaluation_strategy="steps",
    save_strategy="steps",
    eval_steps=50,
    save_steps=50,
    save_total_limit=3,          # keep only the three most recent checkpoints
    load_best_model_at_end=True,
)
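
For context, a minimal sketch of how these hyperparameters are typically wired together with the peft library and a Trainer. The base-model path and the dataset variables below are placeholders for illustration, not taken from this repo.

import transformers
from peft import LoraConfig, get_peft_model

# Placeholder base model; the actual checkpoint used is not stated here.
model = transformers.AutoModelForCausalLM.from_pretrained("path/to/base-13b-model")

lora_config = LoraConfig(
    r=LORA_R,
    lora_alpha=LORA_ALPHA,
    target_modules=LORA_TARGET_MODULES,
    lora_dropout=LORA_DROPOUT,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)  # attach LoRA adapters to q_proj/v_proj

trainer = transformers.Trainer(
    model=model,
    args=training_arguments,
    train_dataset=train_data,  # placeholder datasets, not defined in this README
    eval_dataset=val_data,
)
trainer.train()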