diff --git a/prompt-a/twochar/README.md b/prompt-a/twochar/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/prompt-a/twochar/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/prompt-a/twochar/adapter_config.json b/prompt-a/twochar/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e15b2402bceaab5939e21de3eb1d9a6ad2980bbd
--- /dev/null
+++ b/prompt-a/twochar/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/prompt-a/twochar/adapter_model.bin b/prompt-a/twochar/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c96c513e5c63ebcef49c385fe35a3a5331b09141
--- /dev/null
+++ b/prompt-a/twochar/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3686a6854a70ed7424cb6434eaf2d2d208980a99034b0d2075128de53b3297a2
+size 125374989
diff --git a/prompt-a/twochar/added_tokens.json b/prompt-a/twochar/added_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..e41416ddd79948246ea2dced6800ea3cd531c424
--- /dev/null
+++ b/prompt-a/twochar/added_tokens.json
@@ -0,0 +1,3 @@
+{
+ "[PAD]": 32000
+}
diff --git a/prompt-a/twochar/checkpoint-100/README.md b/prompt-a/twochar/checkpoint-100/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/prompt-a/twochar/checkpoint-100/adapter_config.json b/prompt-a/twochar/checkpoint-100/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e15b2402bceaab5939e21de3eb1d9a6ad2980bbd
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/prompt-a/twochar/checkpoint-100/adapter_model.bin b/prompt-a/twochar/checkpoint-100/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..49151612708bf6f92b80633f78936fc9edd40e8f
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42f483a8475f9c9615a60f6b89a84b8920c7a840df45ceabed0a3effadffc0e0
+size 125374989
diff --git a/prompt-a/twochar/checkpoint-100/adapter_model/README.md b/prompt-a/twochar/checkpoint-100/adapter_model/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/adapter_model/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/prompt-a/twochar/checkpoint-100/adapter_model/adapter_config.json b/prompt-a/twochar/checkpoint-100/adapter_model/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e15b2402bceaab5939e21de3eb1d9a6ad2980bbd
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/adapter_model/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/prompt-a/twochar/checkpoint-100/adapter_model/adapter_model.bin b/prompt-a/twochar/checkpoint-100/adapter_model/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..49151612708bf6f92b80633f78936fc9edd40e8f
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/adapter_model/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42f483a8475f9c9615a60f6b89a84b8920c7a840df45ceabed0a3effadffc0e0
+size 125374989
diff --git a/prompt-a/twochar/checkpoint-100/optimizer.pt b/prompt-a/twochar/checkpoint-100/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1c5d9592c072640fc7938faa7a0d46ccca79bd4f
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20d2e41d040cc30ba3448d2ef29ddff84499af170be7c5a74b063a9a719180ad
+size 250681597
diff --git a/prompt-a/twochar/checkpoint-100/rng_state_0.pth b/prompt-a/twochar/checkpoint-100/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ad9269ebf081152ae5eb5ab3ee652a4345bb7e78
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f4ac5c72b3a8d37d6f5f4e72f59a981105e0739be3b7567447236a07702ef98
+size 21687
diff --git a/prompt-a/twochar/checkpoint-100/rng_state_1.pth b/prompt-a/twochar/checkpoint-100/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2c25cd2c6610ab0b0ee6505778c9f72c0e778929
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a9e7711a12aff1cb01365952914663e874439fc80ca6174d7f1ed92dd1fd03
+size 21687
diff --git a/prompt-a/twochar/checkpoint-100/rng_state_2.pth b/prompt-a/twochar/checkpoint-100/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..37cc9e20eb59043d72cead19ce5dc45258cc2636
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e71e7c3877097d7a81872b94110dacc2bb9179e8811b27061b892815e8d9aa49
+size 21687
diff --git a/prompt-a/twochar/checkpoint-100/rng_state_3.pth b/prompt-a/twochar/checkpoint-100/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6192a3dd961ecf758de8e77528077bd1e7222f9e
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:101962960d17908eccf4d72170394b7c436c70f3676372f4a98047973af8b216
+size 21687
diff --git a/prompt-a/twochar/checkpoint-100/rng_state_4.pth b/prompt-a/twochar/checkpoint-100/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..78a4c2caca665f09924545c8d9ec7b9ee70af367
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3419ae90d4ffed73c9b8458beb5e60e1e5a986b8daf2d55fd3289f908f2ef5c9
+size 21687
diff --git a/prompt-a/twochar/checkpoint-100/rng_state_5.pth b/prompt-a/twochar/checkpoint-100/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8f7687a1d24f89e63e2a757a192c6bf93d76ba97
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:064ab007d23c2b923f5dce2324188582329cc05945abb2006e33f8fbb94f27e0
+size 21687
diff --git a/prompt-a/twochar/checkpoint-100/rng_state_6.pth b/prompt-a/twochar/checkpoint-100/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..063ab9b97b6e3c347ec39d2f85b182f0ccc5f3c6
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a231d16fbc0b8c521a5b6329b1042c9237bbaaf2cfddf194cc2c875474f5d4a9
+size 21687
diff --git a/prompt-a/twochar/checkpoint-100/rng_state_7.pth b/prompt-a/twochar/checkpoint-100/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..26015a28868cef53a04382443b335393ceb0c07f
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab804b08c8130b997416c537045d5e5f947a41304867b421f39443b0b80131fe
+size 21687
diff --git a/prompt-a/twochar/checkpoint-100/scheduler.pt b/prompt-a/twochar/checkpoint-100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4d832c94e2de61db5216a37122c6fd1112328631
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b066d96ed0b5d7ac788c3a38bca16c4cd9e92b1f62ff59691c8a4867593fd5cf
+size 627
diff --git a/prompt-a/twochar/checkpoint-100/trainer_state.json b/prompt-a/twochar/checkpoint-100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..6df5ae3c271883edf90d3a885b5442ba1369420b
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/trainer_state.json
@@ -0,0 +1,659 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0,
+ "eval_steps": 20,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 6.5e-06,
+ "loss": 1.9562,
+ "step": 1
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.3e-05,
+ "loss": 2.0273,
+ "step": 2
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.9499999999999996e-05,
+ "loss": 2.2516,
+ "step": 3
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.6e-05,
+ "loss": 2.1955,
+ "step": 4
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.1752,
+ "step": 5
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.899999999999999e-05,
+ "loss": 2.2144,
+ "step": 6
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.5499999999999995e-05,
+ "loss": 2.0829,
+ "step": 7
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 5.2e-05,
+ "loss": 2.0503,
+ "step": 8
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 5.85e-05,
+ "loss": 2.1558,
+ "step": 9
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 6.5e-05,
+ "loss": 2.0924,
+ "step": 10
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 6.499555741225524e-05,
+ "loss": 2.1238,
+ "step": 11
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 6.498223086358009e-05,
+ "loss": 2.129,
+ "step": 12
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 6.49600239973199e-05,
+ "loss": 2.0559,
+ "step": 13
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 6.492894288461016e-05,
+ "loss": 2.0659,
+ "step": 14
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 6.488899602271677e-05,
+ "loss": 2.1563,
+ "step": 15
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 6.484019433271287e-05,
+ "loss": 2.1706,
+ "step": 16
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 6.478255115649324e-05,
+ "loss": 2.0736,
+ "step": 17
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 6.471608225312668e-05,
+ "loss": 2.1295,
+ "step": 18
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 6.464080579454772e-05,
+ "loss": 2.0618,
+ "step": 19
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 6.455674236058847e-05,
+ "loss": 2.12,
+ "step": 20
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 2.1955978870391846,
+ "eval_runtime": 2.9627,
+ "eval_samples_per_second": 3.375,
+ "eval_steps_per_second": 0.675,
+ "step": 20
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 6.446391493335242e-05,
+ "loss": 2.1576,
+ "step": 21
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 6.43623488909313e-05,
+ "loss": 2.0446,
+ "step": 22
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 6.425207200046697e-05,
+ "loss": 2.0033,
+ "step": 23
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 6.413311441056019e-05,
+ "loss": 2.1382,
+ "step": 24
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 6.400550864302823e-05,
+ "loss": 2.0989,
+ "step": 25
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 6.386928958401383e-05,
+ "loss": 2.1276,
+ "step": 26
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 6.372449447444757e-05,
+ "loss": 2.023,
+ "step": 27
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 6.357116289986662e-05,
+ "loss": 2.0597,
+ "step": 28
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 6.340933677959248e-05,
+ "loss": 2.0529,
+ "step": 29
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 6.323906035527062e-05,
+ "loss": 1.9873,
+ "step": 30
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 6.306038017877523e-05,
+ "loss": 2.0613,
+ "step": 31
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 6.287334509948247e-05,
+ "loss": 2.0765,
+ "step": 32
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 6.267800625091547e-05,
+ "loss": 2.0809,
+ "step": 33
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 6.2474417036765e-05,
+ "loss": 2.072,
+ "step": 34
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 6.226263311628936e-05,
+ "loss": 2.0069,
+ "step": 35
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 6.204271238909777e-05,
+ "loss": 2.0784,
+ "step": 36
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 6.181471497932117e-05,
+ "loss": 2.1763,
+ "step": 37
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 6.157870321917481e-05,
+ "loss": 2.0402,
+ "step": 38
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 6.133474163191738e-05,
+ "loss": 2.1082,
+ "step": 39
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.108289691421088e-05,
+ "loss": 2.1381,
+ "step": 40
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 2.172107219696045,
+ "eval_runtime": 2.97,
+ "eval_samples_per_second": 3.367,
+ "eval_steps_per_second": 0.673,
+ "step": 40
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.08232379178865e-05,
+ "loss": 2.0538,
+ "step": 41
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.055583563112115e-05,
+ "loss": 2.222,
+ "step": 42
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.0280763159030015e-05,
+ "loss": 1.996,
+ "step": 43
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 5.999809570368039e-05,
+ "loss": 1.9798,
+ "step": 44
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 5.970791054353217e-05,
+ "loss": 2.1825,
+ "step": 45
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 5.941028701231065e-05,
+ "loss": 2.1671,
+ "step": 46
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 5.91053064773175e-05,
+ "loss": 2.0992,
+ "step": 47
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 5.8793052317185785e-05,
+ "loss": 2.0544,
+ "step": 48
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 5.8473609899085046e-05,
+ "loss": 2.1054,
+ "step": 49
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.814706655538279e-05,
+ "loss": 2.1214,
+ "step": 50
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 5.781351155976865e-05,
+ "loss": 1.9961,
+ "step": 51
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 5.7473036102847915e-05,
+ "loss": 2.0233,
+ "step": 52
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 5.712573326721088e-05,
+ "loss": 2.0756,
+ "step": 53
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 5.6771698001984996e-05,
+ "loss": 2.0138,
+ "step": 54
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 5.641102709687678e-05,
+ "loss": 1.9964,
+ "step": 55
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 5.604381915571039e-05,
+ "loss": 2.0915,
+ "step": 56
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 5.5670174569470405e-05,
+ "loss": 1.9954,
+ "step": 57
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 5.529019548885587e-05,
+ "loss": 2.1395,
+ "step": 58
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 5.490398579635338e-05,
+ "loss": 2.0388,
+ "step": 59
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 5.451165107783658e-05,
+ "loss": 2.0979,
+ "step": 60
+ },
+ {
+ "epoch": 0.6,
+ "eval_loss": 2.166550874710083,
+ "eval_runtime": 2.9777,
+ "eval_samples_per_second": 3.358,
+ "eval_steps_per_second": 0.672,
+ "step": 60
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 5.411329859370006e-05,
+ "loss": 2.0614,
+ "step": 61
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 5.370903724953548e-05,
+ "loss": 2.061,
+ "step": 62
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 5.329897756635779e-05,
+ "loss": 2.0868,
+ "step": 63
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 5.2883231650389946e-05,
+ "loss": 2.0659,
+ "step": 64
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 5.2461913162414206e-05,
+ "loss": 2.1325,
+ "step": 65
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 5.203513728669831e-05,
+ "loss": 2.0955,
+ "step": 66
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 5.160302069950537e-05,
+ "loss": 1.9692,
+ "step": 67
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 5.116568153719566e-05,
+ "loss": 2.0858,
+ "step": 68
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 5.072323936392936e-05,
+ "loss": 2.0884,
+ "step": 69
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 5.027581513897888e-05,
+ "loss": 1.952,
+ "step": 70
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.9823531183659796e-05,
+ "loss": 2.0223,
+ "step": 71
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.936651114788949e-05,
+ "loss": 2.0098,
+ "step": 72
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.890487997638242e-05,
+ "loss": 1.9965,
+ "step": 73
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.843876387449151e-05,
+ "loss": 2.1097,
+ "step": 74
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.796829027370489e-05,
+ "loss": 2.1051,
+ "step": 75
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.749358779680735e-05,
+ "loss": 2.1672,
+ "step": 76
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.701478622271621e-05,
+ "loss": 2.0641,
+ "step": 77
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.653201645100101e-05,
+ "loss": 2.1899,
+ "step": 78
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.6045410466096934e-05,
+ "loss": 2.007,
+ "step": 79
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.555510130122151e-05,
+ "loss": 2.1355,
+ "step": 80
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 2.160327434539795,
+ "eval_runtime": 2.9723,
+ "eval_samples_per_second": 3.364,
+ "eval_steps_per_second": 0.673,
+ "step": 80
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.5061223002004694e-05,
+ "loss": 2.0046,
+ "step": 81
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.456391058984217e-05,
+ "loss": 2.0902,
+ "step": 82
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.406330002498178e-05,
+ "loss": 2.1259,
+ "step": 83
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.3559528169353394e-05,
+ "loss": 1.9241,
+ "step": 84
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.3052732749152215e-05,
+ "loss": 2.1537,
+ "step": 85
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.254305231718579e-05,
+ "loss": 1.9482,
+ "step": 86
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.2030626214995104e-05,
+ "loss": 2.0181,
+ "step": 87
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.151559453475994e-05,
+ "loss": 1.9839,
+ "step": 88
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 4.0998098080999136e-05,
+ "loss": 2.0735,
+ "step": 89
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 4.047827833207597e-05,
+ "loss": 2.1691,
+ "step": 90
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.995627740151942e-05,
+ "loss": 2.2761,
+ "step": 91
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 3.9432237999171774e-05,
+ "loss": 2.1657,
+ "step": 92
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.89063033921731e-05,
+ "loss": 1.9501,
+ "step": 93
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.8378617365793576e-05,
+ "loss": 2.0869,
+ "step": 94
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.784932418412385e-05,
+ "loss": 2.0112,
+ "step": 95
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.731856855063486e-05,
+ "loss": 1.9212,
+ "step": 96
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.678649556861724e-05,
+ "loss": 2.0395,
+ "step": 97
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.625325070151155e-05,
+ "loss": 2.0179,
+ "step": 98
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.571897973314004e-05,
+ "loss": 1.9713,
+ "step": 99
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.51838287278508e-05,
+ "loss": 2.0756,
+ "step": 100
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 2.1577982902526855,
+ "eval_runtime": 2.9721,
+ "eval_samples_per_second": 3.365,
+ "eval_steps_per_second": 0.673,
+ "step": 100
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 200,
+ "num_train_epochs": 2,
+ "save_steps": 50,
+ "total_flos": 8.361608023638016e+16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/prompt-a/twochar/checkpoint-100/training_args.bin b/prompt-a/twochar/checkpoint-100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d58072f53f91b75b135f9498c19f7bcc5b637ba4
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9212c1ed1ebf30dd523c0f488bee8e1daf66e67eaaf76d60029260d57f02f88
+size 4411
diff --git a/prompt-a/twochar/checkpoint-150/README.md b/prompt-a/twochar/checkpoint-150/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/prompt-a/twochar/checkpoint-150/adapter_config.json b/prompt-a/twochar/checkpoint-150/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e15b2402bceaab5939e21de3eb1d9a6ad2980bbd
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/prompt-a/twochar/checkpoint-150/adapter_model.bin b/prompt-a/twochar/checkpoint-150/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8a8efd52487c5a6d03c2acbd87646121a3bc8993
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef2308923705e0db072fa56c1601591d86bd453af7564a28a77b84dd1d79e8c4
+size 125374989
diff --git a/prompt-a/twochar/checkpoint-150/adapter_model/README.md b/prompt-a/twochar/checkpoint-150/adapter_model/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/adapter_model/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/prompt-a/twochar/checkpoint-150/adapter_model/adapter_config.json b/prompt-a/twochar/checkpoint-150/adapter_model/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e15b2402bceaab5939e21de3eb1d9a6ad2980bbd
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/adapter_model/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/prompt-a/twochar/checkpoint-150/adapter_model/adapter_model.bin b/prompt-a/twochar/checkpoint-150/adapter_model/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8a8efd52487c5a6d03c2acbd87646121a3bc8993
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/adapter_model/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef2308923705e0db072fa56c1601591d86bd453af7564a28a77b84dd1d79e8c4
+size 125374989
diff --git a/prompt-a/twochar/checkpoint-150/optimizer.pt b/prompt-a/twochar/checkpoint-150/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bee1c631d6e08e9d85a2b0ea3676903b5231b59f
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:602ac917af5d4639ca282f520dde56e364bf3f0db429735790992bf32ae66b1d
+size 250681597
diff --git a/prompt-a/twochar/checkpoint-150/rng_state_0.pth b/prompt-a/twochar/checkpoint-150/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ff97c3eec33e99bff98915ef9653fc357a69a596
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89eadc3c165657fd8751b85f9ff44d7c95cc36af0669fbff4a4ffe5e0b07f5fc
+size 21687
diff --git a/prompt-a/twochar/checkpoint-150/rng_state_1.pth b/prompt-a/twochar/checkpoint-150/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f3906a31dca8232805cf57d482767b629b9edddc
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3745b667b2a6ca4a67753c6c91a46d6482d85c16a72b363be05fc59f7c442c14
+size 21687
diff --git a/prompt-a/twochar/checkpoint-150/rng_state_2.pth b/prompt-a/twochar/checkpoint-150/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..664d72b29c93bfd26f452d7040f3a706a2e36bfc
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:730ab402a78e7c0ffa824fcd5f320e6ac0fca6070a3982c84cac9eb233003d70
+size 21687
diff --git a/prompt-a/twochar/checkpoint-150/rng_state_3.pth b/prompt-a/twochar/checkpoint-150/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6edda0af9099e51fc25e909f0025f262c63fc11d
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6ee13cc98624b772f8f4143d6dccdc5cdaff35faa989b04753b6d57e692fa4c
+size 21687
diff --git a/prompt-a/twochar/checkpoint-150/rng_state_4.pth b/prompt-a/twochar/checkpoint-150/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4bd710f04455a879d85408cba5732797e8ab834b
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c7a46010650ea94fbb45a5cb40a5d7a9cc9a325ad0dcbbe32f26384cfedc3c9
+size 21687
diff --git a/prompt-a/twochar/checkpoint-150/rng_state_5.pth b/prompt-a/twochar/checkpoint-150/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cf7573cbe764b170b82cd4319cf5c6b97f96f4c9
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:448b2bc0d10409f11bb41fd0844a35fce476a3ffaa24967c8dfa611c6f55547d
+size 21687
diff --git a/prompt-a/twochar/checkpoint-150/rng_state_6.pth b/prompt-a/twochar/checkpoint-150/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..135530c5623bf8d5ed5a1849641cde2073f0b2b1
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2d6cd239229ac1dd36dd86fc7bed222b898f45d69914c4668c12378f553f4be
+size 21687
diff --git a/prompt-a/twochar/checkpoint-150/rng_state_7.pth b/prompt-a/twochar/checkpoint-150/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9fd61e90c186fc30d0299e9d2d5b6850cc3cc878
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1f9bd38de615aa1185c1abc88320d66fe7f03cb4281a215b715cb5b31205b2e
+size 21687
diff --git a/prompt-a/twochar/checkpoint-150/scheduler.pt b/prompt-a/twochar/checkpoint-150/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f96a2443802cb8f1ec8dd6c856c6222f3dc2e216
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31da033238aea895275b0ce325aba2b20a03c1ced0b3d0cc962cb556ea08a8a9
+size 627
diff --git a/prompt-a/twochar/checkpoint-150/trainer_state.json b/prompt-a/twochar/checkpoint-150/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..546bb8ad60dcf9792311556f8d8d895fd1021f81
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/trainer_state.json
@@ -0,0 +1,975 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.5,
+ "eval_steps": 20,
+ "global_step": 150,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 6.5e-06,
+ "loss": 1.9562,
+ "step": 1
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.3e-05,
+ "loss": 2.0273,
+ "step": 2
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.9499999999999996e-05,
+ "loss": 2.2516,
+ "step": 3
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.6e-05,
+ "loss": 2.1955,
+ "step": 4
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.1752,
+ "step": 5
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.899999999999999e-05,
+ "loss": 2.2144,
+ "step": 6
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.5499999999999995e-05,
+ "loss": 2.0829,
+ "step": 7
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 5.2e-05,
+ "loss": 2.0503,
+ "step": 8
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 5.85e-05,
+ "loss": 2.1558,
+ "step": 9
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 6.5e-05,
+ "loss": 2.0924,
+ "step": 10
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 6.499555741225524e-05,
+ "loss": 2.1238,
+ "step": 11
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 6.498223086358009e-05,
+ "loss": 2.129,
+ "step": 12
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 6.49600239973199e-05,
+ "loss": 2.0559,
+ "step": 13
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 6.492894288461016e-05,
+ "loss": 2.0659,
+ "step": 14
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 6.488899602271677e-05,
+ "loss": 2.1563,
+ "step": 15
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 6.484019433271287e-05,
+ "loss": 2.1706,
+ "step": 16
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 6.478255115649324e-05,
+ "loss": 2.0736,
+ "step": 17
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 6.471608225312668e-05,
+ "loss": 2.1295,
+ "step": 18
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 6.464080579454772e-05,
+ "loss": 2.0618,
+ "step": 19
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 6.455674236058847e-05,
+ "loss": 2.12,
+ "step": 20
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 2.1955978870391846,
+ "eval_runtime": 2.9627,
+ "eval_samples_per_second": 3.375,
+ "eval_steps_per_second": 0.675,
+ "step": 20
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 6.446391493335242e-05,
+ "loss": 2.1576,
+ "step": 21
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 6.43623488909313e-05,
+ "loss": 2.0446,
+ "step": 22
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 6.425207200046697e-05,
+ "loss": 2.0033,
+ "step": 23
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 6.413311441056019e-05,
+ "loss": 2.1382,
+ "step": 24
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 6.400550864302823e-05,
+ "loss": 2.0989,
+ "step": 25
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 6.386928958401383e-05,
+ "loss": 2.1276,
+ "step": 26
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 6.372449447444757e-05,
+ "loss": 2.023,
+ "step": 27
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 6.357116289986662e-05,
+ "loss": 2.0597,
+ "step": 28
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 6.340933677959248e-05,
+ "loss": 2.0529,
+ "step": 29
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 6.323906035527062e-05,
+ "loss": 1.9873,
+ "step": 30
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 6.306038017877523e-05,
+ "loss": 2.0613,
+ "step": 31
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 6.287334509948247e-05,
+ "loss": 2.0765,
+ "step": 32
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 6.267800625091547e-05,
+ "loss": 2.0809,
+ "step": 33
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 6.2474417036765e-05,
+ "loss": 2.072,
+ "step": 34
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 6.226263311628936e-05,
+ "loss": 2.0069,
+ "step": 35
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 6.204271238909777e-05,
+ "loss": 2.0784,
+ "step": 36
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 6.181471497932117e-05,
+ "loss": 2.1763,
+ "step": 37
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 6.157870321917481e-05,
+ "loss": 2.0402,
+ "step": 38
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 6.133474163191738e-05,
+ "loss": 2.1082,
+ "step": 39
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.108289691421088e-05,
+ "loss": 2.1381,
+ "step": 40
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 2.172107219696045,
+ "eval_runtime": 2.97,
+ "eval_samples_per_second": 3.367,
+ "eval_steps_per_second": 0.673,
+ "step": 40
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.08232379178865e-05,
+ "loss": 2.0538,
+ "step": 41
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.055583563112115e-05,
+ "loss": 2.222,
+ "step": 42
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.0280763159030015e-05,
+ "loss": 1.996,
+ "step": 43
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 5.999809570368039e-05,
+ "loss": 1.9798,
+ "step": 44
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 5.970791054353217e-05,
+ "loss": 2.1825,
+ "step": 45
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 5.941028701231065e-05,
+ "loss": 2.1671,
+ "step": 46
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 5.91053064773175e-05,
+ "loss": 2.0992,
+ "step": 47
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 5.8793052317185785e-05,
+ "loss": 2.0544,
+ "step": 48
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 5.8473609899085046e-05,
+ "loss": 2.1054,
+ "step": 49
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.814706655538279e-05,
+ "loss": 2.1214,
+ "step": 50
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 5.781351155976865e-05,
+ "loss": 1.9961,
+ "step": 51
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 5.7473036102847915e-05,
+ "loss": 2.0233,
+ "step": 52
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 5.712573326721088e-05,
+ "loss": 2.0756,
+ "step": 53
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 5.6771698001984996e-05,
+ "loss": 2.0138,
+ "step": 54
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 5.641102709687678e-05,
+ "loss": 1.9964,
+ "step": 55
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 5.604381915571039e-05,
+ "loss": 2.0915,
+ "step": 56
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 5.5670174569470405e-05,
+ "loss": 1.9954,
+ "step": 57
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 5.529019548885587e-05,
+ "loss": 2.1395,
+ "step": 58
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 5.490398579635338e-05,
+ "loss": 2.0388,
+ "step": 59
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 5.451165107783658e-05,
+ "loss": 2.0979,
+ "step": 60
+ },
+ {
+ "epoch": 0.6,
+ "eval_loss": 2.166550874710083,
+ "eval_runtime": 2.9777,
+ "eval_samples_per_second": 3.358,
+ "eval_steps_per_second": 0.672,
+ "step": 60
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 5.411329859370006e-05,
+ "loss": 2.0614,
+ "step": 61
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 5.370903724953548e-05,
+ "loss": 2.061,
+ "step": 62
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 5.329897756635779e-05,
+ "loss": 2.0868,
+ "step": 63
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 5.2883231650389946e-05,
+ "loss": 2.0659,
+ "step": 64
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 5.2461913162414206e-05,
+ "loss": 2.1325,
+ "step": 65
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 5.203513728669831e-05,
+ "loss": 2.0955,
+ "step": 66
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 5.160302069950537e-05,
+ "loss": 1.9692,
+ "step": 67
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 5.116568153719566e-05,
+ "loss": 2.0858,
+ "step": 68
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 5.072323936392936e-05,
+ "loss": 2.0884,
+ "step": 69
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 5.027581513897888e-05,
+ "loss": 1.952,
+ "step": 70
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.9823531183659796e-05,
+ "loss": 2.0223,
+ "step": 71
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.936651114788949e-05,
+ "loss": 2.0098,
+ "step": 72
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.890487997638242e-05,
+ "loss": 1.9965,
+ "step": 73
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.843876387449151e-05,
+ "loss": 2.1097,
+ "step": 74
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.796829027370489e-05,
+ "loss": 2.1051,
+ "step": 75
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.749358779680735e-05,
+ "loss": 2.1672,
+ "step": 76
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.701478622271621e-05,
+ "loss": 2.0641,
+ "step": 77
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.653201645100101e-05,
+ "loss": 2.1899,
+ "step": 78
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.6045410466096934e-05,
+ "loss": 2.007,
+ "step": 79
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.555510130122151e-05,
+ "loss": 2.1355,
+ "step": 80
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 2.160327434539795,
+ "eval_runtime": 2.9723,
+ "eval_samples_per_second": 3.364,
+ "eval_steps_per_second": 0.673,
+ "step": 80
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.5061223002004694e-05,
+ "loss": 2.0046,
+ "step": 81
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.456391058984217e-05,
+ "loss": 2.0902,
+ "step": 82
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.406330002498178e-05,
+ "loss": 2.1259,
+ "step": 83
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.3559528169353394e-05,
+ "loss": 1.9241,
+ "step": 84
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.3052732749152215e-05,
+ "loss": 2.1537,
+ "step": 85
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.254305231718579e-05,
+ "loss": 1.9482,
+ "step": 86
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.2030626214995104e-05,
+ "loss": 2.0181,
+ "step": 87
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.151559453475994e-05,
+ "loss": 1.9839,
+ "step": 88
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 4.0998098080999136e-05,
+ "loss": 2.0735,
+ "step": 89
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 4.047827833207597e-05,
+ "loss": 2.1691,
+ "step": 90
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.995627740151942e-05,
+ "loss": 2.2761,
+ "step": 91
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 3.9432237999171774e-05,
+ "loss": 2.1657,
+ "step": 92
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.89063033921731e-05,
+ "loss": 1.9501,
+ "step": 93
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.8378617365793576e-05,
+ "loss": 2.0869,
+ "step": 94
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.784932418412385e-05,
+ "loss": 2.0112,
+ "step": 95
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.731856855063486e-05,
+ "loss": 1.9212,
+ "step": 96
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.678649556861724e-05,
+ "loss": 2.0395,
+ "step": 97
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.625325070151155e-05,
+ "loss": 2.0179,
+ "step": 98
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.571897973314004e-05,
+ "loss": 1.9713,
+ "step": 99
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.51838287278508e-05,
+ "loss": 2.0756,
+ "step": 100
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 2.1577982902526855,
+ "eval_runtime": 2.9721,
+ "eval_samples_per_second": 3.365,
+ "eval_steps_per_second": 0.673,
+ "step": 100
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.4647943990585227e-05,
+ "loss": 1.9634,
+ "step": 101
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.411147202687972e-05,
+ "loss": 2.1004,
+ "step": 102
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 3.357455950281251e-05,
+ "loss": 1.9162,
+ "step": 103
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.303735320490665e-05,
+ "loss": 1.9874,
+ "step": 104
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.0868,
+ "step": 105
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.1962646795093345e-05,
+ "loss": 1.8818,
+ "step": 106
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 3.142544049718748e-05,
+ "loss": 1.9699,
+ "step": 107
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.088852797312027e-05,
+ "loss": 1.994,
+ "step": 108
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.0352056009414768e-05,
+ "loss": 2.1169,
+ "step": 109
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 2.98161712721492e-05,
+ "loss": 1.997,
+ "step": 110
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 2.928102026685996e-05,
+ "loss": 2.1137,
+ "step": 111
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 2.874674929848845e-05,
+ "loss": 2.0281,
+ "step": 112
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 2.8213504431382755e-05,
+ "loss": 2.0649,
+ "step": 113
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 2.768143144936513e-05,
+ "loss": 1.9891,
+ "step": 114
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 2.715067581587614e-05,
+ "loss": 1.9841,
+ "step": 115
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 2.6621382634206436e-05,
+ "loss": 2.1146,
+ "step": 116
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 2.6093696607826892e-05,
+ "loss": 2.0479,
+ "step": 117
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 2.556776200082823e-05,
+ "loss": 2.1255,
+ "step": 118
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 2.5043722598480574e-05,
+ "loss": 2.0523,
+ "step": 119
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 2.4521721667924026e-05,
+ "loss": 2.1502,
+ "step": 120
+ },
+ {
+ "epoch": 1.2,
+ "eval_loss": 2.1567764282226562,
+ "eval_runtime": 2.9942,
+ "eval_samples_per_second": 3.34,
+ "eval_steps_per_second": 0.668,
+ "step": 120
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 2.4001901919000855e-05,
+ "loss": 2.0222,
+ "step": 121
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 2.3484405465240055e-05,
+ "loss": 2.003,
+ "step": 122
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 2.2969373785004897e-05,
+ "loss": 1.9935,
+ "step": 123
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 2.2456947682814208e-05,
+ "loss": 1.92,
+ "step": 124
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 2.1947267250847786e-05,
+ "loss": 2.0776,
+ "step": 125
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 2.1440471830646597e-05,
+ "loss": 2.0785,
+ "step": 126
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 2.093669997501821e-05,
+ "loss": 2.1597,
+ "step": 127
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 2.0436089410157818e-05,
+ "loss": 2.0279,
+ "step": 128
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 1.993877699799531e-05,
+ "loss": 2.107,
+ "step": 129
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 1.9444898698778497e-05,
+ "loss": 2.121,
+ "step": 130
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 1.8954589533903067e-05,
+ "loss": 2.1024,
+ "step": 131
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 1.8467983548998984e-05,
+ "loss": 2.1763,
+ "step": 132
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 1.798521377728379e-05,
+ "loss": 2.0075,
+ "step": 133
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 1.750641220319265e-05,
+ "loss": 2.0791,
+ "step": 134
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 1.703170972629511e-05,
+ "loss": 2.1985,
+ "step": 135
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 1.6561236125508495e-05,
+ "loss": 2.1254,
+ "step": 136
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 1.6095120023617585e-05,
+ "loss": 2.0442,
+ "step": 137
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 1.5633488852110516e-05,
+ "loss": 2.0451,
+ "step": 138
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 1.5176468816340209e-05,
+ "loss": 2.029,
+ "step": 139
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 1.472418486102113e-05,
+ "loss": 2.108,
+ "step": 140
+ },
+ {
+ "epoch": 1.4,
+ "eval_loss": 2.153236150741577,
+ "eval_runtime": 2.9714,
+ "eval_samples_per_second": 3.365,
+ "eval_steps_per_second": 0.673,
+ "step": 140
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 1.427676063607064e-05,
+ "loss": 2.0035,
+ "step": 141
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 1.3834318462804333e-05,
+ "loss": 2.0776,
+ "step": 142
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 1.3396979300494626e-05,
+ "loss": 1.9116,
+ "step": 143
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 1.2964862713301684e-05,
+ "loss": 2.0144,
+ "step": 144
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 1.2538086837585795e-05,
+ "loss": 2.1723,
+ "step": 145
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 1.2116768349610045e-05,
+ "loss": 2.0467,
+ "step": 146
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 1.1701022433642207e-05,
+ "loss": 1.9612,
+ "step": 147
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 1.1290962750464517e-05,
+ "loss": 2.0506,
+ "step": 148
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 1.0886701406299927e-05,
+ "loss": 1.9718,
+ "step": 149
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 1.048834892216342e-05,
+ "loss": 2.0387,
+ "step": 150
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 200,
+ "num_train_epochs": 2,
+ "save_steps": 50,
+ "total_flos": 1.251428724071465e+17,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/prompt-a/twochar/checkpoint-150/training_args.bin b/prompt-a/twochar/checkpoint-150/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d58072f53f91b75b135f9498c19f7bcc5b637ba4
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-150/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9212c1ed1ebf30dd523c0f488bee8e1daf66e67eaaf76d60029260d57f02f88
+size 4411
diff --git a/prompt-a/twochar/checkpoint-200/README.md b/prompt-a/twochar/checkpoint-200/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/prompt-a/twochar/checkpoint-200/adapter_config.json b/prompt-a/twochar/checkpoint-200/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e15b2402bceaab5939e21de3eb1d9a6ad2980bbd
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/prompt-a/twochar/checkpoint-200/adapter_model.bin b/prompt-a/twochar/checkpoint-200/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c96c513e5c63ebcef49c385fe35a3a5331b09141
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3686a6854a70ed7424cb6434eaf2d2d208980a99034b0d2075128de53b3297a2
+size 125374989
diff --git a/prompt-a/twochar/checkpoint-200/adapter_model/README.md b/prompt-a/twochar/checkpoint-200/adapter_model/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/adapter_model/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/prompt-a/twochar/checkpoint-200/adapter_model/adapter_config.json b/prompt-a/twochar/checkpoint-200/adapter_model/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e15b2402bceaab5939e21de3eb1d9a6ad2980bbd
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/adapter_model/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/prompt-a/twochar/checkpoint-200/adapter_model/adapter_model.bin b/prompt-a/twochar/checkpoint-200/adapter_model/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c96c513e5c63ebcef49c385fe35a3a5331b09141
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/adapter_model/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3686a6854a70ed7424cb6434eaf2d2d208980a99034b0d2075128de53b3297a2
+size 125374989
diff --git a/prompt-a/twochar/checkpoint-200/optimizer.pt b/prompt-a/twochar/checkpoint-200/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b91c9b3a87116ba9c5b51128f99efb6b48580d97
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85cc40f0d7f81e4b3136a22c9ee61293c7e2a46795fded91d441e599b632fe7b
+size 250681597
diff --git a/prompt-a/twochar/checkpoint-200/rng_state_0.pth b/prompt-a/twochar/checkpoint-200/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0b6fb0a77ef10e58c647935eb9df2dc9d1b48312
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb71352271decccd21025740fb7c4585441f353b8cbc20f09b524c2fe7cf027d
+size 21687
diff --git a/prompt-a/twochar/checkpoint-200/rng_state_1.pth b/prompt-a/twochar/checkpoint-200/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ecb36eaa5ead1a2f52fd2d4368407b6efddff9d7
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd2535ba0a236e9acf8608fce0f4038a69c182287799399a90be854fb25b7c34
+size 21687
diff --git a/prompt-a/twochar/checkpoint-200/rng_state_2.pth b/prompt-a/twochar/checkpoint-200/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9324b5ad6d9e39fa976dfa1f629f78807c7ae59a
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eba2c62c4b80d00ace4ba0acc8ebdad176eddf3cd3a5b1050f7df7c4700163a6
+size 21687
diff --git a/prompt-a/twochar/checkpoint-200/rng_state_3.pth b/prompt-a/twochar/checkpoint-200/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..569379c20034aed4344efb81bf440feccffe600e
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb2fcfaf47b0c610c3711de92cc4604e5d6b41802b02ec9b5c582f4f729af8f2
+size 21687
diff --git a/prompt-a/twochar/checkpoint-200/rng_state_4.pth b/prompt-a/twochar/checkpoint-200/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..16fe71ba3916826371f2252eb0fbc2e7ea31f0dc
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a3d6d3323887d5d66971f041a51221a123e538e2a897b4864257eadd27473e1
+size 21687
diff --git a/prompt-a/twochar/checkpoint-200/rng_state_5.pth b/prompt-a/twochar/checkpoint-200/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0da27d0b1c857e40f507b34e3e8e50c680935545
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1fb79dd2434c5cd9a6946f1fb7f838110f92f121c85810b9185b6e281a9b0b2
+size 21687
diff --git a/prompt-a/twochar/checkpoint-200/rng_state_6.pth b/prompt-a/twochar/checkpoint-200/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e9ff0b3f4b969a683a222a53eff3f29de16f65d8
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:458ac70464bdf3b43495369875d77f8490d72156a48c1118ce85ad717e807a7b
+size 21687
diff --git a/prompt-a/twochar/checkpoint-200/rng_state_7.pth b/prompt-a/twochar/checkpoint-200/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..62e7103eef41d1525ab0ca9c0819f4477a68338c
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:198429cb8ecd769bf49f460c1af624920d6cbc0343641d33de69f2dd82bca33f
+size 21687
diff --git a/prompt-a/twochar/checkpoint-200/scheduler.pt b/prompt-a/twochar/checkpoint-200/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..deea23988d426e71f3857d834f7bd88d8ce96a10
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6afe3ae57ca7ad0c3a1c16a19bcce025d43cf50175f27f254174a0652c2ad2b3
+size 627
diff --git a/prompt-a/twochar/checkpoint-200/trainer_state.json b/prompt-a/twochar/checkpoint-200/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..932b2f2e443e56057492d3f9e316190ab4f16b42
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/trainer_state.json
@@ -0,0 +1,1299 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.0,
+ "eval_steps": 20,
+ "global_step": 200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 6.5e-06,
+ "loss": 1.9562,
+ "step": 1
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.3e-05,
+ "loss": 2.0273,
+ "step": 2
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.9499999999999996e-05,
+ "loss": 2.2516,
+ "step": 3
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.6e-05,
+ "loss": 2.1955,
+ "step": 4
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.1752,
+ "step": 5
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.899999999999999e-05,
+ "loss": 2.2144,
+ "step": 6
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.5499999999999995e-05,
+ "loss": 2.0829,
+ "step": 7
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 5.2e-05,
+ "loss": 2.0503,
+ "step": 8
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 5.85e-05,
+ "loss": 2.1558,
+ "step": 9
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 6.5e-05,
+ "loss": 2.0924,
+ "step": 10
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 6.499555741225524e-05,
+ "loss": 2.1238,
+ "step": 11
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 6.498223086358009e-05,
+ "loss": 2.129,
+ "step": 12
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 6.49600239973199e-05,
+ "loss": 2.0559,
+ "step": 13
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 6.492894288461016e-05,
+ "loss": 2.0659,
+ "step": 14
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 6.488899602271677e-05,
+ "loss": 2.1563,
+ "step": 15
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 6.484019433271287e-05,
+ "loss": 2.1706,
+ "step": 16
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 6.478255115649324e-05,
+ "loss": 2.0736,
+ "step": 17
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 6.471608225312668e-05,
+ "loss": 2.1295,
+ "step": 18
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 6.464080579454772e-05,
+ "loss": 2.0618,
+ "step": 19
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 6.455674236058847e-05,
+ "loss": 2.12,
+ "step": 20
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 2.1955978870391846,
+ "eval_runtime": 2.9627,
+ "eval_samples_per_second": 3.375,
+ "eval_steps_per_second": 0.675,
+ "step": 20
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 6.446391493335242e-05,
+ "loss": 2.1576,
+ "step": 21
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 6.43623488909313e-05,
+ "loss": 2.0446,
+ "step": 22
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 6.425207200046697e-05,
+ "loss": 2.0033,
+ "step": 23
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 6.413311441056019e-05,
+ "loss": 2.1382,
+ "step": 24
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 6.400550864302823e-05,
+ "loss": 2.0989,
+ "step": 25
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 6.386928958401383e-05,
+ "loss": 2.1276,
+ "step": 26
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 6.372449447444757e-05,
+ "loss": 2.023,
+ "step": 27
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 6.357116289986662e-05,
+ "loss": 2.0597,
+ "step": 28
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 6.340933677959248e-05,
+ "loss": 2.0529,
+ "step": 29
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 6.323906035527062e-05,
+ "loss": 1.9873,
+ "step": 30
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 6.306038017877523e-05,
+ "loss": 2.0613,
+ "step": 31
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 6.287334509948247e-05,
+ "loss": 2.0765,
+ "step": 32
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 6.267800625091547e-05,
+ "loss": 2.0809,
+ "step": 33
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 6.2474417036765e-05,
+ "loss": 2.072,
+ "step": 34
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 6.226263311628936e-05,
+ "loss": 2.0069,
+ "step": 35
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 6.204271238909777e-05,
+ "loss": 2.0784,
+ "step": 36
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 6.181471497932117e-05,
+ "loss": 2.1763,
+ "step": 37
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 6.157870321917481e-05,
+ "loss": 2.0402,
+ "step": 38
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 6.133474163191738e-05,
+ "loss": 2.1082,
+ "step": 39
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.108289691421088e-05,
+ "loss": 2.1381,
+ "step": 40
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 2.172107219696045,
+ "eval_runtime": 2.97,
+ "eval_samples_per_second": 3.367,
+ "eval_steps_per_second": 0.673,
+ "step": 40
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.08232379178865e-05,
+ "loss": 2.0538,
+ "step": 41
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.055583563112115e-05,
+ "loss": 2.222,
+ "step": 42
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.0280763159030015e-05,
+ "loss": 1.996,
+ "step": 43
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 5.999809570368039e-05,
+ "loss": 1.9798,
+ "step": 44
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 5.970791054353217e-05,
+ "loss": 2.1825,
+ "step": 45
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 5.941028701231065e-05,
+ "loss": 2.1671,
+ "step": 46
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 5.91053064773175e-05,
+ "loss": 2.0992,
+ "step": 47
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 5.8793052317185785e-05,
+ "loss": 2.0544,
+ "step": 48
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 5.8473609899085046e-05,
+ "loss": 2.1054,
+ "step": 49
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.814706655538279e-05,
+ "loss": 2.1214,
+ "step": 50
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 5.781351155976865e-05,
+ "loss": 1.9961,
+ "step": 51
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 5.7473036102847915e-05,
+ "loss": 2.0233,
+ "step": 52
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 5.712573326721088e-05,
+ "loss": 2.0756,
+ "step": 53
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 5.6771698001984996e-05,
+ "loss": 2.0138,
+ "step": 54
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 5.641102709687678e-05,
+ "loss": 1.9964,
+ "step": 55
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 5.604381915571039e-05,
+ "loss": 2.0915,
+ "step": 56
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 5.5670174569470405e-05,
+ "loss": 1.9954,
+ "step": 57
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 5.529019548885587e-05,
+ "loss": 2.1395,
+ "step": 58
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 5.490398579635338e-05,
+ "loss": 2.0388,
+ "step": 59
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 5.451165107783658e-05,
+ "loss": 2.0979,
+ "step": 60
+ },
+ {
+ "epoch": 0.6,
+ "eval_loss": 2.166550874710083,
+ "eval_runtime": 2.9777,
+ "eval_samples_per_second": 3.358,
+ "eval_steps_per_second": 0.672,
+ "step": 60
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 5.411329859370006e-05,
+ "loss": 2.0614,
+ "step": 61
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 5.370903724953548e-05,
+ "loss": 2.061,
+ "step": 62
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 5.329897756635779e-05,
+ "loss": 2.0868,
+ "step": 63
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 5.2883231650389946e-05,
+ "loss": 2.0659,
+ "step": 64
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 5.2461913162414206e-05,
+ "loss": 2.1325,
+ "step": 65
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 5.203513728669831e-05,
+ "loss": 2.0955,
+ "step": 66
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 5.160302069950537e-05,
+ "loss": 1.9692,
+ "step": 67
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 5.116568153719566e-05,
+ "loss": 2.0858,
+ "step": 68
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 5.072323936392936e-05,
+ "loss": 2.0884,
+ "step": 69
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 5.027581513897888e-05,
+ "loss": 1.952,
+ "step": 70
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.9823531183659796e-05,
+ "loss": 2.0223,
+ "step": 71
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.936651114788949e-05,
+ "loss": 2.0098,
+ "step": 72
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.890487997638242e-05,
+ "loss": 1.9965,
+ "step": 73
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.843876387449151e-05,
+ "loss": 2.1097,
+ "step": 74
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.796829027370489e-05,
+ "loss": 2.1051,
+ "step": 75
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.749358779680735e-05,
+ "loss": 2.1672,
+ "step": 76
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.701478622271621e-05,
+ "loss": 2.0641,
+ "step": 77
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.653201645100101e-05,
+ "loss": 2.1899,
+ "step": 78
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.6045410466096934e-05,
+ "loss": 2.007,
+ "step": 79
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.555510130122151e-05,
+ "loss": 2.1355,
+ "step": 80
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 2.160327434539795,
+ "eval_runtime": 2.9723,
+ "eval_samples_per_second": 3.364,
+ "eval_steps_per_second": 0.673,
+ "step": 80
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.5061223002004694e-05,
+ "loss": 2.0046,
+ "step": 81
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.456391058984217e-05,
+ "loss": 2.0902,
+ "step": 82
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.406330002498178e-05,
+ "loss": 2.1259,
+ "step": 83
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.3559528169353394e-05,
+ "loss": 1.9241,
+ "step": 84
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.3052732749152215e-05,
+ "loss": 2.1537,
+ "step": 85
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.254305231718579e-05,
+ "loss": 1.9482,
+ "step": 86
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.2030626214995104e-05,
+ "loss": 2.0181,
+ "step": 87
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.151559453475994e-05,
+ "loss": 1.9839,
+ "step": 88
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 4.0998098080999136e-05,
+ "loss": 2.0735,
+ "step": 89
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 4.047827833207597e-05,
+ "loss": 2.1691,
+ "step": 90
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.995627740151942e-05,
+ "loss": 2.2761,
+ "step": 91
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 3.9432237999171774e-05,
+ "loss": 2.1657,
+ "step": 92
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.89063033921731e-05,
+ "loss": 1.9501,
+ "step": 93
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.8378617365793576e-05,
+ "loss": 2.0869,
+ "step": 94
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.784932418412385e-05,
+ "loss": 2.0112,
+ "step": 95
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.731856855063486e-05,
+ "loss": 1.9212,
+ "step": 96
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.678649556861724e-05,
+ "loss": 2.0395,
+ "step": 97
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.625325070151155e-05,
+ "loss": 2.0179,
+ "step": 98
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.571897973314004e-05,
+ "loss": 1.9713,
+ "step": 99
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.51838287278508e-05,
+ "loss": 2.0756,
+ "step": 100
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 2.1577982902526855,
+ "eval_runtime": 2.9721,
+ "eval_samples_per_second": 3.365,
+ "eval_steps_per_second": 0.673,
+ "step": 100
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.4647943990585227e-05,
+ "loss": 1.9634,
+ "step": 101
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.411147202687972e-05,
+ "loss": 2.1004,
+ "step": 102
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 3.357455950281251e-05,
+ "loss": 1.9162,
+ "step": 103
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.303735320490665e-05,
+ "loss": 1.9874,
+ "step": 104
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.0868,
+ "step": 105
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.1962646795093345e-05,
+ "loss": 1.8818,
+ "step": 106
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 3.142544049718748e-05,
+ "loss": 1.9699,
+ "step": 107
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.088852797312027e-05,
+ "loss": 1.994,
+ "step": 108
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.0352056009414768e-05,
+ "loss": 2.1169,
+ "step": 109
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 2.98161712721492e-05,
+ "loss": 1.997,
+ "step": 110
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 2.928102026685996e-05,
+ "loss": 2.1137,
+ "step": 111
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 2.874674929848845e-05,
+ "loss": 2.0281,
+ "step": 112
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 2.8213504431382755e-05,
+ "loss": 2.0649,
+ "step": 113
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 2.768143144936513e-05,
+ "loss": 1.9891,
+ "step": 114
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 2.715067581587614e-05,
+ "loss": 1.9841,
+ "step": 115
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 2.6621382634206436e-05,
+ "loss": 2.1146,
+ "step": 116
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 2.6093696607826892e-05,
+ "loss": 2.0479,
+ "step": 117
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 2.556776200082823e-05,
+ "loss": 2.1255,
+ "step": 118
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 2.5043722598480574e-05,
+ "loss": 2.0523,
+ "step": 119
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 2.4521721667924026e-05,
+ "loss": 2.1502,
+ "step": 120
+ },
+ {
+ "epoch": 1.2,
+ "eval_loss": 2.1567764282226562,
+ "eval_runtime": 2.9942,
+ "eval_samples_per_second": 3.34,
+ "eval_steps_per_second": 0.668,
+ "step": 120
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 2.4001901919000855e-05,
+ "loss": 2.0222,
+ "step": 121
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 2.3484405465240055e-05,
+ "loss": 2.003,
+ "step": 122
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 2.2969373785004897e-05,
+ "loss": 1.9935,
+ "step": 123
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 2.2456947682814208e-05,
+ "loss": 1.92,
+ "step": 124
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 2.1947267250847786e-05,
+ "loss": 2.0776,
+ "step": 125
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 2.1440471830646597e-05,
+ "loss": 2.0785,
+ "step": 126
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 2.093669997501821e-05,
+ "loss": 2.1597,
+ "step": 127
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 2.0436089410157818e-05,
+ "loss": 2.0279,
+ "step": 128
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 1.993877699799531e-05,
+ "loss": 2.107,
+ "step": 129
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 1.9444898698778497e-05,
+ "loss": 2.121,
+ "step": 130
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 1.8954589533903067e-05,
+ "loss": 2.1024,
+ "step": 131
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 1.8467983548998984e-05,
+ "loss": 2.1763,
+ "step": 132
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 1.798521377728379e-05,
+ "loss": 2.0075,
+ "step": 133
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 1.750641220319265e-05,
+ "loss": 2.0791,
+ "step": 134
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 1.703170972629511e-05,
+ "loss": 2.1985,
+ "step": 135
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 1.6561236125508495e-05,
+ "loss": 2.1254,
+ "step": 136
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 1.6095120023617585e-05,
+ "loss": 2.0442,
+ "step": 137
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 1.5633488852110516e-05,
+ "loss": 2.0451,
+ "step": 138
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 1.5176468816340209e-05,
+ "loss": 2.029,
+ "step": 139
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 1.472418486102113e-05,
+ "loss": 2.108,
+ "step": 140
+ },
+ {
+ "epoch": 1.4,
+ "eval_loss": 2.153236150741577,
+ "eval_runtime": 2.9714,
+ "eval_samples_per_second": 3.365,
+ "eval_steps_per_second": 0.673,
+ "step": 140
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 1.427676063607064e-05,
+ "loss": 2.0035,
+ "step": 141
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 1.3834318462804333e-05,
+ "loss": 2.0776,
+ "step": 142
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 1.3396979300494626e-05,
+ "loss": 1.9116,
+ "step": 143
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 1.2964862713301684e-05,
+ "loss": 2.0144,
+ "step": 144
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 1.2538086837585795e-05,
+ "loss": 2.1723,
+ "step": 145
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 1.2116768349610045e-05,
+ "loss": 2.0467,
+ "step": 146
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 1.1701022433642207e-05,
+ "loss": 1.9612,
+ "step": 147
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 1.1290962750464517e-05,
+ "loss": 2.0506,
+ "step": 148
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 1.0886701406299927e-05,
+ "loss": 1.9718,
+ "step": 149
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 1.048834892216342e-05,
+ "loss": 2.0387,
+ "step": 150
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 1.009601420364661e-05,
+ "loss": 1.9916,
+ "step": 151
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 9.70980451114412e-06,
+ "loss": 2.1382,
+ "step": 152
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 9.329825430529596e-06,
+ "loss": 1.8168,
+ "step": 153
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 8.956180844289608e-06,
+ "loss": 2.0349,
+ "step": 154
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 8.588972903123222e-06,
+ "loss": 2.1169,
+ "step": 155
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 8.228301998015008e-06,
+ "loss": 2.0028,
+ "step": 156
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 7.874266732789127e-06,
+ "loss": 1.9355,
+ "step": 157
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 7.526963897152088e-06,
+ "loss": 2.117,
+ "step": 158
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 7.1864884402313486e-06,
+ "loss": 1.8699,
+ "step": 159
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 6.8529334446172116e-06,
+ "loss": 2.0929,
+ "step": 160
+ },
+ {
+ "epoch": 1.6,
+ "eval_loss": 2.152268409729004,
+ "eval_runtime": 2.971,
+ "eval_samples_per_second": 3.366,
+ "eval_steps_per_second": 0.673,
+ "step": 160
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 6.526390100914946e-06,
+ "loss": 2.002,
+ "step": 161
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 6.206947682814211e-06,
+ "loss": 2.0595,
+ "step": 162
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 5.894693522682498e-06,
+ "loss": 2.1649,
+ "step": 163
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 5.589712987689348e-06,
+ "loss": 2.0812,
+ "step": 164
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 5.292089456467822e-06,
+ "loss": 2.0669,
+ "step": 165
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 5.001904296319595e-06,
+ "loss": 1.9335,
+ "step": 166
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 4.7192368409699775e-06,
+ "loss": 2.0828,
+ "step": 167
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 4.444164368878848e-06,
+ "loss": 2.0618,
+ "step": 168
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 4.1767620821134845e-06,
+ "loss": 1.9768,
+ "step": 169
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 3.917103085789108e-06,
+ "loss": 2.0585,
+ "step": 170
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 3.665258368082616e-06,
+ "loss": 2.2143,
+ "step": 171
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 3.4212967808251866e-06,
+ "loss": 1.9548,
+ "step": 172
+ },
+ {
+ "epoch": 1.73,
+ "learning_rate": 3.1852850206788356e-06,
+ "loss": 1.9494,
+ "step": 173
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 2.957287610902218e-06,
+ "loss": 1.9549,
+ "step": 174
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 2.7373668837106307e-06,
+ "loss": 2.0845,
+ "step": 175
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 2.5255829632349955e-06,
+ "loss": 2.0526,
+ "step": 176
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 2.3219937490845233e-06,
+ "loss": 2.0889,
+ "step": 177
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 2.1266549005175255e-06,
+ "loss": 2.0376,
+ "step": 178
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 1.9396198212247617e-06,
+ "loss": 1.9105,
+ "step": 179
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 1.7609396447293741e-06,
+ "loss": 1.8756,
+ "step": 180
+ },
+ {
+ "epoch": 1.8,
+ "eval_loss": 2.1519551277160645,
+ "eval_runtime": 2.9671,
+ "eval_samples_per_second": 3.37,
+ "eval_steps_per_second": 0.674,
+ "step": 180
+ },
+ {
+ "epoch": 1.81,
+ "learning_rate": 1.5906632204075102e-06,
+ "loss": 2.0699,
+ "step": 181
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 1.4288371001333764e-06,
+ "loss": 2.1844,
+ "step": 182
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 1.2755055255524352e-06,
+ "loss": 1.8558,
+ "step": 183
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.1307104159861715e-06,
+ "loss": 2.0213,
+ "step": 184
+ },
+ {
+ "epoch": 1.85,
+ "learning_rate": 9.944913569717627e-07,
+ "loss": 1.9312,
+ "step": 185
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 8.668855894398133e-07,
+ "loss": 1.9671,
+ "step": 186
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 7.479279995330236e-07,
+ "loss": 1.9802,
+ "step": 187
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 6.376511090686959e-07,
+ "loss": 2.0252,
+ "step": 188
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 5.36085066647573e-07,
+ "loss": 2.0668,
+ "step": 189
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 4.4325763941152447e-07,
+ "loss": 1.9095,
+ "step": 190
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 3.5919420545227397e-07,
+ "loss": 2.0075,
+ "step": 191
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 2.8391774687330616e-07,
+ "loss": 2.0847,
+ "step": 192
+ },
+ {
+ "epoch": 1.93,
+ "learning_rate": 2.1744884350676035e-07,
+ "loss": 1.9515,
+ "step": 193
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.5980566728712895e-07,
+ "loss": 2.0127,
+ "step": 194
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.1100397728322997e-07,
+ "loss": 1.9781,
+ "step": 195
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 7.105711538982761e-08,
+ "loss": 1.8445,
+ "step": 196
+ },
+ {
+ "epoch": 1.97,
+ "learning_rate": 3.997600268009604e-08,
+ "loss": 1.905,
+ "step": 197
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 1.7769136419903407e-08,
+ "loss": 1.994,
+ "step": 198
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 4.442587744757365e-09,
+ "loss": 2.0697,
+ "step": 199
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 0.0,
+ "loss": 2.0917,
+ "step": 200
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 2.1522109508514404,
+ "eval_runtime": 2.9686,
+ "eval_samples_per_second": 3.369,
+ "eval_steps_per_second": 0.674,
+ "step": 200
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 200,
+ "num_train_epochs": 2,
+ "save_steps": 50,
+ "total_flos": 1.6723216094252237e+17,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/prompt-a/twochar/checkpoint-200/training_args.bin b/prompt-a/twochar/checkpoint-200/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d58072f53f91b75b135f9498c19f7bcc5b637ba4
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-200/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9212c1ed1ebf30dd523c0f488bee8e1daf66e67eaaf76d60029260d57f02f88
+size 4411
diff --git a/prompt-a/twochar/checkpoint-50/README.md b/prompt-a/twochar/checkpoint-50/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/prompt-a/twochar/checkpoint-50/adapter_config.json b/prompt-a/twochar/checkpoint-50/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e15b2402bceaab5939e21de3eb1d9a6ad2980bbd
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/prompt-a/twochar/checkpoint-50/adapter_model.bin b/prompt-a/twochar/checkpoint-50/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b32d692e503b2fe7d2460d16d257316e977bc030
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d840bb206b581a6ed50a4e2191a12b71cc79e1cbc78a648fb9168f1c00985f5d
+size 125374989
diff --git a/prompt-a/twochar/checkpoint-50/adapter_model/README.md b/prompt-a/twochar/checkpoint-50/adapter_model/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/adapter_model/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/prompt-a/twochar/checkpoint-50/adapter_model/adapter_config.json b/prompt-a/twochar/checkpoint-50/adapter_model/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e15b2402bceaab5939e21de3eb1d9a6ad2980bbd
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/adapter_model/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/prompt-a/twochar/checkpoint-50/adapter_model/adapter_model.bin b/prompt-a/twochar/checkpoint-50/adapter_model/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b32d692e503b2fe7d2460d16d257316e977bc030
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/adapter_model/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d840bb206b581a6ed50a4e2191a12b71cc79e1cbc78a648fb9168f1c00985f5d
+size 125374989
diff --git a/prompt-a/twochar/checkpoint-50/optimizer.pt b/prompt-a/twochar/checkpoint-50/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..49ac3f1cc9a2674ebb439613a7e39777890234c2
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:473dc7891d944ed624fd40da875a8426b5821d4124ce68859d629ae31a043331
+size 250681597
diff --git a/prompt-a/twochar/checkpoint-50/rng_state_0.pth b/prompt-a/twochar/checkpoint-50/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..5eb3a4edab5539e7094148dfe79adbe66f009b23
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a8f71da32d999311dd9a36a9c6572fd4ca746e4452d5398c8f8c76bfb2688d6
+size 21687
diff --git a/prompt-a/twochar/checkpoint-50/rng_state_1.pth b/prompt-a/twochar/checkpoint-50/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b3073ead5c80b18ad7fd8314f1928cfc50ab4add
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba2c07e342aa482f28d55882c49641acf46cc55125b6b51531e6e3f2bcacaeae
+size 21687
diff --git a/prompt-a/twochar/checkpoint-50/rng_state_2.pth b/prompt-a/twochar/checkpoint-50/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..28afca9c021658b97f4e5b445407275eec76e1a1
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11d43713e09c4fc5cb64856b182d04e6b740881eab6f1f0534db9efbe16932e5
+size 21687
diff --git a/prompt-a/twochar/checkpoint-50/rng_state_3.pth b/prompt-a/twochar/checkpoint-50/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f0e6ca75d45273e6ebc7acb8687bccffb7976224
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9e340ac3a586f35d54d09bd32d1a46c322776319814c13e0eacff9eeac97996
+size 21687
diff --git a/prompt-a/twochar/checkpoint-50/rng_state_4.pth b/prompt-a/twochar/checkpoint-50/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..153694a41821b7130dbc2a87696a50d7ac275827
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db165393406425492c97925ce8b799592ff36152d3326fc7b571db8b86a48482
+size 21687
diff --git a/prompt-a/twochar/checkpoint-50/rng_state_5.pth b/prompt-a/twochar/checkpoint-50/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ea5efb06eff095b8ed0792688131a2e36981e3a9
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13196792301f7e45f8d68c4b33d57ccd0e52c5b2bb79af3bfec6cd2d1c7b3e9d
+size 21687
diff --git a/prompt-a/twochar/checkpoint-50/rng_state_6.pth b/prompt-a/twochar/checkpoint-50/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ecdc9981c4bcc00853b7038b96a7d52ecfede8b1
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:016ebea38ff490b7c931a7e76a166feca870ac0396e5d93c15dbb6884dc8029f
+size 21687
diff --git a/prompt-a/twochar/checkpoint-50/rng_state_7.pth b/prompt-a/twochar/checkpoint-50/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..00845597001c86526d11ae88103a9edea68ce7f5
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c41b6bd8b9fc31601ff6fd8b87bffa1a1b3486376be9e8e9a6f98f399af08a34
+size 21687
diff --git a/prompt-a/twochar/checkpoint-50/scheduler.pt b/prompt-a/twochar/checkpoint-50/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a66070eb64faba0589cb72d93620faf364bf42c3
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a13b7fd8d980f681c533668d750003260844bd1cef51c39f5f27763fb2d4c82d
+size 627
diff --git a/prompt-a/twochar/checkpoint-50/trainer_state.json b/prompt-a/twochar/checkpoint-50/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..2502a1fdfdaa22740926f7c9a77b59cfd7517a60
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/trainer_state.json
@@ -0,0 +1,335 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.5,
+ "eval_steps": 20,
+ "global_step": 50,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 6.5e-06,
+ "loss": 1.9562,
+ "step": 1
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.3e-05,
+ "loss": 2.0273,
+ "step": 2
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.9499999999999996e-05,
+ "loss": 2.2516,
+ "step": 3
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.6e-05,
+ "loss": 2.1955,
+ "step": 4
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.1752,
+ "step": 5
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.899999999999999e-05,
+ "loss": 2.2144,
+ "step": 6
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.5499999999999995e-05,
+ "loss": 2.0829,
+ "step": 7
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 5.2e-05,
+ "loss": 2.0503,
+ "step": 8
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 5.85e-05,
+ "loss": 2.1558,
+ "step": 9
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 6.5e-05,
+ "loss": 2.0924,
+ "step": 10
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 6.499555741225524e-05,
+ "loss": 2.1238,
+ "step": 11
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 6.498223086358009e-05,
+ "loss": 2.129,
+ "step": 12
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 6.49600239973199e-05,
+ "loss": 2.0559,
+ "step": 13
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 6.492894288461016e-05,
+ "loss": 2.0659,
+ "step": 14
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 6.488899602271677e-05,
+ "loss": 2.1563,
+ "step": 15
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 6.484019433271287e-05,
+ "loss": 2.1706,
+ "step": 16
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 6.478255115649324e-05,
+ "loss": 2.0736,
+ "step": 17
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 6.471608225312668e-05,
+ "loss": 2.1295,
+ "step": 18
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 6.464080579454772e-05,
+ "loss": 2.0618,
+ "step": 19
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 6.455674236058847e-05,
+ "loss": 2.12,
+ "step": 20
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 2.1955978870391846,
+ "eval_runtime": 2.9627,
+ "eval_samples_per_second": 3.375,
+ "eval_steps_per_second": 0.675,
+ "step": 20
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 6.446391493335242e-05,
+ "loss": 2.1576,
+ "step": 21
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 6.43623488909313e-05,
+ "loss": 2.0446,
+ "step": 22
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 6.425207200046697e-05,
+ "loss": 2.0033,
+ "step": 23
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 6.413311441056019e-05,
+ "loss": 2.1382,
+ "step": 24
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 6.400550864302823e-05,
+ "loss": 2.0989,
+ "step": 25
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 6.386928958401383e-05,
+ "loss": 2.1276,
+ "step": 26
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 6.372449447444757e-05,
+ "loss": 2.023,
+ "step": 27
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 6.357116289986662e-05,
+ "loss": 2.0597,
+ "step": 28
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 6.340933677959248e-05,
+ "loss": 2.0529,
+ "step": 29
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 6.323906035527062e-05,
+ "loss": 1.9873,
+ "step": 30
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 6.306038017877523e-05,
+ "loss": 2.0613,
+ "step": 31
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 6.287334509948247e-05,
+ "loss": 2.0765,
+ "step": 32
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 6.267800625091547e-05,
+ "loss": 2.0809,
+ "step": 33
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 6.2474417036765e-05,
+ "loss": 2.072,
+ "step": 34
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 6.226263311628936e-05,
+ "loss": 2.0069,
+ "step": 35
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 6.204271238909777e-05,
+ "loss": 2.0784,
+ "step": 36
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 6.181471497932117e-05,
+ "loss": 2.1763,
+ "step": 37
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 6.157870321917481e-05,
+ "loss": 2.0402,
+ "step": 38
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 6.133474163191738e-05,
+ "loss": 2.1082,
+ "step": 39
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.108289691421088e-05,
+ "loss": 2.1381,
+ "step": 40
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 2.172107219696045,
+ "eval_runtime": 2.97,
+ "eval_samples_per_second": 3.367,
+ "eval_steps_per_second": 0.673,
+ "step": 40
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.08232379178865e-05,
+ "loss": 2.0538,
+ "step": 41
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.055583563112115e-05,
+ "loss": 2.222,
+ "step": 42
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.0280763159030015e-05,
+ "loss": 1.996,
+ "step": 43
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 5.999809570368039e-05,
+ "loss": 1.9798,
+ "step": 44
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 5.970791054353217e-05,
+ "loss": 2.1825,
+ "step": 45
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 5.941028701231065e-05,
+ "loss": 2.1671,
+ "step": 46
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 5.91053064773175e-05,
+ "loss": 2.0992,
+ "step": 47
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 5.8793052317185785e-05,
+ "loss": 2.0544,
+ "step": 48
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 5.8473609899085046e-05,
+ "loss": 2.1054,
+ "step": 49
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.814706655538279e-05,
+ "loss": 2.1214,
+ "step": 50
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 200,
+ "num_train_epochs": 2,
+ "save_steps": 50,
+ "total_flos": 4.206166558742938e+16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/prompt-a/twochar/checkpoint-50/training_args.bin b/prompt-a/twochar/checkpoint-50/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d58072f53f91b75b135f9498c19f7bcc5b637ba4
--- /dev/null
+++ b/prompt-a/twochar/checkpoint-50/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9212c1ed1ebf30dd523c0f488bee8e1daf66e67eaaf76d60029260d57f02f88
+size 4411
diff --git a/prompt-a/twochar/config.json b/prompt-a/twochar/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..99be7fbf147c96ef93ed720729ea90e8e21bdfca
--- /dev/null
+++ b/prompt-a/twochar/config.json
@@ -0,0 +1,38 @@
+{
+ "_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "max_position_embeddings": 4096,
+ "model_type": "llama",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 40,
+ "pretraining_tp": 1,
+ "quantization_config": {
+ "bnb_4bit_compute_dtype": "bfloat16",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": true,
+ "llm_int8_enable_fp32_cpu_offload": false,
+ "llm_int8_has_fp16_weight": false,
+ "llm_int8_skip_modules": null,
+ "llm_int8_threshold": 6.0,
+ "load_in_4bit": true,
+ "load_in_8bit": false,
+ "quant_method": "bitsandbytes"
+ },
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float16",
+ "transformers_version": "4.34.0.dev0",
+ "use_cache": false,
+ "vocab_size": 32001
+}
diff --git a/prompt-a/twochar/special_tokens_map.json b/prompt-a/twochar/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..3f58a5e115855c6ea3cec98accae196ad927222e
--- /dev/null
+++ b/prompt-a/twochar/special_tokens_map.json
@@ -0,0 +1,6 @@
+{
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "[PAD]",
+ "unk_token": "<unk>"
+}
diff --git a/prompt-a/twochar/tokenizer.model b/prompt-a/twochar/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/prompt-a/twochar/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/prompt-a/twochar/tokenizer_config.json b/prompt-a/twochar/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..77849a9a3ca128f00bc3febdc9b39391a4ef43a3
--- /dev/null
+++ b/prompt-a/twochar/tokenizer_config.json
@@ -0,0 +1,39 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "trust_remote_code": false,
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "use_default_system_prompt": true,
+ "use_fast": true
+}