diff --git a/multi-prompt/twochar/README.md b/multi-prompt/twochar/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/multi-prompt/twochar/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/multi-prompt/twochar/adapter_config.json b/multi-prompt/twochar/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b33f8685004fc9f9eef2843be41eb6081508fc38
--- /dev/null
+++ b/multi-prompt/twochar/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "k_proj",
+ "gate_proj",
+ "o_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/multi-prompt/twochar/adapter_model.bin b/multi-prompt/twochar/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c79fe6f2dcda218f8075f5ac37aaf3f0caeff3d8
--- /dev/null
+++ b/multi-prompt/twochar/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:433a53226b2a4581547b8a2e381d865d58aa0575acf593735501071cf46d7fef
+size 125374989
diff --git a/multi-prompt/twochar/added_tokens.json b/multi-prompt/twochar/added_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..e41416ddd79948246ea2dced6800ea3cd531c424
--- /dev/null
+++ b/multi-prompt/twochar/added_tokens.json
@@ -0,0 +1,3 @@
+{
+ "[PAD]": 32000
+}
diff --git a/multi-prompt/twochar/checkpoint-100/README.md b/multi-prompt/twochar/checkpoint-100/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/multi-prompt/twochar/checkpoint-100/adapter_config.json b/multi-prompt/twochar/checkpoint-100/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b33f8685004fc9f9eef2843be41eb6081508fc38
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "k_proj",
+ "gate_proj",
+ "o_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/multi-prompt/twochar/checkpoint-100/adapter_model.bin b/multi-prompt/twochar/checkpoint-100/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e81a2b2e0a25412c7d260c0d5d2c6a06f2e69f19
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0708b4cbcf4d2eab6bb13a3b593efb8f9ffd0d3554bd6f0d85be7c2443d26b80
+size 125374989
diff --git a/multi-prompt/twochar/checkpoint-100/adapter_model/README.md b/multi-prompt/twochar/checkpoint-100/adapter_model/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/adapter_model/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/multi-prompt/twochar/checkpoint-100/adapter_model/adapter_config.json b/multi-prompt/twochar/checkpoint-100/adapter_model/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b33f8685004fc9f9eef2843be41eb6081508fc38
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/adapter_model/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "k_proj",
+ "gate_proj",
+ "o_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/multi-prompt/twochar/checkpoint-100/adapter_model/adapter_model.bin b/multi-prompt/twochar/checkpoint-100/adapter_model/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e81a2b2e0a25412c7d260c0d5d2c6a06f2e69f19
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/adapter_model/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0708b4cbcf4d2eab6bb13a3b593efb8f9ffd0d3554bd6f0d85be7c2443d26b80
+size 125374989
diff --git a/multi-prompt/twochar/checkpoint-100/optimizer.pt b/multi-prompt/twochar/checkpoint-100/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..aaf808f10c0b4118259cb0a33780917be0e14042
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0221233935a3705a400234359b0d51ecb64e96906b36ad8742d87e4e81167bd8
+size 250681597
diff --git a/multi-prompt/twochar/checkpoint-100/rng_state_0.pth b/multi-prompt/twochar/checkpoint-100/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c027d7e1b409ca7399334f4fb94eb2145805c56d
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef3b38ac0e685cf7486af95e3e44d65bd9d5f0166e4b7cda4087f3df177fe2ac
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-100/rng_state_1.pth b/multi-prompt/twochar/checkpoint-100/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4365f610fe476feb9e4a0815636c0f03dc780878
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96f602cf80bbe8b7b20f61815b81a180e05c4349767b3055d38cee11e56d2f52
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-100/rng_state_2.pth b/multi-prompt/twochar/checkpoint-100/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e78b1d9b263773f1fc17c537a59fccaad3fa4237
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4c642ebadc5d5d3863c2af80d71300c9b960385b08a7e97d47221351e923536
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-100/rng_state_3.pth b/multi-prompt/twochar/checkpoint-100/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e18b7bd48ce79cdea57bade728fd0ee5570b5300
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bdbab4ca5a74f90e77ec13e5c8a300b12d2ec0ffa5f7150ef45db3b39c0ba3fd
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-100/rng_state_4.pth b/multi-prompt/twochar/checkpoint-100/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..686c2ba0482c0a3291c1c693317c281ba57c82a0
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b9e295735ec1efb6e545052a0a5b42c962567ac43c037ef05cd67f65aac86e5
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-100/rng_state_5.pth b/multi-prompt/twochar/checkpoint-100/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d30c7d2e3633c5923fbec2dd60c0959aa47b1699
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf0b7826ae8f9d6847c100c005a195903f6702ffefbcf20043e504db1cbb9cf0
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-100/rng_state_6.pth b/multi-prompt/twochar/checkpoint-100/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..580b669b95dd45546196ccdc669d2b2365f7ba8e
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c9b6a36344bf4dd837aca81f6f221eaaf42fdaf4ad38def06c6583bd294478b
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-100/rng_state_7.pth b/multi-prompt/twochar/checkpoint-100/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b469c82317c3fbe5c8f9d58b23b976eeac1777d4
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe85df90b682b7f8f20f078de75c81ecf060770f57b3f4465e9d00985eaa8a39
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-100/scheduler.pt b/multi-prompt/twochar/checkpoint-100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..36e20fc64cdb8ac5215ac226a0de4079c0dc732a
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8c8c5e17820ade3bd0f83be62a888f354614fa9e9626e94814a99519af6a262
+size 627
diff --git a/multi-prompt/twochar/checkpoint-100/trainer_state.json b/multi-prompt/twochar/checkpoint-100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..161723e07a2e1943ff3ec6ff69406115e373d4c9
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/trainer_state.json
@@ -0,0 +1,659 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9900990099009901,
+ "eval_steps": 20,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 6.5e-06,
+ "loss": 2.0407,
+ "step": 1
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.3e-05,
+ "loss": 2.3107,
+ "step": 2
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.9499999999999996e-05,
+ "loss": 2.1613,
+ "step": 3
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.6e-05,
+ "loss": 2.0444,
+ "step": 4
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.2512,
+ "step": 5
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.899999999999999e-05,
+ "loss": 2.0516,
+ "step": 6
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.5499999999999995e-05,
+ "loss": 2.2006,
+ "step": 7
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 5.2e-05,
+ "loss": 2.1229,
+ "step": 8
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 5.85e-05,
+ "loss": 2.2498,
+ "step": 9
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 6.5e-05,
+ "loss": 2.0293,
+ "step": 10
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 6.499564948206075e-05,
+ "loss": 2.1316,
+ "step": 11
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 6.498259909298188e-05,
+ "loss": 2.1274,
+ "step": 12
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 6.49608523266681e-05,
+ "loss": 2.1129,
+ "step": 13
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 6.493041500525461e-05,
+ "loss": 2.1517,
+ "step": 14
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 6.489129527754836e-05,
+ "loss": 2.1078,
+ "step": 15
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 6.484350361684639e-05,
+ "loss": 2.0362,
+ "step": 16
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 6.478705281813194e-05,
+ "loss": 2.0553,
+ "step": 17
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 6.472195799464884e-05,
+ "loss": 2.1223,
+ "step": 18
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 6.464823657385538e-05,
+ "loss": 2.0326,
+ "step": 19
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 6.456590829275857e-05,
+ "loss": 2.1802,
+ "step": 20
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 2.012301445007324,
+ "eval_runtime": 1.4855,
+ "eval_samples_per_second": 5.385,
+ "eval_steps_per_second": 0.673,
+ "step": 20
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 6.447499519263001e-05,
+ "loss": 2.0665,
+ "step": 21
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 6.437552161310498e-05,
+ "loss": 2.0354,
+ "step": 22
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 6.426751418566609e-05,
+ "loss": 2.1289,
+ "step": 23
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 6.415100182651334e-05,
+ "loss": 2.0742,
+ "step": 24
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 6.402601572882268e-05,
+ "loss": 2.0412,
+ "step": 25
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 6.38925893543947e-05,
+ "loss": 2.1974,
+ "step": 26
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 6.375075842469626e-05,
+ "loss": 2.196,
+ "step": 27
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 6.360056091129678e-05,
+ "loss": 2.0775,
+ "step": 28
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 6.344203702570254e-05,
+ "loss": 2.0366,
+ "step": 29
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 6.327522920859093e-05,
+ "loss": 2.1438,
+ "step": 30
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 6.310018211844817e-05,
+ "loss": 2.0639,
+ "step": 31
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 6.291694261961308e-05,
+ "loss": 2.1794,
+ "step": 32
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 6.272555976973039e-05,
+ "loss": 2.0592,
+ "step": 33
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 6.252608480661681e-05,
+ "loss": 2.1628,
+ "step": 34
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 6.231857113454346e-05,
+ "loss": 2.1851,
+ "step": 35
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 6.21030743099382e-05,
+ "loss": 2.0531,
+ "step": 36
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 6.18796520265119e-05,
+ "loss": 2.0284,
+ "step": 37
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 6.164836409981237e-05,
+ "loss": 2.2359,
+ "step": 38
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 6.140927245121032e-05,
+ "loss": 1.8967,
+ "step": 39
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.116244109132153e-05,
+ "loss": 2.0275,
+ "step": 40
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 1.9861761331558228,
+ "eval_runtime": 1.4896,
+ "eval_samples_per_second": 5.37,
+ "eval_steps_per_second": 0.671,
+ "step": 40
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.0907936102869656e-05,
+ "loss": 2.0316,
+ "step": 41
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.0645825622994254e-05,
+ "loss": 2.0768,
+ "step": 42
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.037617982500884e-05,
+ "loss": 2.0058,
+ "step": 43
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.009907089961381e-05,
+ "loss": 2.2319,
+ "step": 44
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 5.981457303556923e-05,
+ "loss": 2.1693,
+ "step": 45
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 5.9522762399832716e-05,
+ "loss": 2.0858,
+ "step": 46
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 5.922371711716768e-05,
+ "loss": 2.0576,
+ "step": 47
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 5.891751724922749e-05,
+ "loss": 1.9946,
+ "step": 48
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 5.860424477312095e-05,
+ "loss": 2.0166,
+ "step": 49
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.828398355946514e-05,
+ "loss": 2.1482,
+ "step": 50
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.795681934993113e-05,
+ "loss": 2.0698,
+ "step": 51
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 5.7622839734288945e-05,
+ "loss": 1.9317,
+ "step": 52
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 5.728213412695761e-05,
+ "loss": 2.1399,
+ "step": 53
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 5.693479374306676e-05,
+ "loss": 2.1637,
+ "step": 54
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 5.658091157403617e-05,
+ "loss": 2.0557,
+ "step": 55
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 5.622058236267965e-05,
+ "loss": 1.926,
+ "step": 56
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 5.585390257784018e-05,
+ "loss": 2.0984,
+ "step": 57
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 5.548097038856279e-05,
+ "loss": 2.0428,
+ "step": 58
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 5.5101885637812374e-05,
+ "loss": 1.9624,
+ "step": 59
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 5.4716749815743304e-05,
+ "loss": 1.913,
+ "step": 60
+ },
+ {
+ "epoch": 0.59,
+ "eval_loss": 1.97765052318573,
+ "eval_runtime": 1.4886,
+ "eval_samples_per_second": 5.374,
+ "eval_steps_per_second": 0.672,
+ "step": 60
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 5.432566603252809e-05,
+ "loss": 2.0125,
+ "step": 61
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 5.3928738990752234e-05,
+ "loss": 2.0721,
+ "step": 62
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 5.3526074957382866e-05,
+ "loss": 2.1046,
+ "step": 63
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 5.311778173531847e-05,
+ "loss": 2.0074,
+ "step": 64
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 5.2703968634527514e-05,
+ "loss": 2.1667,
+ "step": 65
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 5.2284746442783414e-05,
+ "loss": 2.0677,
+ "step": 66
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 5.186022739600408e-05,
+ "loss": 2.1798,
+ "step": 67
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 5.143052514820357e-05,
+ "loss": 2.105,
+ "step": 68
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 5.099575474106419e-05,
+ "loss": 2.118,
+ "step": 69
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 5.055603257313707e-05,
+ "loss": 2.0791,
+ "step": 70
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 5.011147636867943e-05,
+ "loss": 2.032,
+ "step": 71
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.9662205146136955e-05,
+ "loss": 1.9331,
+ "step": 72
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.92083391862797e-05,
+ "loss": 2.0077,
+ "step": 73
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.874999999999999e-05,
+ "loss": 2.0253,
+ "step": 74
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.828731029578105e-05,
+ "loss": 1.9922,
+ "step": 75
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.7820393946844926e-05,
+ "loss": 2.0295,
+ "step": 76
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.734937595798867e-05,
+ "loss": 1.9514,
+ "step": 77
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.687438243211754e-05,
+ "loss": 2.1018,
+ "step": 78
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.639554053648416e-05,
+ "loss": 2.1032,
+ "step": 79
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.5912978468642824e-05,
+ "loss": 2.1258,
+ "step": 80
+ },
+ {
+ "epoch": 0.79,
+ "eval_loss": 1.972991704940796,
+ "eval_runtime": 1.494,
+ "eval_samples_per_second": 5.355,
+ "eval_steps_per_second": 0.669,
+ "step": 80
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.542682542212785e-05,
+ "loss": 2.0353,
+ "step": 81
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.4937211551865415e-05,
+ "loss": 1.9987,
+ "step": 82
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.444426793932787e-05,
+ "loss": 1.8856,
+ "step": 83
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.3948126557440085e-05,
+ "loss": 2.0904,
+ "step": 84
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.344892023524714e-05,
+ "loss": 2.0685,
+ "step": 85
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.2946782622352746e-05,
+ "loss": 2.1,
+ "step": 86
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.244184815313799e-05,
+ "loss": 1.9322,
+ "step": 87
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.193425201077002e-05,
+ "loss": 2.194,
+ "step": 88
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.14241300910103e-05,
+ "loss": 2.0455,
+ "step": 89
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 4.091161896583192e-05,
+ "loss": 2.1239,
+ "step": 90
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 4.0396855846856076e-05,
+ "loss": 2.142,
+ "step": 91
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.9879978548617125e-05,
+ "loss": 2.2304,
+ "step": 92
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 3.936112545166636e-05,
+ "loss": 1.925,
+ "step": 93
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.884043546552417e-05,
+ "loss": 2.1547,
+ "step": 94
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.831804799149057e-05,
+ "loss": 2.0439,
+ "step": 95
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.779410288532413e-05,
+ "loss": 1.9763,
+ "step": 96
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.726874041979925e-05,
+ "loss": 2.114,
+ "step": 97
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.674210124715168e-05,
+ "loss": 2.0293,
+ "step": 98
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.621432636142251e-05,
+ "loss": 2.0833,
+ "step": 99
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.5685557060710726e-05,
+ "loss": 2.0452,
+ "step": 100
+ },
+ {
+ "epoch": 0.99,
+ "eval_loss": 1.9682413339614868,
+ "eval_runtime": 1.4934,
+ "eval_samples_per_second": 5.357,
+ "eval_steps_per_second": 0.67,
+ "step": 100
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 202,
+ "num_train_epochs": 2,
+ "save_steps": 50,
+ "total_flos": 8.359599153348608e+16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/multi-prompt/twochar/checkpoint-100/training_args.bin b/multi-prompt/twochar/checkpoint-100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..47d6200b8d4047ff364e6b15cf014dde61f2166b
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac3fb199292750362f1e387a5d8bde7088e81478bbe173194e512403378f1c70
+size 4475
diff --git a/multi-prompt/twochar/checkpoint-150/README.md b/multi-prompt/twochar/checkpoint-150/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/multi-prompt/twochar/checkpoint-150/adapter_config.json b/multi-prompt/twochar/checkpoint-150/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b33f8685004fc9f9eef2843be41eb6081508fc38
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "k_proj",
+ "gate_proj",
+ "o_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/multi-prompt/twochar/checkpoint-150/adapter_model.bin b/multi-prompt/twochar/checkpoint-150/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4f8ca6891c7773372d69fb2ad8df2ffaa55debb5
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94befd076d47fa24e3ba916e9a3778dd8ece4972bb7c203083cc5f53c5e46bfa
+size 125374989
diff --git a/multi-prompt/twochar/checkpoint-150/adapter_model/README.md b/multi-prompt/twochar/checkpoint-150/adapter_model/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/adapter_model/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/multi-prompt/twochar/checkpoint-150/adapter_model/adapter_config.json b/multi-prompt/twochar/checkpoint-150/adapter_model/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b33f8685004fc9f9eef2843be41eb6081508fc38
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/adapter_model/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "k_proj",
+ "gate_proj",
+ "o_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/multi-prompt/twochar/checkpoint-150/adapter_model/adapter_model.bin b/multi-prompt/twochar/checkpoint-150/adapter_model/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4f8ca6891c7773372d69fb2ad8df2ffaa55debb5
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/adapter_model/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94befd076d47fa24e3ba916e9a3778dd8ece4972bb7c203083cc5f53c5e46bfa
+size 125374989
diff --git a/multi-prompt/twochar/checkpoint-150/optimizer.pt b/multi-prompt/twochar/checkpoint-150/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f561be21d8cb9043f5dda0d68fc3f76b25f632f3
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60fd84426c8963b86174be0513aea26512fafa4a79de37a27d66e42699c92c1d
+size 250681597
diff --git a/multi-prompt/twochar/checkpoint-150/rng_state_0.pth b/multi-prompt/twochar/checkpoint-150/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2e852bd74a8f93ab1c4de0f44917d87779fa6280
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d127d4391ee6771b6d20db8ce3f0dd0584c753057cc9bdfdd0be6f8cef84d955
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-150/rng_state_1.pth b/multi-prompt/twochar/checkpoint-150/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..967c3b57c88f0fd6224fc782325081203d780c53
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ccf1beada17255706986caf9094f244684163a8547f833874af8de8dd4daa0
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-150/rng_state_2.pth b/multi-prompt/twochar/checkpoint-150/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..01a3800c419d7a10b3033798c115a4599ca65708
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:277501f7b2a65d7470e01413187a94540bcb41c51790d63ac495be26b8fdfb89
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-150/rng_state_3.pth b/multi-prompt/twochar/checkpoint-150/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3f49ae80b4f69a09a48a97d4fab2e4dc9306d4b0
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d55ef5ebc7e10e4cd63d908d65f05dfdc0de99fd4911bc3aa2e57e01940ef54
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-150/rng_state_4.pth b/multi-prompt/twochar/checkpoint-150/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..53096add211f9c568abe4840b3309d8fa88cf191
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddc9393cb55ab7d202c2cc2ae933435652c478277daf312e6370eb3404db7e9f
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-150/rng_state_5.pth b/multi-prompt/twochar/checkpoint-150/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..61804cccdfd8c946e52825a7b57b5c48feb69634
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcbf8760f23fffddb8debdb97db5712925a0c049476c45b8b57488cb099ff82e
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-150/rng_state_6.pth b/multi-prompt/twochar/checkpoint-150/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d0be26302b066358e34db48b75ac1314b1a7dc78
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db101559fa9579ad6b9ca8b4a109575c11fc03a27ef55f3e0bb92c5642fc4001
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-150/rng_state_7.pth b/multi-prompt/twochar/checkpoint-150/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..26ace6a6033c3e5cabf8c074559423187fe8f8d5
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:644bacb2bf138ffa7d1d20a3358a4fab98d6ab8c6ede06df95e5de696a31d2ce
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-150/scheduler.pt b/multi-prompt/twochar/checkpoint-150/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..239565dc0a5d8045d4945debf9c17e89b35c6b1f
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4e06f3498f4a96af42d371e2251f32c6afc73dc33f11f9b431ef7f76337ee64
+size 627
diff --git a/multi-prompt/twochar/checkpoint-150/trainer_state.json b/multi-prompt/twochar/checkpoint-150/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..027041939045976d83cbeaf08797e3bda1e610de
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/trainer_state.json
@@ -0,0 +1,975 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.4851485148514851,
+ "eval_steps": 20,
+ "global_step": 150,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 6.5e-06,
+ "loss": 2.0407,
+ "step": 1
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.3e-05,
+ "loss": 2.3107,
+ "step": 2
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.9499999999999996e-05,
+ "loss": 2.1613,
+ "step": 3
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.6e-05,
+ "loss": 2.0444,
+ "step": 4
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.2512,
+ "step": 5
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.899999999999999e-05,
+ "loss": 2.0516,
+ "step": 6
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.5499999999999995e-05,
+ "loss": 2.2006,
+ "step": 7
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 5.2e-05,
+ "loss": 2.1229,
+ "step": 8
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 5.85e-05,
+ "loss": 2.2498,
+ "step": 9
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 6.5e-05,
+ "loss": 2.0293,
+ "step": 10
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 6.499564948206075e-05,
+ "loss": 2.1316,
+ "step": 11
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 6.498259909298188e-05,
+ "loss": 2.1274,
+ "step": 12
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 6.49608523266681e-05,
+ "loss": 2.1129,
+ "step": 13
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 6.493041500525461e-05,
+ "loss": 2.1517,
+ "step": 14
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 6.489129527754836e-05,
+ "loss": 2.1078,
+ "step": 15
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 6.484350361684639e-05,
+ "loss": 2.0362,
+ "step": 16
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 6.478705281813194e-05,
+ "loss": 2.0553,
+ "step": 17
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 6.472195799464884e-05,
+ "loss": 2.1223,
+ "step": 18
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 6.464823657385538e-05,
+ "loss": 2.0326,
+ "step": 19
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 6.456590829275857e-05,
+ "loss": 2.1802,
+ "step": 20
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 2.012301445007324,
+ "eval_runtime": 1.4855,
+ "eval_samples_per_second": 5.385,
+ "eval_steps_per_second": 0.673,
+ "step": 20
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 6.447499519263001e-05,
+ "loss": 2.0665,
+ "step": 21
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 6.437552161310498e-05,
+ "loss": 2.0354,
+ "step": 22
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 6.426751418566609e-05,
+ "loss": 2.1289,
+ "step": 23
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 6.415100182651334e-05,
+ "loss": 2.0742,
+ "step": 24
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 6.402601572882268e-05,
+ "loss": 2.0412,
+ "step": 25
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 6.38925893543947e-05,
+ "loss": 2.1974,
+ "step": 26
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 6.375075842469626e-05,
+ "loss": 2.196,
+ "step": 27
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 6.360056091129678e-05,
+ "loss": 2.0775,
+ "step": 28
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 6.344203702570254e-05,
+ "loss": 2.0366,
+ "step": 29
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 6.327522920859093e-05,
+ "loss": 2.1438,
+ "step": 30
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 6.310018211844817e-05,
+ "loss": 2.0639,
+ "step": 31
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 6.291694261961308e-05,
+ "loss": 2.1794,
+ "step": 32
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 6.272555976973039e-05,
+ "loss": 2.0592,
+ "step": 33
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 6.252608480661681e-05,
+ "loss": 2.1628,
+ "step": 34
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 6.231857113454346e-05,
+ "loss": 2.1851,
+ "step": 35
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 6.21030743099382e-05,
+ "loss": 2.0531,
+ "step": 36
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 6.18796520265119e-05,
+ "loss": 2.0284,
+ "step": 37
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 6.164836409981237e-05,
+ "loss": 2.2359,
+ "step": 38
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 6.140927245121032e-05,
+ "loss": 1.8967,
+ "step": 39
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.116244109132153e-05,
+ "loss": 2.0275,
+ "step": 40
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 1.9861761331558228,
+ "eval_runtime": 1.4896,
+ "eval_samples_per_second": 5.37,
+ "eval_steps_per_second": 0.671,
+ "step": 40
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.0907936102869656e-05,
+ "loss": 2.0316,
+ "step": 41
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.0645825622994254e-05,
+ "loss": 2.0768,
+ "step": 42
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.037617982500884e-05,
+ "loss": 2.0058,
+ "step": 43
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.009907089961381e-05,
+ "loss": 2.2319,
+ "step": 44
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 5.981457303556923e-05,
+ "loss": 2.1693,
+ "step": 45
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 5.9522762399832716e-05,
+ "loss": 2.0858,
+ "step": 46
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 5.922371711716768e-05,
+ "loss": 2.0576,
+ "step": 47
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 5.891751724922749e-05,
+ "loss": 1.9946,
+ "step": 48
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 5.860424477312095e-05,
+ "loss": 2.0166,
+ "step": 49
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.828398355946514e-05,
+ "loss": 2.1482,
+ "step": 50
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.795681934993113e-05,
+ "loss": 2.0698,
+ "step": 51
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 5.7622839734288945e-05,
+ "loss": 1.9317,
+ "step": 52
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 5.728213412695761e-05,
+ "loss": 2.1399,
+ "step": 53
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 5.693479374306676e-05,
+ "loss": 2.1637,
+ "step": 54
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 5.658091157403617e-05,
+ "loss": 2.0557,
+ "step": 55
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 5.622058236267965e-05,
+ "loss": 1.926,
+ "step": 56
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 5.585390257784018e-05,
+ "loss": 2.0984,
+ "step": 57
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 5.548097038856279e-05,
+ "loss": 2.0428,
+ "step": 58
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 5.5101885637812374e-05,
+ "loss": 1.9624,
+ "step": 59
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 5.4716749815743304e-05,
+ "loss": 1.913,
+ "step": 60
+ },
+ {
+ "epoch": 0.59,
+ "eval_loss": 1.97765052318573,
+ "eval_runtime": 1.4886,
+ "eval_samples_per_second": 5.374,
+ "eval_steps_per_second": 0.672,
+ "step": 60
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 5.432566603252809e-05,
+ "loss": 2.0125,
+ "step": 61
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 5.3928738990752234e-05,
+ "loss": 2.0721,
+ "step": 62
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 5.3526074957382866e-05,
+ "loss": 2.1046,
+ "step": 63
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 5.311778173531847e-05,
+ "loss": 2.0074,
+ "step": 64
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 5.2703968634527514e-05,
+ "loss": 2.1667,
+ "step": 65
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 5.2284746442783414e-05,
+ "loss": 2.0677,
+ "step": 66
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 5.186022739600408e-05,
+ "loss": 2.1798,
+ "step": 67
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 5.143052514820357e-05,
+ "loss": 2.105,
+ "step": 68
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 5.099575474106419e-05,
+ "loss": 2.118,
+ "step": 69
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 5.055603257313707e-05,
+ "loss": 2.0791,
+ "step": 70
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 5.011147636867943e-05,
+ "loss": 2.032,
+ "step": 71
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.9662205146136955e-05,
+ "loss": 1.9331,
+ "step": 72
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.92083391862797e-05,
+ "loss": 2.0077,
+ "step": 73
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.874999999999999e-05,
+ "loss": 2.0253,
+ "step": 74
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.828731029578105e-05,
+ "loss": 1.9922,
+ "step": 75
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.7820393946844926e-05,
+ "loss": 2.0295,
+ "step": 76
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.734937595798867e-05,
+ "loss": 1.9514,
+ "step": 77
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.687438243211754e-05,
+ "loss": 2.1018,
+ "step": 78
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.639554053648416e-05,
+ "loss": 2.1032,
+ "step": 79
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.5912978468642824e-05,
+ "loss": 2.1258,
+ "step": 80
+ },
+ {
+ "epoch": 0.79,
+ "eval_loss": 1.972991704940796,
+ "eval_runtime": 1.494,
+ "eval_samples_per_second": 5.355,
+ "eval_steps_per_second": 0.669,
+ "step": 80
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.542682542212785e-05,
+ "loss": 2.0353,
+ "step": 81
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.4937211551865415e-05,
+ "loss": 1.9987,
+ "step": 82
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.444426793932787e-05,
+ "loss": 1.8856,
+ "step": 83
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.3948126557440085e-05,
+ "loss": 2.0904,
+ "step": 84
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.344892023524714e-05,
+ "loss": 2.0685,
+ "step": 85
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.2946782622352746e-05,
+ "loss": 2.1,
+ "step": 86
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.244184815313799e-05,
+ "loss": 1.9322,
+ "step": 87
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.193425201077002e-05,
+ "loss": 2.194,
+ "step": 88
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.14241300910103e-05,
+ "loss": 2.0455,
+ "step": 89
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 4.091161896583192e-05,
+ "loss": 2.1239,
+ "step": 90
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 4.0396855846856076e-05,
+ "loss": 2.142,
+ "step": 91
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.9879978548617125e-05,
+ "loss": 2.2304,
+ "step": 92
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 3.936112545166636e-05,
+ "loss": 1.925,
+ "step": 93
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.884043546552417e-05,
+ "loss": 2.1547,
+ "step": 94
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.831804799149057e-05,
+ "loss": 2.0439,
+ "step": 95
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.779410288532413e-05,
+ "loss": 1.9763,
+ "step": 96
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.726874041979925e-05,
+ "loss": 2.114,
+ "step": 97
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.674210124715168e-05,
+ "loss": 2.0293,
+ "step": 98
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.621432636142251e-05,
+ "loss": 2.0833,
+ "step": 99
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.5685557060710726e-05,
+ "loss": 2.0452,
+ "step": 100
+ },
+ {
+ "epoch": 0.99,
+ "eval_loss": 1.9682413339614868,
+ "eval_runtime": 1.4934,
+ "eval_samples_per_second": 5.357,
+ "eval_steps_per_second": 0.67,
+ "step": 100
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.5155934909344214e-05,
+ "loss": 1.9682,
+ "step": 101
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.462560169997965e-05,
+ "loss": 2.0723,
+ "step": 102
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.409469941564109e-05,
+ "loss": 2.0713,
+ "step": 103
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 3.356337019170772e-05,
+ "loss": 2.067,
+ "step": 104
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.303175627786082e-05,
+ "loss": 1.934,
+ "step": 105
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.2083,
+ "step": 106
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.1968243722139173e-05,
+ "loss": 2.045,
+ "step": 107
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 3.143662980829228e-05,
+ "loss": 1.9531,
+ "step": 108
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.090530058435891e-05,
+ "loss": 2.0913,
+ "step": 109
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.0374398300020347e-05,
+ "loss": 1.9211,
+ "step": 110
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 2.984406509065579e-05,
+ "loss": 2.0983,
+ "step": 111
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 2.9314442939289275e-05,
+ "loss": 2.1621,
+ "step": 112
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 2.8785673638577486e-05,
+ "loss": 2.1728,
+ "step": 113
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 2.825789875284833e-05,
+ "loss": 2.0512,
+ "step": 114
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 2.773125958020074e-05,
+ "loss": 2.0462,
+ "step": 115
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 2.7205897114675866e-05,
+ "loss": 1.9357,
+ "step": 116
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 2.668195200850944e-05,
+ "loss": 1.8384,
+ "step": 117
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 2.6159564534475832e-05,
+ "loss": 1.9649,
+ "step": 118
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 2.563887454833363e-05,
+ "loss": 2.0262,
+ "step": 119
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 2.5120021451382872e-05,
+ "loss": 2.1315,
+ "step": 120
+ },
+ {
+ "epoch": 1.19,
+ "eval_loss": 1.9639294147491455,
+ "eval_runtime": 1.4959,
+ "eval_samples_per_second": 5.348,
+ "eval_steps_per_second": 0.668,
+ "step": 120
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 2.4603144153143925e-05,
+ "loss": 1.9284,
+ "step": 121
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 2.4088381034168072e-05,
+ "loss": 2.0671,
+ "step": 122
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 2.3575869908989705e-05,
+ "loss": 2.0938,
+ "step": 123
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 2.306574798922998e-05,
+ "loss": 1.9189,
+ "step": 124
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 2.2558151846862005e-05,
+ "loss": 2.0635,
+ "step": 125
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 2.2053217377647255e-05,
+ "loss": 2.1367,
+ "step": 126
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 2.1551079764752848e-05,
+ "loss": 1.9967,
+ "step": 127
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 2.105187344255991e-05,
+ "loss": 2.1994,
+ "step": 128
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 2.0555732060672138e-05,
+ "loss": 2.151,
+ "step": 129
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 2.0062788448134583e-05,
+ "loss": 2.0517,
+ "step": 130
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 1.957317457787214e-05,
+ "loss": 2.0584,
+ "step": 131
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 1.908702153135717e-05,
+ "loss": 2.0868,
+ "step": 132
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 1.860445946351584e-05,
+ "loss": 1.9646,
+ "step": 133
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 1.8125617567882463e-05,
+ "loss": 1.986,
+ "step": 134
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 1.7650624042011325e-05,
+ "loss": 1.9015,
+ "step": 135
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 1.7179606053155072e-05,
+ "loss": 2.1968,
+ "step": 136
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 1.6712689704218944e-05,
+ "loss": 1.9268,
+ "step": 137
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 1.6250000000000005e-05,
+ "loss": 2.0205,
+ "step": 138
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 1.5791660813720294e-05,
+ "loss": 2.1183,
+ "step": 139
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 1.533779485386304e-05,
+ "loss": 2.08,
+ "step": 140
+ },
+ {
+ "epoch": 1.39,
+ "eval_loss": 1.9603089094161987,
+ "eval_runtime": 1.495,
+ "eval_samples_per_second": 5.351,
+ "eval_steps_per_second": 0.669,
+ "step": 140
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 1.4888523631320579e-05,
+ "loss": 1.9309,
+ "step": 141
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 1.4443967426862935e-05,
+ "loss": 2.0579,
+ "step": 142
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 1.4004245258935799e-05,
+ "loss": 1.8377,
+ "step": 143
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 1.3569474851796432e-05,
+ "loss": 2.0764,
+ "step": 144
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 1.3139772603995914e-05,
+ "loss": 2.1289,
+ "step": 145
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 1.2715253557216577e-05,
+ "loss": 2.041,
+ "step": 146
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 1.2296031365472491e-05,
+ "loss": 1.9343,
+ "step": 147
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 1.1882218264681525e-05,
+ "loss": 1.9665,
+ "step": 148
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 1.1473925042617137e-05,
+ "loss": 2.1843,
+ "step": 149
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 1.1071261009247762e-05,
+ "loss": 2.0125,
+ "step": 150
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 202,
+ "num_train_epochs": 2,
+ "save_steps": 50,
+ "total_flos": 1.2497211511065805e+17,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/multi-prompt/twochar/checkpoint-150/training_args.bin b/multi-prompt/twochar/checkpoint-150/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..47d6200b8d4047ff364e6b15cf014dde61f2166b
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-150/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac3fb199292750362f1e387a5d8bde7088e81478bbe173194e512403378f1c70
+size 4475
diff --git a/multi-prompt/twochar/checkpoint-200/README.md b/multi-prompt/twochar/checkpoint-200/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/multi-prompt/twochar/checkpoint-200/adapter_config.json b/multi-prompt/twochar/checkpoint-200/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b33f8685004fc9f9eef2843be41eb6081508fc38
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "k_proj",
+ "gate_proj",
+ "o_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/multi-prompt/twochar/checkpoint-200/adapter_model.bin b/multi-prompt/twochar/checkpoint-200/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8087b4c30f7a0539b15177120ff755b578eba028
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c882f2a426060ad44b5cf280eb92814d684c2673107ca7f19c9e34b4c3fe2980
+size 125374989
diff --git a/multi-prompt/twochar/checkpoint-200/adapter_model/README.md b/multi-prompt/twochar/checkpoint-200/adapter_model/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/adapter_model/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/multi-prompt/twochar/checkpoint-200/adapter_model/adapter_config.json b/multi-prompt/twochar/checkpoint-200/adapter_model/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b33f8685004fc9f9eef2843be41eb6081508fc38
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/adapter_model/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "k_proj",
+ "gate_proj",
+ "o_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/multi-prompt/twochar/checkpoint-200/adapter_model/adapter_model.bin b/multi-prompt/twochar/checkpoint-200/adapter_model/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8087b4c30f7a0539b15177120ff755b578eba028
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/adapter_model/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c882f2a426060ad44b5cf280eb92814d684c2673107ca7f19c9e34b4c3fe2980
+size 125374989
diff --git a/multi-prompt/twochar/checkpoint-200/optimizer.pt b/multi-prompt/twochar/checkpoint-200/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3571f4113bbbf3511ef6bbc74d021d6e95cb81f9
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16b546d91cb67c74c723898ad8350b04c2ee27be0612fff40324b5db78dd87a0
+size 250681597
diff --git a/multi-prompt/twochar/checkpoint-200/rng_state_0.pth b/multi-prompt/twochar/checkpoint-200/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f7672dac941e999991ba5ab5e715528b64140eb3
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19c4ecd007574b31ef585631572d90a849b7178b232262c2c41125e361bc3517
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-200/rng_state_1.pth b/multi-prompt/twochar/checkpoint-200/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f026ecd896be0127311bc707250b1058306d0ff9
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cc05c6dead1d35e2b0a4b8c8ca26e5b7aeb7547f432cc1c89591900a46a09b1
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-200/rng_state_2.pth b/multi-prompt/twochar/checkpoint-200/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..85110d5a642c4bd071c7683f564e33f3a00ef14f
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10c2d8765af7ee78c7ddacf8d9c6ad56309da3711e52aa502ca9e611e8b23df8
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-200/rng_state_3.pth b/multi-prompt/twochar/checkpoint-200/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..86e83fa6afa0df4b6812870b44ce98497c19bcdc
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f581ef0ddd2f517335879867250c3475ad9cf8ee68bb06bf8b999b0ce994eaff
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-200/rng_state_4.pth b/multi-prompt/twochar/checkpoint-200/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ea54c5d05db733142fd6a2b82a00f09b4f9c4962
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79d2080ee3cbb2aa61b39ad477ae141bb73b045d6857eaac0076468a913cf2e7
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-200/rng_state_5.pth b/multi-prompt/twochar/checkpoint-200/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ccb27daceb7ac2a3543fd1c8ca4a8a6745d4f2ab
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8436025ee61c8b1e2881effd13c23c89bf56b5d39765503ff7e4d1ff287fb833
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-200/rng_state_6.pth b/multi-prompt/twochar/checkpoint-200/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..10bdadd541a4844d4f34266c66033c8426994fe4
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e172e6c55f1ad800d794787541c81bf86fbab4bc0b983c42152c182f34992c0
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-200/rng_state_7.pth b/multi-prompt/twochar/checkpoint-200/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e088b0c18a3ce89c1ec7c68ecc0ba07e135370e1
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9547c06d3d25b5745d0cbb3abc1cd35c3aa5dcebb97f2bb750d73d134c8e4a73
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-200/scheduler.pt b/multi-prompt/twochar/checkpoint-200/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e663fbc8d5230eaa608d8784982b5358e06d4d6f
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb782d53827be80b0bd501ee71f6a9c209f4fa9b3850b8515f3c0b99d0f7e4f7
+size 627
diff --git a/multi-prompt/twochar/checkpoint-200/trainer_state.json b/multi-prompt/twochar/checkpoint-200/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..56075ef7c10a941c1d8ddfd8b4cf58f56a2b6d9e
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/trainer_state.json
@@ -0,0 +1,1299 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.9801980198019802,
+ "eval_steps": 20,
+ "global_step": 200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 6.5e-06,
+ "loss": 2.0407,
+ "step": 1
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.3e-05,
+ "loss": 2.3107,
+ "step": 2
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.9499999999999996e-05,
+ "loss": 2.1613,
+ "step": 3
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.6e-05,
+ "loss": 2.0444,
+ "step": 4
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.2512,
+ "step": 5
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.899999999999999e-05,
+ "loss": 2.0516,
+ "step": 6
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.5499999999999995e-05,
+ "loss": 2.2006,
+ "step": 7
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 5.2e-05,
+ "loss": 2.1229,
+ "step": 8
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 5.85e-05,
+ "loss": 2.2498,
+ "step": 9
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 6.5e-05,
+ "loss": 2.0293,
+ "step": 10
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 6.499564948206075e-05,
+ "loss": 2.1316,
+ "step": 11
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 6.498259909298188e-05,
+ "loss": 2.1274,
+ "step": 12
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 6.49608523266681e-05,
+ "loss": 2.1129,
+ "step": 13
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 6.493041500525461e-05,
+ "loss": 2.1517,
+ "step": 14
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 6.489129527754836e-05,
+ "loss": 2.1078,
+ "step": 15
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 6.484350361684639e-05,
+ "loss": 2.0362,
+ "step": 16
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 6.478705281813194e-05,
+ "loss": 2.0553,
+ "step": 17
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 6.472195799464884e-05,
+ "loss": 2.1223,
+ "step": 18
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 6.464823657385538e-05,
+ "loss": 2.0326,
+ "step": 19
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 6.456590829275857e-05,
+ "loss": 2.1802,
+ "step": 20
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 2.012301445007324,
+ "eval_runtime": 1.4855,
+ "eval_samples_per_second": 5.385,
+ "eval_steps_per_second": 0.673,
+ "step": 20
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 6.447499519263001e-05,
+ "loss": 2.0665,
+ "step": 21
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 6.437552161310498e-05,
+ "loss": 2.0354,
+ "step": 22
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 6.426751418566609e-05,
+ "loss": 2.1289,
+ "step": 23
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 6.415100182651334e-05,
+ "loss": 2.0742,
+ "step": 24
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 6.402601572882268e-05,
+ "loss": 2.0412,
+ "step": 25
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 6.38925893543947e-05,
+ "loss": 2.1974,
+ "step": 26
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 6.375075842469626e-05,
+ "loss": 2.196,
+ "step": 27
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 6.360056091129678e-05,
+ "loss": 2.0775,
+ "step": 28
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 6.344203702570254e-05,
+ "loss": 2.0366,
+ "step": 29
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 6.327522920859093e-05,
+ "loss": 2.1438,
+ "step": 30
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 6.310018211844817e-05,
+ "loss": 2.0639,
+ "step": 31
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 6.291694261961308e-05,
+ "loss": 2.1794,
+ "step": 32
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 6.272555976973039e-05,
+ "loss": 2.0592,
+ "step": 33
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 6.252608480661681e-05,
+ "loss": 2.1628,
+ "step": 34
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 6.231857113454346e-05,
+ "loss": 2.1851,
+ "step": 35
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 6.21030743099382e-05,
+ "loss": 2.0531,
+ "step": 36
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 6.18796520265119e-05,
+ "loss": 2.0284,
+ "step": 37
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 6.164836409981237e-05,
+ "loss": 2.2359,
+ "step": 38
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 6.140927245121032e-05,
+ "loss": 1.8967,
+ "step": 39
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.116244109132153e-05,
+ "loss": 2.0275,
+ "step": 40
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 1.9861761331558228,
+ "eval_runtime": 1.4896,
+ "eval_samples_per_second": 5.37,
+ "eval_steps_per_second": 0.671,
+ "step": 40
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.0907936102869656e-05,
+ "loss": 2.0316,
+ "step": 41
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.0645825622994254e-05,
+ "loss": 2.0768,
+ "step": 42
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.037617982500884e-05,
+ "loss": 2.0058,
+ "step": 43
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.009907089961381e-05,
+ "loss": 2.2319,
+ "step": 44
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 5.981457303556923e-05,
+ "loss": 2.1693,
+ "step": 45
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 5.9522762399832716e-05,
+ "loss": 2.0858,
+ "step": 46
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 5.922371711716768e-05,
+ "loss": 2.0576,
+ "step": 47
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 5.891751724922749e-05,
+ "loss": 1.9946,
+ "step": 48
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 5.860424477312095e-05,
+ "loss": 2.0166,
+ "step": 49
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.828398355946514e-05,
+ "loss": 2.1482,
+ "step": 50
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.795681934993113e-05,
+ "loss": 2.0698,
+ "step": 51
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 5.7622839734288945e-05,
+ "loss": 1.9317,
+ "step": 52
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 5.728213412695761e-05,
+ "loss": 2.1399,
+ "step": 53
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 5.693479374306676e-05,
+ "loss": 2.1637,
+ "step": 54
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 5.658091157403617e-05,
+ "loss": 2.0557,
+ "step": 55
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 5.622058236267965e-05,
+ "loss": 1.926,
+ "step": 56
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 5.585390257784018e-05,
+ "loss": 2.0984,
+ "step": 57
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 5.548097038856279e-05,
+ "loss": 2.0428,
+ "step": 58
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 5.5101885637812374e-05,
+ "loss": 1.9624,
+ "step": 59
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 5.4716749815743304e-05,
+ "loss": 1.913,
+ "step": 60
+ },
+ {
+ "epoch": 0.59,
+ "eval_loss": 1.97765052318573,
+ "eval_runtime": 1.4886,
+ "eval_samples_per_second": 5.374,
+ "eval_steps_per_second": 0.672,
+ "step": 60
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 5.432566603252809e-05,
+ "loss": 2.0125,
+ "step": 61
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 5.3928738990752234e-05,
+ "loss": 2.0721,
+ "step": 62
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 5.3526074957382866e-05,
+ "loss": 2.1046,
+ "step": 63
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 5.311778173531847e-05,
+ "loss": 2.0074,
+ "step": 64
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 5.2703968634527514e-05,
+ "loss": 2.1667,
+ "step": 65
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 5.2284746442783414e-05,
+ "loss": 2.0677,
+ "step": 66
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 5.186022739600408e-05,
+ "loss": 2.1798,
+ "step": 67
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 5.143052514820357e-05,
+ "loss": 2.105,
+ "step": 68
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 5.099575474106419e-05,
+ "loss": 2.118,
+ "step": 69
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 5.055603257313707e-05,
+ "loss": 2.0791,
+ "step": 70
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 5.011147636867943e-05,
+ "loss": 2.032,
+ "step": 71
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.9662205146136955e-05,
+ "loss": 1.9331,
+ "step": 72
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.92083391862797e-05,
+ "loss": 2.0077,
+ "step": 73
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.874999999999999e-05,
+ "loss": 2.0253,
+ "step": 74
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.828731029578105e-05,
+ "loss": 1.9922,
+ "step": 75
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.7820393946844926e-05,
+ "loss": 2.0295,
+ "step": 76
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.734937595798867e-05,
+ "loss": 1.9514,
+ "step": 77
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.687438243211754e-05,
+ "loss": 2.1018,
+ "step": 78
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.639554053648416e-05,
+ "loss": 2.1032,
+ "step": 79
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.5912978468642824e-05,
+ "loss": 2.1258,
+ "step": 80
+ },
+ {
+ "epoch": 0.79,
+ "eval_loss": 1.972991704940796,
+ "eval_runtime": 1.494,
+ "eval_samples_per_second": 5.355,
+ "eval_steps_per_second": 0.669,
+ "step": 80
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.542682542212785e-05,
+ "loss": 2.0353,
+ "step": 81
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.4937211551865415e-05,
+ "loss": 1.9987,
+ "step": 82
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.444426793932787e-05,
+ "loss": 1.8856,
+ "step": 83
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.3948126557440085e-05,
+ "loss": 2.0904,
+ "step": 84
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.344892023524714e-05,
+ "loss": 2.0685,
+ "step": 85
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.2946782622352746e-05,
+ "loss": 2.1,
+ "step": 86
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.244184815313799e-05,
+ "loss": 1.9322,
+ "step": 87
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.193425201077002e-05,
+ "loss": 2.194,
+ "step": 88
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.14241300910103e-05,
+ "loss": 2.0455,
+ "step": 89
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 4.091161896583192e-05,
+ "loss": 2.1239,
+ "step": 90
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 4.0396855846856076e-05,
+ "loss": 2.142,
+ "step": 91
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.9879978548617125e-05,
+ "loss": 2.2304,
+ "step": 92
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 3.936112545166636e-05,
+ "loss": 1.925,
+ "step": 93
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.884043546552417e-05,
+ "loss": 2.1547,
+ "step": 94
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.831804799149057e-05,
+ "loss": 2.0439,
+ "step": 95
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.779410288532413e-05,
+ "loss": 1.9763,
+ "step": 96
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.726874041979925e-05,
+ "loss": 2.114,
+ "step": 97
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.674210124715168e-05,
+ "loss": 2.0293,
+ "step": 98
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.621432636142251e-05,
+ "loss": 2.0833,
+ "step": 99
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.5685557060710726e-05,
+ "loss": 2.0452,
+ "step": 100
+ },
+ {
+ "epoch": 0.99,
+ "eval_loss": 1.9682413339614868,
+ "eval_runtime": 1.4934,
+ "eval_samples_per_second": 5.357,
+ "eval_steps_per_second": 0.67,
+ "step": 100
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.5155934909344214e-05,
+ "loss": 1.9682,
+ "step": 101
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.462560169997965e-05,
+ "loss": 2.0723,
+ "step": 102
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.409469941564109e-05,
+ "loss": 2.0713,
+ "step": 103
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 3.356337019170772e-05,
+ "loss": 2.067,
+ "step": 104
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.303175627786082e-05,
+ "loss": 1.934,
+ "step": 105
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.2083,
+ "step": 106
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.1968243722139173e-05,
+ "loss": 2.045,
+ "step": 107
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 3.143662980829228e-05,
+ "loss": 1.9531,
+ "step": 108
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.090530058435891e-05,
+ "loss": 2.0913,
+ "step": 109
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.0374398300020347e-05,
+ "loss": 1.9211,
+ "step": 110
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 2.984406509065579e-05,
+ "loss": 2.0983,
+ "step": 111
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 2.9314442939289275e-05,
+ "loss": 2.1621,
+ "step": 112
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 2.8785673638577486e-05,
+ "loss": 2.1728,
+ "step": 113
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 2.825789875284833e-05,
+ "loss": 2.0512,
+ "step": 114
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 2.773125958020074e-05,
+ "loss": 2.0462,
+ "step": 115
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 2.7205897114675866e-05,
+ "loss": 1.9357,
+ "step": 116
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 2.668195200850944e-05,
+ "loss": 1.8384,
+ "step": 117
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 2.6159564534475832e-05,
+ "loss": 1.9649,
+ "step": 118
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 2.563887454833363e-05,
+ "loss": 2.0262,
+ "step": 119
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 2.5120021451382872e-05,
+ "loss": 2.1315,
+ "step": 120
+ },
+ {
+ "epoch": 1.19,
+ "eval_loss": 1.9639294147491455,
+ "eval_runtime": 1.4959,
+ "eval_samples_per_second": 5.348,
+ "eval_steps_per_second": 0.668,
+ "step": 120
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 2.4603144153143925e-05,
+ "loss": 1.9284,
+ "step": 121
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 2.4088381034168072e-05,
+ "loss": 2.0671,
+ "step": 122
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 2.3575869908989705e-05,
+ "loss": 2.0938,
+ "step": 123
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 2.306574798922998e-05,
+ "loss": 1.9189,
+ "step": 124
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 2.2558151846862005e-05,
+ "loss": 2.0635,
+ "step": 125
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 2.2053217377647255e-05,
+ "loss": 2.1367,
+ "step": 126
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 2.1551079764752848e-05,
+ "loss": 1.9967,
+ "step": 127
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 2.105187344255991e-05,
+ "loss": 2.1994,
+ "step": 128
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 2.0555732060672138e-05,
+ "loss": 2.151,
+ "step": 129
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 2.0062788448134583e-05,
+ "loss": 2.0517,
+ "step": 130
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 1.957317457787214e-05,
+ "loss": 2.0584,
+ "step": 131
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 1.908702153135717e-05,
+ "loss": 2.0868,
+ "step": 132
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 1.860445946351584e-05,
+ "loss": 1.9646,
+ "step": 133
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 1.8125617567882463e-05,
+ "loss": 1.986,
+ "step": 134
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 1.7650624042011325e-05,
+ "loss": 1.9015,
+ "step": 135
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 1.7179606053155072e-05,
+ "loss": 2.1968,
+ "step": 136
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 1.6712689704218944e-05,
+ "loss": 1.9268,
+ "step": 137
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 1.6250000000000005e-05,
+ "loss": 2.0205,
+ "step": 138
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 1.5791660813720294e-05,
+ "loss": 2.1183,
+ "step": 139
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 1.533779485386304e-05,
+ "loss": 2.08,
+ "step": 140
+ },
+ {
+ "epoch": 1.39,
+ "eval_loss": 1.9603089094161987,
+ "eval_runtime": 1.495,
+ "eval_samples_per_second": 5.351,
+ "eval_steps_per_second": 0.669,
+ "step": 140
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 1.4888523631320579e-05,
+ "loss": 1.9309,
+ "step": 141
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 1.4443967426862935e-05,
+ "loss": 2.0579,
+ "step": 142
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 1.4004245258935799e-05,
+ "loss": 1.8377,
+ "step": 143
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 1.3569474851796432e-05,
+ "loss": 2.0764,
+ "step": 144
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 1.3139772603995914e-05,
+ "loss": 2.1289,
+ "step": 145
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 1.2715253557216577e-05,
+ "loss": 2.041,
+ "step": 146
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 1.2296031365472491e-05,
+ "loss": 1.9343,
+ "step": 147
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 1.1882218264681525e-05,
+ "loss": 1.9665,
+ "step": 148
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 1.1473925042617137e-05,
+ "loss": 2.1843,
+ "step": 149
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 1.1071261009247762e-05,
+ "loss": 2.0125,
+ "step": 150
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 1.06743339674719e-05,
+ "loss": 2.1792,
+ "step": 151
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 1.028325018425668e-05,
+ "loss": 1.9815,
+ "step": 152
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 9.898114362187623e-06,
+ "loss": 1.9769,
+ "step": 153
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 9.519029611437207e-06,
+ "loss": 1.9301,
+ "step": 154
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 9.146097422159812e-06,
+ "loss": 1.979,
+ "step": 155
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 8.77941763732035e-06,
+ "loss": 2.0074,
+ "step": 156
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 8.419088425963836e-06,
+ "loss": 2.2051,
+ "step": 157
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 8.065206256933238e-06,
+ "loss": 2.0702,
+ "step": 158
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 7.717865873042387e-06,
+ "loss": 1.9569,
+ "step": 159
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 7.377160265711047e-06,
+ "loss": 2.0747,
+ "step": 160
+ },
+ {
+ "epoch": 1.58,
+ "eval_loss": 1.9598100185394287,
+ "eval_runtime": 1.5252,
+ "eval_samples_per_second": 5.245,
+ "eval_steps_per_second": 0.656,
+ "step": 160
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 7.043180650068862e-06,
+ "loss": 2.0023,
+ "step": 161
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 6.71601644053486e-06,
+ "loss": 1.8974,
+ "step": 162
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 6.395755226879042e-06,
+ "loss": 2.0356,
+ "step": 163
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 6.082482750772508e-06,
+ "loss": 2.0573,
+ "step": 164
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 5.776282882832316e-06,
+ "loss": 2.188,
+ "step": 165
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 5.477237600167276e-06,
+ "loss": 1.9582,
+ "step": 166
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 5.185426964430758e-06,
+ "loss": 1.9744,
+ "step": 167
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 4.9009291003861895e-06,
+ "loss": 1.9664,
+ "step": 168
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 4.623820174991159e-06,
+ "loss": 1.9808,
+ "step": 169
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 4.3541743770057415e-06,
+ "loss": 2.0246,
+ "step": 170
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 4.092063897130342e-06,
+ "loss": 2.0081,
+ "step": 171
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 3.837558908678464e-06,
+ "loss": 2.0681,
+ "step": 172
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 3.590727548789681e-06,
+ "loss": 1.9028,
+ "step": 173
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 3.3516359001876314e-06,
+ "loss": 2.0582,
+ "step": 174
+ },
+ {
+ "epoch": 1.73,
+ "learning_rate": 3.120347973488091e-06,
+ "loss": 2.0035,
+ "step": 175
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 2.896925690061783e-06,
+ "loss": 1.8968,
+ "step": 176
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 2.6814288654565337e-06,
+ "loss": 2.103,
+ "step": 177
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 2.473915193383181e-06,
+ "loss": 1.9192,
+ "step": 178
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 2.2744402302696074e-06,
+ "loss": 2.0644,
+ "step": 179
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 2.08305738038692e-06,
+ "loss": 2.0469,
+ "step": 180
+ },
+ {
+ "epoch": 1.78,
+ "eval_loss": 1.958908200263977,
+ "eval_runtime": 1.5285,
+ "eval_samples_per_second": 5.234,
+ "eval_steps_per_second": 0.654,
+ "step": 180
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 1.8998178815518273e-06,
+ "loss": 2.0494,
+ "step": 181
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 1.7247707914090686e-06,
+ "loss": 2.0655,
+ "step": 182
+ },
+ {
+ "epoch": 1.81,
+ "learning_rate": 1.557962974297461e-06,
+ "loss": 2.1354,
+ "step": 183
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 1.3994390887032131e-06,
+ "loss": 2.1334,
+ "step": 184
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 1.2492415753037343e-06,
+ "loss": 1.9995,
+ "step": 185
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.1074106456052833e-06,
+ "loss": 2.0004,
+ "step": 186
+ },
+ {
+ "epoch": 1.85,
+ "learning_rate": 9.739842711773208e-07,
+ "loss": 1.9561,
+ "step": 187
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 8.489981734866568e-07,
+ "loss": 2.0507,
+ "step": 188
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 7.324858143339169e-07,
+ "loss": 2.0923,
+ "step": 189
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 6.24478386895011e-07,
+ "loss": 1.9369,
+ "step": 190
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 5.250048073699814e-07,
+ "loss": 2.0687,
+ "step": 191
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 4.3409170724143323e-07,
+ "loss": 1.9242,
+ "step": 192
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 3.51763426144617e-07,
+ "loss": 1.9617,
+ "step": 193
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 2.7804200535116254e-07,
+ "loss": 1.9776,
+ "step": 194
+ },
+ {
+ "epoch": 1.93,
+ "learning_rate": 2.1294718186805808e-07,
+ "loss": 2.0486,
+ "step": 195
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.564963831536034e-07,
+ "loss": 1.8825,
+ "step": 196
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.0870472245163964e-07,
+ "loss": 2.0531,
+ "step": 197
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 6.958499474538692e-08,
+ "loss": 2.0535,
+ "step": 198
+ },
+ {
+ "epoch": 1.97,
+ "learning_rate": 3.9147673331896836e-08,
+ "loss": 2.068,
+ "step": 199
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 1.740090701811525e-08,
+ "loss": 2.0969,
+ "step": 200
+ },
+ {
+ "epoch": 1.98,
+ "eval_loss": 1.9594972133636475,
+ "eval_runtime": 1.4959,
+ "eval_samples_per_second": 5.348,
+ "eval_steps_per_second": 0.668,
+ "step": 200
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 202,
+ "num_train_epochs": 2,
+ "save_steps": 50,
+ "total_flos": 1.6713924878742323e+17,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/multi-prompt/twochar/checkpoint-200/training_args.bin b/multi-prompt/twochar/checkpoint-200/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..47d6200b8d4047ff364e6b15cf014dde61f2166b
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-200/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac3fb199292750362f1e387a5d8bde7088e81478bbe173194e512403378f1c70
+size 4475
diff --git a/multi-prompt/twochar/checkpoint-50/README.md b/multi-prompt/twochar/checkpoint-50/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/multi-prompt/twochar/checkpoint-50/adapter_config.json b/multi-prompt/twochar/checkpoint-50/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b33f8685004fc9f9eef2843be41eb6081508fc38
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "k_proj",
+ "gate_proj",
+ "o_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/multi-prompt/twochar/checkpoint-50/adapter_model.bin b/multi-prompt/twochar/checkpoint-50/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f26f32c7a8c453fe666d94983d7c87659ca0240
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff9f2ca982d4c94bf1984cebd603a043e51c0688b2c7ebbee14d46cf8dbcdcfe
+size 125374989
diff --git a/multi-prompt/twochar/checkpoint-50/adapter_model/README.md b/multi-prompt/twochar/checkpoint-50/adapter_model/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e3637f645b79c1dff559d466047b102e3892f5d
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/adapter_model/README.md
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
diff --git a/multi-prompt/twochar/checkpoint-50/adapter_model/adapter_config.json b/multi-prompt/twochar/checkpoint-50/adapter_model/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b33f8685004fc9f9eef2843be41eb6081508fc38
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/adapter_model/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "k_proj",
+ "gate_proj",
+ "o_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/multi-prompt/twochar/checkpoint-50/adapter_model/adapter_model.bin b/multi-prompt/twochar/checkpoint-50/adapter_model/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f26f32c7a8c453fe666d94983d7c87659ca0240
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/adapter_model/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff9f2ca982d4c94bf1984cebd603a043e51c0688b2c7ebbee14d46cf8dbcdcfe
+size 125374989
diff --git a/multi-prompt/twochar/checkpoint-50/optimizer.pt b/multi-prompt/twochar/checkpoint-50/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a8b935ad800ec9e7860de2a93f2c0ce470e20c71
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97bdb6008d49ae7a5e669f8a5688e4d26503776faaa8f016deecbb48fbd9e87c
+size 250681597
diff --git a/multi-prompt/twochar/checkpoint-50/rng_state_0.pth b/multi-prompt/twochar/checkpoint-50/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..72b662466f58c7db850cfef57f82a2ec023746e4
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:378041dd0edab043964c17a1498bb1e053b53be09587a86c91983bfd003b402b
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-50/rng_state_1.pth b/multi-prompt/twochar/checkpoint-50/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..29f0804883c5a894698a7fc435db06826bfcd53c
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8625521971de986a3fe9a83426bc9be410a25486ff090d580bb201b91ac627
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-50/rng_state_2.pth b/multi-prompt/twochar/checkpoint-50/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..020aa958562074fb02dfd261fb7ab17ef540abf4
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1b9c9c8dc3258484444dd2bf445247a35bc336f53a47204202a54fb6afbdc8d
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-50/rng_state_3.pth b/multi-prompt/twochar/checkpoint-50/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a9b062ffdf8152b11bab4c9b856a417b48a99b10
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b94926ac56f32be2195666cf81c1c71845d6d44a1679442e46d2526336a6140
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-50/rng_state_4.pth b/multi-prompt/twochar/checkpoint-50/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..287217400ace09086b92ee95413967f4343b2498
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c78c64ffa806625bf637e6aa6b7774f099fb4e67041e78daaa625279d2694463
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-50/rng_state_5.pth b/multi-prompt/twochar/checkpoint-50/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..09c5fad4fd1b0e65f746fd42f0061479f2d56c11
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76cb87e9b3de42221799a59f9f5e4a326327de746c8b9819ceaeea29b729fcb4
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-50/rng_state_6.pth b/multi-prompt/twochar/checkpoint-50/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..33b3442d82a5cce1b337a09cd59acf7ff173f268
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67f914005f2340a9fbb9ec4f599910b12669db318a4462202dd7dd60e50b2ca8
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-50/rng_state_7.pth b/multi-prompt/twochar/checkpoint-50/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ab863846afd96aa648917bcd70edac42a158c90e
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7f956d6c91aadf25a203f3b37e60d03f812c2e0cf7591573e4c904f3da82f7d
+size 21687
diff --git a/multi-prompt/twochar/checkpoint-50/scheduler.pt b/multi-prompt/twochar/checkpoint-50/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..052f2ada5cd173a143a172c13df4ada4c11c059a
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e54688b73ae6a59a6bcf42b1a01810dc387a75e410ba415dabec8c9a74ae5f2
+size 627
diff --git a/multi-prompt/twochar/checkpoint-50/trainer_state.json b/multi-prompt/twochar/checkpoint-50/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..14281618ea239e82104de28bb3d6b1bcb49c1bdf
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/trainer_state.json
@@ -0,0 +1,335 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.49504950495049505,
+ "eval_steps": 20,
+ "global_step": 50,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 6.5e-06,
+ "loss": 2.0407,
+ "step": 1
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.3e-05,
+ "loss": 2.3107,
+ "step": 2
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.9499999999999996e-05,
+ "loss": 2.1613,
+ "step": 3
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.6e-05,
+ "loss": 2.0444,
+ "step": 4
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 3.25e-05,
+ "loss": 2.2512,
+ "step": 5
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.899999999999999e-05,
+ "loss": 2.0516,
+ "step": 6
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.5499999999999995e-05,
+ "loss": 2.2006,
+ "step": 7
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 5.2e-05,
+ "loss": 2.1229,
+ "step": 8
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 5.85e-05,
+ "loss": 2.2498,
+ "step": 9
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 6.5e-05,
+ "loss": 2.0293,
+ "step": 10
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 6.499564948206075e-05,
+ "loss": 2.1316,
+ "step": 11
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 6.498259909298188e-05,
+ "loss": 2.1274,
+ "step": 12
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 6.49608523266681e-05,
+ "loss": 2.1129,
+ "step": 13
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 6.493041500525461e-05,
+ "loss": 2.1517,
+ "step": 14
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 6.489129527754836e-05,
+ "loss": 2.1078,
+ "step": 15
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 6.484350361684639e-05,
+ "loss": 2.0362,
+ "step": 16
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 6.478705281813194e-05,
+ "loss": 2.0553,
+ "step": 17
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 6.472195799464884e-05,
+ "loss": 2.1223,
+ "step": 18
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 6.464823657385538e-05,
+ "loss": 2.0326,
+ "step": 19
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 6.456590829275857e-05,
+ "loss": 2.1802,
+ "step": 20
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 2.012301445007324,
+ "eval_runtime": 1.4855,
+ "eval_samples_per_second": 5.385,
+ "eval_steps_per_second": 0.673,
+ "step": 20
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 6.447499519263001e-05,
+ "loss": 2.0665,
+ "step": 21
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 6.437552161310498e-05,
+ "loss": 2.0354,
+ "step": 22
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 6.426751418566609e-05,
+ "loss": 2.1289,
+ "step": 23
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 6.415100182651334e-05,
+ "loss": 2.0742,
+ "step": 24
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 6.402601572882268e-05,
+ "loss": 2.0412,
+ "step": 25
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 6.38925893543947e-05,
+ "loss": 2.1974,
+ "step": 26
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 6.375075842469626e-05,
+ "loss": 2.196,
+ "step": 27
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 6.360056091129678e-05,
+ "loss": 2.0775,
+ "step": 28
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 6.344203702570254e-05,
+ "loss": 2.0366,
+ "step": 29
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 6.327522920859093e-05,
+ "loss": 2.1438,
+ "step": 30
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 6.310018211844817e-05,
+ "loss": 2.0639,
+ "step": 31
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 6.291694261961308e-05,
+ "loss": 2.1794,
+ "step": 32
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 6.272555976973039e-05,
+ "loss": 2.0592,
+ "step": 33
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 6.252608480661681e-05,
+ "loss": 2.1628,
+ "step": 34
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 6.231857113454346e-05,
+ "loss": 2.1851,
+ "step": 35
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 6.21030743099382e-05,
+ "loss": 2.0531,
+ "step": 36
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 6.18796520265119e-05,
+ "loss": 2.0284,
+ "step": 37
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 6.164836409981237e-05,
+ "loss": 2.2359,
+ "step": 38
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 6.140927245121032e-05,
+ "loss": 1.8967,
+ "step": 39
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.116244109132153e-05,
+ "loss": 2.0275,
+ "step": 40
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 1.9861761331558228,
+ "eval_runtime": 1.4896,
+ "eval_samples_per_second": 5.37,
+ "eval_steps_per_second": 0.671,
+ "step": 40
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.0907936102869656e-05,
+ "loss": 2.0316,
+ "step": 41
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.0645825622994254e-05,
+ "loss": 2.0768,
+ "step": 42
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.037617982500884e-05,
+ "loss": 2.0058,
+ "step": 43
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.009907089961381e-05,
+ "loss": 2.2319,
+ "step": 44
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 5.981457303556923e-05,
+ "loss": 2.1693,
+ "step": 45
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 5.9522762399832716e-05,
+ "loss": 2.0858,
+ "step": 46
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 5.922371711716768e-05,
+ "loss": 2.0576,
+ "step": 47
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 5.891751724922749e-05,
+ "loss": 1.9946,
+ "step": 48
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 5.860424477312095e-05,
+ "loss": 2.0166,
+ "step": 49
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 5.828398355946514e-05,
+ "loss": 2.1482,
+ "step": 50
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 202,
+ "num_train_epochs": 2,
+ "save_steps": 50,
+ "total_flos": 4.187835255803085e+16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/multi-prompt/twochar/checkpoint-50/training_args.bin b/multi-prompt/twochar/checkpoint-50/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..47d6200b8d4047ff364e6b15cf014dde61f2166b
--- /dev/null
+++ b/multi-prompt/twochar/checkpoint-50/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac3fb199292750362f1e387a5d8bde7088e81478bbe173194e512403378f1c70
+size 4475
diff --git a/multi-prompt/twochar/config.json b/multi-prompt/twochar/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..99be7fbf147c96ef93ed720729ea90e8e21bdfca
--- /dev/null
+++ b/multi-prompt/twochar/config.json
@@ -0,0 +1,38 @@
+{
+ "_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "max_position_embeddings": 4096,
+ "model_type": "llama",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 40,
+ "pretraining_tp": 1,
+ "quantization_config": {
+ "bnb_4bit_compute_dtype": "bfloat16",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": true,
+ "llm_int8_enable_fp32_cpu_offload": false,
+ "llm_int8_has_fp16_weight": false,
+ "llm_int8_skip_modules": null,
+ "llm_int8_threshold": 6.0,
+ "load_in_4bit": true,
+ "load_in_8bit": false,
+ "quant_method": "bitsandbytes"
+ },
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float16",
+ "transformers_version": "4.34.0.dev0",
+ "use_cache": false,
+ "vocab_size": 32001
+}
diff --git a/multi-prompt/twochar/special_tokens_map.json b/multi-prompt/twochar/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..3f58a5e115855c6ea3cec98accae196ad927222e
--- /dev/null
+++ b/multi-prompt/twochar/special_tokens_map.json
@@ -0,0 +1,6 @@
+{
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "[PAD]",
+ "unk_token": "<unk>"
+}
diff --git a/multi-prompt/twochar/tokenizer.model b/multi-prompt/twochar/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/multi-prompt/twochar/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/multi-prompt/twochar/tokenizer_config.json b/multi-prompt/twochar/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..77849a9a3ca128f00bc3febdc9b39391a4ef43a3
--- /dev/null
+++ b/multi-prompt/twochar/tokenizer_config.json
@@ -0,0 +1,39 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "trust_remote_code": false,
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "use_default_system_prompt": true,
+ "use_fast": true
+}