  0%|          | 0/2 [00:00<?, ?it/s]
Traceback (most recent call last):
    train()
  File "/home/sagemaker-user/output-7b-26k-lora/../lora_finetuning_push_to_hub_save_local.py", line 223, in train
    trainer.train()
  File "/opt/conda/lib/python3.10/site-packages/transformers/trainer.py", line 1539, in train
    return inner_training_loop(
  File "/opt/conda/lib/python3.10/site-packages/transformers/trainer.py", line 1809, in _inner_training_loop
    tr_loss_step = self.training_step(model, inputs)
  File "/opt/conda/lib/python3.10/site-packages/transformers/trainer.py", line 2665, in training_step
    self.accelerator.backward(loss)
  File "/opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py", line 1853, in backward
    loss.backward(**kwargs)
  File "/opt/conda/lib/python3.10/site-packages/torch/_tensor.py", line 487, in backward
    torch.autograd.backward(
  File "/opt/conda/lib/python3.10/site-packages/torch/autograd/__init__.py", line 200, in backward
    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
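
For context, the RuntimeError at the bottom of the stack is the generic message PyTorch raises when .backward() is called on a tensor that is not attached to the autograd graph, i.e. the loss has no grad_fn because nothing in the forward pass required gradients. A minimal standalone sketch that reproduces the same message, independent of the Trainer/LoRA script above (this only illustrates the error, it does not diagnose the specific cause in this run):

import torch

# A tensor created directly (rather than computed from parameters with
# requires_grad=True) has no grad_fn, so autograd has nothing to backpropagate.
loss = torch.tensor(1.0)   # requires_grad defaults to False
print(loss.requires_grad)  # False
loss.backward()            # RuntimeError: element 0 of tensors does not require
                           # grad and does not have a grad_fn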