#!/bin/bash
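
# Pretrain a Persian GPT-2 (medium) causal language model on the OSCAR corpus
# using the HuggingFace Flax example script (src/run_clm_flax.py).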
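
# Use a UTF-8 locale so Persian text is decoded correctly.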
export LC_ALL=C.UTF-8
export LANG=C.UTF-8
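
# Output location, model type, and the local config/tokenizer for gpt2-medium-persian.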
export OUTPUT_DIR=/home/saied/code/gpt2-medium-persian
export MODEL_TYPE=gpt2
export CONFIG_NAME=/home/saied/code/gpt2-medium-persian
export TOKENIZER_NAME=/home/saied/code/gpt2-medium-persian
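
# Dataset: the deduplicated Persian split of OSCAR, chunked into 512-token blocks.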
export DATASET_NAME=oscar
export DATASET_CONFIG_NAME=unshuffled_deduplicated_fa
export MAX_SEQUENCE_LENGTH=512
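
# Training hyperparameters.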
export PER_DEVICE_TRAIN_BATCH_SIZE=16
export PER_DEVICE_EVAL_BATCH_SIZE=16
export NUM_TRAIN_EPOCHS=10.0
export LEARNING_RATE=1e-3
export WARMUP_STEPS=5000
export LOGGING_STEPS=500
export EVAL_STEPS=2500
export SAVE_STEPS=2500
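
# Launch training and evaluation, checkpointing every $SAVE_STEPS steps and
# pushing results to the Hugging Face Hub.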
python src/run_clm_flax.py \
    --output_dir="$OUTPUT_DIR" \
    --model_type="$MODEL_TYPE" \
    --config_name="$CONFIG_NAME" \
    --tokenizer_name="$TOKENIZER_NAME" \
    --dataset_name="$DATASET_NAME" \
    --dataset_config_name="$DATASET_CONFIG_NAME" \
    --block_size="$MAX_SEQUENCE_LENGTH" \
    --per_device_train_batch_size="$PER_DEVICE_TRAIN_BATCH_SIZE" \
    --per_device_eval_batch_size="$PER_DEVICE_EVAL_BATCH_SIZE" \
    --num_train_epochs="$NUM_TRAIN_EPOCHS" \
    --learning_rate="$LEARNING_RATE" \
    --warmup_steps="$WARMUP_STEPS" \
    --logging_steps="$LOGGING_STEPS" \
    --eval_steps="$EVAL_STEPS" \
    --save_steps="$SAVE_STEPS" \
    --do_train \
    --do_eval \
    --overwrite_output_dir \
    --push_to_hub