# Train on all 8 GPUs; the base model and the super tokenizer are both
# initialized from Llama-2-7B-chat.
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

model_name_or_path="meta-llama/Llama-2-7b-chat-hf"
super_tokenizer_name_or_path="meta-llama/Llama-2-7b-chat-hf"
output_dir="data/outputs/90k_0111+8_30k"

mkdir -p ${output_dir}

# Launch distributed training with DeepSpeed (ZeRO stage 1). The super
# tokenizer is configured with 8 hidden layers. Training stops at 30k steps
# (--max_steps takes precedence over --num_train_epochs in HF-style trainers)
# and a single checkpoint is saved at the end; stdout is mirrored to
# ${output_dir}/train.log via tee.
deepspeed --master_port 12345 --module main.train \
    --model_name_or_path ${model_name_or_path} \
    --super_tokenizer_name_or_path ${super_tokenizer_name_or_path} \
    --super_tokenizer_num_hidden_layers 8 \
    --dataset_list "redpajama_90k_0111" \
    --output_dir ${output_dir} \
    --learning_rate 5e-5 \
    --num_train_epochs 6 \
    --per_device_train_batch_size 1 \
    --max_steps 30000 \
    --logging_strategy "steps" \
    --logging_steps 50 \
    --save_strategy "steps" \
    --save_steps 30000 \
    --gradient_checkpointing \
    --deepspeed "data/ds_config/ds_config_stage1.json" \
    | tee "${output_dir}/train.log"