LLaMA-Factory fine-tuning 4 (Mixtral fine-tuning)
introduction
This post records the commands used with LLaMA-Factory's src/train_bash.py to run LoRA-based supervised fine-tuning (SFT) of Mistral-7B-v0.1 (8-bit quantization, single GPU) and Mixtral-8x7B-v0.1 (4-bit quantization, launched with accelerate) on the alpaca_med_cqa_en dataset.
fine-tuning
command
Mistral
CUDA_VISIBLE_DEVICES=1 nohup python src/train_bash.py \
--stage sft \
--do_train \
--model_name_or_path mistralai/Mistral-7B-v0.1 \
--dataset alpaca_med_cqa_en \
--template mistral \
--quantization_bit 8 \
--lora_target q_proj,v_proj \
--output_dir ../FINE/mistral-alpaca_med_cqa_en \
--overwrite_cache \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16 \
>> ./logs/mistral-alpaca_med_cqa_en.log 2>&1 &
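Both this command and the Mixtral one below assume that alpaca_med_cqa_en has already been registered in LLaMA-Factory's data/dataset_info.json. The entry below is only a minimal sketch: the file name and the column mapping are assumptions for an alpaca-format medical QA file, so adjust them to whatever your actual data uses.

"alpaca_med_cqa_en": {
  "file_name": "alpaca_med_cqa_en.json",
  "columns": {
    "prompt": "instruction",
    "query": "input",
    "response": "output"
  }
}

With per_device_train_batch_size 4 and gradient_accumulation_steps 4, the effective batch size on the single GPU is 4 × 4 = 16. Training progress can be followed with tail -f ./logs/mistral-alpaca_med_cqa_en.log, and --plot_loss writes a loss curve into the output directory when training finishes.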
Mixtral
nohup accelerate launch src/train_bash.py \
--stage sft \
--do_train \
--model_name_or_path mistralai/Mixtral-8x7B-v0.1 \
--dataset alpaca_med_cqa_en \
--template default \
--quantization_bit 4 \
--lora_target q_proj,v_proj \
--output_dir ../FINE/mixtral-8x7-alpaca_med_cqa_en \
--overwrite_cache \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
>> ./logs/mixtral-alpaca_med_cqa_en.log 2>&1 &
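Because the Mixtral run is started through accelerate launch, it also depends on an accelerate configuration, normally created interactively with accelerate config. The YAML below is a minimal single-machine, multi-GPU sketch; the number of processes and the fp16 setting are assumptions, not values taken from the original run.

# minimal single-machine multi-GPU sketch (assumed values, adjust to your hardware)
compute_environment: LOCAL_MACHINE
distributed_type: MULTI_GPU
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2  # assumed: set to the number of GPUs you actually use
use_cpu: false

Under these assumptions the effective global batch size would be per_device_train_batch_size × gradient_accumulation_steps × num_processes = 4 × 4 × 2 = 32.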