med-cqa llama-factory fine-tuning
command
In the commands below, I vary the instruction prompt, the input format (question only vs. question plus the answer options), and the output format; each variant's data construction is shown as a comment above its command.
# original prompt + question_input + true_option_output
# data_to_save = [{
# "instruction": "Assuming you are a doctor, answer questions based on the patient's symptoms.",
# "input": item['question'],
# "output": (item['opa'] if item['cop'] == 0 else
# item['opb'] if item['cop'] == 1 else
# item['opc'] if item['cop'] == 2 else
# item['opd'] if item['cop'] == 3 else "No valid answer found")
# } for item in data]
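For reference, a runnable version of this conversion might look like the sketch below. It assumes the raw MedMCQA-style JSON exposes the fields question, opa–opd, and cop (the 0–3 index of the correct option); the file names are placeholders, and the with_options flag reproduces the question-plus-options input format used further down.
import json

def cop_to_answer(item):
    # Map the correct-option index (cop: 0-3) to its option text.
    options = [item['opa'], item['opb'], item['opc'], item['opd']]
    return options[item['cop']] if 0 <= item['cop'] <= 3 else "No valid answer found"

def to_alpaca(data, instruction, with_options=False):
    # Convert MedMCQA-style records into LLaMA-Factory's Alpaca format.
    records = []
    for item in data:
        inp = item['question']
        if with_options:
            inp += (' \n ' + 'select from the following option.' + ' \n ' +
                    'A. {}, B. {}, C. {}, D. {}'.format(
                        item['opa'], item['opb'], item['opc'], item['opd']))
        records.append({'instruction': instruction,
                        'input': inp,
                        'output': cop_to_answer(item)})
    return records

with open('med_cqa_train.json') as f:  # placeholder input path
    data = json.load(f)
with open('alpaca_med_cqa_opn_en.json', 'w') as f:
    json.dump(to_alpaca(data, "Assuming you are a doctor, answer questions "
                              "based on the patient's symptoms."), f, indent=2)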
CUDA_VISIBLE_DEVICES=1 python src/train_bash.py \
--stage sft \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--do_train \
--dataset alpaca_med_cqa_opn_en \
--template llama2 \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--output_dir ./FINE/llama2-7b-med_cqa_opn_single \
--overwrite_cache \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
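Note that every name passed to --dataset has to be registered in LLaMA-Factory's data/dataset_info.json before training. A minimal sketch of adding the first dataset's entry programmatically (the file name and the run-from-repo-root assumption are mine):
import json

# Assumes this runs from the LLaMA-Factory repo root and that the converted
# file has been copied into its data/ directory.
with open('data/dataset_info.json') as f:
    info = json.load(f)

info['alpaca_med_cqa_opn_en'] = {
    'file_name': 'alpaca_med_cqa_opn_en.json',
    'columns': {'prompt': 'instruction', 'query': 'input', 'response': 'output'},
}

with open('data/dataset_info.json', 'w') as f:
    json.dump(info, f, indent=2, ensure_ascii=False)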
# original prompt + question+prompt+options input + true_option_output
# data_to_save = [{
# "instruction": "Assuming you are a doctor, answer questions based on the patient's symptoms.",
# "input": item['question']+' \n '+ "select from the following option."+' \n '+ "A. {}, B. {}, C. {}, D. {}".format(item['opa'], item['opb'], item['opc'], item['opd']),
# "output": (item['opa'] if item['cop'] == 0 else
# item['opb'] if item['cop'] == 1 else
# item['opc'] if item['cop'] == 2 else
# item['opd'] if item['cop'] == 3 else "No valid answer found")
# } for item in data]
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--do_train \
--dataset alpca_med_cqa_in_modify_ou_T_option_en \
--template llama2 \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--output_dir ./FINE/llama2-7b-med_cqa_in_modify_ou_T_option_single \
--overwrite_cache \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
# modified prompt -- question input -- true output
# data_to_save = [{
# "instruction": "This is real-world medical entrance exam questions, please give the true answer based on the question and selection.",
# "input": item['question'],
# "output": (item['opa'] if item['cop'] == 0 else
# item['opb'] if item['cop'] == 1 else
# item['opc'] if item['cop'] == 2 else
# item['opd'] if item['cop'] == 3 else "No valid answer found")
# } for item in data]
CUDA_VISIBLE_DEVICES=2 python src/train_bash.py \
--stage sft \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--do_train \
--dataset alpca_med_cqa_modi_prompt_q_T_out_en \
--template llama2 \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--output_dir ./FINE/llama2-7b-med_cqa_modi_prompt_q_T_out_single \
--overwrite_cache \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
# modified prompt -- question+prompt+options input -- true output
# data_to_save = [{
# "instruction": "This is real-world medical entrance exam question, please give the true answer based on the question and selection.",
# "input": item['question']+' \n '+ "select from the following option."+' \n '+ "A. {}, B. {}, C. {}, D. {}".format(item['opa'], item['opb'], item['opc'], item['opd']),
# "output": (item['opa'] if item['cop'] == 0 else
# item['opb'] if item['cop'] == 1 else
# item['opc'] if item['cop'] == 2 else
# item['opd'] if item['cop'] == 3 else "No valid answer found")
# } for item in data]
CUDA_VISIBLE_DEVICES=3 python src/train_bash.py \
--stage sft \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--do_train \
--dataset alpca_med_cqa_modi_prompt_in_modify_ou_T_option_en \
--template llama2 \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--output_dir ./FINE/llama2-7b-med_cqa_modi_prompt_in_modify_ou_T_option_single \
--overwrite_cache \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
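Before merging, a checkpoint can be smoke-tested directly against the base model with transformers + peft; the checkpoint step and the hand-rolled Llama-2-style prompt below are illustrative.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = 'meta-llama/Llama-2-7b-hf'
adapter = './FINE/llama2-7b-med_cqa_opn_single/checkpoint-1000'  # any saved step

tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.float16,
                                             device_map='auto')
model = PeftModel.from_pretrained(model, adapter)  # attach the LoRA weights

# Approximation of the chat format applied by --template llama2.
prompt = ("[INST] <<SYS>>\nAssuming you are a doctor, answer questions based on "
          "the patient's symptoms.\n<</SYS>>\n\n"
          "Which vitamin deficiency causes scurvy? [/INST]")
inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
out = model.generate(**inputs, max_new_tokens=64, do_sample=False)
print(tokenizer.decode(out[0][inputs['input_ids'].shape[1]:],
                       skip_special_tokens=True))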
export models
python src/export_model.py \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--adapter_name_or_path ./FINE/llama2-7b-med_cqa_opn_single/checkpoint-137000 \
--template llama2 \
--finetuning_type lora \
--export_dir ./FINE_EXPORT/fine_137000_llama2-7b-med_cqa_opn \
--export_size 2 \
--export_legacy_format False
python src/export_model.py \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--adapter_name_or_path ./FINE/llama2-7b-med_cqa_modi_prompt_q_T_out_single/checkpoint-137000 \
--template llama2 \
--finetuning_type lora \
--export_dir ./FINE_EXPORT/fine_137000_llama2-7b-med_cqa_modi_prompt_q_T_out \
--export_size 2 \
--export_legacy_format False
python src/export_model.py \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--adapter_name_or_path ./FINE/llama2-7b-med_cqa_modi_prompt_in_modify_ou_T_option_single/checkpoint-137000 \
--template llama2 \
--finetuning_type lora \
--export_dir ./FINE_EXPORT/fine_137000_llama2-7b-med_cqa_modi_prompt_in_modify_ou_T_option \
--export_size 2 \
--export_legacy_format False
python src/export_model.py \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--adapter_name_or_path ./FINE/llama2-7b-med_cqa_in_modify_ou_T_option_single/checkpoint-137000 \
--template llama2 \
--finetuning_type lora \
--export_dir ./FINE_EXPORT/fine_137000_llama2-7b-med_cqa_in_modify_ou_T_option \
--export_size 2 \
--export_legacy_format False
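With a merged export, a rough accuracy check on a held-out split is straightforward: generate an answer per question and test whether the gold option text appears in it. A sketch; the dev-file path, prompt format, and substring-match heuristic are all assumptions.
import json
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

export_dir = './FINE_EXPORT/fine_137000_llama2-7b-med_cqa_opn'
tokenizer = AutoTokenizer.from_pretrained(export_dir)
model = AutoModelForCausalLM.from_pretrained(export_dir, torch_dtype=torch.float16,
                                             device_map='auto')

with open('med_cqa_dev.json') as f:  # hypothetical held-out split
    dev = json.load(f)

correct = 0
for item in dev:
    gold = [item['opa'], item['opb'], item['opc'], item['opd']][item['cop']]
    prompt = ("[INST] Assuming you are a doctor, answer questions based on the "
              "patient's symptoms.\n" + item['question'] + " [/INST]")
    inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
    out = model.generate(**inputs, max_new_tokens=64, do_sample=False)
    pred = tokenizer.decode(out[0][inputs['input_ids'].shape[1]:],
                            skip_special_tokens=True)
    correct += int(gold.lower() in pred.lower())  # crude substring match

print(f'accuracy: {correct / len(dev):.3f}')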
May 2024
command
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path microsoft/phi-2 \
--do_train \
--dataset alpca_med_cqa_in_modify_ou_T_option_en \
--template llama2 \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--output_dir ./FINE/phi2-med_cqa_in_modify_ou_T_option_single \
--overwrite_cache \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
# phi-3-4k-instruct
CUDA_VISIBLE_DEVICES=2 python src/train_bash.py \
--stage sft \
--model_name_or_path microsoft/Phi-3-mini-4k-instruct \
--do_train \
--dataset alpca_med_cqa_in_modify_ou_T_option_en \
--template default \
--finetuning_type lora \
--lora_target qkv_proj \
--output_dir ./FINE/phi3-4k-instruct-med_cqa_in_modify_ou_T_option_single \
--overwrite_cache \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
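The --lora_target names are architecture-specific: Phi-3 fuses the attention projections into a single qkv_proj, whereas Llama models keep q_proj/k_proj/v_proj separate. One way to discover valid targets is to list a checkpoint's Linear modules:
import torch.nn as nn
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    'microsoft/Phi-3-mini-4k-instruct', trust_remote_code=True)

# Print the distinct Linear-layer suffixes usable as --lora_target values.
names = {name.split('.')[-1] for name, mod in model.named_modules()
         if isinstance(mod, nn.Linear)}
print(sorted(names))  # e.g. ['down_proj', 'gate_up_proj', 'lm_head', 'o_proj', 'qkv_proj']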
# llama3 8b
CUDA_VISIBLE_DEVICES=3 python src/train_bash.py \
--stage sft \
--model_name_or_path meta-llama/Meta-Llama-3-8B \
--do_train \
--dataset alpca_med_cqa_in_modify_ou_T_option_en \
--template llama2 \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--output_dir ./FINE/llama3-8b-med_cqa_in_modify_ou_T_option_single \
--overwrite_cache \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
# IA3
CUDA_VISIBLE_DEVICES=1 python finetune_phi.py \
--model_type llama-3 \
--base_model meta-llama/Meta-Llama-3-8B \
--model_name llama-3-medical \
--dataset ../../data/alpca_med_cqa_modi_prompt_in_modify_ou_T_option_en.json
CUDA_VISIBLE_DEVICES=2 python finetune_phi.py \
--model_type phi-3 \
--base_model microsoft/Phi-3-mini-4k-instruct \
--model_name phi-3-instruct-medical \
--dataset ../../data/alpca_med_cqa_modi_prompt_in_modify_ou_T_option_en.json
CUDA_VISIBLE_DEVICES=0 python finetune_phi.py \
--model_type phi-moe \
--base_model /home/ludaze/Docker/Llama/MOE-n-experts/models/PhiMoeForCausalLM-0-1-top1 \
--model_name phi-moe-medical \
--dataset ../../data/alpca_med_cqa_modi_prompt_in_modify_ou_T_option_en.json
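finetune_phi.py is a local script not shown here; for orientation, a minimal sketch of an IA3 setup with PEFT follows. The target/feedforward module names track Llama's naming, and the module choice is an assumption, not the script's actual configuration.
from transformers import AutoModelForCausalLM
from peft import IA3Config, get_peft_model

model = AutoModelForCausalLM.from_pretrained('meta-llama/Meta-Llama-3-8B')

# IA3 learns per-channel rescaling vectors instead of LoRA's low-rank updates,
# so the trainable-parameter count is far smaller.
config = IA3Config(
    task_type='CAUSAL_LM',
    target_modules=['k_proj', 'v_proj', 'down_proj'],  # assumed choice
    feedforward_modules=['down_proj'],  # must be a subset of target_modules
)
model = get_peft_model(model, config)
model.print_trainable_parameters()
# Training then proceeds with a standard transformers Trainer / SFT loop.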