大模型微调

环境准备

conda create -n qianwen2 python=3.10 ipykernel

conda activate qianwen2

pip install -r requirements.txt

1f7aee09-03de-4923-8b17-320d348aea83

模型准备

# Download the base model weights from ModelScope.
# snapshot_download fetches the repository into cache_dir and returns
# the local directory the files landed in.
from modelscope import snapshot_download

model_dir = snapshot_download(
    'qwen/Qwen2-0.5B-Instruct',
    cache_dir='./',
)


# Inference with the (un-tuned) base model via the chat template.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "qwen/Qwen2-0___5B-Instruct"
device = "cuda"  # device the tokenized inputs are moved onto

# Load the causal-LM weights and the matching tokenizer.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="cuda",  # cpu / cuda
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Compose the conversation: a system instruction plus the user question.
prompt = "我的快递地址可以改吗"
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": prompt},
]

# Render the messages through the model's chat template; keep it as a
# plain string and append the generation prompt marker.
chat_text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)

# Tokenize to tensors and move them to the target device.
model_inputs = tokenizer([chat_text], return_tensors="pt").to(device)

# Generate; the returned ids still contain the prompt ids as a prefix.
generated_ids = model.generate(**model_inputs, max_new_tokens=512)

# Strip the prompt prefix so only the newly generated tokens remain.
generated_ids = [
    out_ids[len(in_ids):]
    for in_ids, out_ids in zip(model_inputs.input_ids, generated_ids)
]

# Decode the remaining ids back into text and show the answer.
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)

数据转换

import json


def _load_records(path):
    """Load training records from *path*.

    The original code called ``json.load`` on a ``.jsonl`` file, which only
    works when the file is actually one big JSON array.  Accept both layouts:
    a single JSON array, or true JSON-Lines (one JSON object per line).
    """
    with open(path, 'r', encoding='utf8') as f:
        content = f.read()
    try:
        return json.loads(content)          # layout 1: one JSON array
    except json.JSONDecodeError:
        # layout 2: JSONL — parse each non-empty line separately
        return [json.loads(line) for line in content.splitlines() if line.strip()]


def _to_chatml(item):
    """Convert one {'instruction', 'output'} record into a ChatML message."""
    return {
        "type": "chatml",
        "messages": [
            {
                "role": "system",
                # alternative persona: 你是vivo手机智能客服,你叫小V
                "content": "You are a helpful assistant."
            },
            {"role": "user", "content": item['instruction']},
            {"role": "assistant", "content": item['output']},
        ],
        "source": "unknown",
    }


# Read the raw data and convert every record to ChatML.
data = _load_records('./data/sales.jsonl')
data_change = [_to_chatml(item) for item in data]

# Save as JSONL: one JSON object per line, non-ASCII kept readable.
with open('./data/new_sales.jsonl', 'w', encoding='utf8') as f:
    for d in data_change:
        f.write(json.dumps(d, ensure_ascii=False) + '\n')

print('数据转化成功!')

aa670f5cbfcba3f95310360597ccb578

模型微调

# Option 1 (Linux): run the provided shell wrapper
# cd examples/sft
# bash finetune.sh -m <model_path> -d <data_path> --deepspeed <config_path> [--use_lora True] [--q_lora True]
# bash finetune.sh -m ./qwen/Qwen2-0___5B-Instruct -d data/new_sales.jsonl
# --deepspeed ./ds_config_zero3.json --q_lora True

# Option 2 (Linux/Windows): build the finetune.py command line here.
# Every flag carries an inline "# ..." annotation; annotations and the
# newlines are stripped below before the command is executed.
code = '''
python ./examples/sft/finetune.py # 要执行的脚本
--model_name_or_path ./qwen/Qwen2-0___5B-Instruct # 模型路径
--data_path ./data/new_sales.jsonl # 数据路径
--output_dir ./output # 模型微调结果的保存路径
--per_device_train_batch_size 2 # 训练数据的批次大小
--learning_rate 0.0001 # 学习率
--weight_decay 0.1 # 学习率衰减权重
--num_train_epochs 10 # 训练次数
--logging_strategy "steps" # 多少步打印一次日志
--logging_steps 100 # 100步打印日志
--save_strategy "steps" # 多少步保存一次模型
--save_steps 100 # 100步保存一次
--save_total_limit 2 # 一共限制保存多少个结果
--use_cpu False # 是否使用cpu
--use_lora True # 是否使用lora微调
--model_max_length 512 # 模型输出的最大长度
'''
print(code)


# Collapse the annotated template into a single shell command: drop each
# "# comment" together with its newline, and drop the bare newlines.
# Raw string so the regex source is unambiguous.
import re
code = re.sub(r'#.+\n|\n', '', code)
print(code)

# Launch the fine-tuning run.  os.system returns the shell's exit status;
# the original ignored it, so a failed run looked identical to success.
import os
status = os.system(code)
if status != 0:
    raise RuntimeError(f'fine-tuning command failed with exit status {status}')

b62a5136-5500-4310-beaa-2dab415e1d34

28e31fb9-9a02-4f6f-a642-43fdb68d86c1

模型合并

# Fold the fine-tuned LoRA adapter back into the base model weights.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# 1. Load the base model and its tokenizer.
model_id = 'qwen/Qwen2-0___5B-Instruct'
base_model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# 2. Attach the fine-tuned adapter checkpoint on top of the base model.
adapter_model = PeftModel.from_pretrained(
    model=base_model,                  # base model to wrap
    model_id='output/checkpoint-710',  # fine-tuned adapter checkpoint
)

# 3. Merge the adapter deltas into the base weights and drop the wrapper.
merged_model = adapter_model.merge_and_unload()

# 4. Persist the merged model alongside its tokenizer.
save_path = './qwen/Qwen2-0.5B-Instruct-LoRA'
merged_model.save_pretrained(save_path)
tokenizer.save_pretrained(save_path)

print('保存成功!')

0e16b8bb3539b390661867ad823f64d4

posted @ 2026-04-05 20:33  小蓝莓  阅读(0)  评论(0)    收藏  举报