Unsloth
Unsloth is a library that accelerates LLM fine-tuning, making training faster while reducing compute requirements. Unsloth is integrated with TRL, and everything below runs for free on a Google Colab T4 GPU.
pip install unsloth vllm
pip install --upgrade pillow
Set up Unsloth and load the model
The FastLanguageModel class integrates transformers with Unsloth's optimizations.
Load Google's Gemma 3 1B Instruct model and configure it for fine-tuning:
from unsloth import FastLanguageModel
import torch
max_seq_length = 1024 # Can increase for longer reasoning traces
lora_rank = 32 # Larger rank = smarter, but slower
model, tokenizer = FastLanguageModel.from_pretrained(
model_name="google/gemma-3-1b-it",
max_seq_length=max_seq_length,
load_in_4bit=True, # False for LoRA 16bit
fast_inference=True, # Enable vLLM fast inference
max_lora_rank=lora_rank,
gpu_memory_utilization=0.6, # Reduce if out of memory
)
model = FastLanguageModel.get_peft_model(
model,
r=lora_rank, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
target_modules=[
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
], # Remove QKVO if out of memory
lora_alpha=lora_rank,
use_gradient_checkpointing="unsloth", # Enable long context finetuning
random_state=3407,
)
The model is loaded with 4-bit quantization to save memory, and LoRA (low-rank adaptation) is applied for parameter-efficient fine-tuning. The target_modules argument specifies which layers of the model receive LoRA adapters, and use_gradient_checkpointing="unsloth" enables training with longer contexts.
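As an optional sanity check (not part of the original recipe), you can count the trainable LoRA parameters against the frozen base weights to see how small the adapter actually is:

# Count trainable (LoRA) parameters versus all parameters in the wrapped model
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"Trainable: {trainable:,} / {total:,} ({100 * trainable / total:.2f}%)")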
Prepare the data
We use the GSM8K dataset, which contains grade-school math word problems, and format the data to encourage the model to show its reasoning before giving an answer. First, define the prompt and answer formats:
# Define the system prompt that instructs the model to use a specific format
SYSTEM_PROMPT = """
Respond in the following format:
<reasoning>
...
</reasoning>
<answer>
...
</answer>
"""
XML_COT_FORMAT = """\
<reasoning>
{reasoning}
</reasoning>
<answer>
{answer}
</answer>
"""
Then prepare the dataset, extracting the final answer from each example and formatting it as a string:
import re
from datasets import load_dataset, Dataset
# Helper functions to extract answers from different formats
def extract_xml_answer(text: str) -> str:
answer = text.split("<answer>")[-1]
answer = answer.split("</answer>")[0]
return answer.strip()
def extract_hash_answer(text: str) -> str | None:
if "####" not in text:
return None
return text.split("####")[1].strip()
# Function to prepare the GSM8K dataset
def get_gsm8k_questions(split="train") -> Dataset:
data = load_dataset("openai/gsm8k", "main")[split]
data = data.map(
lambda x: {
"prompt": [
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": x["question"]},
],
"answer": extract_hash_answer(x["answer"]),
}
)
return data
dataset = get_gsm8k_questions()
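To verify the formatting, you can print the first example. The exact content depends on the dataset version, but the prompt should be a two-message chat and the answer a bare string:

print(dataset[0]["prompt"])  # [{'role': 'system', ...}, {'role': 'user', ...}]
print(dataset[0]["answer"])  # the bare answer string, e.g. "72"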
Define the reward functions
Recall that GRPO can use reward functions to guide the model's learning based on verifiable criteria, such as answer correctness and output format.
# Reward the model when its extracted answer matches the ground-truth answer
def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:
responses = [completion[0]["content"] for completion in completions]
q = prompts[0][-1]["content"]
extracted_responses = [extract_xml_answer(r) for r in responses]
print(
"-" * 20,
f"Question:\n{q}",
f"\nAnswer:\n{answer[0]}",
f"\nResponse:\n{responses[0]}",
f"\nExtracted:\n{extracted_responses[0]}",
)
return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]
# Reward the model for producing a purely numeric answer
def int_reward_func(completions, **kwargs) -> list[float]:
responses = [completion[0]["content"] for completion in completions]
extracted_responses = [extract_xml_answer(r) for r in responses]
return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]
# Reward the model for following the required format exactly (strict match)
def strict_format_reward_func(completions, **kwargs) -> list[float]:
pattern = r"^<reasoning>\n.*?\n</reasoning>\n<answer>\n.*?\n</answer>\n$"
responses = [completion[0]["content"] for completion in completions]
    matches = [re.match(pattern, r, flags=re.DOTALL) for r in responses]  # DOTALL lets .*? span multi-line reasoning
return [0.5 if match else 0.0 for match in matches]
# Reward the model for loosely following the required format (lenient match)
def soft_format_reward_func(completions, **kwargs) -> list[float]:
pattern = r"<reasoning>.*?</reasoning>\s*<answer>.*?</answer>"
responses = [completion[0]["content"] for completion in completions]
    matches = [re.match(pattern, r, flags=re.DOTALL) for r in responses]  # DOTALL lets .*? span multi-line reasoning
return [0.5 if match else 0.0 for match in matches]
# Reward correct XML tag usage and penalize extra content after the closing tags
def count_xml(text) -> float:
count = 0.0
if text.count("<reasoning>\n") == 1:
count += 0.125
if text.count("\n</reasoning>\n") == 1:
count += 0.125
if text.count("\n<answer>\n") == 1:
count += 0.125
count -= len(text.split("\n</answer>\n")[-1]) * 0.001
if text.count("\n</answer>") == 1:
count += 0.125
count -= (len(text.split("\n</answer>")[-1]) - 1) * 0.001
return count
def xmlcount_reward_func(completions, **kwargs) -> list[float]:
contents = [completion[0]["content"] for completion in completions]
return [count_xml(c) for c in contents]
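Before training, it can help to sanity-check the reward functions on a hand-written completion. The nested-list shape below mirrors what GRPOTrainer passes for conversational data (one list of chat messages per sampled completion); the example itself is hypothetical:

# A hypothetical completion in the expected <reasoning>/<answer> format
fake_completion = [
    [{"content": "<reasoning>\n3 * 4 = 12\n</reasoning>\n<answer>\n12\n</answer>\n"}]
]
print(xmlcount_reward_func(completions=fake_completion))     # [0.5]
print(int_reward_func(completions=fake_completion))          # [0.5]
# [0.5] with the DOTALL flag above; 0.0 otherwise, since .*? cannot cross newlines
print(soft_format_reward_func(completions=fake_completion))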
Train with GRPO
Now set up the GRPO trainer with the model, tokenizer, and reward functions:
from trl import GRPOConfig, GRPOTrainer
max_prompt_length = 256
training_args = GRPOConfig(
learning_rate=5e-6,
adam_beta1=0.9,
adam_beta2=0.99,
weight_decay=0.1,
warmup_ratio=0.1,
lr_scheduler_type="cosine",
optim="paged_adamw_8bit",
logging_steps=1,
per_device_train_batch_size=1,
gradient_accumulation_steps=1, # Increase to 4 for smoother training
num_generations=6, # Decrease if out of memory
max_prompt_length=max_prompt_length,
max_completion_length=max_seq_length - max_prompt_length,
# num_train_epochs = 1, # Set to 1 for a full training run
max_steps=250,
save_steps=250,
max_grad_norm=0.1,
report_to="none", # Can use Weights & Biases
output_dir="outputs",
)
trainer = GRPOTrainer(
model=model,
processing_class=tokenizer,
reward_funcs=[
xmlcount_reward_func,
soft_format_reward_func,
strict_format_reward_func,
int_reward_func,
correctness_reward_func,
],
args=training_args,
train_dataset=dataset,
)
Start training:
trainer.train()
Test the model
After training, save the LoRA adapter, then load it for fast generation with vLLM:
model.save_lora("grpo_saved_lora")
from vllm import SamplingParams
# Ask the model to calculate pi
text = tokenizer.apply_chat_template(
[
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": "Calculate pi."},
],
tokenize=False,
add_generation_prompt=True,
)
sampling_params = SamplingParams(
temperature=0.8,
top_p=0.95,
max_tokens=1024,
)
output = (
model.fast_generate(
text,
sampling_params=sampling_params,
lora_request=model.load_lora("grpo_saved_lora"),
)[0]
.outputs[0]
.text
)
print(output)
Save the model
Unsloth provides several options for saving your fine-tuned model:
- Save merged weights in 16-bit precision:
model.save_pretrained_merged("model", tokenizer, save_method="merged_16bit")
- Push to the Hugging Face Hub:
model.push_to_hub_merged(
"your-username/model-name", tokenizer, save_method="merged_16bit", token="your-token"
)
- Save in GGUF format for use with llama.cpp:
model.push_to_hub_gguf(
"your-username/model-name",
tokenizer,
quantization_method=["q4_k_m", "q8_0", "q5_k_m"],
token="your-token",
)
You can then run the model with llama.cpp: llama-cli -m my_model.gguf.
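After merging to 16-bit, the saved checkpoint is a regular transformers model. Here is a minimal sketch of loading it back for inference, assuming the local "model" directory written by save_pretrained_merged above:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the merged checkpoint from the local "model" directory
tokenizer = AutoTokenizer.from_pretrained("model")
model = AutoModelForCausalLM.from_pretrained("model", device_map="auto")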