fix tests

Former-commit-id: 23f97bd437424ef43b2b84743d56acc5d1ca70d5
hiyouga
2024-01-20 19:58:04 +08:00
parent 80637fc06d
commit 1750218057
12 changed files with 80 additions and 65 deletions

View File

@@ -11,9 +11,10 @@ from typing import Optional
from torch.utils.data import DataLoader
from transformers import DataCollatorForSeq2Seq
-from llmtuner.data import get_dataset, preprocess_dataset
+from llmtuner.data import get_dataset
from llmtuner.extras.constants import IGNORE_INDEX
-from llmtuner.model import get_train_args, load_model_and_tokenizer
+from llmtuner.hparams import get_train_args
+from llmtuner.model import load_model_and_tokenizer
BASE_LR = 3e-4 # 1.5e-4 for 30B-70B models
@@ -26,7 +27,7 @@ def calculate_lr(
cutoff_len: int, # i.e. maximum input length during training
batch_size: int, # total batch size, namely (batch size * gradient accumulation * world size)
is_mistral: bool, # mistral model uses a smaller learning rate,
-dataset_dir: Optional[str] = "../data"
+dataset_dir: Optional[str] = "data"
):
model_args, data_args, training_args, finetuning_args, _ = get_train_args(dict(
stage="sft",
@@ -37,9 +38,8 @@ def calculate_lr(
cutoff_len=cutoff_len,
output_dir="dummy_dir"
))
-trainset = get_dataset(model_args, data_args)
_, tokenizer = load_model_and_tokenizer(model_args, finetuning_args, is_trainable=False, add_valuehead=False)
-trainset = preprocess_dataset(trainset, tokenizer, data_args, training_args, stage="sft")
+trainset = get_dataset(tokenizer, model_args, data_args, training_args, stage="sft")
data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX)
dataloader = DataLoader(
dataset=trainset, batch_size=batch_size, shuffle=True, collate_fn=data_collator, pin_memory=True
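
Read together, these hunks replace the old two-step flow (get_dataset followed by preprocess_dataset) with a single get_dataset call that receives the tokenizer and preprocesses internally, while get_train_args now lives in llmtuner.hparams. A minimal sketch of the resulting data path, reassembled from the added lines above (argument objects as returned by get_train_args; not copied verbatim from the script):

# Sketch only: post-refactor data path, assembled from the added lines in this diff.
_, tokenizer = load_model_and_tokenizer(model_args, finetuning_args, is_trainable=False, add_valuehead=False)
trainset = get_dataset(tokenizer, model_args, data_args, training_args, stage="sft")  # preprocessing now happens inside get_dataset
data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX)
dataloader = DataLoader(
    dataset=trainset, batch_size=batch_size, shuffle=True, collate_fn=data_collator, pin_memory=True
)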

View File

@@ -7,11 +7,15 @@ import os
import fire
import torch
import torch.nn as nn
-from typing import Optional
+from typing import TYPE_CHECKING, Optional
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoftQConfig, LoraConfig, TaskType, get_peft_model
+if TYPE_CHECKING:
+    from transformers import PreTrainedModel
class Shell(nn.Module):
def __init__(self, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
@@ -42,7 +46,8 @@ def quantize_loftq(
loftq_iter: Optional[int] = 1,
lora_alpha: Optional[int] = None,
lora_rank: Optional[int] = 16,
-lora_target: Optional[str] = "q_proj,v_proj"
+lora_target: Optional[str] = "q_proj,v_proj",
+save_safetensors: Optional[bool] = False,
):
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True, torch_dtype="auto")
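
This hunk ends at the model load; the construction of lora_config from loftq_iter, lora_rank, lora_alpha and lora_target is outside the shown context. For orientation only, the standard peft wiring for such parameters looks roughly like the sketch below (plain peft API, not copied from this file; the lora_alpha fallback of 2 * lora_rank is an assumption):

# Rough sketch (assumed, not from this diff): turning the CLI parameters into a LoftQ-initialized LoRA config.
loftq_config = LoftQConfig(loftq_bits=4, loftq_iter=loftq_iter)  # 4-bit LoftQ with the requested iteration count
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=lora_rank,
    lora_alpha=lora_alpha if lora_alpha is not None else lora_rank * 2,  # fallback value is assumed
    target_modules=[name.strip() for name in lora_target.split(",")],
    init_lora_weights="loftq",  # tells peft to run LoftQ initialization
    loftq_config=loftq_config,
)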
@@ -60,16 +65,16 @@ def quantize_loftq(
# Init LoftQ model
lora_model = get_peft_model(model, lora_config)
-base_model = lora_model.get_base_model()
+base_model: "PreTrainedModel" = lora_model.get_base_model()
# Save LoftQ model
setattr(lora_model.base_model.peft_config["default"], "base_model_name_or_path", save_dir)
setattr(lora_model.base_model.peft_config["default"], "init_lora_weights", True)
-lora_model.save_pretrained(os.path.join(save_dir, "adapters"))
+lora_model.save_pretrained(os.path.join(save_dir, "adapters"), safe_serialization=save_safetensors)
# Save base model
unwrap_model(base_model)
-base_model.save_pretrained(save_dir)
+base_model.save_pretrained(save_dir, safe_serialization=save_safetensors)
tokenizer.save_pretrained(save_dir)
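
The new save_safetensors flag is simply forwarded as transformers' safe_serialization argument, which only changes the on-disk checkpoint format. A self-contained sketch of what it toggles (paths are placeholders):

# safe_serialization controls the checkpoint format written by save_pretrained.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("path_to_model")  # placeholder path
model.save_pretrained("out_safetensors", safe_serialization=True)   # writes model.safetensors
model.save_pretrained("out_pickle", safe_serialization=False)       # writes pytorch_model.bin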

View File

@@ -1,49 +0,0 @@
-# coding=utf-8
-# Quantizes models with AutoGPTQ (https://github.com/PanQiWei/AutoGPTQ).
-# Usage: python quantize.py --input_dir path_to_llama_model --output_dir path_to_quant_model --data_file alpaca.json
-# --max_length 1024 --max_samples 1024
-# dataset format: instruction (string), input (string), output (string), history (List[string])
-
-import fire
-from datasets import load_dataset
-from transformers import AutoTokenizer
-from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
-
-
-def quantize(input_dir: str, output_dir: str, data_file: str, max_length: int, max_samples: int):
-    tokenizer = AutoTokenizer.from_pretrained(input_dir, use_fast=False, padding_side="left")
-
-    def format_example(examples):
-        prefix = ("A chat between a curious user and an artificial intelligence assistant. "
-                  "The assistant gives helpful, detailed, and polite answers to the user's questions.")
-        texts = []
-        for i in range(len(examples["instruction"])):
-            prompt = prefix + "\n"
-            if "history" in examples:
-                for user_query, bot_resp in examples["history"][i]:
-                    prompt += "Human: {}\nAssistant: {}\n".format(user_query, bot_resp)
-            prompt += "Human: {}\nAssistant: {}".format(
-                examples["instruction"][i] + "\n" + examples["input"][i], examples["output"][i]
-            )
-            texts.append(prompt)
-        return tokenizer(texts, truncation=True, max_length=max_length)
-
-    dataset = load_dataset("json", data_files=data_file)["train"]
-    column_names = list(dataset.column_names)
-    dataset = dataset.select(range(min(len(dataset), max_samples)))
-    dataset = dataset.map(format_example, batched=True, remove_columns=column_names)
-    dataset = dataset.shuffle()
-
-    quantize_config = BaseQuantizeConfig(
-        bits=4,
-        group_size=128,
-        desc_act=False
-    )
-
-    model = AutoGPTQForCausalLM.from_pretrained(input_dir, quantize_config, trust_remote_code=True)
-    model.quantize(dataset)
-    model.save_quantized(output_dir)
-
-
-if __name__ == "__main__":
-    fire.Fire(quantize)
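
For reference, the removed format_example rendered each record into a plain Human/Assistant prompt with an optional multi-turn history. A standalone per-record sketch with a made-up record (field values are purely illustrative):

# Per-record rewrite of the removed batched format_example, for illustration only.
prefix = ("A chat between a curious user and an artificial intelligence assistant. "
          "The assistant gives helpful, detailed, and polite answers to the user's questions.")
example = {
    "instruction": "Summarize the following text.",
    "input": "Large language models are neural networks trained on text.",
    "output": "LLMs are neural networks trained on text.",
    "history": [],  # list of (user_query, bot_response) pairs for multi-turn records
}
prompt = prefix + "\n"
for user_query, bot_resp in example["history"]:
    prompt += "Human: {}\nAssistant: {}\n".format(user_query, bot_resp)
prompt += "Human: {}\nAssistant: {}".format(example["instruction"] + "\n" + example["input"], example["output"])
print(prompt)  # this string is what the deleted script tokenized for GPTQ calibration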