refactor adapter hparam

Former-commit-id: f82aece9ebd6df83a7a005cc7cbbcec07fa6e14d
hiyouga
2023-12-15 20:53:11 +08:00
parent 27ef5b1aa7
commit f902b0d420
21 changed files with 302 additions and 311 deletions

View File

@@ -14,7 +14,7 @@ def save_model(
     lang: str,
     model_name: str,
     model_path: str,
-    checkpoints: List[str],
+    adapter_path: List[str],
     finetuning_type: str,
     template: str,
     max_shard_size: int,
@@ -25,8 +25,8 @@ def save_model(
         error = ALERTS["err_no_model"][lang]
     elif not model_path:
         error = ALERTS["err_no_path"][lang]
-    elif not checkpoints:
-        error = ALERTS["err_no_checkpoint"][lang]
+    elif not adapter_path:
+        error = ALERTS["err_no_adapter"][lang]
     elif not export_dir:
         error = ALERTS["err_no_export_dir"][lang]
@@ -37,7 +37,7 @@ def save_model(
     args = dict(
         model_name_or_path=model_path,
-        checkpoint_dir=",".join([get_save_dir(model_name, finetuning_type, ckpt) for ckpt in checkpoints]),
+        adapter_name_or_path=",".join([get_save_dir(model_name, finetuning_type, adapter) for adapter in adapter_path]),
         finetuning_type=finetuning_type,
         template=template,
         export_dir=export_dir,
@@ -63,7 +63,7 @@ def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
             engine.manager.get_elem_by_name("top.lang"),
             engine.manager.get_elem_by_name("top.model_name"),
             engine.manager.get_elem_by_name("top.model_path"),
-            engine.manager.get_elem_by_name("top.checkpoints"),
+            engine.manager.get_elem_by_name("top.adapter_path"),
             engine.manager.get_elem_by_name("top.finetuning_type"),
             engine.manager.get_elem_by_name("top.template"),
             max_shard_size,
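
The export tab now forwards the selected adapters instead of checkpoints. Below is a minimal sketch (not the project code) of how the comma-separated adapter_name_or_path string is assembled, assuming get_save_dir lays adapters out as <save_root>/<model_name>/<finetuning_type>/<adapter>; the save root and model names are placeholders.

# Hypothetical stand-ins for illustration only; the real helpers live in llmtuner.webui.common.
import os
from typing import List

SAVE_ROOT = "saves"  # assumed root directory for saved adapters

def get_save_dir(model_name: str, finetuning_type: str, adapter: str) -> str:
    # assumed layout: <SAVE_ROOT>/<model_name>/<finetuning_type>/<adapter>
    return os.path.join(SAVE_ROOT, model_name, finetuning_type, adapter)

def join_adapter_paths(model_name: str, finetuning_type: str, adapter_path: List[str]) -> str:
    # mirrors the ",".join(...) expression in the hunk above
    return ",".join(get_save_dir(model_name, finetuning_type, adapter) for adapter in adapter_path)

print(join_adapter_paths("llama2-7b", "lora", ["sft-1", "sft-2"]))
# -> saves/llama2-7b/lora/sft-1,saves/llama2-7b/lora/sft-2 (POSIX paths)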

View File

@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Dict
 from llmtuner.data.template import templates
 from llmtuner.extras.constants import METHODS, SUPPORTED_MODELS
-from llmtuner.webui.common import get_model_path, get_template, list_checkpoint, save_config
+from llmtuner.webui.common import get_model_path, get_template, list_adapters, save_config
 from llmtuner.webui.utils import can_quantize
 if TYPE_CHECKING:
@@ -20,7 +20,7 @@ def create_top() -> Dict[str, "Component"]:
     with gr.Row():
         finetuning_type = gr.Dropdown(choices=METHODS, value="lora", scale=1)
-        checkpoints = gr.Dropdown(multiselect=True, scale=5)
+        adapter_path = gr.Dropdown(multiselect=True, scale=5)
         refresh_btn = gr.Button(scale=1)
     with gr.Accordion(label="Advanced config", open=False) as advanced_tab:
@@ -34,7 +34,7 @@ def create_top() -> Dict[str, "Component"]:
         shift_attn = gr.Checkbox(value=False)
     model_name.change(
-        list_checkpoint, [model_name, finetuning_type], [checkpoints], queue=False
+        list_adapters, [model_name, finetuning_type], [adapter_path], queue=False
     ).then(
         get_model_path, [model_name], [model_path], queue=False
     ).then(
@@ -44,13 +44,13 @@ def create_top() -> Dict[str, "Component"]:
     model_path.change(save_config, inputs=[lang, model_name, model_path], queue=False)
     finetuning_type.change(
-        list_checkpoint, [model_name, finetuning_type], [checkpoints], queue=False
+        list_adapters, [model_name, finetuning_type], [adapter_path], queue=False
     ).then(
         can_quantize, [finetuning_type], [quantization_bit], queue=False
     )
     refresh_btn.click(
-        list_checkpoint, [model_name, finetuning_type], [checkpoints], queue=False
+        list_adapters, [model_name, finetuning_type], [adapter_path], queue=False
     )
     return dict(
@@ -58,7 +58,7 @@ def create_top() -> Dict[str, "Component"]:
         model_name=model_name,
         model_path=model_path,
         finetuning_type=finetuning_type,
-        checkpoints=checkpoints,
+        adapter_path=adapter_path,
         refresh_btn=refresh_btn,
         advanced_tab=advanced_tab,
         quantization_bit=quantization_bit,
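
list_checkpoint is replaced by list_adapters everywhere the top panel wires events. The sketch below is only a guess at what such a helper could do; the real list_adapters lives in llmtuner.webui.common and returns a Gradio dropdown update rather than a plain list, so treat the directory layout and the adapter_config.json check as assumptions for illustration.

# Hypothetical sketch, not the project implementation.
import os
from typing import List

SAVE_ROOT = "saves"  # assumed root, matching the sketch above

def list_adapters(model_name: str, finetuning_type: str) -> List[str]:
    save_dir = os.path.join(SAVE_ROOT, model_name, finetuning_type)
    if not (model_name and os.path.isdir(save_dir)):
        return []
    # treat any subfolder holding an adapter_config.json (the PEFT adapter marker) as an adapter
    return sorted(
        adapter for adapter in os.listdir(save_dir)
        if os.path.isfile(os.path.join(save_dir, adapter, "adapter_config.json"))
    )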

View File

@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Dict
 from transformers.trainer_utils import SchedulerType
 from llmtuner.extras.constants import TRAINING_STAGES
-from llmtuner.webui.common import list_checkpoint, list_dataset, DEFAULT_DATA_DIR
+from llmtuner.webui.common import list_adapters, list_dataset, DEFAULT_DATA_DIR
 from llmtuner.webui.components.data import create_preview_box
 from llmtuner.webui.utils import gen_plot
@@ -60,7 +60,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
         lr_scheduler_type=lr_scheduler_type, max_grad_norm=max_grad_norm, val_size=val_size
     ))
-    with gr.Accordion(label="Advanced config", open=False) as advanced_tab:
+    with gr.Accordion(label="Extra config", open=False) as extra_tab:
         with gr.Row():
             logging_steps = gr.Slider(value=5, minimum=5, maximum=1000, step=5)
             save_steps = gr.Slider(value=100, minimum=10, maximum=5000, step=10)
@@ -73,7 +73,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
     input_elems.update({logging_steps, save_steps, warmup_steps, neftune_alpha, train_on_prompt, upcast_layernorm})
     elem_dict.update(dict(
-        advanced_tab=advanced_tab, logging_steps=logging_steps, save_steps=save_steps, warmup_steps=warmup_steps,
+        extra_tab=extra_tab, logging_steps=logging_steps, save_steps=save_steps, warmup_steps=warmup_steps,
         neftune_alpha=neftune_alpha, train_on_prompt=train_on_prompt, upcast_layernorm=upcast_layernorm
     ))
@@ -83,12 +83,12 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
             lora_dropout = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=1)
             lora_target = gr.Textbox(scale=1)
             additional_target = gr.Textbox(scale=1)
-            resume_lora_training = gr.Checkbox(value=True, scale=1)
+            create_new_adapter = gr.Checkbox(scale=1)
-    input_elems.update({lora_rank, lora_dropout, lora_target, additional_target, resume_lora_training})
+    input_elems.update({lora_rank, lora_dropout, lora_target, additional_target, create_new_adapter})
     elem_dict.update(dict(
         lora_tab=lora_tab, lora_rank=lora_rank, lora_dropout=lora_dropout, lora_target=lora_target,
-        additional_target=additional_target, resume_lora_training=resume_lora_training,
+        additional_target=additional_target, create_new_adapter=create_new_adapter
     ))
     with gr.Accordion(label="RLHF config", open=False) as rlhf_tab:
@@ -98,7 +98,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
             refresh_btn = gr.Button(scale=1)
     refresh_btn.click(
-        list_checkpoint,
+        list_adapters,
         [engine.manager.get_elem_by_name("top.model_name"), engine.manager.get_elem_by_name("top.finetuning_type")],
         [reward_model],
         queue=False
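
The train tab also swaps the resume_lora_training checkbox for create_new_adapter. The diff alone does not show the runner-side handling, so the shim below only assumes the new flag expresses the inverse intent of the old one; migrate_lora_flag is a hypothetical helper for illustration, not part of the commit.

# Hypothetical compatibility shim for old WebUI arg dicts.
def migrate_lora_flag(old_args: dict) -> dict:
    new_args = dict(old_args)
    if "resume_lora_training" in new_args:
        # assumption: creating a new adapter is the opposite of resuming the loaded one
        new_args["create_new_adapter"] = not new_args.pop("resume_lora_training")
    return new_args

print(migrate_lora_flag({"resume_lora_training": True}))
# -> {'create_new_adapter': False}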