commit 5c4248a29c
parent f22886e2b6
Author: hiyouga
Date:   2023-10-09 14:50:14 +08:00

    update webui #1086

    Former-commit-id: 65a48bc398f18f71f5f2659b2070e3b9593af243

10 changed files with 105 additions and 56 deletions


@@ -57,6 +57,9 @@ def create_eval_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict
             top_elems["quantization_bit"],
             top_elems["template"],
             top_elems["system_prompt"],
+            top_elems["flash_attn"],
+            top_elems["shift_attn"],
+            top_elems["rope_scaling"],
             dataset_dir,
             dataset,
             cutoff_len,
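
Note: every tab wires the shared components the same way: Gradio event inputs
are a flat list of components, and the callback receives their current values
positionally. A minimal standalone sketch of that pattern (illustrative only,
not the project's actual handler):

import gradio as gr

def run_eval(model_name, flash_attn, shift_attn, rope_scaling):
    # Values arrive in the order of the input list below: str, bool, bool, str.
    return f"{model_name}: flash={flash_attn}, shift={shift_attn}, rope={rope_scaling}"

with gr.Blocks() as demo:
    model_name = gr.Textbox(value="llama-7b")
    flash_attn = gr.Checkbox(label="flash_attn", value=False)
    shift_attn = gr.Checkbox(label="shift_attn", value=False)
    rope_scaling = gr.Dropdown(choices=["none", "linear", "dynamic"], value="none")
    info_box = gr.Textbox()
    start_btn = gr.Button("Start")
    # Forwarding a new flag only requires appending its component here,
    # which is exactly what the hunks in this commit do.
    start_btn.click(run_eval, [model_name, flash_attn, shift_attn, rope_scaling], [info_box])

# demo.launch() would serve the UI.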


@@ -28,7 +28,10 @@ def create_infer_tab(top_elems: Dict[str, "Component"]) -> Dict[str, "Component"
            top_elems["finetuning_type"],
            top_elems["quantization_bit"],
            top_elems["template"],
-            top_elems["system_prompt"]
+            top_elems["system_prompt"],
+            top_elems["flash_attn"],
+            top_elems["shift_attn"],
+            top_elems["rope_scaling"]
        ],
        [info_box]
    ).then(


@@ -26,10 +26,16 @@ def create_top() -> Dict[str, "Component"]:
     with gr.Accordion(label="Advanced config", open=False) as advanced_tab:
         with gr.Row():
-            quantization_bit = gr.Dropdown(choices=["None", "8", "4"], value="None", scale=1)
+            quantization_bit = gr.Dropdown(choices=["none", "8", "4"], value="none", scale=1)
             template = gr.Dropdown(choices=list(templates.keys()), value="default", scale=1)
             system_prompt = gr.Textbox(scale=2)

+    with gr.Accordion(label="Model config (LLaMA only)", open=False) as llama_tab:
+        with gr.Row():
+            flash_attn = gr.Checkbox(value=False)
+            shift_attn = gr.Checkbox(value=False)
+            rope_scaling = gr.Dropdown(choices=["none", "linear", "dynamic"], value="none")
+
     lang.change(save_config, [lang, model_name, model_path])

     model_name.change(
@@ -62,5 +68,9 @@ def create_top() -> Dict[str, "Component"]:
         advanced_tab=advanced_tab,
         quantization_bit=quantization_bit,
         template=template,
-        system_prompt=system_prompt
+        system_prompt=system_prompt,
+        llama_tab=llama_tab,
+        flash_attn=flash_attn,
+        shift_attn=shift_attn,
+        rope_scaling=rope_scaling
     )
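
Note: create_top() returns its components in a dict so that sibling tabs can
look them up by name via top_elems["flash_attn"] and friends. A simplified
sketch of that pattern, trimmed to the new components only:

from typing import Dict
import gradio as gr
from gradio.components import Component

def create_top_sketch() -> Dict[str, "Component"]:
    with gr.Accordion(label="Model config (LLaMA only)", open=False) as llama_tab:
        with gr.Row():
            flash_attn = gr.Checkbox(value=False)
            shift_attn = gr.Checkbox(value=False)
            rope_scaling = gr.Dropdown(choices=["none", "linear", "dynamic"], value="none")
    return dict(llama_tab=llama_tab, flash_attn=flash_attn, shift_attn=shift_attn, rope_scaling=rope_scaling)

with gr.Blocks() as demo:
    top_elems = create_top_sketch()
    # Any tab can now wire the shared flags into its own events:
    # btn.click(fn, [top_elems["flash_attn"], top_elems["rope_scaling"]], [out])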


@@ -55,8 +55,6 @@ def create_train_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dic
                 logging_steps = gr.Slider(value=5, minimum=5, maximum=1000, step=5)
                 save_steps = gr.Slider(value=100, minimum=10, maximum=5000, step=10)
                 warmup_steps = gr.Slider(value=0, minimum=0, maximum=5000, step=1)
-                flash_attn = gr.Checkbox(value=False)
-                rope_scaling = gr.Checkbox(value=False)

         with gr.Accordion(label="LoRA config", open=False) as lora_tab:
             with gr.Row():
@@ -67,8 +65,8 @@ def create_train_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dic
         with gr.Accordion(label="RLHF config", open=False) as rlhf_tab:
             with gr.Row():
-                dpo_beta = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=2)
-                reward_model = gr.Dropdown(scale=2)
+                dpo_beta = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=1)
+                reward_model = gr.Dropdown(scale=3)
                 refresh_btn = gr.Button(scale=1)

         refresh_btn.click(
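
Note: within a gr.Row, `scale` sets a component's width relative to its
siblings, so the change above moves the RLHF row from an even 2:2:1 split to
1:3:1, giving the reward_model dropdown most of the row. Illustrative sketch:

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        # 1 : 3 : 1, so the dropdown now takes three fifths of the row.
        dpo_beta = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=1)
        reward_model = gr.Dropdown(choices=[], scale=3)
        refresh_btn = gr.Button("Refresh", scale=1)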
@@ -105,6 +103,9 @@ def create_train_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dic
             top_elems["quantization_bit"],
             top_elems["template"],
             top_elems["system_prompt"],
+            top_elems["flash_attn"],
+            top_elems["shift_attn"],
+            top_elems["rope_scaling"],
             training_stage,
             dataset_dir,
             dataset,
@@ -121,8 +122,6 @@ def create_train_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dic
             logging_steps,
             save_steps,
             warmup_steps,
-            flash_attn,
-            rope_scaling,
             lora_rank,
             lora_dropout,
             lora_target,
@@ -167,8 +166,6 @@ def create_train_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dic
         logging_steps=logging_steps,
         save_steps=save_steps,
         warmup_steps=warmup_steps,
-        flash_attn=flash_attn,
-        rope_scaling=rope_scaling,
         lora_tab=lora_tab,
         lora_rank=lora_rank,
         lora_dropout=lora_dropout,
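
Net effect: flash_attn and rope_scaling are no longer train-only options.
Together with the new shift_attn flag they move into a shared
"Model config (LLaMA only)" accordion, so training, evaluation and inference
all receive the same three values, and rope_scaling becomes a dropdown
(none/linear/dynamic) instead of a plain checkbox. A hypothetical sketch of
how a runner could fold the flags into model arguments (function and argument
names are assumed here, not taken from the repo):

def build_model_args(flash_attn: bool, shift_attn: bool, rope_scaling: str) -> dict:
    # Follow the UI convention used elsewhere in this commit:
    # the string "none" means the option is disabled entirely.
    return dict(
        flash_attn=flash_attn,
        shift_attn=shift_attn,
        rope_scaling=None if rope_scaling == "none" else rope_scaling,
    )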