support LoRA for LLaMA Pro

Author: hiyouga
Date:   2024-02-21 02:17:22 +08:00
Parent: a3f30038a0
Commit: bc16c9a54a

7 changed files with 119 additions and 28 deletions

@@ -108,6 +108,18 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
         )
     )
 
+    with gr.Accordion(label="Freeze config", open=False) as freeze_tab:
+        with gr.Row():
+            num_layer_trainable = gr.Slider(value=3, minimum=1, maximum=128, step=1, scale=2)
+            name_module_trainable = gr.Textbox(scale=3)
+
+    input_elems.update({num_layer_trainable, name_module_trainable})
+    elem_dict.update(
+        dict(
+            freeze_tab=freeze_tab, num_layer_trainable=num_layer_trainable, name_module_trainable=name_module_trainable
+        )
+    )
+
     with gr.Accordion(label="LoRA config", open=False) as lora_tab:
         with gr.Row():
             lora_rank = gr.Slider(value=8, minimum=1, maximum=1024, step=1)
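For context, the pattern this hunk adds can be reproduced standalone. Below is a minimal sketch assuming only that gradio is installed: the Blocks/demo wiring is illustrative and not part of the diff, and the interpretation of the two fields (number of trainable layers for freeze-tuning, names of modules kept trainable) is an assumption about how the webui consumes them, not something the diff itself states. The surrounding create_train_tab machinery (engine, locales, the other tabs) is omitted.

import gradio as gr

with gr.Blocks() as demo:
    input_elems = set()  # components whose current values are collected into the training command
    elem_dict = {}       # components addressed by name, e.g. for locale/label updates

    # "Freeze config" accordion mirroring the block added in this commit
    with gr.Accordion(label="Freeze config", open=False) as freeze_tab:
        with gr.Row():
            # how many hidden layers stay trainable in freeze-tuning (assumption: the last n layers)
            num_layer_trainable = gr.Slider(value=3, minimum=1, maximum=128, step=1, scale=2)
            # names of modules kept trainable, e.g. "mlp" (assumption: comma-separated list)
            name_module_trainable = gr.Textbox(scale=3)

    input_elems.update({num_layer_trainable, name_module_trainable})
    elem_dict.update(
        dict(
            freeze_tab=freeze_tab,
            num_layer_trainable=num_layer_trainable,
            name_module_trainable=name_module_trainable,
        )
    )

if __name__ == "__main__":
    demo.launch()

Registering each component in both input_elems (a set read when building the training command) and elem_dict (a name-to-component map) is the convention the rest of create_train_tab already follows; the new freeze controls simply join it.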