[feat] Support Megatron-LM training via mcore_adapter (#9237)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Yaowei Zheng <hiyouga@buaa.edu.cn>
Author: Kingsley
Date: 2025-10-26 16:21:30 +08:00
Committed by: GitHub
Parent: 129e918106
Commit: 13170577b2
14 changed files with 671 additions and 8 deletions

@@ -0,0 +1,29 @@
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
image_max_pixels: 262144
video_max_pixels: 16384
do_train: true
stage: sft
finetuning_type: full # only full is supported for now
dataset: llava_1k_en
preprocessing_num_workers: 8
cutoff_len: 4096
template: qwen2_vl
output_dir: saves/mca/qwen2_vl_full
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
num_train_epochs: 2
learning_rate: 2e-5
logging_steps: 1
save_steps: 100
lr_scheduler_type: cosine
bf16: true
# mcore speed-up options
tensor_model_parallel_size: 4
sequence_parallel: true
pipeline_model_parallel_size: 2
bias_activation_fusion: true
apply_rope_fusion: true
use_distributed_optimizer: true
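
A minimal illustrative sketch (not part of the commit) of how the parallel sizes above map to a data-parallel degree and global batch size, assuming a single 8-GPU node and standard Megatron-LM conventions:

# Illustrative only: derive the data-parallel degree and global batch size
# implied by the Megatron-style settings in the config above (assumed 8 GPUs).
def derive_layout(world_size: int, tp: int, pp: int,
                  micro_batch: int, grad_accum: int) -> tuple[int, int]:
    assert world_size % (tp * pp) == 0, "world_size must be divisible by TP * PP"
    dp = world_size // (tp * pp)                  # data-parallel degree
    global_batch = dp * micro_batch * grad_accum  # samples per optimizer step
    return dp, global_batch

# tensor_model_parallel_size=4, pipeline_model_parallel_size=2,
# per_device_train_batch_size=1, gradient_accumulation_steps=2
print(derive_layout(8, tp=4, pp=2, micro_batch=1, grad_accum=2))  # (1, 2)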

@@ -0,0 +1,35 @@
model_name_or_path: Qwen/Qwen3-30B-A3B-Instruct-2507
# GPU memory: 8 * 78GB
do_train: true
stage: sft
finetuning_type: full # only full is supported for now
dataset: alpaca_en_demo
preprocessing_num_workers: 8
cutoff_len: 4096
template: qwen3_nothink
# global batch size = (8 // 2 // 4) * 8 = 8
output_dir: saves/mca/qwen3_moe_full
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
num_train_epochs: 2
learning_rate: 3e-6
logging_steps: 1
save_steps: 100
lr_scheduler_type: constant
bf16: true
# mcore speed-up options
tensor_model_parallel_size: 1
sequence_parallel: false
pipeline_model_parallel_size: 4
bias_activation_fusion: true
apply_rope_fusion: true
use_distributed_optimizer: true
overlap_param_gather: true
overlap_grad_reduce: true
moe_grouped_gemm: true
moe_token_dispatcher_type: alltoall
expert_model_parallel_size: 2
recompute_granularity: full
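
A similar illustrative check (not part of the commit) that the expert-parallel size fits the rest of the layout, assuming standard Megatron-Core constraints on an 8-GPU node:

# Illustrative only: verify that expert parallelism divides the data-parallel
# group implied by the MoE config above (assumed 8 GPUs).
def check_moe_layout(world_size: int, tp: int, pp: int, ep: int) -> int:
    assert world_size % (tp * pp) == 0, "world_size must be divisible by TP * PP"
    dp = world_size // (tp * pp)          # data-parallel degree for dense params
    assert dp % ep == 0, "expert_model_parallel_size must divide the DP degree"
    return dp

# tensor_model_parallel_size=1, pipeline_model_parallel_size=4,
# expert_model_parallel_size=2
print(check_moe_layout(8, tp=1, pp=4, ep=2))  # 2

Assuming these files are shipped as examples, training can presumably be launched with the usual entry point, e.g. llamafactory-cli train <path-to-yaml> (with FORCE_TORCHRUN=1 for multi-GPU runs), though the exact launcher for mcore runs may differ.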