[misc] update mcore related docker and mca supported models (#10114)

Author: Kingsley (committed by GitHub)
Date: 2026-01-19 14:55:16 +08:00
parent 44eadbda1c
commit db2f794f7b
3 changed files with 18 additions and 18 deletions


@@ -57,6 +57,7 @@ LLAMABOARD_CONFIG = "llamaboard_config.yaml"
 
 MCA_SUPPORTED_MODELS = {
     "deepseek_v3",
+    "glm4_moe",
     "llama",
     "mistral",
     "mixtral",


@@ -340,7 +340,7 @@ def get_train_args(args: dict[str, Any] | list[str] | None = None) -> _TRAIN_CLS
     if training_args.deepspeed is not None and (finetuning_args.use_galore or finetuning_args.use_apollo):
         raise ValueError("GaLore and APOLLO are incompatible with DeepSpeed yet.")
 
-    if training_args.fp8 and model_args.quantization_bit is not None:
+    if not finetuning_args.use_mca and training_args.fp8 and model_args.quantization_bit is not None:
         raise ValueError("FP8 training is not compatible with quantization. Please disable one of them.")
 
     if model_args.infer_backend != EngineName.HF:
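
This hunk narrows the FP8-vs-quantization guard: with use_mca enabled, the ValueError is no longer raised, presumably because the MCA backend applies its own precision handling. A runnable sketch of the resulting control flow, using SimpleNamespace stand-ins for the parsed argument dataclasses (the stand-ins are assumptions; the condition is taken verbatim from the diff):

# Sketch only; real values come from LLaMA-Factory's argument parser.
from types import SimpleNamespace

finetuning_args = SimpleNamespace(use_mca=True)
training_args = SimpleNamespace(fp8=True)
model_args = SimpleNamespace(quantization_bit=4)

if not finetuning_args.use_mca and training_args.fp8 and model_args.quantization_bit is not None:
    raise ValueError("FP8 training is not compatible with quantization. Please disable one of them.")

print("use_mca=True: FP8 + quantization check bypassed")  # reached, no error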
@@ -359,7 +359,7 @@ def get_train_args(args: dict[str, Any] | list[str] | None = None) -> _TRAIN_CLS
     _verify_model_args(model_args, data_args, finetuning_args)
     _check_extra_dependencies(model_args, finetuning_args, training_args)
 
-    if training_args.fp8_enable_fsdp_float8_all_gather and not training_args.fp8:
+    if not finetuning_args.use_mca and training_args.fp8_enable_fsdp_float8_all_gather and not training_args.fp8:
         logger.warning_rank0("fp8_enable_fsdp_float8_all_gather requires fp8=True. Setting fp8=True.")
         model_args.fp8 = True
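
Same pattern here: the auto-enabling of fp8 when fp8_enable_fsdp_float8_all_gather is set now applies only to the non-MCA path. A runnable sketch with the same SimpleNamespace stand-ins; note that the diff flips model_args.fp8 (kept verbatim below) even though the condition reads training_args.fp8:

# Sketch only; condition and assignment mirror the diff verbatim.
from types import SimpleNamespace

finetuning_args = SimpleNamespace(use_mca=False)
training_args = SimpleNamespace(fp8=False, fp8_enable_fsdp_float8_all_gather=True)
model_args = SimpleNamespace(fp8=False)

if not finetuning_args.use_mca and training_args.fp8_enable_fsdp_float8_all_gather and not training_args.fp8:
    print("warning: fp8_enable_fsdp_float8_all_gather requires fp8=True. Setting fp8=True.")
    model_args.fp8 = True

assert model_args.fp8  # non-MCA path: fp8 was force-enabled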