From 44eadbda1ca0ed9805bf6ef479c886fecd30a460 Mon Sep 17 00:00:00 2001
From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com>
Date: Sat, 17 Jan 2026 09:24:54 +0800
Subject: [PATCH] [v1] fix kernel moe patch (#9867)

---
 .../v1/plugins/model_plugins/kernels/ops/mlp/npu_fused_moe.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_fused_moe.py b/src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_fused_moe.py
index 62854777d..7b4e29269 100644
--- a/src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_fused_moe.py
+++ b/src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_fused_moe.py
@@ -324,7 +324,7 @@ class NpuFusedMoEKernel(BaseKernel):
         if not cls.check_deps():
             raise RuntimeError("torch_npu is not available but NpuMoEFusedMoEKernel was called.")
 
-        archs = getattr(model.config, "architectures", [])
+        archs = getattr(model.config, "architectures", None) or []
         target_moe_mapping = None
         for arch in archs:
             if arch in kernel_moe_mapping:
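
A minimal sketch (not part of the patch) of the failure mode the one-line change addresses: `getattr` only falls back to its default when the attribute is *missing*, so a config whose `architectures` field exists but is `None` slips through and the later `for arch in archs` loop raises. The `config` object below is hypothetical, standing in for `model.config`.

```python
from types import SimpleNamespace

# Hypothetical config where `architectures` is present but set to None.
config = SimpleNamespace(architectures=None)

# Old pattern: the default [] is not used because the attribute exists,
# so archs_old is None and iterating over it raises TypeError.
archs_old = getattr(config, "architectures", [])
print(archs_old)  # -> None

# Patched pattern: `or []` also covers the present-but-None case.
archs_new = getattr(config, "architectures", None) or []
print(archs_new)  # -> []

for arch in archs_new:  # safe: iterates over an empty list instead of raising
    pass
```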