fix quant infer and qwen2moe

Former-commit-id: b75d16767f35c36e2cf2aaab8a3844135085bccf
hiyouga
2024-04-09 17:12:59 +08:00
parent 6030a4a720
commit 566d71b7a9
2 changed files with 3 additions and 3 deletions


@@ -109,9 +109,6 @@ def load_model(
     if not is_trainable:
         model.requires_grad_(False)
         model.eval()
-        for param in model.parameters():
-            if param.device.type == "cuda":
-                param.data = param.data.to(model_args.compute_dtype)
     else:
         model.train()
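
For context, the removed lines cast every CUDA-resident parameter to model_args.compute_dtype after switching the model to eval mode. The sketch below reproduces that pattern in isolation; "gpt2", the cuda() placement, and torch.float16 are placeholders standing in for the actual model and model_args values, not part of this commit.

import torch
from transformers import AutoModelForCausalLM

# Illustrative sketch of the pattern removed in this hunk: after eval(),
# every parameter already on the GPU is cast to the compute dtype.
compute_dtype = torch.float16  # placeholder for model_args.compute_dtype

model = AutoModelForCausalLM.from_pretrained("gpt2")
if torch.cuda.is_available():
    model = model.cuda()

model.requires_grad_(False)
model.eval()

for param in model.parameters():
    if param.device.type == "cuda":
        # For quantized checkpoints (e.g. GPTQ/AWQ), parameters can be packed
        # integer buffers; forcing them to a float dtype breaks inference,
        # which is presumably the "quant infer" issue this commit fixes.
        param.data = param.data.to(compute_dtype)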