add vllm config

Former-commit-id: 95365f0ce4f362bde7de8b679b54b548d7055bfb
hiyouga
2024-11-10 21:28:18 +08:00
parent fcb6283a72
commit 1e6f96508a
34 changed files with 44 additions and 34 deletions

@@ -83,6 +83,7 @@ class VllmEngine(BaseEngine):
             "enable_lora": model_args.adapter_name_or_path is not None,
             "max_lora_rank": model_args.vllm_max_lora_rank,
         }
+        engine_args.update(model_args.vllm_config)
         if getattr(config, "is_yi_vl_derived_model", None):
             import vllm.model_executor.models.llava
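
The added line merges user-supplied vLLM engine arguments into the defaults built just above it. A minimal sketch of the effect, assuming `vllm_config` is a dict-valued model argument; the other field names below are hypothetical stand-ins, not the project's actual attributes:

```python
# Sketch of engine_args.update(model_args.vllm_config); dict.update() lets
# user-supplied keys override the defaults built by the engine.
from dataclasses import dataclass, field
from typing import Any, Dict


@dataclass
class ModelArguments:
    vllm_maxlen: int = 4096          # stand-in for an existing vLLM-related option
    vllm_gpu_util: float = 0.9       # stand-in for an existing vLLM-related option
    vllm_config: Dict[str, Any] = field(default_factory=dict)  # new free-form overrides


model_args = ModelArguments(vllm_config={"enforce_eager": True, "swap_space": 8})

engine_args = {
    "max_model_len": model_args.vllm_maxlen,
    "gpu_memory_utilization": model_args.vllm_gpu_util,
}
engine_args.update(model_args.vllm_config)  # extra keys are added, clashing keys are overridden
print(engine_args)
# {'max_model_len': 4096, 'gpu_memory_utilization': 0.9, 'enforce_eager': True, 'swap_space': 8}
```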
@@ -173,7 +174,7 @@ class VllmEngine(BaseEngine):
             multi_modal_data = None
 
         result_generator = self.model.generate(
-            inputs={"prompt_token_ids": prompt_ids, "multi_modal_data": multi_modal_data},
+            {"prompt_token_ids": prompt_ids, "multi_modal_data": multi_modal_data},
            sampling_params=sampling_params,
            request_id=request_id,
            lora_request=self.lora_request,
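
For context, `generate()` on vLLM's async engine returns an async generator that yields `RequestOutput` objects as decoding progresses, with the last item carrying the full completion. A minimal sketch of how such a generator is typically drained (the helper name is hypothetical, not part of this codebase):

```python
# Hypothetical helper: drain the async result generator returned by the engine;
# each iteration yields a RequestOutput whose .outputs hold the text generated so far.
async def collect_completions(result_generator):
    final_output = None
    async for request_output in result_generator:
        final_output = request_output  # keep only the latest (most complete) output
    return [completion.text for completion in final_output.outputs]
```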