[model] do not force load processor (#8457)

This commit is contained in:
Yaowei Zheng
2025-06-25 19:43:00 +08:00
committed by GitHub
parent 48897e5b16
commit f276b9a963
3 changed files with 7 additions and 5 deletions

View File

@@ -111,9 +111,8 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
**init_kwargs,
)
except Exception as e:
-        raise OSError("Failed to load processor.") from e
-    patch_processor(processor, tokenizer, model_args)
+        logger.info_rank0(f"Failed to load processor: {e}.")
+        processor = None
# Avoid load tokenizer, see:
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/auto/processing_auto.py#L324
@@ -121,6 +120,9 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
logger.debug("The loaded processor is not an instance of Processor. Dropping it.")
processor = None
+    if processor is not None:
+        patch_processor(processor, tokenizer, model_args)
return {"tokenizer": tokenizer, "processor": processor}