fix mixed mm inputs and rlhf-v
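Return None placeholders instead of fake image tensors for samples that carry no images, so multimodal and text-only samples can be mixed in a batch. Make every plugin's process_messages check that the number of images matches the number of image placeholders, compute the PaliGemma token type ids from the actual image count, drop the per-plugin process_model_inputs hook, and raise a clearer error in get_mm_plugin for unknown plugin names.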
Former-commit-id: 7c248fac20bf85d57a91132ce7a793c7f84e9218
@@ -1,3 +1,4 @@
+from copy import deepcopy
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple
 
 from PIL.Image import Image
@@ -27,32 +28,33 @@ def _get_mm_inputs(images: Sequence["ImageObject"], processor: "ProcessorMixin")
 
     Returns: (qwen2-vl)
         pixel_values: tensor with shape (num_patches, patch_dim)
-        image_grid_thw: tensot with shape (num_images, 3), where the three numbers are time, width, height
+        image_grid_thw: tensor with shape (num_images, 3), where the three numbers are time, width, height
 
     It holds num_patches == torch.prod(image_grid_thw)
     """
     image_processor: "BaseImageProcessor" = getattr(processor, "image_processor")
     if len(images) != 0:
         image_inputs = image_processor(images=images, return_tensors="pt")
-    else:
-        image = Image.new("RGB", (56, 56), (255, 255, 255))
+    else:  # add NoneType for fake images
+        image = Image.new("RGB", (64, 64), (255, 255, 255))
         image_inputs = image_processor(images=[image], return_tensors="pt")
-        if "image_grid_thw" in image_inputs:  # fake image for qwen2-vl
-            image_inputs["image_grid_thw"][0][0] = 0
+        image_inputs = {key: None for key in image_inputs.keys()}
 
     return image_inputs
 
 
-def _get_paligemma_token_type_ids(input_len: int, processor: "ProcessorMixin") -> List[List[int]]:
+def _get_paligemma_token_type_ids(
+    images: Sequence["ImageObject"], input_len: int, processor: "ProcessorMixin"
+) -> List[List[int]]:
     r"""
     Gets paligemma token type ids for computing loss.
 
     Returns:
         token_type_ids: shape (1, seq_len)
     """
-    image_processor: "BaseImageProcessor" = getattr(processor, "image_processor")
-    image_seq_length: int = getattr(image_processor, "image_seq_length")
-    return [[0] * image_seq_length + [1] * (input_len - image_seq_length)]
+    num_images = len(images)
+    image_seqlen = num_images * getattr(processor, "image_seqlen")
+    return [[0] * image_seqlen + [1] * (input_len - image_seqlen)]
 
 
 class BasePlugin:
@@ -74,6 +76,7 @@ class BasePlugin:
         self,
         input_ids: List[int],
         labels: Optional[List[int]],
+        images: Sequence["ImageObject"],
         tokenizer: "PreTrainedTokenizer",
         processor: Optional["ProcessorMixin"],
     ) -> Tuple[List[int], Optional[List[int]]]:
@@ -93,18 +96,6 @@ class BasePlugin:
         """
         return {}
 
-    def process_model_inputs(
-        self,
-        model_inputs: Dict[str, List[Any]],
-        images: Sequence["ImageObject"],
-        feature_seqlens: Dict[str, int],
-        processor: Optional["ProcessorMixin"],
-    ) -> None:
-        r"""
-        Appends multimodal inputs to model inputs for VLMs.
-        """
-        return
-
 
 class LlavaPlugin(BasePlugin):
     def process_messages(
@@ -113,21 +104,21 @@ class LlavaPlugin(BasePlugin):
         images: Sequence["ImageObject"],
         processor: Optional["ProcessorMixin"],
     ) -> List[Dict[str, str]]:
-        image_count = 0
-        new_messages = []
+        num_images = 0
+        image_seqlen = getattr(processor, "image_seqlen")
+        messages = deepcopy(messages)
         for message in messages:
             content = message["content"]
             while IMAGE_PLACEHOLDER in content:
-                image_count += 1
-                if image_count > 1:
-                    raise ValueError("Llava model only accepts one image per sample.")
-
+                num_images += 1
                 content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)
 
-            content = content.replace("{{image}}", self.image_token)
-            new_messages.append({"role": message["role"], "content": content})
+            message["content"] = content.replace("{{image}}", self.image_token * image_seqlen)
 
-        return new_messages
+        if len(images) != num_images:
+            raise ValueError("The number of images does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))
+
+        return messages
 
     def get_mm_inputs(
         self,
@@ -137,17 +128,6 @@ class LlavaPlugin(BasePlugin):
     ) -> Dict[str, Any]:
         return _get_mm_inputs(images, processor)
 
-    def process_model_inputs(
-        self,
-        model_inputs: Dict[str, List[Any]],
-        images: Sequence["ImageObject"],
-        feature_seqlens: Dict[str, int],
-        processor: Optional["ProcessorMixin"],
-    ) -> None:
-        mm_inputs = self.get_mm_inputs(images, feature_seqlens, processor)
-        for key, value in mm_inputs.items():
-            model_inputs[key].append(value[0])
-
 
 class PaliGemmaPlugin(BasePlugin):
     def process_messages(
@@ -156,34 +136,35 @@ class PaliGemmaPlugin(BasePlugin):
         images: Sequence["ImageObject"],
         processor: Optional["ProcessorMixin"],
     ) -> List[Dict[str, str]]:
-        image_count = 0
-        new_messages = []
+        num_images = 0
+        messages = deepcopy(messages)
         for message in messages:
             content = message["content"]
             while IMAGE_PLACEHOLDER in content:
-                image_count += 1
-                if image_count > 1:
-                    raise ValueError("PaliGemma model only accepts one image per sample.")
+                num_images += 1
+                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)
 
-                content = content.replace(IMAGE_PLACEHOLDER, "", 1)
+            message["content"] = content.replace("{{image}}", "")
 
-            new_messages.append({"role": message["role"], "content": content})
+        if len(images) != num_images:
+            raise ValueError("The number of images does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))
 
-        return new_messages
+        return messages
 
     def process_token_ids(
         self,
         input_ids: List[int],
         labels: Optional[List[int]],
+        images: Sequence["ImageObject"],
         tokenizer: "PreTrainedTokenizer",
         processor: Optional["ProcessorMixin"],
     ) -> Tuple[List[int], Optional[List[int]]]:
-        image_processor: "BaseImageProcessor" = getattr(processor, "image_processor")
-        image_seq_length: int = getattr(image_processor, "image_seq_length")
+        num_images = len(images)
+        image_seqlen = num_images * getattr(processor, "image_seqlen")
         image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
-        input_ids = [image_token_id] * image_seq_length + input_ids
+        input_ids = [image_token_id] * image_seqlen + input_ids
         if labels is not None:
-            labels = [IGNORE_INDEX] * image_seq_length + labels
+            labels = [IGNORE_INDEX] * image_seqlen + labels
 
         return input_ids, labels
 
@@ -195,21 +176,10 @@ class PaliGemmaPlugin(BasePlugin):
     ) -> Dict[str, Any]:
         mm_inputs = _get_mm_inputs(images, processor)
         for feature_name, feature_length in feature_seqlens.items():
-            mm_inputs[feature_name] = _get_paligemma_token_type_ids(feature_length, processor)
+            mm_inputs[feature_name] = _get_paligemma_token_type_ids(images, feature_length, processor)
 
         return mm_inputs
 
-    def process_model_inputs(
-        self,
-        model_inputs: Dict[str, List[Any]],
-        images: Sequence["ImageObject"],
-        feature_seqlens: Dict[str, int],
-        processor: Optional["ProcessorMixin"],
-    ) -> None:
-        mm_inputs = self.get_mm_inputs(images, feature_seqlens, processor)
-        for key, value in mm_inputs.items():
-            model_inputs[key].append(value[0])
-
 
 class Qwen2vlPlugin(BasePlugin):
     def process_messages(
@@ -223,23 +193,26 @@ class Qwen2vlPlugin(BasePlugin):
         if len(images) > 0:
             image_grid_thw = _get_mm_inputs(images, processor)["image_grid_thw"]
 
-        index = 0
-        new_messages = []
+        num_images = 0
+        messages = deepcopy(messages)
         for message in messages:
             content = message["content"]
             while IMAGE_PLACEHOLDER in content:
                 content = content.replace(
                     IMAGE_PLACEHOLDER,
                     "<|vision_start|>{}<|vision_end|>".format(
-                        self.image_token * (image_grid_thw[index].prod() // merge_length)
+                        self.image_token * (image_grid_thw[num_images].prod() // merge_length)
                     ),
                     1,
                 )
-                index += 1
+                num_images += 1
 
-            new_messages.append({"role": message["role"], "content": content})
+            message["content"] = content
 
-        return new_messages
+        if len(images) != num_images:
+            raise ValueError("The number of images does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))
+
+        return messages
 
     def get_mm_inputs(
         self,
@@ -249,17 +222,6 @@ class Qwen2vlPlugin(BasePlugin):
     ) -> Dict[str, Any]:
         return _get_mm_inputs(images, processor)
 
-    def process_model_inputs(
-        self,
-        model_inputs: Dict[str, List[Any]],
-        images: Sequence["ImageObject"],
-        feature_seqlens: Dict[str, int],
-        processor: Optional["ProcessorMixin"],
-    ) -> None:
-        mm_inputs = self.get_mm_inputs(images, feature_seqlens, processor)
-        for key, value in mm_inputs.items():
-            model_inputs[key].append(value)  # support multi-image
-
 
 PLUGINS = {
     "base": BasePlugin,
@@ -270,7 +232,8 @@ PLUGINS = {
 
 
 def get_mm_plugin(name: str, image_token: str) -> "BasePlugin":
-    if name not in PLUGINS:
-        raise ValueError("{} not found.".format(name))
+    plugin_class = PLUGINS.get(name, None)
+    if plugin_class is None:
+        raise ValueError("Multimodal plugin `{}` not found.".format(name))
 
-    return PLUGINS[name](image_token)
+    return plugin_class(image_token)
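For context, a minimal sketch of the new validation path in LlavaPlugin.process_messages and the reworked get_mm_plugin lookup. The registry key "llava", the "<image>" placeholder, the module path, and the stand-in processor are assumptions for illustration; only the "base" registry entry and the image_seqlen attribute are visible in this diff.

    # Illustrative sketch only: the plugin name "llava" and the "<image>"
    # placeholder are assumed; the PLUGINS dict above only shows "base".
    from types import SimpleNamespace

    from llamafactory.data.mm_plugin import get_mm_plugin

    # Stand-in for a transformers processor: LLaMA-Factory attaches an
    # `image_seqlen` attribute to the real ProcessorMixin elsewhere; 576 is
    # an arbitrary value for this example.
    processor = SimpleNamespace(image_seqlen=576)

    plugin = get_mm_plugin(name="llava", image_token="<image>")
    messages = [{"role": "user", "content": "<image>What is in the photo?"}]

    # One placeholder but zero images: the patched process_messages raises
    # instead of silently producing mismatched inputs.
    try:
        plugin.process_messages(messages, images=[], processor=processor)
    except ValueError as err:
        print(err)  # The number of images does not match the number of <image> tokens

    # Unknown plugin names fail with the clearer error introduced above.
    try:
        get_mm_plugin(name="missing_plugin", image_token="<image>")
    except ValueError as err:
        print(err)  # Multimodal plugin `missing_plugin` not found.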