Upload processing_minicpmv.py with huggingface_hub
processing_minicpmv.py
ADDED
@@ -0,0 +1,581 @@
# coding=utf-8
# Copyright 2024 RhapsodyAI and ModelBest Inc. and Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from typing import List, Optional, Union, Dict

import math
import torch
from torchvision import transforms
from PIL import Image

import transformers
from transformers.feature_extraction_utils import BatchFeature
from transformers.image_utils import ImageInput
from transformers.processing_utils import ProcessorMixin
from transformers.tokenization_utils_base import PaddingStrategy, TextInput, TruncationStrategy, PreTokenizedInput
from transformers.utils import TensorType
from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
# from transformers.image_transforms import (
#     convert_to_rgb,
# )

from transformers import LlamaTokenizer  # for text processing

from transformers.utils import logging

logger = logging.get_logger(__name__)

# image tokenizer
def ensure_divide(length, patch_size):
    return max(round(length / patch_size) * patch_size, patch_size)

def find_best_resize(original_size, scale_resolution, patch_size, allow_upscale=False):
    width, height = original_size
    if (width * height > scale_resolution * scale_resolution) or allow_upscale:
        r = width / height
        height = int(scale_resolution / math.sqrt(r))
        width = int(height * r)
    best_width = ensure_divide(width, patch_size)
    best_height = ensure_divide(height, patch_size)
    return (best_width, best_height)

def get_refine_size(
    original_size, grid, scale_resolution, patch_size, allow_upscale=False
):
    width, height = original_size
    grid_x, grid_y = grid

    refine_width = ensure_divide(width, grid_x)
    refine_height = ensure_divide(height, grid_y)

    grid_width = refine_width / grid_x
    grid_height = refine_height / grid_y

    best_grid_size = find_best_resize(
        (grid_width, grid_height),
        scale_resolution,
        patch_size,
        allow_upscale=allow_upscale,
    )

    refine_size = (best_grid_size[0] * grid_x, best_grid_size[1] * grid_y)

    return refine_size

def split_to_patches(image, grid):
    patches = []
    width, height = image.size
    grid_x = int(width / grid[0])
    grid_y = int(height / grid[1])

    for i in range(0, height, grid_y):
        images = []
        for j in range(0, width, grid_x):
            box = (j, i, j + grid_x, i + grid_y)
            patch = image.crop(box)
            logger.info(f"split_to_patches: crop box = {box}")
            images.append(patch)
        patches.append(images)

    return patches

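# Worked example (for illustration): ensure_divide(100, 14) == 98, since round(100 / 14) * 14 = 7 * 14.
# find_best_resize((800, 600), scale_resolution=448, patch_size=14) first rescales the image so its area is
# roughly 448 * 448 while keeping the aspect ratio, then snaps both sides to multiples of 14, giving (518, 392).
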
def slice_image(
    image,
    max_slice_nums=9,
    scale_resolution=448,
    patch_size=14,
    never_split=False
):
    original_size = image.size
    original_width, original_height = original_size
    log_ratio = math.log(original_width / original_height)
    ratio = original_width * original_height / (scale_resolution * scale_resolution)
    multiple = min(math.ceil(ratio), max_slice_nums)

    source_image = None
    best_grid = None
    patches = []

    if multiple <= 1 or never_split:
        # don't need to slice, upsample
        best_size = find_best_resize(
            original_size, scale_resolution, patch_size, allow_upscale=True
        )
        source_image = image.resize(best_size, Image.Resampling.BICUBIC)
    else:
        candidate_split_grids_nums = []
        for i in [multiple - 1, multiple, multiple + 1]:
            if i == 1 or i > max_slice_nums:
                continue
            candidate_split_grids_nums.append(i)

        # source image, down-sampling and ensure divided by patch_size
        best_resize = find_best_resize(original_size, scale_resolution, patch_size)
        source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC)
        candidate_grids = []

        # find best grid
        for split_grids_nums in candidate_split_grids_nums:
            m = 1
            while m <= split_grids_nums:
                if split_grids_nums % m == 0:
                    candidate_grids.append([m, split_grids_nums // m])
                m += 1

        best_grid = [1, 1]
        min_error = float("inf")
        for grid in candidate_grids:
            error = abs(log_ratio - math.log(grid[0] / grid[1]))
            if error < min_error:
                best_grid = grid
                min_error = error

        refine_size = get_refine_size(
            original_size, best_grid, scale_resolution, patch_size, allow_upscale=True
        )

        refine_image = image.resize(refine_size, Image.Resampling.BICUBIC)
        patches = split_to_patches(refine_image, best_grid)

    return source_image, patches, best_grid

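# Worked example (for illustration): for a 1344x896 image with the defaults above, the area ratio is
# 1344 * 896 / 448**2 = 6, so the candidate slice counts are 5, 6 and 7. Among their factorizations,
# the grid [3, 2] (3 columns x 2 rows) matches log(1344 / 896) exactly, so slice_image returns the
# down-sampled source image, a 2x3 nested list of six slice crops, and best_grid == [3, 2].
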
def reshape_by_patch(image_tensor, patch_size=14):
    """
    :param image_tensor: shape [3, H, W]
    :param patch_size: side length of each square patch
    :return: [3, patch_size, HW/patch_size]
    """
    patches = torch.nn.functional.unfold(
        image_tensor,
        (patch_size, patch_size),
        stride=(patch_size, patch_size)
    )

    patches = patches.reshape(image_tensor.size(0), patch_size, patch_size, -1)
    patches = patches.permute(0, 1, 3, 2).reshape(image_tensor.size(0), patch_size, -1)
    return patches

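# Shape example (for illustration): for a [3, 224, 224] tensor and patch_size=14, unfold produces
# 16 * 16 = 256 patches of 14x14 per channel, and the returned tensor has shape [3, 14, 3584],
# where 3584 = 224 * 224 / 14.
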
class MiniCPMVImageProcessor(BaseImageProcessor):
    r"""
    MiniCPMV image processor, based on the Phi3 image processor and the LLaVA-UHD approach: a single image is
    dynamically sliced into sub-images.

    Args:
        image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """

    def __init__(
        self,
        query_num: int = 64,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        max_slice_nums: int = 9,
        scale_resolution: int = 448,
        patch_size: int = 14,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.query_num = query_num
        self.image_mean = image_mean
        self.image_std = image_std
        self.max_slice_nums = max_slice_nums
        self.scale_resolution = scale_resolution
        self.patch_size = patch_size

    def preprocess(
        self,
        image,
        slice_mode: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ):
        """
        Args:
            image (`ImageInput`):
                Image to preprocess. Expects a single image with pixel values ranging from 0 to 255. If
                passing in an image with pixel values between 0 and 1, set `do_rescale=False`. # modified: one image per invoke.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
        """

        transform = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(mean=self.image_mean, std=self.image_std)
            ]
        )

        images_ = []
        tgt_sizes = []

        if slice_mode:
            slice_images = []
            source_image, patches, best_grid = slice_image(  # time-consuming
                image,
                self.max_slice_nums,
                self.scale_resolution,
                self.patch_size,
            )

            slice_images.append(source_image)
            if len(patches) > 0:
                for i in range(len(patches)):
                    for j in range(len(patches[0])):
                        slice_images.append(patches[i][j])

            for image_ in slice_images:
                slice_image_ = transform(image_)  # time-consuming
                H, W = slice_image_.shape[1:]
                slice_image_patchified_ = reshape_by_patch(slice_image_)
                images_.append(slice_image_patchified_)
                tgt_sizes.append(torch.Tensor([H // self.patch_size, W // self.patch_size]).type(torch.int32))

        else:
            best_grid = None
            image_ = transform(image)
            H, W = image_.shape[1:]
            image_patchified_ = reshape_by_patch(image_)
            images_.append(image_patchified_)  # time-consuming
            tgt_sizes.append(torch.Tensor([H // self.patch_size, W // self.patch_size]).type(torch.int32))

        return images_, tgt_sizes, best_grid

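# Note (for illustration): with slice_mode=True, preprocess returns one patchified tensor per sub-image
# (the resized source image plus every slice), each of shape [3, patch_size, H*W/patch_size]; tgt_sizes
# holds the matching [H // patch_size, W // patch_size] grid sizes, and best_grid is the slicing grid
# (or None when the image is small enough not to be sliced).
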
# text tokenizer
class MiniCPMVTextTokenizer(LlamaTokenizer):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.im_start = "<image>"
        self.im_end = "</image>"
        self.ref_start = "<ref>"
        self.ref_end = "</ref>"
        self.box_start = "<box>"
        self.box_end = "</box>"
        self.quad_start = "<quad>"
        self.quad_end = "</quad>"
        self.point_start = "<point>"
        self.point_end = "</point>"
        self.slice_start = "<slice>"
        self.slice_end = "</slice>"

    @property
    def eos_id(self):
        return self.sp_model.eos_id()

    @property
    def bos_id(self):
        return self.sp_model.bos_id()

    @property
    def unk_id(self):
        return self.sp_model.unk_id()

    @property
    def im_start_id(self):
        return self._convert_token_to_id(self.im_start)

    @property
    def im_end_id(self):
        return self._convert_token_to_id(self.im_end)

def get_grid_placeholder(tokenizer, grid, query_num):
    image_placeholder = (
        tokenizer.im_start + tokenizer.unk_token * query_num + tokenizer.im_end
    )

    cols = grid[0]
    rows = grid[1]
    slices = []
    for i in range(rows):
        lines = []
        for j in range(cols):
            lines.append(image_placeholder)
        slices.append("".join(lines))
    slice_placeholder = tokenizer.slice_start + "\n".join(slices) + tokenizer.slice_end
    return slice_placeholder

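# Example (for illustration): for best_grid == [3, 2] and query_num == 64, the placeholder is
# tokenizer.slice_start, then two lines joined by "\n", each line holding three copies of
# tokenizer.im_start + tokenizer.unk_token * 64 + tokenizer.im_end, then tokenizer.slice_end.
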
def pad(orig_items, max_length=None, padding_value=0, padding_side="left"):
    """
    Args:
        orig_items: a list of input_ids, each input_ids should be [1, length_i]
    """
    assert isinstance(orig_items, list)
    assert isinstance(orig_items[0], torch.Tensor)
    padding_value = 2  # NOTE: the `padding_value` argument is overridden here
    items = [t.squeeze() for t in orig_items]

    batch_size = len(items)
    shape = items[0].shape

    dim = len(shape)
    assert dim == 1, "This pad function only expects a batch of 1-D Tensor([seq_len]) inputs."  # assuming 1D tensors for simplicity

    if max_length is None:
        max_length = max(item.shape[0] for item in items)

    tensor = torch.full((batch_size, max_length), padding_value, dtype=items[0].dtype)
    attention_mask = torch.zeros((batch_size, max_length), dtype=torch.int8)

    for i, item in enumerate(items):
        length = item.shape[0]
        if padding_side == "left":
            raise Exception("Please use right padding")
            # note: unreachable after the raise above
            tensor[i, -length:] = item.clone()
            attention_mask[i, -length:] = 1
        else:
            tensor[i, 0:length] = item.clone()
            attention_mask[i, 0:length] = 1

    return_dict = {
        "input_ids": tensor,
        "attention_mask": attention_mask,
    }

    return return_dict

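# Example (for illustration): pad([torch.tensor([[5, 6, 7]]), torch.tensor([[8, 9]])], padding_side="right")
# returns input_ids [[5, 6, 7], [8, 9, 2]] and attention_mask [[1, 1, 1], [1, 1, 0]], because the
# hard-coded padding value 2 fills the shorter sequence on the right.
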
def convert_to_tokens(input_str, tokenizer, max_inp_length):
    if tokenizer.add_bos_token:
        input_ids = tokenizer.encode(input_str)
    else:
        input_ids = [tokenizer.bos_id] + tokenizer.encode(input_str)

    input_ids = input_ids[:max_inp_length]

    input_ids = torch.tensor(input_ids, dtype=torch.int32)

    image_start_tokens = torch.where(input_ids == tokenizer.im_start_id)[0]

    # skip past im_start
    image_start_tokens += 1
    image_end_tokens = torch.where(input_ids == tokenizer.im_end_id)[0]
    valid_image_nums = max(len(image_start_tokens), len(image_end_tokens))

    image_bound = torch.hstack(
        [
            image_start_tokens[:valid_image_nums].unsqueeze(-1),
            image_end_tokens[:valid_image_nums].unsqueeze(-1),
        ]
    )

    model_input = {}
    model_input["input_ids"] = input_ids.unsqueeze(0)
    model_input["image_bound"] = image_bound

    return model_input

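# Note (for illustration): each row of `image_bound` is a (start, end) pair of token positions covering the
# run of placeholder tokens between one `<image>` and its matching `</image>`, so downstream model code
# knows exactly where to place the visual features for each image or slice.
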
class MiniCPMVProcessor(ProcessorMixin):
    r"""
    Based on Siglip. Constructs a processor which wraps a MiniCPMV image processor and a tokenizer into a single
    processor.

    [`MiniCPMVProcessor`] offers all the functionalities of [`MiniCPMVImageProcessor`] and [`MiniCPMVTextTokenizer`].
    See [`~MiniCPMVProcessor.__call__`] for more information.

    Args:
        image_processor ([`MiniCPMVImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`MiniCPMVTextTokenizer`]):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"  # sorry, we can't find a way to make `image_processor_class` equal to `MiniCPMVImageProcessor`
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, query_num=64, slice_mode=True, max_inp_length=2048):
        super().__init__(image_processor, tokenizer)
        self.query_num = query_num
        self.slice_mode = slice_mode
        self.max_inp_length = max_inp_length

    def __call__(
        self,
        messages: List[List[Dict[str, Union[str, Image.Image]]]] = None,  # ChatML-style conversations
        slice_mode: bool = None,
        max_inp_length: int = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        padding_side: str = "right",  # `pad` only supports right padding, so default to "right"
        truncation: Union[bool, str, TruncationStrategy] = None,
        return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
    ) -> BatchFeature:
        """
        Main method to prepare one or several conversations (text and images) for the model.

        Args:
            messages (`List[List[Dict]]`):
                A batch of conversations. Each conversation is a list of messages of the form
                `{"role": "user" | "assistant", "content": str or PIL.Image.Image}`. The first message of a
                conversation must come from the user.
            slice_mode (`bool`, *optional*):
                Whether to slice large images into sub-images. Defaults to the value configured on the processor.
            max_inp_length (`int`, *optional*):
                Maximum length of the tokenized input; longer inputs are truncated. Defaults to the value
                configured on the processor.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:
                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
                  different lengths).
            padding_side (`str`, *optional*, defaults to `"right"`):
                Which side to pad on; only right padding is supported by `pad`.
            truncation (`bool`, *optional*):
                Activates truncation to cut input sequences longer than `max_length` to `max_length`.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
            - **image_bound** -- Per-sample (start, end) positions of the image placeholder tokens.
            - **pixel_values** -- Patchified pixel values of the (sliced) images.
            - **tgt_sizes** -- Per-slice target sizes, in patches.
        """
        # assert len(messages) == 1, 'Do not support batch > 1'

        if slice_mode is None:
            if self.slice_mode is None:
                raise ValueError("`slice_mode` is not specified by config or usage")
            else:
                slice_mode = self.slice_mode

        if max_inp_length is None:
            if self.max_inp_length is None:
                raise ValueError("`max_inp_length` is not specified by config or usage")
            else:
                max_inp_length = self.max_inp_length

        processed_subimages_all_data = []
        processed_text_all_data = []
        tgt_sizes_all_data = []

        for msgs in messages:
            assert len(msgs) > 0, 'msgs is empty'

            processed_text_all_msgs = []
            processed_subimages_all_msgs = []
            tgt_sizes_all_msgs = []

            # process each message; a conversation looks like [text/image, ...]
            for i, msg in enumerate(msgs):

                role = msg["role"]
                c = msg["content"]

                assert role in ["user", "assistant"]

                if i == 0:
                    assert role == "user", "The role of first msg should be user"

                if isinstance(c, Image.Image):

                    processed_subimages, tgt_sizes, best_grid = self.image_processor.preprocess(image=c, slice_mode=slice_mode)

                    # make image placeholders
                    if slice_mode:
                        cur_msg = (
                            self.tokenizer.im_start
                            + self.tokenizer.unk_token * self.query_num
                            + self.tokenizer.im_end
                        )
                        if len(processed_subimages) > 1:
                            cur_msg += get_grid_placeholder(
                                self.tokenizer, best_grid, self.query_num
                            )

                    else:
                        cur_msg = (
                            self.tokenizer.im_start
                            + self.tokenizer.unk_token * self.query_num
                            + self.tokenizer.im_end
                        )

                    tgt_sizes_all_msgs.extend(tgt_sizes)
                    processed_subimages_all_msgs.extend(processed_subimages)

                elif isinstance(c, str):
                    cur_msg = c

                else:
                    raise NotImplementedError(f"message {type(c)}: {c} can't be handled")

                role_title = "<用户>" if role == "user" else "<AI>"
                processed_text_all_msgs.append(role_title + cur_msg)

            processed_text_all_msgs_concat = "".join(processed_text_all_msgs)
            processed_text_all_msgs_concat += "<AI>"
            processed_text_all_data.append(processed_text_all_msgs_concat)

            processed_subimages_all_data.append(processed_subimages_all_msgs)
            tgt_sizes_all_msgs = torch.vstack(tgt_sizes_all_msgs)
            tgt_sizes_all_data.append(tgt_sizes_all_msgs)

        # convert text strings to tokens; at this step, `input_ids` and `image_bound` are added
        model_inputs_uncollated = []
        for text in processed_text_all_data:
            model_inputs_ = convert_to_tokens(
                text, max_inp_length=max_inp_length, tokenizer=self.tokenizer
            )
            model_inputs_uncollated.append(model_inputs_)

        # pad: in this step, the attention mask is added
        model_inputs_final = pad([i["input_ids"] for i in model_inputs_uncollated], padding_side=padding_side)

        # add image bound back
        model_inputs_final["image_bound"] = [i["image_bound"] for i in model_inputs_uncollated]

        # add pixel values
        model_inputs_final["pixel_values"] = processed_subimages_all_data

        # add target sizes
        model_inputs_final["tgt_sizes"] = tgt_sizes_all_data

        return BatchFeature(data=model_inputs_final, tensor_type=None)
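For reference, a minimal usage sketch of the classes defined above; the checkpoint path, normalization statistics, and example image are placeholders, and the actual repository may wire these components together through its own configuration files.

from PIL import Image

# Placeholders: substitute the real checkpoint directory and the statistics shipped with the model.
tokenizer = MiniCPMVTextTokenizer.from_pretrained("path/to/checkpoint")
image_processor = MiniCPMVImageProcessor(
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)
processor = MiniCPMVProcessor(image_processor, tokenizer)

# One conversation; each turn is {"role": "user" | "assistant", "content": str or PIL.Image.Image}.
messages = [
    [
        {"role": "user", "content": Image.open("example.jpg").convert("RGB")},
        {"role": "user", "content": "What is shown in this picture?"},
    ]
]
inputs = processor(messages, padding_side="right")
# inputs now holds input_ids, attention_mask, image_bound, pixel_values and tgt_sizes.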