Got an error when running the model in diffusers (seems related to the safety checker)

#2
by garyfang - opened

I ran the model with diffusers but got the following error; does anyone know how to solve it?
I can successfully run the anything3.0 model with the same script.
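
For reference, here is roughly the script I'm running. The pipe() call and negative_prompt are exactly as in the traceback below; the model id, prompt, and from_pretrained arguments are placeholders.

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id for the model this discussion is about.
model_id = "path/to/this-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "1girl, anime style"  # placeholder prompt
negative_prompt = "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name"

with torch.autocast("cuda"):
    image = pipe(prompt,
                 negative_prompt=negative_prompt,
                 width=512,
                 height=768,
                 guidance_scale=12,
                 num_inference_steps=50).images[0]

image.save("anime_girl.png")
```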


KeyError Traceback (most recent call last)
Input In [1], in <cell line: 17>()
15 negative_prompt = "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name"
17 with torch.autocast("cuda"):
---> 18 image = pipe(prompt,
19 negative_prompt=negative_prompt,
20 width=512,
21 height=768,
22 guidance_scale=12,
23 num_inference_steps=50).images[0]
25 image.save("anime_girl.png")

File ~/miniconda3/lib/python3.8/site-packages/torch/autograd/grad_mode.py:27, in _DecoratorContextManager.__call__.<locals>.decorate_context(*args, **kwargs)
24 @functools.wraps(func)
25 def decorate_context(*args, **kwargs):
26 with self.clone():
---> 27 return func(*args, **kwargs)

File ~/miniconda3/lib/python3.8/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:568, in StableDiffusionPipeline.__call__(self, prompt, height, width, num_inference_steps, guidance_scale, negative_prompt, num_images_per_prompt, eta, generator, latents, output_type, return_dict, callback, callback_steps, **kwargs)
565 image = self.decode_latents(latents)
567 # 9. Run safety checker
--> 568 image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
570 # 10. Convert to PIL
571 if output_type == "pil":

File ~/miniconda3/lib/python3.8/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:365, in StableDiffusionPipeline.run_safety_checker(self, image, device, dtype)
363 def run_safety_checker(self, image, device, dtype):
364 if self.safety_checker is not None:
--> 365 safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
366 image, has_nsfw_concept = self.safety_checker(
367 images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
368 )
369 else:

File ~/miniconda3/lib/python3.8/site-packages/transformers/models/clip/feature_extraction_clip.py:155, in CLIPFeatureExtractor.__call__(self, images, return_tensors, **kwargs)
153 images = [self.convert_rgb(image) for image in images]
154 if self.do_resize and self.size is not None and self.resample is not None:
--> 155 images = [
156 self.resize(image=image, size=self.size, resample=self.resample, default_to_square=False)
157 for image in images
158 ]
159 if self.do_center_crop and self.crop_size is not None:
160 images = [self.center_crop(image, self.crop_size) for image in images]

File ~/miniconda3/lib/python3.8/site-packages/transformers/models/clip/feature_extraction_clip.py:156, in <listcomp>(.0)
153 images = [self.convert_rgb(image) for image in images]
154 if self.do_resize and self.size is not None and self.resample is not None:
155 images = [
--> 156 self.resize(image=image, size=self.size, resample=self.resample, default_to_square=False)
157 for image in images
158 ]
159 if self.do_center_crop and self.crop_size is not None:
160 images = [self.center_crop(image, self.crop_size) for image in images]

File ~/miniconda3/lib/python3.8/site-packages/transformers/image_utils.py:400, in ImageFeatureExtractionMixin.resize(self, image, size, resample, default_to_square, max_size)
398 # specified size only for the smallest edge
399 short, long = (width, height) if width <= height else (height, width)
--> 400 requested_new_short = size if isinstance(size, int) else size[0]
402 if short == requested_new_short:
403 return image

KeyError: 0
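
From the last frame, the KeyError: 0 comes from size[0] in transformers' resize(), which suggests the size value in the model's preprocessor_config.json is a dict rather than an int and my installed transformers version doesn't handle that. As a workaround I could skip the safety checker so the feature extractor is never called (untested sketch, placeholder repo id), but I'd still like to know the proper fix:

```python
from diffusers import StableDiffusionPipeline

# Load without the safety checker; the pipeline only runs it when
# self.safety_checker is not None (see pipeline_stable_diffusion.py:364 above).
pipe = StableDiffusionPipeline.from_pretrained(
    "path/to/this-model",   # placeholder repo id
    safety_checker=None,
)
```

Upgrading transformers might also resolve the size handling, but I haven't verified that.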
