TakahashiShotaro committed on
Commit
2074ae5
1 Parent(s): 11edfba

Update models.py

Browse files
Files changed (1) hide show
  1. models.py +46 -26
models.py CHANGED
@@ -14,19 +14,29 @@ from diffusers import ControlNetModel, UniPCMultistepScheduler
14
 
15
  from config import WIDTH, HEIGHT
16
  from palette import ade_palette
17
- from stable_diffusion_controlnet_inpaint_img2img import StableDiffusionControlNetInpaintImg2ImgPipeline
 
 
18
  from helpers import flush, postprocess_image_masking, convolution
19
- from pipelines import ControlNetPipeline, SDPipeline, get_inpainting_pipeline, get_controlnet
 
 
 
 
 
20
 
21
  LOGGING = logging.getLogger(__name__)
22
 
23
 
24
  @torch.inference_mode()
25
- def make_image_controlnet(image: np.ndarray,
26
- mask_image: np.ndarray,
27
- controlnet_conditioning_image: np.ndarray,
28
- positive_prompt: str, negative_prompt: str,
29
- seed: int = 2356132) -> List[Image.Image]:
 
 
 
30
  """Method to make image using controlnet
31
  Args:
32
  image (np.ndarray): input image
@@ -43,22 +53,25 @@ def make_image_controlnet(image: np.ndarray,
43
  flush()
44
 
45
  image = Image.fromarray(image).convert("RGB")
46
- controlnet_conditioning_image = Image.fromarray(controlnet_conditioning_image).convert("RGB")#.filter(ImageFilter.GaussianBlur(radius = 9))
 
 
 
 
47
  mask_image = Image.fromarray((mask_image * 255).astype(np.uint8)).convert("RGB")
48
  mask_image_postproc = convolution(mask_image)
49
 
50
-
51
- st.success(f"{pipe.queue_size} images in the queue, can take up to {(pipe.queue_size+1) * 10} seconds")
52
  generated_image = pipe(
53
  prompt=positive_prompt,
54
  negative_prompt=negative_prompt,
55
  num_inference_steps=50,
56
  strength=1.00,
57
  guidance_scale=7.0,
58
- generator=[torch.Generator(device="cuda").manual_seed(seed)],
59
  image=image,
60
  mask_image=mask_image,
61
- controlnet_conditioning_image=controlnet_conditioning_image,
62
  ).images[0]
63
  generated_image = postprocess_image_masking(generated_image, image, mask_image_postproc)
64
 
@@ -66,10 +79,12 @@ def make_image_controlnet(image: np.ndarray,
66
 
67
 
68
  @torch.inference_mode()
69
- def make_inpainting(positive_prompt: str,
70
- image: Image,
71
- mask_image: np.ndarray,
72
- negative_prompt: str = "") -> List[Image.Image]:
 
 
73
  """Method to make inpainting
74
  Args:
75
  positive_prompt (str): positive prompt string
@@ -84,15 +99,20 @@ def make_inpainting(positive_prompt: str,
84
  mask_image_postproc = convolution(mask_image)
85
 
86
  flush()
87
- st.success(f"{pipe.queue_size} images in the queue, can take up to {(pipe.queue_size+1) * 10} seconds")
88
- generated_image = pipe(image=image,
89
- mask_image=mask_image,
90
- prompt=positive_prompt,
91
- negative_prompt=negative_prompt,
92
- num_inference_steps=50,
93
- height=HEIGHT,
94
- width=WIDTH,
95
- ).images[0]
96
- generated_image = postprocess_image_masking(generated_image, image, mask_image_postproc)
 
 
 
 
 
97
 
98
  return generated_image
 
14
 
15
  from config import WIDTH, HEIGHT
16
  from palette import ade_palette
17
+ from stable_diffusion_controlnet_inpaint_img2img import (
18
+ StableDiffusionControlNetInpaintImg2ImgPipeline,
19
+ )
20
  from helpers import flush, postprocess_image_masking, convolution
21
+ from pipelines import (
22
+ ControlNetPipeline,
23
+ SDPipeline,
24
+ get_inpainting_pipeline,
25
+ get_controlnet,
26
+ )
27
 
28
  LOGGING = logging.getLogger(__name__)
29
 
30
 
31
  @torch.inference_mode()
32
+ def make_image_controlnet(
33
+ image: np.ndarray,
34
+ mask_image: np.ndarray,
35
+ controlnet_conditioning_image: np.ndarray,
36
+ positive_prompt: str,
37
+ negative_prompt: str,
38
+ seed: int = 2356132,
39
+ ) -> List[Image.Image]:
40
  """Method to make image using controlnet
41
  Args:
42
  image (np.ndarray): input image
 
53
  flush()
54
 
55
  image = Image.fromarray(image).convert("RGB")
56
+ controlnet_conditioning_image = Image.fromarray(
57
+ controlnet_conditioning_image
58
+ ).convert(
59
+ "RGB"
60
+ ) # .filter(ImageFilter.GaussianBlur(radius = 9))
61
  mask_image = Image.fromarray((mask_image * 255).astype(np.uint8)).convert("RGB")
62
  mask_image_postproc = convolution(mask_image)
63
 
64
+ # st.success(f"{pipe.queue_size} images in the queue, can take up to {(pipe.queue_size+1) * 10} seconds")
 
65
  generated_image = pipe(
66
  prompt=positive_prompt,
67
  negative_prompt=negative_prompt,
68
  num_inference_steps=50,
69
  strength=1.00,
70
  guidance_scale=7.0,
71
+ generator=torch.Generator(device="cuda").manual_seed(seed),
72
  image=image,
73
  mask_image=mask_image,
74
+ controlnet_conditioning_image=controlnet_conditioning_image
75
  ).images[0]
76
  generated_image = postprocess_image_masking(generated_image, image, mask_image_postproc)
77
 
 
79
 
80
 
81
  @torch.inference_mode()
82
+ def make_inpainting(
83
+ positive_prompt: str,
84
+ image: Image,
85
+ mask_image: np.ndarray,
86
+ negative_prompt: str = "",
87
+ ) -> List[Image.Image]:
88
  """Method to make inpainting
89
  Args:
90
  positive_prompt (str): positive prompt string
 
99
  mask_image_postproc = convolution(mask_image)
100
 
101
  flush()
102
+ st.success(
103
+ f"{pipe.queue_size} images in the queue, can take up to {(pipe.queue_size+1) * 10} seconds"
104
+ )
105
+ generated_image = pipe(
106
+ image=image,
107
+ mask_image=mask_image,
108
+ prompt=positive_prompt,
109
+ negative_prompt=negative_prompt,
110
+ num_inference_steps=50,
111
+ height=HEIGHT,
112
+ width=WIDTH,
113
+ ).images[0]
114
+ generated_image = postprocess_image_masking(
115
+ generated_image, image, mask_image_postproc
116
+ )
117
 
118
  return generated_image