import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTokenizer
from PIL import Image
from tqdm import tqdm


class StableDiffusion:
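    """Stable Diffusion text-to-image pipeline built from individually loaded
    components (VAE, CLIP text encoder, UNet, LMS scheduler). Supports a
    learned "textual inversion" style embedding spliced into the prompt and
    an optional guidance loss applied during sampling."""
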
    def __init__(
        self,
        vae_arch="CompVis/stable-diffusion-v1-4",
        tokenizer_arch="openai/clip-vit-large-patch14",
        encoder_arch="openai/clip-vit-large-patch14",
        unet_arch="CompVis/stable-diffusion-v1-4",
        device="cpu",
        height=512,
        width=512,
        num_inference_steps=30,
        guidance_scale=7.5,
        manual_seed=1,
    ) -> None:
        self.height = height
        self.width = width
        self.num_inference_steps = num_inference_steps
        self.guidance_scale = guidance_scale
        self.device = device
        self.manual_seed = manual_seed

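        # Load the pipeline components individually from the Hugging Face Hub.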
        vae = AutoencoderKL.from_pretrained(vae_arch, subfolder="vae")
        self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_arch)
        text_encoder = CLIPTextModel.from_pretrained(encoder_arch)
        unet = UNet2DConditionModel.from_pretrained(unet_arch, subfolder="unet")

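        # LMS scheduler configured with the beta schedule Stable Diffusion v1
        # was trained with.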
        self.scheduler = LMSDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
        )

        self.vae = vae.to(self.device)
        self.text_encoder = text_encoder.to(self.device)
        self.unet = unet.to(self.device)

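        # Cache CLIP's token and position embedding layers so prompt
        # embeddings can be built by hand (needed to splice in a learned
        # concept vector); 77 is CLIP's maximum sequence length.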
        self.token_emb_layer = text_encoder.text_model.embeddings.token_embedding
        pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
        position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
        self.position_embeddings = pos_emb_layer(position_ids)

    def get_output_embeds(self, input_embeddings):
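        """Run hand-built input embeddings through the CLIP text transformer,
        bypassing its embedding layer, and return the final hidden states."""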
        bsz, seq_len = input_embeddings.shape[:2]
        # Note: _build_causal_attention_mask is a private transformers API
        # that newer releases have removed, so this code is tied to the
        # transformers version it was written against.
        causal_attention_mask = (
            self.text_encoder.text_model._build_causal_attention_mask(
                bsz, seq_len, dtype=input_embeddings.dtype
            )
        )

        # Feed the embeddings directly into the transformer encoder,
        # bypassing text_model.embeddings (already applied by the caller).
        encoder_outputs = self.text_encoder.text_model.encoder(
            inputs_embeds=input_embeddings,
            attention_mask=None,
            causal_attention_mask=causal_attention_mask.to(self.device),
            output_attentions=None,
            output_hidden_states=True,
            return_dict=None,
        )

        output = encoder_outputs[0]
        output = self.text_encoder.text_model.final_layer_norm(output)
        return output

    def set_timesteps(self, scheduler, num_inference_steps):
        scheduler.set_timesteps(num_inference_steps)
        # Float32 timesteps avoid dtype issues on some backends (e.g. MPS).
        scheduler.timesteps = scheduler.timesteps.to(torch.float32)

    def latents_to_pil(self, latents):
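        """Decode a batch of VAE latents into a list of PIL images."""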
        # 0.18215 is the VAE latent scaling factor used by Stable Diffusion.
        latents = (1 / 0.18215) * latents
        with torch.no_grad():
            image = self.vae.decode(latents).sample
        # Map from [-1, 1] to [0, 1], then to HWC uint8 for PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
        images = (image * 255).round().astype("uint8")
        pil_images = [Image.fromarray(image) for image in images]
        return pil_images

    def generate_with_embs(self, text_embeddings, text_input, loss_fn, loss_scale):
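        """Sample an image from precomputed text embeddings with
        classifier-free guidance, optionally nudging the latents every few
        steps using the gradient of `loss_fn` on the denoised estimate."""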
        generator = torch.manual_seed(self.manual_seed)
        batch_size = 1

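        # Embed an empty prompt for the unconditional branch of
        # classifier-free guidance and concatenate it with the prompt
        # embeddings so both branches share one UNet forward pass.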
        max_length = text_input.input_ids.shape[-1]
        uncond_input = self.tokenizer(
            [""] * batch_size,
            padding="max_length",
            max_length=max_length,
            return_tensors="pt",
        )
        with torch.no_grad():
            uncond_embeddings = self.text_encoder(
                uncond_input.input_ids.to(self.device)
            )[0]
        text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        self.set_timesteps(self.scheduler, self.num_inference_steps)

        # Start from scheduler-scaled Gaussian noise in latent space
        # (8x smaller than the output image along each spatial dimension).
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, self.height // 8, self.width // 8),
            generator=generator,
        )
        latents = latents.to(self.device)
        latents = latents * self.scheduler.init_noise_sigma

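        # Reverse-diffusion loop: predict noise for both branches, combine
        # them via classifier-free guidance, and step the latents.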
        for i, t in tqdm(
            enumerate(self.scheduler.timesteps), total=len(self.scheduler.timesteps)
        ):
            # Duplicate the latents so both guidance branches run in one pass.
            latent_model_input = torch.cat([latents] * 2)
            sigma = self.scheduler.sigmas[i]
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            with torch.no_grad():
                noise_pred = self.unet(
                    latent_model_input, t, encoder_hidden_states=text_embeddings
                )["sample"]

            # Classifier-free guidance: move the prediction away from the
            # unconditional output, toward the text-conditioned one.
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + self.guidance_scale * (
                noise_pred_text - noise_pred_uncond
            )

            # Every 5 steps, steer the latents with the gradient of the
            # external loss evaluated on the current denoised estimate.
            if i % 5 == 0 and loss_fn is not None:
                latents = latents.detach().requires_grad_()

                # Predicted fully denoised latents (x0) at this timestep.
                latents_x0 = self.scheduler.step(
                    noise_pred, t, latents
                ).pred_original_sample

                # Decode to image space with gradients enabled and score it.
                denoised_images = (
                    self.vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5
                )
                loss = loss_fn(denoised_images) * loss_scale

                # Nudge the latents along the loss gradient, scaled by
                # sigma**2 to match the current noise level.
                cond_grad = torch.autograd.grad(loss, latents)[0]
                latents = latents.detach() - cond_grad * sigma**2

                # The extra scheduler.step above advanced the scheduler's
                # internal counter; rewind it (a private, version-dependent
                # attribute) so the real step below uses the right index.
                self.scheduler._step_index = self.scheduler._step_index - 1

            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        return self.latents_to_pil(latents)[0]

    def generate_image(
        self,
        prompt="A campfire (oil on canvas)",
        loss_fn=None,
        loss_scale=200,
        concept_embed=None,
    ):
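        """Generate an image for `prompt`, splicing the learned embedding in
        `concept_embed` into the placeholder token "cs"."""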
        # Append the placeholder token whose embedding is replaced below.
        prompt += " in the style of cs"
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        input_ids = text_input.input_ids.to(self.device)
        custom_style_token = self.tokenizer.encode("cs", add_special_tokens=False)[0]

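        # Look up token embeddings for the prompt, then overwrite the "cs"
        # placeholder's embedding with the learned concept vector.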
        token_embeddings = self.token_emb_layer(input_ids)

        embed_key = list(concept_embed.keys())[0]
        replacement_token_embedding = concept_embed[embed_key]

        token_embeddings[
            0, torch.where(input_ids[0] == custom_style_token)[0]
        ] = replacement_token_embedding.to(self.device)

        # Add position embeddings, run the result through the text
        # transformer, and sample with the modified embeddings.
        input_embeddings = token_embeddings + self.position_embeddings
        modified_output_embeddings = self.get_output_embeds(input_embeddings)

        generated_image = self.generate_with_embs(
            modified_output_embeddings, text_input, loss_fn, loss_scale
        )
        return generated_image


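# Example usage (a sketch, not from the original source: assumes
# `concept_embed` maps a concept name to a learned CLIP token embedding,
# e.g. loaded from a hypothetical textual-inversion `learned_embeds.bin`,
# and that the loss is any differentiable function of the decoded images):
#
#     concept_embed = torch.load("learned_embeds.bin")  # hypothetical path
#
#     def blue_loss(images):
#         # Encourage bluer images: penalize blue-channel deviation from 0.9.
#         return torch.abs(images[:, 2] - 0.9).mean()
#
#     sd = StableDiffusion(device="cuda")
#     image = sd.generate_image(
#         prompt="A campfire (oil on canvas)",
#         loss_fn=blue_loss,
#         concept_embed=concept_embed,
#     )
#     image.save("campfire.png")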