"""
Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py
"""
import math

import torch
from torch import nn

# pylint: disable=unused-import
# TimestepEmbedding is imported here so callers can import it from this module.
from diffusers.models.embeddings import TimestepEmbedding


class Timesteps(nn.Module):
    """Module wrapper around get_timestep_embedding: maps timestep indices to sinusoidal embeddings."""

    def __init__(
        self,
        num_channels: int,
        flip_sin_to_cos: bool = True,
        downscale_freq_shift: float = 0,
    ):
        super().__init__()
        self.num_channels = num_channels
        self.flip_sin_to_cos = flip_sin_to_cos
        self.downscale_freq_shift = downscale_freq_shift

    def forward(self, timesteps):
        t_emb = get_timestep_embedding(
            timesteps,
            self.num_channels,
            flip_sin_to_cos=self.flip_sin_to_cos,
            downscale_freq_shift=self.downscale_freq_shift,
        )
        return t_emb
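
# A minimal usage sketch (the shapes and sizes below are illustrative assumptions,
# not values taken from this repo):
#
#   time_proj = Timesteps(num_channels=320)
#   t = torch.tensor([0, 250, 500, 999])
#   t_emb = time_proj(t)  # -> [4, 320] sinusoidal features
#   # TimestepEmbedding (re-exported above) can then project them to a hidden size:
#   t_hidden = TimestepEmbedding(in_channels=320, time_embed_dim=1280)(t_emb)  # -> [4, 1280]
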
class Positions2d(nn.Module):
    """Sinusoidal 2D positional embeddings: half the channels encode the row (height)
    coordinate and half the column (width) coordinate, so num_channels must be even."""

    def __init__(
        self,
        num_channels: int,
        flip_sin_to_cos: bool = True,
        downscale_freq_shift: float = 0,
    ):
        super().__init__()
        self.num_channels = num_channels
        self.flip_sin_to_cos = flip_sin_to_cos
        self.downscale_freq_shift = downscale_freq_shift

    def forward(self, grid):
        # grid[0] holds the row coordinates, grid[1] the column coordinates.
        h_emb = get_timestep_embedding(
            grid[0],
            self.num_channels // 2,
            flip_sin_to_cos=self.flip_sin_to_cos,
            downscale_freq_shift=self.downscale_freq_shift,
        )
        w_emb = get_timestep_embedding(
            grid[1],
            self.num_channels // 2,
            flip_sin_to_cos=self.flip_sin_to_cos,
            downscale_freq_shift=self.downscale_freq_shift,
        )
        emb = torch.cat((h_emb, w_emb), dim=-1)
        return emb
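
# A minimal usage sketch for Positions2d (illustrative sizes, assumed rather than
# taken from this repo): build an index grid for an 8x16 feature map and embed it.
#
#   grid = torch.meshgrid(torch.arange(8), torch.arange(16), indexing="ij")
#   pos = Positions2d(num_channels=64)(grid)  # -> [8, 16, 64]
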
def get_timestep_embedding(
    timesteps: torch.Tensor,
    embedding_dim: int,
    flip_sin_to_cos: bool = False,
    downscale_freq_shift: float = 1,
    scale: float = 1,
    max_period: int = 10000,
):
    """
    This matches the implementation in Denoising Diffusion Probabilistic Models: create sinusoidal timestep
    embeddings.

    :param timesteps: a 1-D or 2-D Tensor of N indices, one per batch element. These may be fractional.
    :param embedding_dim: the dimension of the output.
    :param flip_sin_to_cos: if True, place the cosine half of the embedding before the sine half.
    :param downscale_freq_shift: shifts the denominator of the frequency spacing (half_dim - shift).
    :param scale: multiplier applied to the arguments of sin/cos.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] or [N x M x dim] Tensor of positional embeddings.
    """
    if len(timesteps.shape) not in [1, 2]:
        raise ValueError("Timesteps should be a 1D or 2D tensor")
    half_dim = embedding_dim // 2
    # Geometrically spaced frequencies from 1 down to 1/max_period.
    exponent = -math.log(max_period) * torch.arange(start=0, end=half_dim, dtype=torch.float32, device=timesteps.device)
    exponent = exponent / (half_dim - downscale_freq_shift)
    emb = torch.exp(exponent)
    emb = timesteps[..., None].float() * emb
    # scale embeddings
    emb = scale * emb
    # concat sine and cosine embeddings
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
    # flip sine and cosine embeddings
    if flip_sin_to_cos:
        emb = torch.cat([emb[..., half_dim:], emb[..., :half_dim]], dim=-1)
    # zero pad the last channel if embedding_dim is odd
    if embedding_dim % 2 == 1:
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
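
if __name__ == "__main__":
    # Smoke test added for illustration (not part of the original module):
    # shape checks with arbitrary, assumed dimensions.
    t = torch.arange(8)
    assert get_timestep_embedding(t, 64).shape == (8, 64)
    assert get_timestep_embedding(t, 65).shape == (8, 65)  # odd dim is zero-padded
    grid = torch.meshgrid(torch.arange(4), torch.arange(6), indexing="ij")
    assert Positions2d(num_channels=32)(grid).shape == (4, 6, 32)
    print("embedding shape checks passed")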