L0SG committed on
Commit 7d0dcb2
1 Parent(s): e79a149
.gitignore ADDED
@@ -0,0 +1,7 @@
+ *.pyc
+ __pycache__/
+ */__pycache__/
+ alias_free_cuda/build/
+ .DS_Store
+ exp/
+ tmp/
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 NVIDIA CORPORATION.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -1,9 +1,105 @@
---
license: mit
pipeline_tag: audio-to-audio
---
This repository contains pretrained BigVGAN checkpoints with easy access to inference and additional `huggingface_hub` support.

- See here regarding how to use this model: https://huggingface.co/nvidia/BigVGAN

- If you are interested in training the model and additional functionalities, please visit the official GitHub repository for more information: https://github.com/NVIDIA/BigVGAN
---
license: mit
+ license_link: https://huggingface.co/nvidia/BigVGAN/blob/main/LICENSE
+ tags:
+ - neural-vocoder
+ - audio-generation
+ library_name: PyTorch
pipeline_tag: audio-to-audio
---
+
+ ## BigVGAN: A Universal Neural Vocoder with Large-Scale Training
+
+ <center><img src="https://user-images.githubusercontent.com/15963413/218609148-881e39df-33af-4af9-ab95-1427c4ebf062.png" width="800"></center>
+
+ **Paper**: https://arxiv.org/abs/2206.04658
+
+ **Code**: https://github.com/NVIDIA/BigVGAN
+
+ **Project page**: https://research.nvidia.com/labs/adlr/projects/bigvgan/
+
+ **🤗 Spaces Demo**: https://huggingface.co/spaces/nvidia/BigVGAN
+
+ ## News
+ [Jul 2024] We release BigVGAN-v2 along with pretrained checkpoints. Below are the highlights:
+ * Custom CUDA kernel for inference: we provide a fused upsampling + activation kernel written in CUDA for accelerated inference speed. Our tests show a 1.5-3x speedup on a single A100 GPU.
+ * Improved discriminator and loss: BigVGAN-v2 is trained using a multi-scale sub-band CQT discriminator and a multi-scale mel spectrogram loss.
+ * Larger training data: BigVGAN-v2 is trained on datasets containing diverse audio types, including speech in multiple languages, environmental sounds, and instruments.
+ * We provide pretrained checkpoints of BigVGAN-v2 using diverse audio configurations, supporting up to a 44 kHz sampling rate and a 512x upsampling ratio.
+
+ ## Installation
This repository contains pretrained BigVGAN checkpoints with easy access to inference and additional `huggingface_hub` support.

+ If you are interested in training the model and additional functionalities, please visit the official GitHub repository for more information: https://github.com/NVIDIA/BigVGAN
+
+ ```shell
+ git lfs install
+ git clone https://huggingface.co/nvidia/BigVGAN
+ ```
+
+ ## Usage
+
+ The example below shows how to load the pretrained BigVGAN generator, compute a mel spectrogram from an input waveform, and synthesize a waveform from that mel spectrogram.
+
+ ```python
+ device = 'cuda'
+
+ import torch
+ import bigvgan
+
+ # instantiate the model
+ model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x')
+
+ # remove weight norm in the model and set to eval mode
+ model.remove_weight_norm()
+ model = model.eval().to(device)
+
+ import librosa
+ from meldataset import get_mel_spectrogram
+
+ # load wav file and compute mel spectrogram
+ wav, sr = librosa.load('/path/to/your/audio.wav', sr=model.h.sampling_rate, mono=True)  # wav is np.ndarray with shape [T_time] and values in [-1, 1]
+ wav = torch.FloatTensor(wav).to(device).unsqueeze(0)  # wav is FloatTensor with shape [B(1), T_time]
+
+ # compute mel spectrogram from the ground truth audio
+ mel = get_mel_spectrogram(wav, model.h)  # mel is FloatTensor with shape [B(1), C_mel, T_frame]
+
+ # generate waveform from mel
+ with torch.inference_mode():
+     wav_gen = model(mel)  # wav_gen is FloatTensor with shape [B(1), 1, T_time] and values in [-1, 1]
+ wav_gen_float = wav_gen.squeeze(0).cpu()  # wav_gen_float is FloatTensor with shape [1, T_time]
+
+ # you can convert the generated waveform to 16-bit linear PCM
+ wav_gen_int16 = (wav_gen_float * 32767.0).numpy().astype('int16')  # wav_gen_int16 is np.ndarray with int16 dtype
+ ```
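+
+ To write the result to disk, you can use `scipy` (a minimal sketch; any WAV writer that accepts int16 PCM works):
+
+ ```python
+ from scipy.io import wavfile
+
+ # wav_gen_int16 has shape [1, T_time]; write the mono channel at the model's sampling rate
+ wavfile.write('output.wav', model.h.sampling_rate, wav_gen_int16[0])
+ ```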
+
+ ## Using Custom CUDA Kernel for Synthesis
+ You can apply the fast CUDA inference kernel by passing the parameter `use_cuda_kernel` when instantiating BigVGAN:
+
+ ```python
+ import bigvgan
+ model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', use_cuda_kernel=True)
+ ```
+
+ The first time it is applied, it builds the kernel using `nvcc` and `ninja`. If the build succeeds, the kernel is saved to `alias_free_cuda/build` and the model automatically loads the kernel. The codebase has been tested using CUDA `12.1`.
+
+ Please make sure that both are installed on your system and that the `nvcc` version matches the CUDA version your PyTorch build is using.
+
+ For details, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis
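+
+ As a quick sanity check before building (a minimal sketch), you can compare the CUDA version of your PyTorch build against the `nvcc` found on your `PATH`:
+
+ ```python
+ import subprocess
+ import torch
+
+ # CUDA version PyTorch was built against, e.g. '12.1'
+ print(torch.version.cuda)
+ # CUDA toolkit version of the nvcc compiler that will build the kernel
+ print(subprocess.run(['nvcc', '--version'], capture_output=True, text=True).stdout)
+ ```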
89
+
90
+
91
+ ## Pretrained Models
92
+ We provide the pretrained models.
93
+ One can download the checkpoints of the pretrained generator weight, named as `bigvgan_generator.pt` within the listed HuggingFace repositories.
94
 
95
+ |Model Name|Sampling Rate|Mel band|fmax|Upsampling Ratio|Params|Dataset|Fine-Tuned|
96
+ |------|---|---|---|---|---|------|---|
97
+ |[bigvgan_v2_44khz_128band_512x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_512x)|44 kHz|128|22050|512|122M|Large-scale Compilation|No|
98
+ |[bigvgan_v2_44khz_128band_256x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_256x)|44 kHz|128|22050|256|112M|Large-scale Compilation|No|
99
+ |[bigvgan_v2_24khz_100band_256x](https://huggingface.co/nvidia/bigvgan_v2_24khz_100band_256x)|24 kHz|100|12000|256|112M|Large-scale Compilation|No|
100
+ |[bigvgan_v2_22khz_80band_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_256x)|22 kHz|80|11025|256|112M|Large-scale Compilation|No|
101
+ |[bigvgan_v2_22khz_80band_fmax8k_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_fmax8k_256x)|22 kHz|80|8000|256|112M|Large-scale Compilation|No|
102
+ |[bigvgan_24khz_100band](https://huggingface.co/nvidia/bigvgan_24khz_100band)|24 kHz|100|12000|256|112M|LibriTTS|No|
103
+ |[bigvgan_base_24khz_100band](https://huggingface.co/nvidia/bigvgan_base_24khz_100band)|24 kHz|100|12000|256|14M|LibriTTS|No|
104
+ |[bigvgan_22khz_80band](https://huggingface.co/nvidia/bigvgan_22khz_80band)|22 kHz|80|8000|256|112M|LibriTTS + VCTK + LJSpeech|No|
105
+ |[bigvgan_base_22khz_80band](https://huggingface.co/nvidia/bigvgan_base_22khz_80band)|22 kHz|80|8000|256|14M|LibriTTS + VCTK + LJSpeech|No|
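+
+ Any repository name from the table can be passed to `from_pretrained`; the checkpoint's configuration is exposed via `model.h`. For example (a minimal sketch):
+
+ ```python
+ import bigvgan
+
+ # load the 24 kHz, 100-band checkpoint and inspect its audio configuration
+ model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_24khz_100band_256x')
+ print(model.h.sampling_rate, model.h.num_mels)  # 24000 100
+ ```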
activations.py ADDED
@@ -0,0 +1,120 @@
+ # Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
+ # LICENSE is in incl_licenses directory.
+
+ import torch
+ from torch import nn, sin, pow
+ from torch.nn import Parameter
+
+
+ class Snake(nn.Module):
+     '''
+     Implementation of a sine-based periodic activation function.
+     Shape:
+         - Input: (B, C, T)
+         - Output: (B, C, T), same shape as the input
+     Parameters:
+         - alpha - trainable parameter
+     References:
+         - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
+         https://arxiv.org/abs/2006.08195
+     Examples:
+         >>> a1 = Snake(256)
+         >>> x = torch.randn(256)
+         >>> x = a1(x)
+     '''
+     def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
+         '''
+         Initialization.
+         INPUT:
+             - in_features: shape of the input
+             - alpha: trainable parameter
+             alpha is initialized to 1 by default; higher values = higher frequency.
+             alpha will be trained along with the rest of your model.
+         '''
+         super(Snake, self).__init__()
+         self.in_features = in_features
+
+         # initialize alpha
+         self.alpha_logscale = alpha_logscale
+         if self.alpha_logscale:  # log scale alphas initialized to zeros
+             self.alpha = Parameter(torch.zeros(in_features) * alpha)
+         else:  # linear scale alphas initialized to ones
+             self.alpha = Parameter(torch.ones(in_features) * alpha)
+
+         self.alpha.requires_grad = alpha_trainable
+
+         self.no_div_by_zero = 0.000000001
+
+     def forward(self, x):
+         '''
+         Forward pass of the function.
+         Applies the function to the input elementwise.
+         Snake := x + 1/a * sin^2 (xa)
+         '''
+         alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
+         if self.alpha_logscale:
+             alpha = torch.exp(alpha)
+         x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
+
+         return x
+
+
+ class SnakeBeta(nn.Module):
+     '''
+     A modified Snake function which uses separate parameters for the magnitude of the periodic components.
+     Shape:
+         - Input: (B, C, T)
+         - Output: (B, C, T), same shape as the input
+     Parameters:
+         - alpha - trainable parameter that controls frequency
+         - beta - trainable parameter that controls magnitude
+     References:
+         - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
+         https://arxiv.org/abs/2006.08195
+     Examples:
+         >>> a1 = SnakeBeta(256)
+         >>> x = torch.randn(256)
+         >>> x = a1(x)
+     '''
+     def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
+         '''
+         Initialization.
+         INPUT:
+             - in_features: shape of the input
+             - alpha - trainable parameter that controls frequency
+             - beta - trainable parameter that controls magnitude
+             alpha is initialized to 1 by default; higher values = higher frequency.
+             beta is initialized to 1 by default; higher values = higher magnitude.
+             alpha and beta will be trained along with the rest of your model.
+         '''
+         super(SnakeBeta, self).__init__()
+         self.in_features = in_features
+
+         # initialize alpha and beta
+         self.alpha_logscale = alpha_logscale
+         if self.alpha_logscale:  # log scale alphas initialized to zeros
+             self.alpha = Parameter(torch.zeros(in_features) * alpha)
+             self.beta = Parameter(torch.zeros(in_features) * alpha)
+         else:  # linear scale alphas initialized to ones
+             self.alpha = Parameter(torch.ones(in_features) * alpha)
+             self.beta = Parameter(torch.ones(in_features) * alpha)
+
+         self.alpha.requires_grad = alpha_trainable
+         self.beta.requires_grad = alpha_trainable
+
+         self.no_div_by_zero = 0.000000001
+
+     def forward(self, x):
+         '''
+         Forward pass of the function.
+         Applies the function to the input elementwise.
+         SnakeBeta := x + 1/b * sin^2 (xa)
+         '''
+         alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
+         beta = self.beta.unsqueeze(0).unsqueeze(-1)
+         if self.alpha_logscale:
+             alpha = torch.exp(alpha)
+             beta = torch.exp(beta)
+         x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
+
+         return x
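
For reference, a minimal numerical check of the Snake definition above (assuming `activations.py` from the repository root is on the path):

```python
import torch
from activations import Snake

# Snake(x) = x + (1/alpha) * sin^2(alpha * x), applied per channel
act = Snake(in_features=3)               # alpha initialized to ones, shape [C]
x = torch.randn(2, 3, 16)                # [B, C, T]
alpha = act.alpha.view(1, 3, 1)
expected = x + (1.0 / (alpha + 1e-9)) * torch.sin(x * alpha) ** 2
print(torch.allclose(act(x), expected))  # True
```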
alias_free_cuda/__init__.py ADDED
File without changes
alias_free_cuda/activation1d.py ADDED
@@ -0,0 +1,63 @@
+ # Copyright (c) 2024 NVIDIA CORPORATION.
+ # Licensed under the MIT license.
+
+ import torch
+ import torch.nn as nn
+ from alias_free_torch.resample import UpSample1d, DownSample1d
+ # load fused CUDA kernel: this enables importing anti_alias_activation_cuda
+ from alias_free_cuda import load
+ load.load()
+
+
+ class FusedAntiAliasActivation(torch.autograd.Function):
+     """
+     Assumes filter size 12, replication padding on upsampling, and logscale alpha/beta parameters as inputs.
+     """
+     @staticmethod
+     def forward(ctx, inputs, ftr, alpha, beta):
+         import anti_alias_activation_cuda
+         activation_results = anti_alias_activation_cuda.forward(inputs, ftr, alpha, beta)
+         return activation_results
+
+     @staticmethod
+     def backward(ctx, output_grads):
+         # TODO: implement bwd pass
+         raise NotImplementedError
+
+
+ class Activation1d(nn.Module):
+     def __init__(self,
+                  activation,
+                  up_ratio: int = 2,
+                  down_ratio: int = 2,
+                  up_kernel_size: int = 12,
+                  down_kernel_size: int = 12,
+                  fused: bool = True
+                  ):
+         super().__init__()
+         self.up_ratio = up_ratio
+         self.down_ratio = down_ratio
+         self.act = activation
+         self.upsample = UpSample1d(up_ratio, up_kernel_size)
+         self.downsample = DownSample1d(down_ratio, down_kernel_size)
+
+         self.fused = fused  # whether to use fused CUDA kernel or not
+
+     def forward(self, x):
+         if not self.fused:
+             x = self.upsample(x)
+             x = self.act(x)
+             x = self.downsample(x)
+             return x
+         else:
+             if self.act.__class__.__name__ == "Snake":
+                 beta = self.act.alpha.data  # Snake uses the same params for alpha and beta
+             else:
+                 beta = self.act.beta.data  # SnakeBeta uses different params for alpha and beta
+             alpha = self.act.alpha.data
+             if not self.act.alpha_logscale:  # exp is baked into the CUDA kernel; cancel it out with a log
+                 alpha = torch.log(alpha)
+                 beta = torch.log(beta)
+             x = FusedAntiAliasActivation.apply(x, self.upsample.filter, alpha, beta)
+             x = self.downsample(x)
+             return x
alias_free_cuda/anti_alias_activation.cpp ADDED
@@ -0,0 +1,48 @@
+ /* coding=utf-8
+  * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ #include <cuda_fp16.h>
+ #include <torch/extension.h>
+ #include <vector>
+
+ namespace anti_alias_activation {
+
+ torch::Tensor fwd_cuda(torch::Tensor const& input,
+                        torch::Tensor const& filter,
+                        torch::Tensor const& alpha,
+                        torch::Tensor const& beta
+                        );
+
+ torch::Tensor fwd(torch::Tensor const& input,
+                   torch::Tensor const& filter,
+                   torch::Tensor const& alpha,
+                   torch::Tensor const& beta
+                   ) {
+     AT_ASSERTM(input.dim() == 3, "expected 3D tensor");
+     //AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
+     //           (input.scalar_type() == at::ScalarType::BFloat16),
+     //           "Only fp16 and bf16 are supported");
+
+     return fwd_cuda(input, filter, alpha, beta);
+ }
+
+ } // end namespace anti_alias_activation
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+     m.def("forward",
+           &anti_alias_activation::fwd,
+           "Anti Alias Activation -- Forward.");
+ }
alias_free_cuda/anti_alias_activation_cuda.cu ADDED
@@ -0,0 +1,314 @@
+ /* coding=utf-8
+  * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ #include <ATen/ATen.h>
+ #include <cuda.h>
+ #include <cuda_runtime.h>
+ #include <cuda_fp16.h>
+ #include <cuda_profiler_api.h>
+ #include <ATen/cuda/CUDAContext.h>
+ #include <torch/extension.h>
+ #include "type_shim.h"
+ #include <assert.h>
+ #include <cfloat>
+ #include <limits>
+ #include <stdint.h>
+ #include <c10/macros/Macros.h>
+
+ namespace {
+
+ /*
+ template <typename Datatype, int ELEMENTS_PER_LDG>
+ __device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src);
+
+ template <>
+ __device__ __inline__ void copy_vector<c10::BFloat16, 1>(c10::BFloat16 *dst, const c10::BFloat16 *src) { *dst = *src; }
+
+ template <>
+ __device__ __inline__ void copy_vector<c10::BFloat16, 4>(c10::BFloat16 *dst, const c10::BFloat16 *src) { *((float2*) dst) = *((float2*) src); }
+
+ template <>
+ __device__ __inline__ void copy_vector<c10::Half, 1>(c10::Half *dst, const c10::Half *src) { *dst = *src; }
+
+ template <>
+ __device__ __inline__ void copy_vector<c10::Half, 4>(c10::Half *dst, const c10::Half *src) { *((float2*) dst) = *((float2*) src); }
+
+ template <>
+ __device__ __inline__ void copy_vector<uint8_t, 1>(uint8_t *dst, const uint8_t *src) { *dst = *src; }
+
+ template <>
+ __device__ __inline__ void copy_vector<uint8_t, 4>(uint8_t *dst, const uint8_t *src) { *((half2*) dst) = *((half2*) src); }
+
+ int log2_ceil(int value) {
+     int log2_value = 0;
+     while ((1 << log2_value) < value) ++log2_value;
+     return log2_value;
+ }
+
+ template<typename T>
+ struct Add {
+     __device__ __forceinline__ T operator()(T a, T b) const {
+         return a + b;
+     }
+ };
+
+ template<typename T>
+ struct Max {
+     __device__ __forceinline__ T operator()(T a, T b) const {
+         return a < b ? b : a;
+     }
+ };
+
+ template <typename T>
+ __device__ __forceinline__ T WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if CUDA_VERSION >= 9000
+     return __shfl_xor_sync(mask, value, laneMask, width);
+ #else
+     return __shfl_xor(value, laneMask, width);
+ #endif
+ }
+
+ template <typename acc_t, int WARP_BATCH, int WARP_SIZE, template<typename> class ReduceOp>
+ __device__ __forceinline__ void warp_reduce(acc_t* sum) {
+     ReduceOp<acc_t> r;
+ #pragma unroll
+     for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
+ #pragma unroll
+         for (int i = 0; i < WARP_BATCH; ++i) {
+             acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE);
+             sum[i] = r(sum[i], b);
+         }
+     }
+ }
+ */
+
+ template <typename input_t, typename output_t, typename acc_t>
+ __global__ void anti_alias_activation_forward(
+     output_t *dst,
+     const input_t *src,
+     const input_t *ftr,
+     const input_t *alpha,
+     const input_t *beta,
+     int batch_size,
+     int channels,
+     int seq_len)
+ {
+     constexpr int ELEMENTS_PER_LDG_STG = 1; //(WARP_ITERATIONS < 4) ? 1 : 4;
+     constexpr int BUFFER_SIZE = 32;
+     constexpr int FILTER_SIZE = 12;
+     constexpr int HALF_FILTER_SIZE = 6;
+     constexpr int REPLICATION_PAD = 5; // 5 on each side
+
+     // blockDim/threadIdx = (128, 1, 1)
+     // gridDim/blockIdx = (seq_blocks, channels, batches)
+     int block_offset = (blockIdx.x * 128 * BUFFER_SIZE + seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
+     int local_offset = threadIdx.x * BUFFER_SIZE;
+     int seq_offset = blockIdx.x * 128 * BUFFER_SIZE + local_offset;
+
+     //int intermediate_seq_len = seq_len * 2 - 1 + 4 * REPLICATION_PAD;
+     //int intermediate_block_offset = (blockIdx.x * 128 * BUFFER_SIZE * 2 + intermediate_seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
+     //int intermediate_local_offset = threadIdx.x * BUFFER_SIZE * 2;
+
+     int output_seq_len = seq_len * 2;
+     int output_block_offset = (blockIdx.x * 128 * BUFFER_SIZE * 2 + output_seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
+     int output_local_offset = threadIdx.x * BUFFER_SIZE * 2;
+     int output_seq_offset = blockIdx.x * 128 * BUFFER_SIZE * 2 + output_local_offset;
+
+     // get values needed for replication padding before moving pointer
+     const input_t *right_most_pntr = src + (seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
+     input_t seq_left_most_value = right_most_pntr[0];
+     input_t seq_right_most_value = right_most_pntr[seq_len - 1];
+
+     src += block_offset + local_offset;
+     dst += output_block_offset + output_local_offset;
+     alpha = alpha + blockIdx.y;
+     input_t alpha_val = expf(alpha[0]);
+     beta = beta + blockIdx.y;
+     input_t beta_val = expf(beta[0]);
+
+     // load data from global memory
+     input_t elements[2 * FILTER_SIZE + 2 * BUFFER_SIZE] = {0};
+     input_t intermediates[2 * FILTER_SIZE + 2 * BUFFER_SIZE] = {0};
+     //output_t output[2 * BUFFER_SIZE];
+     input_t filter[FILTER_SIZE];
+     //input_t temp_data[ELEMENTS_PER_LDG_STG];
+     //uint8_t temp_mask[ELEMENTS_PER_LDG_STG];
+
+     #pragma unroll
+     for (int it = 0; it < FILTER_SIZE; it += 1) {
+         filter[it] = ftr[it];
+     }
+
+     #pragma unroll
+     for (int it = -HALF_FILTER_SIZE; it < BUFFER_SIZE + HALF_FILTER_SIZE; it += 1) {
+         int element_index = seq_offset + it;
+         if ((element_index < 0) && (element_index >= -REPLICATION_PAD)) {
+             elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_left_most_value;
+         }
+         if ((element_index >= seq_len) && (element_index < seq_len + REPLICATION_PAD)) {
+             elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_right_most_value;
+         }
+         if ((element_index >= 0) && (element_index < seq_len)) {
+             elements[2 * (HALF_FILTER_SIZE + it)] = 2 * src[it];
+         }
+     }
+
+     // apply filter
+     #pragma unroll
+     for (int it = 0; it < (2 * BUFFER_SIZE + 2 * FILTER_SIZE); it += 1) {
+         input_t acc = 0.0;
+
+         int element_index = output_seq_offset + it; // index for output
+         #pragma unroll
+         for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) {
+             if ((element_index + f_idx) >= 0) {
+                 acc += filter[f_idx] * elements[it + f_idx];
+             }
+         }
+         intermediates[it] = acc;
+     }
+
+     double no_div_by_zero = 0.000000001;
+     #pragma unroll
+     for (int it = 0; it < 12 + 2 * BUFFER_SIZE; it++) {
+         intermediates[it] += (1.0 / (beta_val + no_div_by_zero)) * sinf(intermediates[it] * alpha_val) * sinf(intermediates[it] * alpha_val);
+     }
+
+     // now copy to output
+     #pragma unroll
+     for (int it = 0; it < 2 * BUFFER_SIZE; it += 1) {
+         int element_index = output_seq_offset + it;
+         if (element_index < output_seq_len) {
+             dst[it] = intermediates[it + 6];
+         }
+     }
+
+     // for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG) {
+     //     int element_index = seq_offset + it;
+     //     if (element_index < seq_len) {
+     //         dst[it] = output[it];
+     //     }
+     // }
+
+     // // Upsample convolution
+     // for (int it = 0; it < 2 * BUFFER_SIZE + 12; it += 1) {
+     //     input_t acc = 0.0;
+
+     //     for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) {
+     //         acc += filter[f_idx] * elements[it + f_idx];
+     //     }
+     //     intermediates[it] = acc;
+     // }
+
+     // // correct the corners of intermediates
+     // if (seq_offset == 0) {
+     //     for (int it = 0; it < 6; it += 1)
+     //         intermediates[it] = 0;
+     // }
+
+     // if (seq_offset + 32 >= seq_len) {
+     //     int offset = seq_len % 32 == 0 ? 32 : seq_len % 32;
+
+     //     for (int it = 0; it < 6; it++) {
+     //         intermediates[6 + 2 * offset + it] = 0;
+     //     }
+     // }
+
+     // for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG) {
+     //     int element_index = seq_offset + it;
+     //     if (element_index < seq_len) {
+     //         dst[it] = output[it];
+     //     }
+     // }
+ }
+
+ template<typename input_t, typename output_t, typename acc_t>
+ void dispatch_anti_alias_activation_forward(
+     output_t *dst,
+     const input_t *src,
+     const input_t *ftr,
+     const input_t *alpha,
+     const input_t *beta,
+     int batch_size,
+     int channels,
+     int seq_len)
+ {
+     if (seq_len == 0) {
+         return;
+     } else {
+         // use 128 threads per block to maximize gpu utilization
+         constexpr int threads_per_block = 128;
+         constexpr int seq_len_per_block = 4096;
+         int blocks_per_seq_len = (seq_len + seq_len_per_block - 1) / seq_len_per_block;
+         dim3 blocks(blocks_per_seq_len, channels, batch_size);
+         dim3 threads(threads_per_block, 1, 1);
+
+         anti_alias_activation_forward<input_t, output_t, acc_t>
+             <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, ftr, alpha, beta, batch_size, channels, seq_len);
+     }
+ }
+ } // end anonymous namespace
+
+ namespace anti_alias_activation {
+
+ torch::Tensor fwd_cuda(torch::Tensor const& input, torch::Tensor const& filter, torch::Tensor const& alpha, torch::Tensor const& beta)
+ {
+     // input is a 3d tensor with dimensions [batches, channels, seq_len]
+     const int batches = input.size(0);
+     const int channels = input.size(1);
+     const int seq_len = input.size(2);
+
+     // Output
+     auto act_options = input.options().requires_grad(false);
+     int output_seq_len = seq_len * 2; // we'll be dilating between each element by interspersing with zeros
+
+     torch::Tensor anti_alias_activation_results =
+         torch::empty({batches, channels, output_seq_len}, act_options);
+
+     // raw pointers for the kernel inputs and output
+     void* input_ptr = static_cast<void*>(input.data_ptr());
+     void* filter_ptr = static_cast<void*>(filter.data_ptr());
+     void* alpha_ptr = static_cast<void*>(alpha.data_ptr());
+     void* beta_ptr = static_cast<void*>(beta.data_ptr());
+     void* anti_alias_activation_results_ptr = static_cast<void*>(anti_alias_activation_results.data_ptr());
+
+     DISPATCH_FLOAT_HALF_AND_BFLOAT(
+         input.scalar_type(),
+         "dispatch anti alias activation_forward",
+         dispatch_anti_alias_activation_forward<scalar_t, scalar_t, float>(
+             reinterpret_cast<scalar_t*>(anti_alias_activation_results_ptr),
+             reinterpret_cast<const scalar_t*>(input_ptr),
+             reinterpret_cast<const scalar_t*>(filter_ptr),
+             reinterpret_cast<const scalar_t*>(alpha_ptr),
+             reinterpret_cast<const scalar_t*>(beta_ptr),
+             batches,
+             channels,
+             seq_len);
+     );
+     return anti_alias_activation_results;
+ }
+ }
alias_free_cuda/compat.h ADDED
@@ -0,0 +1,31 @@
+ /* coding=utf-8
+  * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ /* This code is copied from NVIDIA apex:
+  * https://github.com/NVIDIA/apex
+  * with minor changes. */
+
+
+ #ifndef TORCH_CHECK
+ #define TORCH_CHECK AT_CHECK
+ #endif
+
+ #ifdef VERSION_GE_1_3
+ #define DATA_PTR data_ptr
+ #else
+ #define DATA_PTR data
+ #endif
alias_free_cuda/load.py ADDED
@@ -0,0 +1,72 @@
+ # Copyright (c) 2024 NVIDIA CORPORATION.
+ # Licensed under the MIT license.
+
+ import os
+ import pathlib
+ import subprocess
+
+ from torch.utils import cpp_extension
+
+ # Setting this param to a list has a problem of generating different
+ # compilation commands (with different order of architectures) and
+ # leading to recompilation of fused kernels. Set it to empty string
+ # to avoid recompilation and assign arch flags explicitly in
+ # extra_cuda_cflags below
+ os.environ["TORCH_CUDA_ARCH_LIST"] = ""
+
+
+ def load():
+     # Check if cuda 11 is installed for compute capability 8.0
+     cc_flag = []
+     _, bare_metal_major, _ = _get_cuda_bare_metal_version(
+         cpp_extension.CUDA_HOME)
+     if int(bare_metal_major) >= 11:
+         cc_flag.append('-gencode')
+         cc_flag.append('arch=compute_80,code=sm_80')
+
+     # Build path
+     srcpath = pathlib.Path(__file__).parent.absolute()
+     buildpath = srcpath / 'build'
+     _create_build_dir(buildpath)
+
+     # Helper function to build the kernels.
+     def _cpp_extension_load_helper(name, sources, extra_cuda_flags):
+         return cpp_extension.load(
+             name=name,
+             sources=sources,
+             build_directory=buildpath,
+             extra_cflags=['-O3', ],
+             extra_cuda_cflags=['-O3',
+                                '-gencode', 'arch=compute_70,code=sm_70',
+                                '--use_fast_math'] + extra_cuda_flags + cc_flag,
+             verbose=True
+         )
+
+     extra_cuda_flags = ['-U__CUDA_NO_HALF_OPERATORS__',
+                         '-U__CUDA_NO_HALF_CONVERSIONS__',
+                         '--expt-relaxed-constexpr',
+                         '--expt-extended-lambda']
+
+     sources = [srcpath / 'anti_alias_activation.cpp',
+                srcpath / 'anti_alias_activation_cuda.cu']
+     anti_alias_activation_cuda = _cpp_extension_load_helper(
+         "anti_alias_activation_cuda", sources, extra_cuda_flags)
+
+
+ def _get_cuda_bare_metal_version(cuda_dir):
+     raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
+                                          universal_newlines=True)
+     output = raw_output.split()
+     release_idx = output.index("release") + 1
+     release = output[release_idx].split(".")
+     bare_metal_major = release[0]
+     bare_metal_minor = release[1][0]
+
+     return raw_output, bare_metal_major, bare_metal_minor
+
+
+ def _create_build_dir(buildpath):
+     try:
+         os.mkdir(buildpath)
+     except OSError:
+         if not os.path.isdir(buildpath):
+             print(f"Creation of the build directory {buildpath} failed")
alias_free_cuda/test_activation.py ADDED
@@ -0,0 +1,55 @@
+ # Copyright (c) 2024 NVIDIA CORPORATION.
+ # Licensed under the MIT license.
+
+ import math
+ import torch
+ import alias_free_cuda
+ from alias_free_cuda import activation1d
+ from activations import Snake, SnakeBeta
+
+
+ def test_load_fused_kernels():
+     try:
+         import alias_free_cuda
+         import torch
+         print("[Success] load_fused_kernels")
+     except ImportError as e:
+         print("[Fail] load_fused_kernels")
+         raise e
+
+
+ def test_anti_alias_activation():
+     data = torch.rand((10, 10, 50000), device='cuda')
+
+     # check activations.Snake cuda vs. torch
+     fused_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=True).cuda()
+     fused_activation_output = fused_anti_alias_activation(data)
+
+     torch_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=False).cuda()
+     torch_activation_output = torch_anti_alias_activation(data)
+
+     test_result = (fused_activation_output - torch_activation_output).abs()
+
+     while test_result.dim() != 1:
+         test_result = test_result.mean(dim=-1)
+
+     diff = test_result.mean(dim=-1)
+
+     if diff <= 1e-3:
+         print(
+             f"\n[Success] test_fused_anti_alias_activation"
+             f"\n > mean_difference={diff}"
+             f"\n > fused_values={fused_activation_output[-1][-1][-100:].tolist()}"
+             f"\n > torch_values={torch_activation_output[-1][-1][-100:].tolist()}"
+         )
+     else:
+         print(
+             f"\n[Fail] test_fused_anti_alias_activation"
+             f"\n > mean_difference={diff}, "
+             f"\n > fused_values={fused_activation_output[-1][-1][-30:].tolist()}, "
+             f"\n > torch_values={torch_activation_output[-1][-1][-30:].tolist()}"
+         )
+
+
+ if __name__ == "__main__":
+     from alias_free_cuda import load
+     load.load()
+     test_load_fused_kernels()
+     test_anti_alias_activation()
alias_free_cuda/test_activation_snake_beta.py ADDED
@@ -0,0 +1,55 @@
+ # Copyright (c) 2024 NVIDIA CORPORATION.
+ # Licensed under the MIT license.
+
+ import math
+ import torch
+ import alias_free_cuda
+ from alias_free_cuda import activation1d
+ from activations import Snake, SnakeBeta
+
+
+ def test_load_fused_kernels():
+     try:
+         import alias_free_cuda
+         import torch
+         print("[Success] load_fused_kernels")
+     except ImportError as e:
+         print("[Fail] load_fused_kernels")
+         raise e
+
+
+ def test_anti_alias_activation():
+     data = torch.rand((10, 10, 50000), device='cuda')
+
+     # check activations.SnakeBeta cuda vs. torch
+     fused_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=True).cuda()
+     fused_activation_output = fused_anti_alias_activation(data)
+
+     torch_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=False).cuda()
+     torch_activation_output = torch_anti_alias_activation(data)
+
+     test_result = (fused_activation_output - torch_activation_output).abs()
+
+     while test_result.dim() != 1:
+         test_result = test_result.mean(dim=-1)
+
+     diff = test_result.mean(dim=-1)
+
+     if diff <= 1e-3:
+         print(
+             f"\n[Success] test_fused_anti_alias_activation"
+             f"\n > mean_difference={diff}"
+             f"\n > fused_values={fused_activation_output[-1][-1][-100:].tolist()}"
+             f"\n > torch_values={torch_activation_output[-1][-1][-100:].tolist()}"
+         )
+     else:
+         print(
+             f"\n[Fail] test_fused_anti_alias_activation"
+             f"\n > mean_difference={diff}, "
+             f"\n > fused_values={fused_activation_output[-1][-1][-30:].tolist()}, "
+             f"\n > torch_values={torch_activation_output[-1][-1][-30:].tolist()}"
+         )
+
+
+ if __name__ == "__main__":
+     from alias_free_cuda import load
+     load.load()
+     test_load_fused_kernels()
+     test_anti_alias_activation()
alias_free_cuda/type_shim.h ADDED
@@ -0,0 +1,97 @@
+ /* coding=utf-8
+  * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+
+ #include <ATen/ATen.h>
+ #include "compat.h"
+
+
+ #define DISPATCH_FLOAT_HALF_AND_BFLOAT(TYPE, NAME, ...) \
+     switch(TYPE) \
+     { \
+         case at::ScalarType::Float: \
+         { \
+             using scalar_t = float; \
+             __VA_ARGS__; \
+             break; \
+         } \
+         case at::ScalarType::Half: \
+         { \
+             using scalar_t = at::Half; \
+             __VA_ARGS__; \
+             break; \
+         } \
+         case at::ScalarType::BFloat16: \
+         { \
+             using scalar_t = at::BFloat16; \
+             __VA_ARGS__; \
+             break; \
+         } \
+         default: \
+             AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
+     }
+
+
+
+ #define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) \
+     switch(TYPEIN) \
+     { \
+         case at::ScalarType::Float: \
+         { \
+             using scalar_t_in = float; \
+             switch(TYPEOUT) \
+             { \
+                 case at::ScalarType::Float: \
+                 { \
+                     using scalar_t_out = float; \
+                     __VA_ARGS__; \
+                     break; \
+                 } \
+                 case at::ScalarType::Half: \
+                 { \
+                     using scalar_t_out = at::Half; \
+                     __VA_ARGS__; \
+                     break; \
+                 } \
+                 case at::ScalarType::BFloat16: \
+                 { \
+                     using scalar_t_out = at::BFloat16; \
+                     __VA_ARGS__; \
+                     break; \
+                 } \
+                 default: \
+                     AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \
+             } \
+             break; \
+         } \
+         case at::ScalarType::Half: \
+         { \
+             using scalar_t_in = at::Half; \
+             using scalar_t_out = at::Half; \
+             __VA_ARGS__; \
+             break; \
+         } \
+         case at::ScalarType::BFloat16: \
+         { \
+             using scalar_t_in = at::BFloat16; \
+             using scalar_t_out = at::BFloat16; \
+             __VA_ARGS__; \
+             break; \
+         } \
+         default: \
+             AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \
+     }
+
alias_free_torch/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
+ # LICENSE is in incl_licenses directory.
+
+ from .filter import *
+ from .resample import *
+ from .act import *
alias_free_torch/act.py ADDED
@@ -0,0 +1,28 @@
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
+ # LICENSE is in incl_licenses directory.
+
+ import torch.nn as nn
+ from .resample import UpSample1d, DownSample1d
+
+
+ class Activation1d(nn.Module):
+     def __init__(self,
+                  activation,
+                  up_ratio: int = 2,
+                  down_ratio: int = 2,
+                  up_kernel_size: int = 12,
+                  down_kernel_size: int = 12):
+         super().__init__()
+         self.up_ratio = up_ratio
+         self.down_ratio = down_ratio
+         self.act = activation
+         self.upsample = UpSample1d(up_ratio, up_kernel_size)
+         self.downsample = DownSample1d(down_ratio, down_kernel_size)
+
+     # x: [B, C, T]
+     def forward(self, x):
+         x = self.upsample(x)
+         x = self.act(x)
+         x = self.downsample(x)
+
+         return x
alias_free_torch/filter.py ADDED
@@ -0,0 +1,95 @@
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
+ # LICENSE is in incl_licenses directory.
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import math
+
+ if 'sinc' in dir(torch):
+     sinc = torch.sinc
+ else:
+     # This code is adopted from adefossez's julius.core.sinc under the MIT License
+     # https://adefossez.github.io/julius/julius/core.html
+     # LICENSE is in incl_licenses directory.
+     def sinc(x: torch.Tensor):
+         """
+         Implementation of sinc, i.e. sin(pi * x) / (pi * x)
+         __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
+         """
+         return torch.where(x == 0,
+                            torch.tensor(1., device=x.device, dtype=x.dtype),
+                            torch.sin(math.pi * x) / math.pi / x)
+
+
+ # This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License
+ # https://adefossez.github.io/julius/julius/lowpass.html
+ # LICENSE is in incl_licenses directory.
+ def kaiser_sinc_filter1d(cutoff, half_width, kernel_size):  # return filter [1, 1, kernel_size]
+     even = (kernel_size % 2 == 0)
+     half_size = kernel_size // 2
+
+     # For Kaiser window
+     delta_f = 4 * half_width
+     A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
+     if A > 50.:
+         beta = 0.1102 * (A - 8.7)
+     elif A >= 21.:
+         beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.)
+     else:
+         beta = 0.
+     window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)
+
+     # ratio = 0.5 / cutoff -> 2 * cutoff = 1 / ratio
+     if even:
+         time = (torch.arange(-half_size, half_size) + 0.5)
+     else:
+         time = torch.arange(kernel_size) - half_size
+     if cutoff == 0:
+         filter_ = torch.zeros_like(time)
+     else:
+         filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
+         # Normalize filter to have sum = 1, otherwise we will have a small leakage
+         # of the constant component in the input signal.
+         filter_ /= filter_.sum()
+     filter = filter_.view(1, 1, kernel_size)
+
+     return filter
+
+
+ class LowPassFilter1d(nn.Module):
+     def __init__(self,
+                  cutoff=0.5,
+                  half_width=0.6,
+                  stride: int = 1,
+                  padding: bool = True,
+                  padding_mode: str = 'replicate',
+                  kernel_size: int = 12):
+         # kernel_size should be an even number for the stylegan3 setup;
+         # in this implementation, an odd number is also possible.
+         super().__init__()
+         if cutoff < -0.:
+             raise ValueError("Minimum cutoff must be larger than zero.")
+         if cutoff > 0.5:
+             raise ValueError("A cutoff above 0.5 does not make sense.")
+         self.kernel_size = kernel_size
+         self.even = (kernel_size % 2 == 0)
+         self.pad_left = kernel_size // 2 - int(self.even)
+         self.pad_right = kernel_size // 2
+         self.stride = stride
+         self.padding = padding
+         self.padding_mode = padding_mode
+         filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
+         self.register_buffer("filter", filter)
+
+     # input [B, C, T]
+     def forward(self, x):
+         _, C, _ = x.shape
+
+         if self.padding:
+             x = F.pad(x, (self.pad_left, self.pad_right),
+                       mode=self.padding_mode)
+         out = F.conv1d(x, self.filter.expand(C, -1, -1),
+                        stride=self.stride, groups=C)
+
+         return out
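
The Kaiser-windowed sinc kernel above is normalized to unit DC gain, so a constant signal passes through the low-pass filter essentially unchanged. A minimal check (assuming the package is importable as `alias_free_torch`):

```python
import torch
from alias_free_torch.filter import kaiser_sinc_filter1d, LowPassFilter1d

# the kernel has shape [1, 1, kernel_size] and sums to 1
f = kaiser_sinc_filter1d(cutoff=0.25, half_width=0.3, kernel_size=12)
print(f.shape, float(f.sum()))  # torch.Size([1, 1, 12]) ~1.0

# with stride=1 and replicate padding, a constant input is preserved
lpf = LowPassFilter1d(cutoff=0.25, half_width=0.3, kernel_size=12)
x = torch.ones(1, 2, 64)
print(float(lpf(x).mean()))  # ~1.0
```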
alias_free_torch/resample.py ADDED
@@ -0,0 +1,49 @@
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
+ # LICENSE is in incl_licenses directory.
+
+ import torch.nn as nn
+ from torch.nn import functional as F
+ from .filter import LowPassFilter1d
+ from .filter import kaiser_sinc_filter1d
+
+
+ class UpSample1d(nn.Module):
+     def __init__(self, ratio=2, kernel_size=None):
+         super().__init__()
+         self.ratio = ratio
+         self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
+         self.stride = ratio
+         self.pad = self.kernel_size // ratio - 1
+         self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
+         self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
+         filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio,
+                                       half_width=0.6 / ratio,
+                                       kernel_size=self.kernel_size)
+         self.register_buffer("filter", filter)
+
+     # x: [B, C, T]
+     def forward(self, x):
+         _, C, _ = x.shape
+
+         x = F.pad(x, (self.pad, self.pad), mode='replicate')
+         x = self.ratio * F.conv_transpose1d(
+             x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
+         x = x[..., self.pad_left:-self.pad_right]
+
+         return x
+
+
+ class DownSample1d(nn.Module):
+     def __init__(self, ratio=2, kernel_size=None):
+         super().__init__()
+         self.ratio = ratio
+         self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
+         self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio,
+                                        half_width=0.6 / ratio,
+                                        stride=ratio,
+                                        kernel_size=self.kernel_size)
+
+     def forward(self, x):
+         xx = self.lowpass(x)
+
+         return xx
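
With the default `kernel_size = int(6 * ratio // 2) * 2`, the padding arithmetic above makes `UpSample1d` return exactly `ratio * T` samples and `DownSample1d` return `T / ratio`. A minimal shape check (assuming the package is importable as `alias_free_torch`):

```python
import torch
from alias_free_torch.resample import UpSample1d, DownSample1d

x = torch.randn(1, 4, 100)   # [B, C, T]
up = UpSample1d(ratio=2)     # default kernel_size = 12
down = DownSample1d(ratio=2)

y = up(x)                    # [1, 4, 200]: 2x interpolation with the kaiser-sinc kernel
z = down(y)                  # [1, 4, 100]: low-pass filtering, then stride-2 decimation
print(y.shape, z.shape)
```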
bigvgan.py ADDED
@@ -0,0 +1,352 @@
+ # Copyright (c) 2024 NVIDIA CORPORATION.
+ # Licensed under the MIT license.
+
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
+ # LICENSE is in incl_licenses directory.
+
+ import os
+ import json
+ from pathlib import Path
+
+ from collections import namedtuple
+ from typing import Optional, List, Union, Dict
+
+ import torch
+ import torch.nn.functional as F
+ import torch.nn as nn
+ from torch.nn import Conv1d, ConvTranspose1d
+ from torch.nn.utils import weight_norm, remove_weight_norm
+
+ import activations
+ from utils import init_weights, get_padding
+ from alias_free_torch.act import Activation1d as TorchActivation1d
+ from env import AttrDict
+
+ from huggingface_hub import PyTorchModelHubMixin, hf_hub_download
+
+
+ def load_hparams_from_json(path) -> AttrDict:
+     with open(path) as f:
+         data = f.read()
+     h = json.loads(data)
+     return AttrDict(h)
+
+
+ class AMPBlock1(torch.nn.Module):
+     def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None):
+         super(AMPBlock1, self).__init__()
+         self.h = h
+
+         self.convs1 = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                                padding=get_padding(kernel_size, dilation[0]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                                padding=get_padding(kernel_size, dilation[1]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+                                padding=get_padding(kernel_size, dilation[2])))
+         ])
+         self.convs1.apply(init_weights)
+
+         self.convs2 = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1)))
+         ])
+         self.convs2.apply(init_weights)
+
+         self.num_layers = len(self.convs1) + len(self.convs2)  # total number of conv layers
+
+         # select which Activation1d to use; lazy-load the CUDA version to ensure backward compatibility
+         if self.h.get("use_cuda_kernel", False):
+             # faster CUDA kernel implementation of Activation1d
+             from alias_free_cuda.activation1d import Activation1d as CudaActivation1d
+             Activation1d = CudaActivation1d
+         else:
+             Activation1d = TorchActivation1d
+
+         if activation == 'snake':  # periodic nonlinearity with snake function and anti-aliasing
+             self.activations = nn.ModuleList([
+                 Activation1d(
+                     activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
+                 for _ in range(self.num_layers)
+             ])
+         elif activation == 'snakebeta':  # periodic nonlinearity with snakebeta function and anti-aliasing
+             self.activations = nn.ModuleList([
+                 Activation1d(
+                     activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
+                 for _ in range(self.num_layers)
+             ])
+         else:
+             raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
+
+     def forward(self, x):
+         acts1, acts2 = self.activations[::2], self.activations[1::2]
+         for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
+             xt = a1(x)
+             xt = c1(xt)
+             xt = a2(xt)
+             xt = c2(xt)
+             x = xt + x
+
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs1:
+             remove_weight_norm(l)
+         for l in self.convs2:
+             remove_weight_norm(l)
+
+
+ class AMPBlock2(torch.nn.Module):
+     def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None):
+         super(AMPBlock2, self).__init__()
+         self.h = h
+
+         self.convs = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                                padding=get_padding(kernel_size, dilation[0]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                                padding=get_padding(kernel_size, dilation[1])))
+         ])
+         self.convs.apply(init_weights)
+
+         self.num_layers = len(self.convs)  # total number of conv layers
+
+         # select which Activation1d to use; lazy-load the CUDA version to ensure backward compatibility
+         if self.h.get("use_cuda_kernel", False):
+             # faster CUDA kernel implementation of Activation1d
+             from alias_free_cuda.activation1d import Activation1d as CudaActivation1d
+             Activation1d = CudaActivation1d
+         else:
+             Activation1d = TorchActivation1d
+
+         if activation == 'snake':  # periodic nonlinearity with snake function and anti-aliasing
+             self.activations = nn.ModuleList([
+                 Activation1d(
+                     activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
+                 for _ in range(self.num_layers)
+             ])
+         elif activation == 'snakebeta':  # periodic nonlinearity with snakebeta function and anti-aliasing
+             self.activations = nn.ModuleList([
+                 Activation1d(
+                     activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
+                 for _ in range(self.num_layers)
+             ])
+         else:
+             raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
+
+     def forward(self, x):
+         for c, a in zip(self.convs, self.activations):
+             xt = a(x)
+             xt = c(xt)
+             x = xt + x
+
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs:
+             remove_weight_norm(l)
+
+
+ class BigVGAN(
+     torch.nn.Module,
+     PyTorchModelHubMixin,
+     library_name="bigvgan",
+     repo_url="https://github.com/NVIDIA/BigVGAN",
+     docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md",
+     pipeline_tag="audio-to-audio",
+     license="mit",
+     tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"]
+ ):
+     # this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks.
+     # New in v2: if use_cuda_kernel is set to True, it loads optimized CUDA kernels for AMP.
+     # NOTE: use_cuda_kernel=True should be used for inference only (training is not supported).
+     def __init__(
+         self,
+         h,
+         use_cuda_kernel: bool = False
+     ):
+         super(BigVGAN, self).__init__()
+         self.h = h
+         self.h["use_cuda_kernel"] = use_cuda_kernel  # add it to global hyperparameters (h)
+
+         self.num_kernels = len(h.resblock_kernel_sizes)
+         self.num_upsamples = len(h.upsample_rates)
+
+         # pre conv
+         self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
+
+         # define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
+         resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2
+
+         # transposed conv-based upsamplers. does not apply anti-aliasing
+         self.ups = nn.ModuleList()
+         for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
+             self.ups.append(nn.ModuleList([
+                 weight_norm(ConvTranspose1d(h.upsample_initial_channel // (2 ** i),
+                                             h.upsample_initial_channel // (2 ** (i + 1)),
+                                             k, u, padding=(k - u) // 2))
+             ]))
+
+         # residual blocks using anti-aliased multi-periodicity composition modules (AMP)
+         self.resblocks = nn.ModuleList()
+         for i in range(len(self.ups)):
+             ch = h.upsample_initial_channel // (2 ** (i + 1))
+             for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
+                 self.resblocks.append(resblock(h, ch, k, d, activation=h.activation))
+
+         # select which Activation1d to use; lazy-load the CUDA version to ensure backward compatibility
+         if self.h.get("use_cuda_kernel", False):
+             # faster CUDA kernel implementation of Activation1d
+             from alias_free_cuda.activation1d import Activation1d as CudaActivation1d
+             Activation1d = CudaActivation1d
+         else:
+             Activation1d = TorchActivation1d
+
+         # post conv
+         if h.activation == "snake":  # periodic nonlinearity with snake function and anti-aliasing
+             activation_post = activations.Snake(ch, alpha_logscale=h.snake_logscale)
+             self.activation_post = Activation1d(activation=activation_post)
+         elif h.activation == "snakebeta":  # periodic nonlinearity with snakebeta function and anti-aliasing
+             activation_post = activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale)
+             self.activation_post = Activation1d(activation=activation_post)
+         else:
+             raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
+
+         # whether to use bias for the final conv_post. Defaults to True for backward compatibility
+         self.use_bias_at_final = h.get("use_bias_at_final", True)
+         self.conv_post = weight_norm(Conv1d(
+             ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final
+         ))
+
+         # weight initialization
+         for i in range(len(self.ups)):
+             self.ups[i].apply(init_weights)
+         self.conv_post.apply(init_weights)
+
+         # final tanh activation. Defaults to True for backward compatibility
+         self.use_tanh_at_final = h.get("use_tanh_at_final", True)
+
+     def forward(self, x):
+         # pre conv
+         x = self.conv_pre(x)
+
+         for i in range(self.num_upsamples):
+             # upsampling
+             for i_up in range(len(self.ups[i])):
+                 x = self.ups[i][i_up](x)
+             # AMP blocks
+             xs = None
+             for j in range(self.num_kernels):
+                 if xs is None:
+                     xs = self.resblocks[i * self.num_kernels + j](x)
+                 else:
+                     xs += self.resblocks[i * self.num_kernels + j](x)
+             x = xs / self.num_kernels
+
+         # post conv
+         x = self.activation_post(x)
+         x = self.conv_post(x)
+         # final tanh activation
+         if self.use_tanh_at_final:
+             x = torch.tanh(x)
+         else:
+             x = torch.clamp(x, min=-1., max=1.)  # bound the output to [-1, 1]
+
+         return x
+
+     def remove_weight_norm(self):
+         print('Removing weight norm...')
+         for l in self.ups:
+             for l_i in l:
+                 remove_weight_norm(l_i)
+         for l in self.resblocks:
+             l.remove_weight_norm()
+         remove_weight_norm(self.conv_pre)
+         remove_weight_norm(self.conv_post)
+
+     ##################################################################
+     # additional methods for huggingface_hub support
+     ##################################################################
+     def _save_pretrained(self, save_directory: Path) -> None:
+         """Save weights and config.json from a Pytorch model to a local directory."""
+
+         model_path = save_directory / 'bigvgan_generator.pt'
+         torch.save(
+             {'generator': self.state_dict()},
+             model_path
+         )
+
+         config_path = save_directory / 'config.json'
+         with open(config_path, 'w') as config_file:
+             json.dump(self.h, config_file, indent=4)
+
+     @classmethod
+     def _from_pretrained(
+         cls,
+         *,
+         model_id: str,
+         revision: str,
+         cache_dir: str,
+         force_download: bool,
+         proxies: Optional[Dict],
+         resume_download: bool,
+         local_files_only: bool,
+         token: Union[str, bool, None],
+         map_location: str = "cpu",  # additional argument
+         strict: bool = False,  # additional argument
+         use_cuda_kernel: bool = False,
+         **model_kwargs,
+     ):
+         """Load Pytorch pretrained weights and return the loaded model."""
+
+         ##################################################################
+         # download and load hyperparameters (h) used by BigVGAN
+         ##################################################################
+         config_file = hf_hub_download(
+             repo_id=model_id,
+             filename='config.json',
+             revision=revision,
+             cache_dir=cache_dir,
+             force_download=force_download,
+             proxies=proxies,
+             resume_download=resume_download,
+             token=token,
+             local_files_only=local_files_only,
+         )
+         h = load_hparams_from_json(config_file)
+
+         ##################################################################
+         # instantiate BigVGAN using h
+         ##################################################################
+         if use_cuda_kernel:
+             print("[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!")
+             print("[WARNING] You need nvcc and ninja installed on your system, matching the CUDA version your PyTorch build is using, in order to build the kernel. If not, the model will fail to initialize or generate incorrect waveforms!")
+             print("[WARNING] For details, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis")
+         model = cls(h, use_cuda_kernel=use_cuda_kernel)
+
+         ##################################################################
+         # download and load pretrained generator weight
+         ##################################################################
+         if os.path.isdir(model_id):
+             print("Loading weights from local directory")
+             model_file = os.path.join(model_id, 'bigvgan_generator.pt')
+         else:
+             print(f"Loading weights from {model_id}")
+             model_file = hf_hub_download(
338
+ repo_id=model_id,
339
+ filename='bigvgan_generator.pt',
340
+ revision=revision,
341
+ cache_dir=cache_dir,
342
+ force_download=force_download,
343
+ proxies=proxies,
344
+ resume_download=resume_download,
345
+ token=token,
346
+ local_files_only=local_files_only,
347
+ )
348
+
349
+ checkpoint_dict = torch.load(model_file, map_location=map_location)
350
+ model.load_state_dict(checkpoint_dict['generator'])
351
+
352
+ return model
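Taken together with the `huggingface_hub` mixin methods above, loading and running a pretrained generator takes only a few lines. A minimal inference sketch (not part of this commit; it assumes the class above lives in `bigvgan.py`, and the repository id is one example of the pretrained checkpoints):

```python
import torch
from bigvgan import BigVGAN            # the class defined above
from meldataset import get_mel_spectrogram

# assumed checkpoint id; any BigVGAN-v2 repo with the same file layout should work
model = BigVGAN.from_pretrained('nvidia/bigvgan_v2_24khz_100band_256x')
model.remove_weight_norm()              # fuse weight norm for inference
model.eval()

wav = torch.randn(1, 24000).clamp(-1., 1.)   # [batch, time], values in [-1, 1]
mel = get_mel_spectrogram(wav, model.h)      # [batch, num_mels, frames]

with torch.no_grad():
    out = model(mel)                         # [batch, 1, time]
```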
env.py ADDED
@@ -0,0 +1,18 @@
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
+ # LICENSE is in incl_licenses directory.
+
+ import os
+ import shutil
+
+
+ class AttrDict(dict):
+     def __init__(self, *args, **kwargs):
+         super(AttrDict, self).__init__(*args, **kwargs)
+         self.__dict__ = self
+
+
+ def build_env(config, config_name, path):
+     t_path = os.path.join(path, config_name)
+     if config != t_path:
+         os.makedirs(path, exist_ok=True)
+         shutil.copyfile(config, t_path)
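For context, `AttrDict` is what lets hyperparameters loaded from `config.json` be read with attribute access (`h.num_mels`) as well as key access (`h["num_mels"]`). A small sketch (the config path is hypothetical; pretrained checkpoints ship their own `config.json`):

```python
import json
from env import AttrDict

# hypothetical local path to a checkpoint's config file
with open('config.json') as f:
    h = AttrDict(json.load(f))

assert h.sampling_rate == h['sampling_rate']  # both access styles work
```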
meldataset.py ADDED
@@ -0,0 +1,66 @@
+ # Copyright (c) 2024 NVIDIA CORPORATION.
+ # Licensed under the MIT license.
+
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
+ # LICENSE is in incl_licenses directory.
+
+ import torch
+ import torch.utils.data
+ import numpy as np
+ from scipy.io.wavfile import read
+ from librosa.filters import mel as librosa_mel_fn
+
+ MAX_WAV_VALUE = 32767.0  # NOTE: 32768.0 - 1 to prevent int16 overflow (which causes popping sounds in corner cases)
+
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
+     return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
+
+ def dynamic_range_decompression(x, C=1):
+     return np.exp(x) / C
+
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+     return torch.log(torch.clamp(x, min=clip_val) * C)
+
+ def dynamic_range_decompression_torch(x, C=1):
+     return torch.exp(x) / C
+
+ def spectral_normalize_torch(magnitudes):
+     output = dynamic_range_compression_torch(magnitudes)
+     return output
+
+ def spectral_de_normalize_torch(magnitudes):
+     output = dynamic_range_decompression_torch(magnitudes)
+     return output
+
+ mel_basis = {}
+ hann_window = {}
+
+ def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+     if torch.min(y) < -1.:
+         print('min value is ', torch.min(y))
+     if torch.max(y) > 1.:
+         print('max value is ', torch.max(y))
+
+     global mel_basis, hann_window
+     str_key_mel_basis = str(fmax) + '_' + str(y.device)  # cache mel filterbank and window per (fmax, device)
+     if str_key_mel_basis not in mel_basis:
+         mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
+         mel_basis[str_key_mel_basis] = torch.from_numpy(mel).float().to(y.device)
+         hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
+
+     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
+     y = y.squeeze(1)
+
+     # complex tensor as default, then use view_as_real for future pytorch compatibility
+     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
+                       center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
+     spec = torch.view_as_real(spec)
+     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
+
+     spec = torch.matmul(mel_basis[str_key_mel_basis], spec)
+     spec = spectral_normalize_torch(spec)
+
+     return spec
+
+ def get_mel_spectrogram(wav, h):
+     return mel_spectrogram(wav, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
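To make the convention concrete: `mel_spectrogram` pads the waveform, takes an STFT, projects the magnitudes onto a mel filterbank, and log-compresses the result. A usage sketch (the 24 kHz / 100-band parameter values below are assumptions for illustration; real values come from a checkpoint's `config.json`):

```python
import torch
from meldataset import mel_spectrogram

wav = torch.randn(1, 24000).clamp(-1., 1.)  # 1 second of audio in [-1, 1]

# assumed 24 kHz / 100-band configuration
mel = mel_spectrogram(wav, n_fft=1024, num_mels=100, sampling_rate=24000,
                      hop_size=256, win_size=1024, fmin=0, fmax=12000)
print(mel.shape)  # torch.Size([1, 100, 93]); roughly 24000 // 256 frames
```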
nv-modelcard++/bias.md ADDED
@@ -0,0 +1,4 @@
+ Field | Response
+ :---------------------------------------------------------------------------------------------------|:---------------
+ Participation considerations from adversely impacted groups (protected classes) in model design and testing: | None
+ Measures taken to mitigate against unwanted bias: | No measures taken to mitigate against unwanted bias.
nv-modelcard++/explainability.md ADDED
@@ -0,0 +1,13 @@
+ Field | Response
+ :------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------
+ Intended Application & Domain: | Generating waveforms from mel spectrograms.
+ Model Type: | Convolutional Neural Network (CNN)
+ Intended Users: | This model is intended for developers to synthesize and generate waveforms from AI-generated mel spectrograms.
+ Output: | Audio Waveform
+ Describe how the model works: | The model generates an audio waveform corresponding to the input mel spectrogram.
+ Name the adversely impacted groups this has been tested to deliver comparable outcomes regardless of: | Not Applicable
+ Technical Limitations: | The model may not perform well on synthetically generated mel spectrograms that deviate significantly from the profile of the mel spectrograms on which it was trained.
+ Verified to have met prescribed NVIDIA quality standards: | Yes
+ Performance Metrics: | Perceptual Evaluation of Speech Quality (PESQ), Virtual Speech Quality Objective Listener (VISQOL), Multi-resolution STFT (MRSTFT), Mel cepstral distortion (MCD), Periodicity RMSE, Voiced/Unvoiced F1 Score (V/UV F1)
+ Potential Known Risks: | This model may generate low-quality or distorted soundwaves.
+ Licensing: | https://github.com/NVIDIA/BigVGAN/blob/main/LICENSE
nv-modelcard++/overview.md ADDED
@@ -0,0 +1,114 @@
+ # Model Overview
+
+ ## Description:
+ BigVGAN is a generative AI model specialized in synthesizing audio waveforms from mel spectrogram inputs.
+
+ <center><img src="https://user-images.githubusercontent.com/15963413/218609148-881e39df-33af-4af9-ab95-1427c4ebf062.png" width="800"></center>
+
+ BigVGAN is a fully convolutional architecture with several upsampling blocks using transposed convolutions, each followed by multiple residual dilated convolution layers.
+
+ BigVGAN introduces a novel module, called anti-aliased multi-periodicity composition (AMP), which is specifically designed for generating waveforms. AMP specializes in synthesizing high-frequency and periodic sound waves, drawing inspiration from audio signal processing principles.
+
+ It applies a periodic activation function, called Snake, which provides an inductive bias to the architecture for generating periodic sound waves. It also applies anti-aliasing filters to reduce undesired artifacts in the generated waveforms. <br>
+
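For intuition, the Snake activation has the closed form `f(x) = x + (1/α) · sin²(αx)`, where `α` is a trainable parameter that controls the frequency of the periodic component. A minimal sketch of the bare activation (illustrative only; the real AMP module wraps it with the anti-aliasing up/downsampling filters described above):

```python
import torch

def snake(x: torch.Tensor, alpha: torch.Tensor) -> torch.Tensor:
    # Snake: x + (1/alpha) * sin^2(alpha * x);
    # a small epsilon avoids division by zero for tiny alpha.
    return x + (1.0 / (alpha + 1e-9)) * torch.sin(alpha * x) ** 2

x = torch.linspace(-3.0, 3.0, steps=7)
print(snake(x, alpha=torch.tensor(1.0)))
```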
+ This model is ready for commercial use.<br>
+
+ ## Reference(s):
+ * [BigVGAN: A Universal Neural Vocoder with Large-Scale Training](https://arxiv.org/abs/2206.04658) <br>
+ * [Project Page](https://research.nvidia.com/labs/adlr/projects/bigvgan/) <br>
+ * [Audio Demo](https://bigvgan-demo.github.io/) <br>
+
+ ## Model Architecture:
+ **Architecture Type:** Convolutional Neural Network (CNN) <br>
+ **Network Architecture:** The architecture is detailed in the official repository (https://github.com/NVIDIA/BigVGAN); the related paper can be found at https://arxiv.org/abs/2206.04658. <br>
+ **Model Version:** 2.0 <br>
+
+ ## Input:
+ **Input Type:** Audio <br>
+ **Input Format:** Mel Spectrogram <br>
+ **Input Parameters:** None <br>
+ **Other Properties Related to Input:** The input mel spectrogram has shape `[batch, channels, frames]`, where `channels` refers to the number of mel bands defined by the model and `frames` refers to the temporal length. The model supports arbitrarily long `frames`, limited only by GPU memory.
+
+ ## Output:
+ **Output Type:** Audio <br>
+ **Output Format:** Audio Waveform <br>
+ **Output Parameters:** None <br>
+ **Other Properties Related to Output:** The output audio waveform has shape `[batch, 1, time]`, where `1` refers to the mono audio channel and `time` refers to the temporal length. `time` is a fixed integer multiple of the input `frames`, given by the model's upsampling ratio (`time = upsampling ratio * frames`). The output audio waveform consists of float values in the range `[-1, 1]`.
+
39
+ ## Software Integration:
40
+ **Runtime Engine(s):** PyTorch
41
+
42
+ **Supported Hardware Microarchitecture Compatibility:** NVIDIA Ampere, NVIDIA Hopper, NVIDIA Lovelace, NVIDIA Turing, NVIDIA Volta <br>
43
+
44
+
45
+ ## Preferred/Supported Operating System(s):
46
+ Linux
47
+
48
+
49
+ ## Model Version(s):
50
+ v2.0
51
+
52
+ ## Training, Testing, and Evaluation Datasets:
53
+
54
+ ### Training Dataset:
55
+ The dataset contains diverse audio types, including speech in multiple languages, environmental sounds, and instruments.
56
+
57
+ **Links:**
58
+ * [AAM: Artificial Audio Multitracks Dataset](https://zenodo.org/records/5794629)
59
+ * [AudioCaps](https://audiocaps.github.io/)
60
+ * [AudioSet](https://research.google.com/audioset/index.html)
61
+ * [common-accent](https://huggingface.co/datasets/DTU54DL/common-accent)
62
+ * [Crowd Sourced Emotional Multimodal Actors Dataset (CREMA-D)](https://ieeexplore.ieee.org/document/6849440)
63
+ * [DCASE2017 Challenge, Task 4: Large-scale weakly supervised sound event detection for smart cars](https://dcase.community/challenge2017/task-large-scale-sound-event-detection)
64
+ * [FSDnoisy18k](https://zenodo.org/records/2529934)
65
+ * [Free Universal Sound Separation Dataset](https://zenodo.org/records/3694384)
66
+ * [Greatest Hits dataset](https://andrewowens.com/vis/)
67
+ * [GTZAN](https://ieeexplore.ieee.org/document/1021072)
68
+ * [JL corpus](https://www.kaggle.com/datasets/tli725/jl-corpus)
69
+ * [Medley-solos-DB: a cross-collection dataset for musical instrument recognition](https://zenodo.org/records/3464194)
70
+ * [MUSAN: A Music, Speech, and Noise Corpus](https://www.openslr.org/17/)
71
+ * [MusicBench](https://huggingface.co/datasets/amaai-lab/MusicBench)
72
+ * [MusicCaps](https://www.kaggle.com/datasets/googleai/musiccaps)
73
+ * [MusicNet](https://www.kaggle.com/datasets/imsparsh/musicnet-dataset)
74
+ * [NSynth](https://magenta.tensorflow.org/datasets/nsynth)
75
+ * [OnAir-Music-Dataset](https://github.com/sevagh/OnAir-Music-Dataset)
76
+ * [Audio Piano Triads Dataset](https://zenodo.org/records/4740877)
77
+ * [Pitch Audio Dataset (Surge synthesizer)](https://zenodo.org/records/4677097)
78
+ * [SONYC Urban Sound Tagging (SONYC-UST): a multilabel dataset from an urban acoustic sensor network](https://zenodo.org/records/3966543)
79
+ * [VocalSound: A Dataset for Improving Human Vocal Sounds Recognition](https://github.com/YuanGongND/vocalsound)
80
+ * [WavText5K](https://github.com/microsoft/WavText5K)
81
+ * [CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages](https://github.com/Kyubyong/css10)
82
+ * [Hi-Fi Multi-Speaker English TTS Dataset (Hi-Fi TTS)](https://www.openslr.org/109/)
83
+ * [IIIT-H Indic Speech Databases](http://festvox.org/databases/iiit_voices/)
84
+ * [Libri-Light: A Benchmark for ASR with Limited or No Supervision](https://github.com/facebookresearch/libri-light)
85
+ * [LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech](https://www.openslr.org/60)
86
+ * [LibriTTS-R: A Restored Multi-Speaker Text-to-Speech Corpus](https://www.openslr.org/141/)
87
+ * [The SIWIS French Speech Synthesis Database](https://datashare.ed.ac.uk/handle/10283/2353)
88
+ * [Crowdsourced high-quality Colombian Spanish speech data set](https://openslr.org/72/)
89
+ * [TTS-Portuguese Corpus](https://github.com/Edresson/TTS-Portuguese-Corpus)
90
+ * [CSTR VCTK Corpus: English Multi-speaker Corpus for CSTR Voice Cloning Toolkit](https://datashare.ed.ac.uk/handle/10283/3443)
91
+
92
+ ** Data Collection Method by dataset <br>
93
+ * Human <br>
94
+
95
+ ** Labeling Method by dataset (for those with labels) <br>
96
+ * Hybrid: Automated, Human, Unknown <br>
97
+
98
+ ### Evaluating Dataset:
99
+
100
+ Properties: The audio generation quality of BigVGAN is evaluated using `dev` splits of the [LibriTTS dataset](https://www.openslr.org/60/) and [Hi-Fi TTS dataset](https://www.openslr.org/109/). The datasets include speech in English language with equal balance of genders.
101
+
102
+ ** Data Collection Method by dataset <br>
103
+ * Human <br>
104
+
105
+ ** Labeling Method by dataset <br>
106
+ * Automated <br>
107
+
108
+
109
+ ## Inference:
110
+ **Engine:** PyTorch <br>
111
+ **Test Hardware:** NVIDIA A100 GPU <br>
112
+
113
+ ## Ethical Considerations:
114
+ NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. For more detailed information on ethical considerations for this model, please see the Model Card++ Explainability, Bias, Safety & Security, and Privacy Subcards. Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).
nv-modelcard++/privacy.md ADDED
@@ -0,0 +1,14 @@
+ Field | Response
+ :----------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------
+ Generatable or reverse engineerable personal information? | None
+ Protected class data used to create this model? | None
+ Was consent obtained for any personal data used? | Not Applicable (No Personal Data)
+ How often is dataset reviewed? | Before Release
+ Is a mechanism in place to honor data subject right of access or deletion of personal data? | Not Applicable
+ If personal data was collected for the development of the model, was it collected directly by NVIDIA? | Not Applicable
+ If personal data was collected for the development of the model by NVIDIA, do you maintain or have access to disclosures made to data subjects? | Not Applicable
+ If personal data was collected for the development of this AI model, was it minimized to only what was required? | Not Applicable
+ Is data in dataset traceable? | Yes
+ Is there provenance for all datasets used in training? | Yes
+ Does data labeling (annotation, metadata) comply with privacy laws? | Yes
+ Is data compliant with data subject requests for data correction or removal, if such a request was made? | No, not possible with externally-sourced data.
nv-modelcard++/safety.md ADDED
@@ -0,0 +1,6 @@
+ Field | Response
+ :---------------------------------------------------|:----------------------------------
+ Model Application(s): | Synthetic Audio Generation
+ Describe the life critical impact (if present). | Not Applicable
+ Use Case Restrictions: | None
+ Model and dataset restrictions: | The principle of least privilege (PoLP) is applied, limiting access for dataset generation and model development. Restrictions enforce dataset access during training, and dataset license constraints are adhered to.
utils.py ADDED
@@ -0,0 +1,80 @@
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
+ # LICENSE is in incl_licenses directory.
+
+ import glob
+ import os
+ import matplotlib
+ import torch
+ from torch.nn.utils import weight_norm
+ matplotlib.use("Agg")
+ import matplotlib.pylab as plt
+ from meldataset import MAX_WAV_VALUE
+ from scipy.io.wavfile import write
+
+
+ def plot_spectrogram(spectrogram):
+     fig, ax = plt.subplots(figsize=(10, 2))
+     im = ax.imshow(spectrogram, aspect="auto", origin="lower",
+                    interpolation='none')
+     plt.colorbar(im, ax=ax)
+
+     fig.canvas.draw()
+     plt.close()
+
+     return fig
+
+
+ def plot_spectrogram_clipped(spectrogram, clip_max=2.):
+     fig, ax = plt.subplots(figsize=(10, 2))
+     im = ax.imshow(spectrogram, aspect="auto", origin="lower",
+                    interpolation='none', vmin=1e-6, vmax=clip_max)
+     plt.colorbar(im, ax=ax)
+
+     fig.canvas.draw()
+     plt.close()
+
+     return fig
+
+
+ def init_weights(m, mean=0.0, std=0.01):
+     classname = m.__class__.__name__
+     if classname.find("Conv") != -1:
+         m.weight.data.normal_(mean, std)
+
+
+ def apply_weight_norm(m):
+     classname = m.__class__.__name__
+     if classname.find("Conv") != -1:
+         weight_norm(m)
+
+
+ def get_padding(kernel_size, dilation=1):
+     return int((kernel_size * dilation - dilation) / 2)
+
+
+ def load_checkpoint(filepath, device):
+     assert os.path.isfile(filepath)
+     print("Loading '{}'".format(filepath))
+     checkpoint_dict = torch.load(filepath, map_location=device)
+     print("Complete.")
+     return checkpoint_dict
+
+
+ def save_checkpoint(filepath, obj):
+     print("Saving checkpoint to {}".format(filepath))
+     torch.save(obj, filepath)
+     print("Complete.")
+
+
+ def scan_checkpoint(cp_dir, prefix):
+     # checkpoints are named <prefix> followed by an 8-digit step number
+     pattern = os.path.join(cp_dir, prefix + '????????')
+     cp_list = glob.glob(pattern)
+     if len(cp_list) == 0:
+         return None
+     return sorted(cp_list)[-1]
+
+
+ def save_audio(audio, path, sr):
+     # audio: 1-D torch.Tensor with float values in [-1, 1]
+     audio = audio * MAX_WAV_VALUE
+     audio = audio.cpu().numpy().astype('int16')
+     write(path, sr, audio)
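Finally, a small sketch tying these utilities together (the experiment directory and `g_` prefix are hypothetical; the `generator` key matches the checkpoint layout seen in `_from_pretrained` above):

```python
import torch
from utils import scan_checkpoint, load_checkpoint, save_audio

# hypothetical experiment directory and generator checkpoint prefix
cp_path = scan_checkpoint('exp/bigvgan', 'g_')   # e.g. exp/bigvgan/g_05000000
if cp_path is not None:
    state = load_checkpoint(cp_path, 'cpu')      # dict with a 'generator' key

wav = torch.zeros(24000)           # placeholder 1-D waveform in [-1, 1]
save_audio(wav, 'out.wav', 24000)  # scaled by MAX_WAV_VALUE, written as int16
```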