import librosa
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
from transformers import HubertModel


class HubertModelWithFinalProj(HubertModel):
    def __init__(self, config):
        super().__init__(config)

        # The final projection layer is kept only for backward compatibility:
        # it lets checkpoints that ship `final_proj` weights load cleanly.
        # Following https://github.com/auspicious3000/contentvec/issues/6,
        # removing this layer from the forward pass is necessary to achieve
        # the desired outcome, so forward() never applies it.
        self.final_proj = nn.Linear(config.hidden_size, config.classifier_proj_size)
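        # Note (illustrative sketch, not used anywhere in this file): if the
        # legacy projected ContentVec output is ever needed, the kept layer
        # can be applied manually to the encoder output:
        #   hidden = model(wav)['last_hidden_state']  # (B, T, hidden_size)
        #   legacy = model.final_proj(hidden)         # (B, T, classifier_proj_size)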


class VoiceConversionExtractor(nn.Module):
    # Extracting features on the fly during training might be slow;
    # see the pre-extraction sketch after this class.
    def __init__(self, model_name_or_path, sr):
        super().__init__()
        self.encoder = HubertModelWithFinalProj.from_pretrained(model_name_or_path)
        self.encoder.eval()
        self.sr = sr
        self.target_sr = 16000  # HuBERT expects 16 kHz input
        if self.sr != self.target_sr:
            self.resampler = torchaudio.transforms.Resample(orig_freq=self.sr,
                                                            new_freq=self.target_sr)

    def forward(self, audio):
        if self.sr != self.target_sr:
            audio = self.resampler(audio)
        # Pad (400 - 320) // 2 = 40 samples on each side so the encoder's
        # 400-sample windows (320-sample hop) are centered on the waveform.
        audio = F.pad(audio, ((400 - 320) // 2, (400 - 320) // 2))
        # These are hidden states (content features), not logits.
        features = self.encoder(audio)['last_hidden_state']
        return features
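

# A minimal sketch of offline pre-extraction, motivated by the comment on the
# class above: caching features to disk avoids re-running the encoder at every
# training step. `wav_paths`, `cache_dir`, and the .pt naming scheme are
# illustrative assumptions, not part of the original file.
def preextract_features(model, wav_paths, cache_dir, sr=24000):
    import os
    os.makedirs(cache_dir, exist_ok=True)
    for path in wav_paths:
        wav, _ = librosa.load(path, sr=sr)
        wav = torch.from_numpy(wav).unsqueeze(0)
        with torch.no_grad():
            feats = model(wav)
        out_path = os.path.join(cache_dir, os.path.basename(path) + '.pt')
        torch.save(feats.squeeze(0).cpu(), out_path)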


if __name__ == '__main__':
    model = VoiceConversionExtractor('lengyue233/content-vec-best', 24000)
    audio, sr = librosa.load('test.wav', sr=24000)
    # Crop to 100 frames * 320-sample hop * 1.5 (24 kHz / 16 kHz) = 48,000
    # samples, i.e. 2 seconds at 24 kHz.
    audio = audio[:round(100 * 320 * 1.5)]
    # from_numpy + unsqueeze avoids the slow list-of-ndarray tensor path.
    audio = torch.from_numpy(audio).unsqueeze(0)
    with torch.no_grad():
        content = model(audio)
    print(content.shape)
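    # Expected shape, assuming the checkpoint is HuBERT-base (hidden_size=768):
    # 48,000 samples at 24 kHz resample to 32,000 at 16 kHz; after the
    # 40-sample pad on each side, the 400-sample window / 320-sample hop gives
    # (32080 - 400) // 320 + 1 = 100 frames, i.e. torch.Size([1, 100, 768]).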