nijisakai and manavisrani07 committed
Commit
5b11db7
0 Parent(s)

Duplicate from manavisrani07/gradio-lipsync-wav2lip


Co-authored-by: Manav Israni <[email protected]>

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +36 -0
  2. README.md +16 -0
  3. __pycache__/audio.cpython-310.pyc +0 -0
  4. __pycache__/audio.cpython-38.pyc +0 -0
  5. __pycache__/hparams.cpython-310.pyc +0 -0
  6. __pycache__/hparams.cpython-38.pyc +0 -0
  7. app.py +49 -0
  8. audio.py +142 -0
  9. basicsr/.DS_Store +0 -0
  10. basicsr/__init__.py +12 -0
  11. basicsr/__pycache__/__init__.cpython-38.pyc +0 -0
  12. basicsr/__pycache__/test.cpython-38.pyc +0 -0
  13. basicsr/__pycache__/train.cpython-38.pyc +0 -0
  14. basicsr/apply_sr.py +25 -0
  15. basicsr/archs/__init__.py +25 -0
  16. basicsr/archs/__pycache__/__init__.cpython-310.pyc +0 -0
  17. basicsr/archs/__pycache__/__init__.cpython-38.pyc +0 -0
  18. basicsr/archs/__pycache__/arch_util.cpython-310.pyc +0 -0
  19. basicsr/archs/__pycache__/arch_util.cpython-38.pyc +0 -0
  20. basicsr/archs/__pycache__/basicvsr_arch.cpython-310.pyc +0 -0
  21. basicsr/archs/__pycache__/basicvsr_arch.cpython-38.pyc +0 -0
  22. basicsr/archs/__pycache__/basicvsrpp_arch.cpython-310.pyc +0 -0
  23. basicsr/archs/__pycache__/basicvsrpp_arch.cpython-38.pyc +0 -0
  24. basicsr/archs/__pycache__/codeformer_arch.cpython-38.pyc +0 -0
  25. basicsr/archs/__pycache__/dfdnet_arch.cpython-310.pyc +0 -0
  26. basicsr/archs/__pycache__/dfdnet_arch.cpython-38.pyc +0 -0
  27. basicsr/archs/__pycache__/dfdnet_util.cpython-310.pyc +0 -0
  28. basicsr/archs/__pycache__/dfdnet_util.cpython-38.pyc +0 -0
  29. basicsr/archs/__pycache__/discriminator_arch.cpython-310.pyc +0 -0
  30. basicsr/archs/__pycache__/discriminator_arch.cpython-38.pyc +0 -0
  31. basicsr/archs/__pycache__/duf_arch.cpython-310.pyc +0 -0
  32. basicsr/archs/__pycache__/duf_arch.cpython-38.pyc +0 -0
  33. basicsr/archs/__pycache__/ecbsr_arch.cpython-310.pyc +0 -0
  34. basicsr/archs/__pycache__/ecbsr_arch.cpython-38.pyc +0 -0
  35. basicsr/archs/__pycache__/edsr_arch.cpython-310.pyc +0 -0
  36. basicsr/archs/__pycache__/edsr_arch.cpython-38.pyc +0 -0
  37. basicsr/archs/__pycache__/edvr_arch.cpython-310.pyc +0 -0
  38. basicsr/archs/__pycache__/edvr_arch.cpython-38.pyc +0 -0
  39. basicsr/archs/__pycache__/hifacegan_arch.cpython-310.pyc +0 -0
  40. basicsr/archs/__pycache__/hifacegan_arch.cpython-38.pyc +0 -0
  41. basicsr/archs/__pycache__/hifacegan_util.cpython-310.pyc +0 -0
  42. basicsr/archs/__pycache__/hifacegan_util.cpython-38.pyc +0 -0
  43. basicsr/archs/__pycache__/rcan_arch.cpython-310.pyc +0 -0
  44. basicsr/archs/__pycache__/rcan_arch.cpython-38.pyc +0 -0
  45. basicsr/archs/__pycache__/ridnet_arch.cpython-310.pyc +0 -0
  46. basicsr/archs/__pycache__/ridnet_arch.cpython-38.pyc +0 -0
  47. basicsr/archs/__pycache__/rrdbnet_arch.cpython-310.pyc +0 -0
  48. basicsr/archs/__pycache__/rrdbnet_arch.cpython-38.pyc +0 -0
  49. basicsr/archs/__pycache__/spynet_arch.cpython-310.pyc +0 -0
  50. basicsr/archs/__pycache__/spynet_arch.cpython-38.pyc +0 -0
.gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ images/comparison.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,16 @@
+ ---
+ title: Gradio Lipsync Wav2lip
+ emoji: 👄
+ colorFrom: indigo
+ colorTo: blue
+ sdk: gradio
+ python_version: 3.8
+ sdk_version: 3.40.1
+ suggested_hardware: t4-medium
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: manavisrani07/gradio-lipsync-wav2lip
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/audio.cpython-310.pyc ADDED
Binary file (4.61 kB).
__pycache__/audio.cpython-38.pyc ADDED
Binary file (4.65 kB).
__pycache__/hparams.cpython-310.pyc ADDED
Binary file (2.69 kB).
__pycache__/hparams.cpython-38.pyc ADDED
Binary file (2.44 kB).
app.py ADDED
@@ -0,0 +1,49 @@
+ import gradio as gr
+ import subprocess
+ from subprocess import call
+
+ with gr.Blocks() as ui:
+     with gr.Row():
+         video = gr.File(label="Video or Image", info="Filepath of video/image that contains faces to use")
+         audio = gr.File(label="Audio", info="Filepath of video/audio file to use as raw audio source")
+         with gr.Column():
+             checkpoint = gr.Radio(["wav2lip", "wav2lip_gan"], label="Checkpoint", info="Name of saved checkpoint to load weights from")
+             no_smooth = gr.Checkbox(label="No Smooth", info="Prevent smoothing face detections over a short temporal window")
+             resize_factor = gr.Slider(minimum=1, maximum=4, step=1, label="Resize Factor", info="Reduce the resolution by this factor. Sometimes, best results are obtained at 480p or 720p")
+     with gr.Row():
+         with gr.Column():
+             pad_top = gr.Slider(minimum=0, maximum=50, step=1, value=0, label="Pad Top", info="Padding above lips")
+             pad_bottom = gr.Slider(minimum=0, maximum=50, step=1, value=10, label="Pad Bottom (often increasing this to 20 allows the chin to be included)", info="Padding below lips")
+             pad_left = gr.Slider(minimum=0, maximum=50, step=1, value=0, label="Pad Left", info="Padding to the left of lips")
+             pad_right = gr.Slider(minimum=0, maximum=50, step=1, value=0, label="Pad Right", info="Padding to the right of lips")
+             generate_btn = gr.Button("Generate")
+         with gr.Column():
+             result = gr.Video()
+
+     def generate(video, audio, checkpoint, no_smooth, resize_factor, pad_top, pad_bottom, pad_left, pad_right):
+         if video is None or audio is None or checkpoint is None:
+             return
+
+         # Computed but never added to cmd below; see the sketch after this diff.
+         smooth = "--nosmooth" if no_smooth else ""
+
+         cmd = [
+             "python",
+             "inference.py",
+             "--checkpoint_path", f"checkpoints/{checkpoint}.pth",
+             "--segmentation_path", "checkpoints/face_segmentation.pth",
+             "--enhance_face", "gfpgan",
+             "--face", video.name,
+             "--audio", audio.name,
+             "--outfile", "results/output.mp4",
+         ]
+
+         call(cmd)
+         return "results/output.mp4"
+
+     # The inputs list must be in the same order as generate()'s parameters.
+     generate_btn.click(
+         generate,
+         [video, audio, checkpoint, no_smooth, resize_factor, pad_top, pad_bottom, pad_left, pad_right],
+         result)
+
+ ui.queue().launch(debug=True)
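
The No Smooth checkbox, the padding sliders, and the Resize Factor above are collected by the UI but never forwarded to inference.py. A minimal sketch of how generate() could pass them through, assuming the Space's inference.py keeps the upstream Wav2Lip flags --nosmooth, --pads, and --resize_factor (the flag names are an assumption; this commit does not show inference.py):

# Hypothetical helper: build the extra CLI arguments for the remaining UI options.
# Flag names follow the upstream Wav2Lip CLI and are assumed, not confirmed by this commit.
def extra_inference_args(no_smooth, resize_factor, pad_top, pad_bottom, pad_left, pad_right):
    args = []
    if no_smooth:
        args.append("--nosmooth")
    args += ["--pads", str(pad_top), str(pad_bottom), str(pad_left), str(pad_right)]
    args += ["--resize_factor", str(int(resize_factor))]
    return args

# Inside generate(), before call(cmd):
# cmd += extra_inference_args(no_smooth, resize_factor, pad_top, pad_bottom, pad_left, pad_right)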
audio.py ADDED
@@ -0,0 +1,142 @@
+ import librosa
+ import librosa.filters
+ import numpy as np
+ # import tensorflow as tf
+ from scipy import signal
+ from scipy.io import wavfile
+ from hparams import hparams as hp
+
+ def load_wav(path, sr):
+     return librosa.core.load(path, sr=sr)[0]
+
+ def save_wav(wav, path, sr):
+     wav *= 32767 / max(0.01, np.max(np.abs(wav)))
+     # proposed by @dsmiller
+     wavfile.write(path, sr, wav.astype(np.int16))
+
+ def save_wavenet_wav(wav, path, sr):
+     # librosa.output was removed in librosa 0.8, so this helper requires librosa < 0.8
+     librosa.output.write_wav(path, wav, sr=sr)
+
+ def preemphasis(wav, k, preemphasize=True):
+     if preemphasize:
+         return signal.lfilter([1, -k], [1], wav)
+     return wav
+
+ def inv_preemphasis(wav, k, inv_preemphasize=True):
+     if inv_preemphasize:
+         return signal.lfilter([1], [1, -k], wav)
+     return wav
+
+ def get_hop_size():
+     hop_size = hp.hop_size
+     if hop_size is None:
+         assert hp.frame_shift_ms is not None
+         hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
+     return hop_size
+
+ def linearspectrogram(wav):
+     D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
+     S = _amp_to_db(np.abs(D)) - hp.ref_level_db
+
+     if hp.signal_normalization:
+         return _normalize(S)
+     return S
+
+ def melspectrogram(wav):
+     D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
+     S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db
+
+     if hp.signal_normalization:
+         return _normalize(S)
+     return S
+
+ def _lws_processor():
+     import lws
+     return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech")
+
+ def _stft(y):
+     if hp.use_lws:
+         return _lws_processor().stft(y).T
+     else:
+         return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)
+
+ ##########################################################
+ # These are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
+ def num_frames(length, fsize, fshift):
+     """Compute number of time frames of spectrogram"""
+     pad = (fsize - fshift)
+     if length % fshift == 0:
+         M = (length + pad * 2 - fsize) // fshift + 1
+     else:
+         M = (length + pad * 2 - fsize) // fshift + 2
+     return M
+
+
+ def pad_lr(x, fsize, fshift):
+     """Compute left and right padding"""
+     M = num_frames(len(x), fsize, fshift)
+     pad = (fsize - fshift)
+     T = len(x) + 2 * pad
+     r = (M - 1) * fshift + fsize - T
+     return pad, pad + r
+ ##########################################################
+ # Librosa correct padding
+ def librosa_pad_lr(x, fsize, fshift):
+     return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
+
+ # Conversions
+ _mel_basis = None
+
+ def _linear_to_mel(spectrogram):
+     global _mel_basis
+     if _mel_basis is None:
+         _mel_basis = _build_mel_basis()
+     return np.dot(_mel_basis, spectrogram)
+
+ def _build_mel_basis():
+     assert hp.fmax <= hp.sample_rate // 2
+     return librosa.filters.mel(
+         sr=hp.sample_rate,
+         n_fft=hp.n_fft,
+         n_mels=hp.num_mels,
+         fmin=hp.fmin,
+         fmax=hp.fmax
+     )
+
+ def _amp_to_db(x):
+     min_level = np.exp(hp.min_level_db / 20 * np.log(10))
+     return 20 * np.log10(np.maximum(min_level, x))
+
+ def _db_to_amp(x):
+     return np.power(10.0, (x) * 0.05)
+
+
+ def _normalize(S):
+     if hp.allow_clipping_in_normalization:
+         if hp.symmetric_mels:
+             return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,
+                            -hp.max_abs_value, hp.max_abs_value)
+         else:
+             return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)
+
+     assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
+     if hp.symmetric_mels:
+         return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value
+     else:
+         return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))
+
+ def _denormalize(D):
+     if hp.allow_clipping_in_normalization:
+         if hp.symmetric_mels:
+             return (((np.clip(D, -hp.max_abs_value, hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))
+                     + hp.min_level_db)
+         else:
+             return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
+
+     if hp.symmetric_mels:
+         return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)
+     else:
+         return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
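
A minimal usage sketch for these helpers, assuming the repo's hparams.py is importable and that a speech clip exists at example.wav (a hypothetical path):

import audio
from hparams import hparams as hp

wav = audio.load_wav("example.wav", hp.sample_rate)  # mono float32 waveform resampled to hp.sample_rate
mel = audio.melspectrogram(wav)                      # normalized mel spectrogram, shape (hp.num_mels, num_frames)
print(wav.shape, mel.shape)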
basicsr/.DS_Store ADDED
Binary file (8.2 kB).
basicsr/__init__.py ADDED
@@ -0,0 +1,12 @@
+ # https://github.com/xinntao/BasicSR
+ # flake8: noqa
+ from .archs import *
+ from .data import *
+ from .losses import *
+ from .metrics import *
+ from .models import *
+ from .ops import *
+ from .test import *
+ from .train import *
+ from .utils import *
+ #from .version import __gitsha__, __version__
basicsr/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (334 Bytes).
basicsr/__pycache__/test.cpython-38.pyc ADDED
Binary file (1.64 kB).
basicsr/__pycache__/train.cpython-38.pyc ADDED
Binary file (6.42 kB).
basicsr/apply_sr.py ADDED
@@ -0,0 +1,25 @@
+ import cv2
+ import numpy as np
+ import torch
+
+ from basicsr.archs.rrdbnet_arch import RRDBNet
+
+
+ def init_sr_model(model_path):
+     # Build an RRDBNet (ESRGAN-style) generator and load the checkpoint's 'params' weights onto the GPU.
+     model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32)
+     model.load_state_dict(torch.load(model_path)['params'], strict=True)
+     model.eval()
+     model = model.cuda()
+     return model
+
+
+ def enhance(model, image):
+     # BGR uint8 image -> RGB float tensor in [0, 1], NCHW, on the GPU.
+     img = image.astype(np.float32) / 255.
+     img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
+     img = img.unsqueeze(0).cuda()
+     with torch.no_grad():
+         output = model(img)
+     # Back to BGR uint8 HWC for OpenCV.
+     output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
+     output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))
+     output = (output * 255.0).round().astype(np.uint8)
+     return output
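
A minimal usage sketch for apply_sr, assuming a CUDA GPU, an ESRGAN-style checkpoint saved with a 'params' key, and an input frame on disk (the file paths below are hypothetical):

import cv2
from basicsr.apply_sr import init_sr_model, enhance

model = init_sr_model("checkpoints/esrgan.pth")  # hypothetical checkpoint path
frame = cv2.imread("frame.png")                  # BGR uint8 frame, as enhance() expects
restored = enhance(model, frame)                 # BGR uint8 output at the network's upscaled resolution
cv2.imwrite("frame_sr.png", restored)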
basicsr/archs/__init__.py ADDED
@@ -0,0 +1,25 @@
+ import importlib
+ from copy import deepcopy
+ from os import path as osp
+
+ from basicsr.utils import get_root_logger, scandir
+ from basicsr.utils.registry import ARCH_REGISTRY
+
+ __all__ = ['build_network']
+
+ # automatically scan and import arch modules for registry
+ # scan all the files under the 'archs' folder and collect files ending with
+ # '_arch.py'
+ arch_folder = osp.dirname(osp.abspath(__file__))
+ arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
+ # import all the arch modules
+ _arch_modules = [importlib.import_module(f'basicsr.archs.{file_name}') for file_name in arch_filenames]
+
+
+ def build_network(opt):
+     opt = deepcopy(opt)
+     network_type = opt.pop('type')
+     net = ARCH_REGISTRY.get(network_type)(**opt)
+     logger = get_root_logger()
+     logger.info(f'Network [{net.__class__.__name__}] is created.')
+     return net
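
A short sketch of build_network() with an options dict mirroring the RRDBNet settings used in basicsr/apply_sr.py; it assumes RRDBNet is registered in ARCH_REGISTRY by rrdbnet_arch.py, as in upstream BasicSR:

from basicsr.archs import build_network

opt = {
    'type': 'RRDBNet',   # must match a class name registered in ARCH_REGISTRY
    'num_in_ch': 3,
    'num_out_ch': 3,
    'num_feat': 64,
    'num_block': 23,
    'num_grow_ch': 32,
}
net = build_network(opt)  # pops 'type', instantiates the registered class, and logs its name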
basicsr/archs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB).
basicsr/archs/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (1.17 kB).
basicsr/archs/__pycache__/arch_util.cpython-310.pyc ADDED
Binary file (10.8 kB).
basicsr/archs/__pycache__/arch_util.cpython-38.pyc ADDED
Binary file (10.8 kB).
basicsr/archs/__pycache__/basicvsr_arch.cpython-310.pyc ADDED
Binary file (10.2 kB).
basicsr/archs/__pycache__/basicvsr_arch.cpython-38.pyc ADDED
Binary file (10.4 kB).
basicsr/archs/__pycache__/basicvsrpp_arch.cpython-310.pyc ADDED
Binary file (13 kB).
basicsr/archs/__pycache__/basicvsrpp_arch.cpython-38.pyc ADDED
Binary file (13.1 kB).
basicsr/archs/__pycache__/codeformer_arch.cpython-38.pyc ADDED
Binary file (9.24 kB).
basicsr/archs/__pycache__/dfdnet_arch.cpython-310.pyc ADDED
Binary file (5.43 kB).
basicsr/archs/__pycache__/dfdnet_arch.cpython-38.pyc ADDED
Binary file (5.44 kB).
basicsr/archs/__pycache__/dfdnet_util.cpython-310.pyc ADDED
Binary file (5.49 kB).
basicsr/archs/__pycache__/dfdnet_util.cpython-38.pyc ADDED
Binary file (5.6 kB).
basicsr/archs/__pycache__/discriminator_arch.cpython-310.pyc ADDED
Binary file (4.93 kB).
basicsr/archs/__pycache__/discriminator_arch.cpython-38.pyc ADDED
Binary file (4.98 kB).
basicsr/archs/__pycache__/duf_arch.cpython-310.pyc ADDED
Binary file (9.23 kB).
basicsr/archs/__pycache__/duf_arch.cpython-38.pyc ADDED
Binary file (9.39 kB).
basicsr/archs/__pycache__/ecbsr_arch.cpython-310.pyc ADDED
Binary file (8.37 kB).
basicsr/archs/__pycache__/ecbsr_arch.cpython-38.pyc ADDED
Binary file (8.39 kB).
basicsr/archs/__pycache__/edsr_arch.cpython-310.pyc ADDED
Binary file (2.31 kB).
basicsr/archs/__pycache__/edsr_arch.cpython-38.pyc ADDED
Binary file (2.33 kB).
basicsr/archs/__pycache__/edvr_arch.cpython-310.pyc ADDED
Binary file (11.3 kB).
basicsr/archs/__pycache__/edvr_arch.cpython-38.pyc ADDED
Binary file (11.4 kB).
basicsr/archs/__pycache__/hifacegan_arch.cpython-310.pyc ADDED
Binary file (7.58 kB).
basicsr/archs/__pycache__/hifacegan_arch.cpython-38.pyc ADDED
Binary file (7.59 kB).
basicsr/archs/__pycache__/hifacegan_util.cpython-310.pyc ADDED
Binary file (8.49 kB).
basicsr/archs/__pycache__/hifacegan_util.cpython-38.pyc ADDED
Binary file (8.5 kB).
basicsr/archs/__pycache__/rcan_arch.cpython-310.pyc ADDED
Binary file (4.96 kB).
basicsr/archs/__pycache__/rcan_arch.cpython-38.pyc ADDED
Binary file (5.04 kB).
basicsr/archs/__pycache__/ridnet_arch.cpython-310.pyc ADDED
Binary file (6.59 kB).
basicsr/archs/__pycache__/ridnet_arch.cpython-38.pyc ADDED
Binary file (6.67 kB).
basicsr/archs/__pycache__/rrdbnet_arch.cpython-310.pyc ADDED
Binary file (4.43 kB).
basicsr/archs/__pycache__/rrdbnet_arch.cpython-38.pyc ADDED
Binary file (4.46 kB).
basicsr/archs/__pycache__/spynet_arch.cpython-310.pyc ADDED
Binary file (3.89 kB).
basicsr/archs/__pycache__/spynet_arch.cpython-38.pyc ADDED
Binary file (3.92 kB).