diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..26d33521af10bcc7fd8cea344038eaaeb78d0ef5
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/.idea/VITS_voice_conversion.iml b/.idea/VITS_voice_conversion.iml
new file mode 100644
index 0000000000000000000000000000000000000000..e3dac1e0a1e0831fd1f305669f5e907879ac17f6
--- /dev/null
+++ b/.idea/VITS_voice_conversion.iml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000000000000000000000000000000000000..7390b08f5fdd07c99617161a16a4db7689051383
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,154 @@
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000000000000000000000000000000000000..d6e0253c83b838ea8848a31028184e828bfa536b
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000000000000000000000000000000000000..9238ba4f29887a444a9d60b47dca55b65bc016d6
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000000000000000000000000000000000000..94a25f7f4cb416c083d265558da75d457237d671
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/DATA.MD b/DATA.MD
new file mode 100644
index 0000000000000000000000000000000000000000..ae8c4e9c43009d550a3adba317ccb30b97094739
--- /dev/null
+++ b/DATA.MD
@@ -0,0 +1,42 @@
+The pipeline of this repo supports several ways of uploading voice samples; simply choose one or more of them, depending on the data you have.
+
+1. Short audio clips organized by character name and packed into a single `.zip` file, whose structure should look like this:
+```
+Your-zip-file.zip
+├───Character_name_1
+├ ├───xxx.wav
+├ ├───...
+├ ├───yyy.mp3
+├ └───zzz.wav
+├───Character_name_2
+├ ├───xxx.wav
+├ ├───...
+├ ├───yyy.mp3
+├ └───zzz.wav
+├───...
+├
+└───Character_name_n
+ ├───xxx.wav
+ ├───...
+ ├───yyy.mp3
+ └───zzz.wav
+```
+Note that the format and names of the audio files do not matter, as long as they are audio files.
+Quality requirement: 2 to 10 seconds per clip, with as little background noise as possible.
+Quantity requirement: at least 10 clips per character; 20+ clips per character are recommended.
+2. Long audio files named after the character, each containing a single speaker only; background sound will be removed automatically. The naming format is `{CharacterName}_{random_number}.wav`
+(e.g. `Diana_234135.wav`, `MinatoAqua_234252.wav`). Files must be `.wav` and no longer than 20 minutes (otherwise you will run out of memory).
+
+3. Long video files named after the character, each containing a single speaker only; background sound will be removed automatically. The naming format is `{CharacterName}_{random_number}.mp4`
+(e.g. `Taffy_332452.mp4`, `Dingzhen_957315.mp4`). Files must be `.mp4` and no longer than 20 minutes (otherwise you will run out of memory).
+Note: `CharacterName` must consist of English characters only; `random_number` distinguishes multiple files of the same character, is required, and can be any integer between 0 and 999999 (a quick filename check is sketched at the end of this document).
+
+4. A `.txt` file containing multiple lines of `{CharacterName}|{video_url}`, formatted as follows:
+```
+Char1|https://xyz.com/video1/
+Char2|https://xyz.com/video2/
+Char2|https://xyz.com/video3/
+Char3|https://xyz.com/video4/
+```
+Each video must contain a single speaker only; background sound will be removed automatically. Currently only videos from bilibili are supported; URLs from other websites have not been tested.
+If you have any questions about the formats, sample data for every format can be found [here](https://drive.google.com/file/d/132l97zjanpoPY4daLgqXoM7HKXPRbS84/view?usp=sharing).
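+
+If you are unsure whether your long audio/video files follow the naming convention of options 2 and 3, the minimal Python sketch below (purely illustrative, not part of the pipeline; `./upload` is an assumed folder name) checks the file names before uploading:
+```python
+import re
+from pathlib import Path
+
+# Matches {CharacterName}_{random_number}.wav / .mp4 as described in options 2 and 3.
+NAME_PATTERN = re.compile(r"^[A-Za-z]+_\d{1,6}\.(wav|mp4)$")
+
+def check_names(folder: str) -> None:
+    for path in sorted(Path(folder).iterdir()):
+        if path.suffix.lower() in (".wav", ".mp4"):
+            status = "ok" if NAME_PATTERN.match(path.name) else "BAD NAME"
+            print(f"{path.name}: {status}")
+
+check_names("./upload")  # adjust to the folder you are about to upload
+```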
diff --git a/DATA_EN.MD b/DATA_EN.MD
new file mode 100644
index 0000000000000000000000000000000000000000..2e861b66c3e91cfacc4391b607e6b49328a54ed4
--- /dev/null
+++ b/DATA_EN.MD
@@ -0,0 +1,46 @@
+The pipeline of this repo supports multiple voice uploading options; you can choose one or more of them, depending on the data you have.
+
+1. Short audio clips organized by character name and packed into a single `.zip` file, whose file structure should be as shown below:
+```
+Your-zip-file.zip
+├───Character_name_1
+├ ├───xxx.wav
+├ ├───...
+├ ├───yyy.mp3
+├ └───zzz.wav
+├───Character_name_2
+├ ├───xxx.wav
+├ ├───...
+├ ├───yyy.mp3
+├ └───zzz.wav
+├───...
+├
+└───Character_name_n
+ ├───xxx.wav
+ ├───...
+ ├───yyy.mp3
+ └───zzz.wav
+```
+Note that the format and names of the audio files do not matter, as long as they are audio files.
+Quality requirement: >= 2 s and <= 10 s per clip, with as little background noise as possible.
+Quantity requirement: at least 10 clips per character; 20+ clips per character are recommended.
+2. Long audio files named after the character, each containing a single speaker only. Background sound is
+acceptable, since it will be removed automatically. The file name format is `{CharacterName}_{random_number}.wav`
+(e.g. `Diana_234135.wav`, `MinatoAqua_234252.wav`); files must be `.wav` and no longer than 20 minutes (otherwise you may run out of memory).
+
+
+3. Long video files named after the character, each containing a single speaker only. Background sound is
+acceptable, since it will be removed automatically. The file name format is `{CharacterName}_{random_number}.mp4`
+(e.g. `Taffy_332452.mp4`, `Dingzhen_957315.mp4`); files must be `.mp4` and no longer than 20 minutes (otherwise you may run out of memory).
+Note: `CharacterName` must consist of English characters only; `random_number` distinguishes multiple files of the same character,
+is required, and can be any integer between 0 and 999999.
+
+4. A `.txt` file containing multiple lines of `{CharacterName}|{video_url}`, formatted as follows:
+```
+Char1|https://xyz.com/video1/
+Char2|https://xyz.com/video2/
+Char2|https://xyz.com/video3/
+Char3|https://xyz.com/video4/
+```
+Each video should contain a single speaker only. Currently only video links from bilibili are supported; links from other websites have not been tested yet.
+Have questions about the data format? You can find data samples for every format [here](https://drive.google.com/file/d/132l97zjanpoPY4daLgqXoM7HKXPRbS84/view?usp=sharing).
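+
+If you build the `.txt` file of option 4 programmatically, the following minimal sketch (purely illustrative, not part of the pipeline; `video_list.txt` is an assumed file name) shows how such a file can be parsed back into `(character, url)` pairs:
+```python
+# Parse a CharacterName|video_url list as described in option 4.
+def parse_video_list(path: str):
+    pairs = []
+    with open(path, encoding="utf-8") as f:
+        for line in f:
+            line = line.strip()
+            if not line:
+                continue
+            name, url = line.split("|", 1)
+            pairs.append((name, url))
+    return pairs
+
+for name, url in parse_video_list("video_list.txt"):
+    print(name, url)
+```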
diff --git a/G_latest.pth b/G_latest.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3f5920c82f6c4d61369378e7095f5e849d9a5e8e
--- /dev/null
+++ b/G_latest.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cd6357e9e860ce69b2ad1ba09271eadc098a279b37c91868b2f348d2cffff7f
+size 158884841
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index abf5b089fac32f7139762d4839d6e318d79f68d3..9e1e1985cb4a82091ad1006650f9f42b9098255b 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,58 @@
----
-title: Vits
-emoji: 🦀
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.28.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+For the Chinese documentation, please click [here](https://github.com/Plachtaa/VITS-fast-fine-tuning/blob/main/README_ZH.md).
+# VITS Fast Fine-tuning
+This repo will guide you through adding your own character voices, or even your own voice, to an existing VITS TTS model,
+so that in less than 1 hour it can perform the following tasks:
+
+1. Many-to-many voice conversion between any characters you added & preset characters in the model.
+2. English, Japanese & Chinese text-to-speech synthesis with the characters you added & preset characters.
+
+
+Feel free to play around with the base models!
+Chinese, English & Japanese: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer) Author: me
+
+Chinese & Japanese: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/sayashi/vits-uma-genshin-honkai) Author: [SayaSS](https://github.com/SayaSS)
+
+
+### Currently Supported Tasks:
+- [x] Clone a character's voice from 10+ short audio clips
+- [x] Clone a character's voice from long audio(s) >= 3 minutes (each audio file should contain a single speaker only)
+- [x] Clone a character's voice from video(s) >= 3 minutes (each video should contain a single speaker only)
+- [x] Clone a character's voice from bilibili video links (each video should contain a single speaker only)
+
+### Currently Supported Characters for TTS & VC:
+- [x] Any character you wish as long as you have their voices!
+(Note that voice conversion can only be conducted between any two speakers in the model)
+
+
+
+## Fine-tuning
+It's recommended to perform fine-tuning on [Google Colab](https://colab.research.google.com/drive/1pn1xnFfdLK63gVXDwV4zCXfVeo8c-I-0?usp=sharing)
+because the original VITS has some dependencies that are difficult to configure.
+
+### How long does it take?
+1. Install dependencies (3 min)
+2. Choose a pretrained model to start from. The detailed differences between them are described in the [Colab Notebook](https://colab.research.google.com/drive/1pn1xnFfdLK63gVXDwV4zCXfVeo8c-I-0?usp=sharing).
+3. Upload the voice samples of the characters you wish to add; see [DATA.MD](https://github.com/Plachtaa/VITS-fast-fine-tuning/blob/main/DATA_EN.MD) for detailed uploading options.
+4. Start fine-tuning. The time taken varies from 20 minutes to 2 hours, depending on the amount of voice data you uploaded.
+
+
+## Inference or Usage (Currently supports Windows only)
+0. Remember to download your fine-tuned model!
+1. Download the latest release.
+2. Put your model & config file, named `G_latest.pth` and `finetune_speaker.json` respectively, into the `inference` folder.
+3. The file structure should be as follows:
+```
+inference
+├───inference.exe
+├───...
+├───finetune_speaker.json
+└───G_latest.pth
+```
+4. Run `inference.exe`; the browser should pop up automatically.
+
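+If you prefer not to use the packaged executable (e.g. on Linux/macOS), you can also run `python VC_inference.py --model_dir ./G_latest.pth --config_dir ./finetune_speaker.json` from the repo root. As a quick sanity check of your downloaded config, the minimal sketch below (assuming the `utils` module from this repo and the file layout shown above) lists the speakers available in your fine-tuned model:
+```python
+import utils  # utils.py from this repo
+
+# Load the fine-tuned config and print the speakers available for TTS & voice conversion.
+hps = utils.get_hparams_from_file("./inference/finetune_speaker.json")
+print("Available speakers:", list(hps.speakers.keys()))
+```
+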
+## Use in MoeGoe
+0. Prepare the downloaded model & config file, named `G_latest.pth` and `moegoe_config.json` respectively.
+1. Follow the instructions on the [MoeGoe](https://github.com/CjangCjengh/MoeGoe) page to install it, configure the paths, and use it.
+
+## Looking for help?
+If you have any questions, please feel free to open an [issue](https://github.com/Plachtaa/VITS-fast-fine-tuning/issues/new) or join our [Discord](https://discord.gg/TcrjDFvm5A) server.
\ No newline at end of file
diff --git a/README_ZH.md b/README_ZH.md
new file mode 100644
index 0000000000000000000000000000000000000000..4d41305f80331b6cf2de4724a19be6c833e6de17
--- /dev/null
+++ b/README_ZH.md
@@ -0,0 +1,62 @@
+English Documentation Please Click [here](https://github.com/Plachtaa/VITS-fast-fine-tuning/blob/main/README.md)
+# VITS Fast Fine-tuning
+This repo will guide you through adding custom characters (or even your own voice) to a pretrained VITS model; less than 1 hour of fine-tuning gives the model the following capabilities:
+1. Voice conversion between any two characters contained in the model.
+2. Chinese, Japanese & English text-to-speech synthesis with the voices of the characters you added.
+
+The base models used in this project cover common anime-style male/female voices (from the Genshin Impact dataset) as well as common real-world male/female voices (from the VCTK dataset). They support Chinese, Japanese and English, which ensures that they adapt quickly to new voices during fine-tuning.
+
+Feel free to try out the base models used for fine-tuning!
+
+Chinese, Japanese & English: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer) Author: me
+
+Chinese & Japanese: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/sayashi/vits-uma-genshin-honkai) Author: [SayaSS](https://github.com/SayaSS)
+
+### Currently supported tasks:
+- [x] Clone a character's voice from 10+ short audio clips
+- [x] Clone a character's voice from long audio(s) >= 3 minutes (each audio file must contain a single speaker only)
+- [x] Clone a character's voice from video(s) >= 3 minutes (each video must contain a single speaker only)
+- [x] Clone a character's voice from bilibili video links (each video must contain a single speaker only)
+
+### Characters currently supported for voice conversion and Chinese/Japanese/English TTS
+- [x] Any character, as long as you have samples of their voice!
+(Note: voice conversion can only be performed between any two speakers that exist in the model)
+
+
+
+
+## Fine-tuning
+It is recommended to fine-tune on [Google Colab](https://colab.research.google.com/drive/1pn1xnFfdLK63gVXDwV4zCXfVeo8c-I-0?usp=sharing),
+because some of VITS's dependencies are quite difficult to configure in a multilingual setup.
+### How long does it take on Google Colab?
+1. Install dependencies (3 min)
+2. Choose a pretrained model; the detailed differences are described on the [Colab notebook page](https://colab.research.google.com/drive/1pn1xnFfdLK63gVXDwV4zCXfVeo8c-I-0?usp=sharing).
+3. Upload the voice samples of the characters you wish to add; see [DATA.MD](https://github.com/Plachtaa/VITS-fast-fine-tuning/blob/main/DATA.MD) for detailed uploading options.
+4. Start fine-tuning. Depending on the fine-tuning option and the number of samples, it takes anywhere from 20 minutes to 2 hours.
+
+After fine-tuning finishes, you can directly download the fine-tuned model and run it locally later (no GPU required).
+
+## Local inference
+0. Remember to download your fine-tuned model and config file!
+1. Download the latest release (on the right-hand side of the GitHub page).
+2. Put the downloaded model and config file, named `G_latest.pth` and `finetune_speaker.json` respectively, into the `inference` folder.
+3. Once everything is ready, the file structure should look like this:
+```
+inference
+├───inference.exe
+├───...
+├───finetune_speaker.json
+└───G_latest.pth
+```
+4. Run `inference.exe`; a browser window will pop up automatically. Note that its path must not contain Chinese characters or spaces.
+
+## Use in MoeGoe
+0. MoeGoe and other similar VITS inference UIs use a slightly different config format; the files to download are the model `G_latest.pth` and the config file `moegoe_config.json`.
+1. Follow the instructions on the [MoeGoe](https://github.com/CjangCjengh/MoeGoe) page to configure the paths, then it is ready to use.
+2. When entering sentences in MoeGoe, wrap them with the corresponding language tags so that they synthesize correctly ([JA] for Japanese, [ZH] for Chinese, [EN] for English), for example:
+[JA]こんにちわ。[JA]
+[ZH]你好![ZH]
+[EN]Hello![EN]
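+
+This wrapping is exactly what `VC_inference.py` in this repo does internally through its `language_marks` dictionary; a minimal illustrative sketch:
+```python
+# Mirrors the language_marks logic in VC_inference.py: wrap the text with its language tag.
+language_marks = {"日本語": "[JA]", "简体中文": "[ZH]", "English": "[EN]"}
+
+def wrap(text: str, language: str) -> str:
+    mark = language_marks[language]
+    return mark + text + mark
+
+print(wrap("你好!", "简体中文"))  # -> [ZH]你好![ZH]
+```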
+
+## Looking for help?
+If you run into any problems while using this project, you can open an issue [here](https://github.com/Plachtaa/VITS-fast-fine-tuning/issues/new) or join our [Discord](https://discord.gg/TcrjDFvm5A) server to ask for help.
\ No newline at end of file
diff --git a/VC_inference.py b/VC_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..33c7c6df274913dc32c3596e184a0dc3ce1e8c26
--- /dev/null
+++ b/VC_inference.py
@@ -0,0 +1,146 @@
+import os
+import numpy as np
+import torch
+from torch import no_grad, LongTensor
+import argparse
+import commons
+from mel_processing import spectrogram_torch
+import utils
+from models import SynthesizerTrn
+import gradio as gr
+import librosa
+import webbrowser
+
+from text import text_to_sequence, _clean_text
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+import logging
+logging.getLogger("PIL").setLevel(logging.WARNING)
+logging.getLogger("urllib3").setLevel(logging.WARNING)
+logging.getLogger("markdown_it").setLevel(logging.WARNING)
+logging.getLogger("httpx").setLevel(logging.WARNING)
+logging.getLogger("asyncio").setLevel(logging.WARNING)
+
+language_marks = {
+ "Japanese": "",
+ "日本語": "[JA]",
+ "简体中文": "[ZH]",
+ "English": "[EN]",
+ "Mix": "",
+}
+lang = ['日本語', '简体中文', 'English', 'Mix']
+def get_text(text, hps, is_symbol):
+ text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
+ if hps.data.add_blank:
+ text_norm = commons.intersperse(text_norm, 0)
+ text_norm = LongTensor(text_norm)
+ return text_norm
+
+def create_tts_fn(model, hps, speaker_ids):
+ def tts_fn(text, speaker, language, speed):
+ if language is not None:
+ text = language_marks[language] + text + language_marks[language]
+ speaker_id = speaker_ids[speaker]
+ stn_tst = get_text(text, hps, False)
+ with no_grad():
+ x_tst = stn_tst.unsqueeze(0).to(device)
+ x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
+ sid = LongTensor([speaker_id]).to(device)
+ audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8,
+ length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy()
+ del stn_tst, x_tst, x_tst_lengths, sid
+ return "Success", (hps.data.sampling_rate, audio)
+
+ return tts_fn
+
+def create_vc_fn(model, hps, speaker_ids):
+ def vc_fn(original_speaker, target_speaker, record_audio, upload_audio):
+ input_audio = record_audio if record_audio is not None else upload_audio
+ if input_audio is None:
+ return "You need to record or upload an audio", None
+ sampling_rate, audio = input_audio
+ original_speaker_id = speaker_ids[original_speaker]
+ target_speaker_id = speaker_ids[target_speaker]
+
+ audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
+ if len(audio.shape) > 1:
+ audio = librosa.to_mono(audio.transpose(1, 0))
+ if sampling_rate != hps.data.sampling_rate:
+ audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate)
+ with no_grad():
+ y = torch.FloatTensor(audio)
+ y = y / max(-y.min(), y.max()) / 0.99
+ y = y.to(device)
+ y = y.unsqueeze(0)
+ spec = spectrogram_torch(y, hps.data.filter_length,
+ hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
+ center=False).to(device)
+ spec_lengths = LongTensor([spec.size(-1)]).to(device)
+ sid_src = LongTensor([original_speaker_id]).to(device)
+ sid_tgt = LongTensor([target_speaker_id]).to(device)
+ audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][
+ 0, 0].data.cpu().float().numpy()
+ del y, spec, spec_lengths, sid_src, sid_tgt
+ return "Success", (hps.data.sampling_rate, audio)
+
+ return vc_fn
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model_dir", default="./G_latest.pth", help="directory to your fine-tuned model")
+ parser.add_argument("--config_dir", default="./finetune_speaker.json", help="directory to your model config file")
+ parser.add_argument("--share", default=True, help="make link public (used in colab)")
+
+ args = parser.parse_args()
+ hps = utils.get_hparams_from_file(args.config_dir)
+
+
+ net_g = SynthesizerTrn(
+ len(hps.symbols),
+ hps.data.filter_length // 2 + 1,
+ hps.train.segment_size // hps.data.hop_length,
+ n_speakers=hps.data.n_speakers,
+ **hps.model).to(device)
+ _ = net_g.eval()
+
+ _ = utils.load_checkpoint(args.model_dir, net_g, None)
+ speaker_ids = hps.speakers
+ speakers = list(hps.speakers.keys())
+ tts_fn = create_tts_fn(net_g, hps, speaker_ids)
+ vc_fn = create_vc_fn(net_g, hps, speaker_ids)
+ app = gr.Blocks()
+ with app:
+ with gr.Tab("Text-to-Speech"):
+ with gr.Row():
+ with gr.Column():
+ textbox = gr.TextArea(label="Text",
+ placeholder="Type your sentence here",
+ value="", elem_id=f"tts-input")
+ # select character
+ char_dropdown = gr.Dropdown(choices=speakers, value=speakers[0], label='character')
+ language_dropdown = gr.Dropdown(choices=lang, value=lang[1], label='language')
+ duration_slider = gr.Slider(minimum=0.1, maximum=5, value=1, step=0.1,
+ label='速度 Speed')
+ with gr.Column():
+ text_output = gr.Textbox(label="Message")
+ audio_output = gr.Audio(label="Output Audio", elem_id="tts-audio")
+ btn = gr.Button("Generate!")
+ btn.click(tts_fn,
+ inputs=[textbox, char_dropdown, language_dropdown, duration_slider,],
+ outputs=[text_output, audio_output],api_name="voice")
+ with gr.Tab("Voice Conversion"):
+ gr.Markdown("""
+                    Record or upload your voice, then choose the target voice to convert to.
+ """)
+ with gr.Column():
+ record_audio = gr.Audio(label="record your voice", source="microphone")
+ upload_audio = gr.Audio(label="or upload audio here", source="upload")
+ source_speaker = gr.Dropdown(choices=speakers, value=speakers[0], label="source speaker")
+ target_speaker = gr.Dropdown(choices=speakers, value=speakers[0], label="target speaker")
+ with gr.Column():
+ message_box = gr.Textbox(label="Message")
+ converted_audio = gr.Audio(label='converted audio')
+ btn = gr.Button("Convert!")
+ btn.click(vc_fn, inputs=[source_speaker, target_speaker, record_audio, upload_audio],
+ outputs=[message_box, converted_audio])
+ webbrowser.open("http://127.0.0.1:7860")
+ app.launch(share=args.share,show_api=True)
+
diff --git a/__pycache__/attentions.cpython-310.pyc b/__pycache__/attentions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de1ac3754032ce9c22c018765a715c4d217c42e9
Binary files /dev/null and b/__pycache__/attentions.cpython-310.pyc differ
diff --git a/__pycache__/commons.cpython-310.pyc b/__pycache__/commons.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6df74db8266813057b8692b6c0f57a08f3cd80d7
Binary files /dev/null and b/__pycache__/commons.cpython-310.pyc differ
diff --git a/__pycache__/mel_processing.cpython-310.pyc b/__pycache__/mel_processing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a95cecc5c762c4420dd75f53eb0a84d2cba2590e
Binary files /dev/null and b/__pycache__/mel_processing.cpython-310.pyc differ
diff --git a/__pycache__/models.cpython-310.pyc b/__pycache__/models.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e50ae9e84ce2f6f75837696f7bdfd4ac707f316
Binary files /dev/null and b/__pycache__/models.cpython-310.pyc differ
diff --git a/__pycache__/modules.cpython-310.pyc b/__pycache__/modules.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c510a13de6f92125165242cedc6c50ceb7680754
Binary files /dev/null and b/__pycache__/modules.cpython-310.pyc differ
diff --git a/__pycache__/transforms.cpython-310.pyc b/__pycache__/transforms.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..508bd23bc647af3ca6f789d5f410fcb3c3418d60
Binary files /dev/null and b/__pycache__/transforms.cpython-310.pyc differ
diff --git a/__pycache__/utils.cpython-310.pyc b/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db7eed21876fc239059ecd7fe1644edbab8b09e8
Binary files /dev/null and b/__pycache__/utils.cpython-310.pyc differ
diff --git a/attentions.py b/attentions.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e0b0c1fd48c962e21e1fbe60b23fc574927435c
--- /dev/null
+++ b/attentions.py
@@ -0,0 +1,303 @@
+import copy
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import commons
+import modules
+from modules import LayerNorm
+
+
+class Encoder(nn.Module):
+ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+
+ self.drop = nn.Dropout(p_dropout)
+ self.attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask):
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.attn_layers[i](x, x, attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class Decoder(nn.Module):
+ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+
+ self.drop = nn.Dropout(p_dropout)
+ self.self_attn_layers = nn.ModuleList()
+ self.norm_layers_0 = nn.ModuleList()
+ self.encdec_attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
+ self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask, h, h_mask):
+ """
+ x: decoder input
+ h: encoder output
+ """
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_0[i](x + y)
+
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class MultiHeadAttention(nn.Module):
+ def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
+ super().__init__()
+ assert channels % n_heads == 0
+
+ self.channels = channels
+ self.out_channels = out_channels
+ self.n_heads = n_heads
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+ self.heads_share = heads_share
+ self.block_length = block_length
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+ self.attn = None
+
+ self.k_channels = channels // n_heads
+ self.conv_q = nn.Conv1d(channels, channels, 1)
+ self.conv_k = nn.Conv1d(channels, channels, 1)
+ self.conv_v = nn.Conv1d(channels, channels, 1)
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
+ self.drop = nn.Dropout(p_dropout)
+
+ if window_size is not None:
+ n_heads_rel = 1 if heads_share else n_heads
+ rel_stddev = self.k_channels**-0.5
+ self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+ self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+
+ nn.init.xavier_uniform_(self.conv_q.weight)
+ nn.init.xavier_uniform_(self.conv_k.weight)
+ nn.init.xavier_uniform_(self.conv_v.weight)
+ if proximal_init:
+ with torch.no_grad():
+ self.conv_k.weight.copy_(self.conv_q.weight)
+ self.conv_k.bias.copy_(self.conv_q.bias)
+
+ def forward(self, x, c, attn_mask=None):
+ q = self.conv_q(x)
+ k = self.conv_k(c)
+ v = self.conv_v(c)
+
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+ x = self.conv_o(x)
+ return x
+
+ def attention(self, query, key, value, mask=None):
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
+ b, d, t_s, t_t = (*key.size(), query.size(2))
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+ if self.window_size is not None:
+ assert t_s == t_t, "Relative attention is only available for self-attention."
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+ rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
+ scores = scores + scores_local
+ if self.proximal_bias:
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
+ if mask is not None:
+ scores = scores.masked_fill(mask == 0, -1e4)
+ if self.block_length is not None:
+ assert t_s == t_t, "Local attention is only available for self-attention."
+ block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
+ scores = scores.masked_fill(block_mask == 0, -1e4)
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
+ p_attn = self.drop(p_attn)
+ output = torch.matmul(p_attn, value)
+ if self.window_size is not None:
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
+ value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
+ output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
+ output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
+ return output, p_attn
+
+ def _matmul_with_relative_values(self, x, y):
+ """
+ x: [b, h, l, m]
+ y: [h or 1, m, d]
+ ret: [b, h, l, d]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0))
+ return ret
+
+ def _matmul_with_relative_keys(self, x, y):
+ """
+ x: [b, h, l, d]
+ y: [h or 1, m, d]
+ ret: [b, h, l, m]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+ return ret
+
+ def _get_relative_embeddings(self, relative_embeddings, length):
+ max_relative_position = 2 * self.window_size + 1
+ # Pad first before slice to avoid using cond ops.
+ pad_length = max(length - (self.window_size + 1), 0)
+ slice_start_position = max((self.window_size + 1) - length, 0)
+ slice_end_position = slice_start_position + 2 * length - 1
+ if pad_length > 0:
+ padded_relative_embeddings = F.pad(
+ relative_embeddings,
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
+ else:
+ padded_relative_embeddings = relative_embeddings
+ used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
+ return used_relative_embeddings
+
+ def _relative_position_to_absolute_position(self, x):
+ """
+ x: [b, h, l, 2*l-1]
+ ret: [b, h, l, l]
+ """
+ batch, heads, length, _ = x.size()
+ # Concat columns of pad to shift from relative to absolute indexing.
+ x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
+
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
+ x_flat = x.view([batch, heads, length * 2 * length])
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
+
+ # Reshape and slice out the padded elements.
+ x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
+ return x_final
+
+ def _absolute_position_to_relative_position(self, x):
+ """
+ x: [b, h, l, l]
+ ret: [b, h, l, 2*l-1]
+ """
+ batch, heads, length, _ = x.size()
+    # pad along column
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
+ x_flat = x.view([batch, heads, length**2 + length*(length -1)])
+ # add 0's in the beginning that will skew the elements after reshape
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+ x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
+ return x_final
+
+ def _attention_bias_proximal(self, length):
+ """Bias for self-attention to encourage attention to close positions.
+ Args:
+ length: an integer scalar.
+ Returns:
+ a Tensor with shape [1, 1, length, length]
+ """
+ r = torch.arange(length, dtype=torch.float32)
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+class FFN(nn.Module):
+ def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.activation = activation
+ self.causal = causal
+
+ if causal:
+ self.padding = self._causal_padding
+ else:
+ self.padding = self._same_padding
+
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+ self.drop = nn.Dropout(p_dropout)
+
+ def forward(self, x, x_mask):
+ x = self.conv_1(self.padding(x * x_mask))
+ if self.activation == "gelu":
+ x = x * torch.sigmoid(1.702 * x)
+ else:
+ x = torch.relu(x)
+ x = self.drop(x)
+ x = self.conv_2(self.padding(x * x_mask))
+ return x * x_mask
+
+ def _causal_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = self.kernel_size - 1
+ pad_r = 0
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
+
+ def _same_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = (self.kernel_size - 1) // 2
+ pad_r = self.kernel_size // 2
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
diff --git a/cmd_inference.py b/cmd_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfaee189e3905d5e6f0fc6c85f36fbc978cb1508
--- /dev/null
+++ b/cmd_inference.py
@@ -0,0 +1,106 @@
+"""该模块用于生成VITS文件
+使用方法
+
+python cmd_inference.py -m 模型路径 -c 配置文件路径 -o 输出文件路径 -l 输入的语言 -t 输入文本 -s 合成目标说话人名称
+
+可选参数
+-ns 感情变化程度
+-nsw 音素发音长度
+-ls 整体语速
+-on 输出文件的名称
+
+"""
+
+from pathlib import Path
+import utils
+from models import SynthesizerTrn
+import torch
+from torch import no_grad, LongTensor
+import librosa
+from text import text_to_sequence, _clean_text
+import commons
+import scipy.io.wavfile as wavf
+import os
+
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+language_marks = {
+ "Japanese": "",
+ "日本語": "[JA]",
+ "简体中文": "[ZH]",
+ "English": "[EN]",
+ "Mix": "",
+}
+
+
+def get_text(text, hps, is_symbol):
+ text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
+ if hps.data.add_blank:
+ text_norm = commons.intersperse(text_norm, 0)
+ text_norm = LongTensor(text_norm)
+ return text_norm
+
+
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser(description='vits inference')
+    # required arguments
+    parser.add_argument('-m', '--model_path', type=str, default="logs/44k/G_0.pth", help='model path')
+    parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='config file path')
+    parser.add_argument('-o', '--output_path', type=str, default="output/vits", help='output directory')
+    parser.add_argument('-l', '--language', type=str, default="日本語", help='input language')
+    parser.add_argument('-t', '--text', type=str, help='input text')
+    parser.add_argument('-s', '--spk', type=str, help='target speaker name')
+    # optional arguments
+    parser.add_argument('-on', '--output_name', type=str, default="output", help='output file name')
+    parser.add_argument('-ns', '--noise_scale', type=float, default=0.667, help='degree of emotional variation (noise scale)')
+    parser.add_argument('-nsw', '--noise_scale_w', type=float, default=0.6, help='phoneme duration variation (noise scale w)')
+    parser.add_argument('-ls', '--length_scale', type=float, default=1, help='overall speaking speed (length scale)')
+
+ args = parser.parse_args()
+
+ model_path = args.model_path
+ config_path = args.config_path
+ output_dir = Path(args.output_path)
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ language = args.language
+ text = args.text
+ spk = args.spk
+ noise_scale = args.noise_scale
+ noise_scale_w = args.noise_scale_w
+ length = args.length_scale
+ output_name = args.output_name
+
+ hps = utils.get_hparams_from_file(config_path)
+ net_g = SynthesizerTrn(
+ len(hps.symbols),
+ hps.data.filter_length // 2 + 1,
+ hps.train.segment_size // hps.data.hop_length,
+ n_speakers=hps.data.n_speakers,
+ **hps.model).to(device)
+ _ = net_g.eval()
+ _ = utils.load_checkpoint(model_path, net_g, None)
+
+ speaker_ids = hps.speakers
+
+
+ if language is not None:
+ text = language_marks[language] + text + language_marks[language]
+ speaker_id = speaker_ids[spk]
+ stn_tst = get_text(text, hps, False)
+ with no_grad():
+ x_tst = stn_tst.unsqueeze(0).to(device)
+ x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
+ sid = LongTensor([speaker_id]).to(device)
+ audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
+ length_scale=1.0 / length)[0][0, 0].data.cpu().float().numpy()
+ del stn_tst, x_tst, x_tst_lengths, sid
+
+    wavf.write(str(output_dir / f"{output_name}.wav"), hps.data.sampling_rate, audio)
+
+
+
+
\ No newline at end of file
diff --git a/commons.py b/commons.py
new file mode 100644
index 0000000000000000000000000000000000000000..db17cf0914ba6e445fe613e3ec3411b3a74b28aa
--- /dev/null
+++ b/commons.py
@@ -0,0 +1,164 @@
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+
+def init_weights(m, mean=0.0, std=0.01):
+ classname = m.__class__.__name__
+ if classname.find("Conv") != -1:
+ m.weight.data.normal_(mean, std)
+
+
+def get_padding(kernel_size, dilation=1):
+ return int((kernel_size*dilation - dilation)/2)
+
+
+def convert_pad_shape(pad_shape):
+ l = pad_shape[::-1]
+ pad_shape = [item for sublist in l for item in sublist]
+ return pad_shape
+
+
+def intersperse(lst, item):
+ result = [item] * (len(lst) * 2 + 1)
+ result[1::2] = lst
+ return result
+
+
+def kl_divergence(m_p, logs_p, m_q, logs_q):
+ """KL(P||Q)"""
+ kl = (logs_q - logs_p) - 0.5
+ kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
+ return kl
+
+
+def rand_gumbel(shape):
+ """Sample from the Gumbel distribution, protect from overflows."""
+ uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+ return -torch.log(-torch.log(uniform_samples))
+
+
+def rand_gumbel_like(x):
+ g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+ return g
+
+
+def slice_segments(x, ids_str, segment_size=4):
+ ret = torch.zeros_like(x[:, :, :segment_size])
+ for i in range(x.size(0)):
+ idx_str = ids_str[i]
+ idx_end = idx_str + segment_size
+ try:
+ ret[i] = x[i, :, idx_str:idx_end]
+ except RuntimeError:
+ print("?")
+ return ret
+
+
+def rand_slice_segments(x, x_lengths=None, segment_size=4):
+ b, d, t = x.size()
+ if x_lengths is None:
+ x_lengths = t
+ ids_str_max = x_lengths - segment_size + 1
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+ ret = slice_segments(x, ids_str, segment_size)
+ return ret, ids_str
+
+
+def get_timing_signal_1d(
+ length, channels, min_timescale=1.0, max_timescale=1.0e4):
+ position = torch.arange(length, dtype=torch.float)
+ num_timescales = channels // 2
+ log_timescale_increment = (
+ math.log(float(max_timescale) / float(min_timescale)) /
+ (num_timescales - 1))
+ inv_timescales = min_timescale * torch.exp(
+ torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
+ scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+ signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+ signal = F.pad(signal, [0, 0, 0, channels % 2])
+ signal = signal.view(1, channels, length)
+ return signal
+
+
+def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+def subsequent_mask(length):
+ mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+ return mask
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+ n_channels_int = n_channels[0]
+ in_act = input_a + input_b
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+ acts = t_act * s_act
+ return acts
+
+
+def convert_pad_shape(pad_shape):
+ l = pad_shape[::-1]
+ pad_shape = [item for sublist in l for item in sublist]
+ return pad_shape
+
+
+def shift_1d(x):
+ x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+ return x
+
+
+def sequence_mask(length, max_length=None):
+ if max_length is None:
+ max_length = length.max()
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+ return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+def generate_path(duration, mask):
+ """
+ duration: [b, 1, t_x]
+ mask: [b, 1, t_y, t_x]
+ """
+ device = duration.device
+
+ b, _, t_y, t_x = mask.shape
+ cum_duration = torch.cumsum(duration, -1)
+
+ cum_duration_flat = cum_duration.view(b * t_x)
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+ path = path.view(b, t_x, t_y)
+ path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+ path = path.unsqueeze(1).transpose(2,3) * mask
+ return path
+
+
+def clip_grad_value_(parameters, clip_value, norm_type=2):
+ if isinstance(parameters, torch.Tensor):
+ parameters = [parameters]
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
+ norm_type = float(norm_type)
+ if clip_value is not None:
+ clip_value = float(clip_value)
+
+ total_norm = 0
+ for p in parameters:
+ param_norm = p.grad.data.norm(norm_type)
+ total_norm += param_norm.item() ** norm_type
+ if clip_value is not None:
+ p.grad.data.clamp_(min=-clip_value, max=clip_value)
+ total_norm = total_norm ** (1. / norm_type)
+ return total_norm
diff --git a/configs/modified_finetune_speaker.json b/configs/modified_finetune_speaker.json
new file mode 100644
index 0000000000000000000000000000000000000000..7f0512e4a1404d50be38f634769b255602f9e257
--- /dev/null
+++ b/configs/modified_finetune_speaker.json
@@ -0,0 +1,172 @@
+{
+ "train": {
+ "log_interval": 10,
+ "eval_interval": 100,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0002,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 16,
+ "fp16_run": true,
+ "lr_decay": 0.999875,
+ "segment_size": 8192,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0
+ },
+ "data": {
+ "training_files": "final_annotation_train.txt",
+ "validation_files": "final_annotation_val.txt",
+ "text_cleaners": [
+ "chinese_cleaners"
+ ],
+ "max_wav_value": 32768.0,
+ "sampling_rate": 22050,
+ "filter_length": 1024,
+ "hop_length": 256,
+ "win_length": 1024,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": null,
+ "add_blank": true,
+ "n_speakers": 2,
+ "cleaned_text": true
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 256
+ },
+ "symbols": [
+ "_",
+ "\uff1b",
+ "\uff1a",
+ "\uff0c",
+ "\u3002",
+ "\uff01",
+ "\uff1f",
+ "-",
+ "\u201c",
+ "\u201d",
+ "\u300a",
+ "\u300b",
+ "\u3001",
+ "\uff08",
+ "\uff09",
+ "\u2026",
+ "\u2014",
+ " ",
+ "A",
+ "B",
+ "C",
+ "D",
+ "E",
+ "F",
+ "G",
+ "H",
+ "I",
+ "J",
+ "K",
+ "L",
+ "M",
+ "N",
+ "O",
+ "P",
+ "Q",
+ "R",
+ "S",
+ "T",
+ "U",
+ "V",
+ "W",
+ "X",
+ "Y",
+ "Z",
+ "a",
+ "b",
+ "c",
+ "d",
+ "e",
+ "f",
+ "g",
+ "h",
+ "i",
+ "j",
+ "k",
+ "l",
+ "m",
+ "n",
+ "o",
+ "p",
+ "q",
+ "r",
+ "s",
+ "t",
+ "u",
+ "v",
+ "w",
+ "x",
+ "y",
+ "z",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5",
+ "0",
+ "\uff22",
+ "\uff30"
+ ],
+ "speakers": {
+ "dingzhen": 0,
+ "taffy": 1
+ }
+}
\ No newline at end of file
diff --git a/configs/uma_trilingual.json b/configs/uma_trilingual.json
new file mode 100644
index 0000000000000000000000000000000000000000..7a3224630f07c7039df3e9ff789a82bf78d19a40
--- /dev/null
+++ b/configs/uma_trilingual.json
@@ -0,0 +1,54 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 1000,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 2e-4,
+ "betas": [0.8, 0.99],
+ "eps": 1e-9,
+ "batch_size": 16,
+ "fp16_run": true,
+ "lr_decay": 0.999875,
+ "segment_size": 8192,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0
+ },
+ "data": {
+ "training_files":"../CH_JA_EN_mix_voice/clipped_3_vits_trilingual_annotations.train.txt.cleaned",
+ "validation_files":"../CH_JA_EN_mix_voice/clipped_3_vits_trilingual_annotations.val.txt.cleaned",
+ "text_cleaners":["cjke_cleaners2"],
+ "max_wav_value": 32768.0,
+ "sampling_rate": 22050,
+ "filter_length": 1024,
+ "hop_length": 256,
+ "win_length": 1024,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": null,
+ "add_blank": true,
+ "n_speakers": 999,
+ "cleaned_text": true
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [3,7,11],
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+ "upsample_rates": [8,8,2,2],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [16,16,4,4],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 256
+ },
+ "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "N", "Q", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "s", "t", "u", "v", "w", "x", "y", "z", "\u0251", "\u00e6", "\u0283", "\u0291", "\u00e7", "\u026f", "\u026a", "\u0254", "\u025b", "\u0279", "\u00f0", "\u0259", "\u026b", "\u0265", "\u0278", "\u028a", "\u027e", "\u0292", "\u03b8", "\u03b2", "\u014b", "\u0266", "\u207c", "\u02b0", "`", "^", "#", "*", "=", "\u02c8", "\u02cc", "\u2192", "\u2193", "\u2191", " "]
+}
\ No newline at end of file
diff --git a/data_utils.py b/data_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3f7853d5d280e098939eec242f1acab48dbb300
--- /dev/null
+++ b/data_utils.py
@@ -0,0 +1,276 @@
+import time
+import os
+import random
+import numpy as np
+import torch
+import torch.utils.data
+import torchaudio
+
+import commons
+from mel_processing import spectrogram_torch
+from utils import load_wav_to_torch, load_filepaths_and_text
+from text import text_to_sequence, cleaned_text_to_sequence
+"""Multi speaker version"""
+
+
+class TextAudioSpeakerLoader(torch.utils.data.Dataset):
+ """
+ 1) loads audio, speaker_id, text pairs
+ 2) normalizes text and converts them to sequences of integers
+ 3) computes spectrograms from audio files.
+ """
+
+ def __init__(self, audiopaths_sid_text, hparams, symbols):
+ self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
+ self.text_cleaners = hparams.text_cleaners
+ self.max_wav_value = hparams.max_wav_value
+ self.sampling_rate = hparams.sampling_rate
+ self.filter_length = hparams.filter_length
+ self.hop_length = hparams.hop_length
+ self.win_length = hparams.win_length
+ self.sampling_rate = hparams.sampling_rate
+
+ self.cleaned_text = getattr(hparams, "cleaned_text", False)
+
+ self.add_blank = hparams.add_blank
+ self.min_text_len = getattr(hparams, "min_text_len", 1)
+ self.max_text_len = getattr(hparams, "max_text_len", 190)
+ self.symbols = symbols
+
+ random.seed(1234)
+ random.shuffle(self.audiopaths_sid_text)
+ self._filter()
+
+ def _filter(self):
+ """
+ Filter text & store spec lengths
+ """
+ # Store spectrogram lengths for Bucketing
+ # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
+ # spec_length = wav_length // hop_length
+
+ audiopaths_sid_text_new = []
+ lengths = []
+ for audiopath, sid, text in self.audiopaths_sid_text:
+ # audiopath = "./user_voice/" + audiopath
+
+            if self.min_text_len <= len(text) <= self.max_text_len:
+ audiopaths_sid_text_new.append([audiopath, sid, text])
+ lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
+ self.audiopaths_sid_text = audiopaths_sid_text_new
+ self.lengths = lengths
+
+ def get_audio_text_speaker_pair(self, audiopath_sid_text):
+ # separate filename, speaker_id and text
+ audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
+ text = self.get_text(text)
+ spec, wav = self.get_audio(audiopath)
+ sid = self.get_sid(sid)
+ return (text, spec, wav, sid)
+
+    def get_audio(self, filename):
+        # Load the waveform with torchaudio; normalize=True returns float samples in [-1, 1],
+        # so no division by max_wav_value is needed here. The file's sampling rate is assumed
+        # to already match hparams.sampling_rate.
+        audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)
+        # Compute the linear spectrogram on the fly (no .spec.pt caching).
+        spec = spectrogram_torch(audio_norm, self.filter_length,
+                                 self.sampling_rate, self.hop_length, self.win_length,
+                                 center=False)
+        spec = spec.squeeze(0)
+        return spec, audio_norm
+
+ def get_text(self, text):
+ if self.cleaned_text:
+ text_norm = cleaned_text_to_sequence(text, self.symbols)
+ else:
+ text_norm = text_to_sequence(text, self.text_cleaners)
+ if self.add_blank:
+ text_norm = commons.intersperse(text_norm, 0)
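+            # e.g. [5, 8, 2] -> [0, 5, 0, 8, 0, 2, 0]: a blank token (id 0) is placed
+            # between every symbol and at both ends, matching "add_blank": true in the configs.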
+ text_norm = torch.LongTensor(text_norm)
+ return text_norm
+
+ def get_sid(self, sid):
+ sid = torch.LongTensor([int(sid)])
+ return sid
+
+ def __getitem__(self, index):
+ return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
+
+ def __len__(self):
+ return len(self.audiopaths_sid_text)
+
+
+class TextAudioSpeakerCollate:
+ """ Zero-pads model inputs and targets
+ """
+
+ def __init__(self, return_ids=False):
+ self.return_ids = return_ids
+
+ def __call__(self, batch):
+ """Collate's training batch from normalized text, audio and speaker identities
+ PARAMS
+ ------
+ batch: [text_normalized, spec_normalized, wav_normalized, sid]
+ """
+ # Right zero-pad all one-hot text sequences to max input length
+ _, ids_sorted_decreasing = torch.sort(
+ torch.LongTensor([x[1].size(1) for x in batch]),
+ dim=0, descending=True)
+
+ max_text_len = max([len(x[0]) for x in batch])
+ max_spec_len = max([x[1].size(1) for x in batch])
+ max_wav_len = max([x[2].size(1) for x in batch])
+
+ text_lengths = torch.LongTensor(len(batch))
+ spec_lengths = torch.LongTensor(len(batch))
+ wav_lengths = torch.LongTensor(len(batch))
+ sid = torch.LongTensor(len(batch))
+
+ text_padded = torch.LongTensor(len(batch), max_text_len)
+ spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
+ wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+ text_padded.zero_()
+ spec_padded.zero_()
+ wav_padded.zero_()
+ for i in range(len(ids_sorted_decreasing)):
+ row = batch[ids_sorted_decreasing[i]]
+
+ text = row[0]
+ text_padded[i, :text.size(0)] = text
+ text_lengths[i] = text.size(0)
+
+ spec = row[1]
+ spec_padded[i, :, :spec.size(1)] = spec
+ spec_lengths[i] = spec.size(1)
+
+ wav = row[2]
+ wav_padded[i, :, :wav.size(1)] = wav
+ wav_lengths[i] = wav.size(1)
+
+ sid[i] = row[3]
+
+ if self.return_ids:
+ return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
+ return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
+
+
+class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
+ """
+ Maintain similar input lengths in a batch.
+ Length groups are specified by boundaries.
+    Ex) boundaries = [b1, b2, b3] -> each batch contains only samples from a single group, i.e. either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
+
+ It removes samples which are not included in the boundaries.
+ Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
+ """
+
+ def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
+ super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+ self.lengths = dataset.lengths
+ self.batch_size = batch_size
+ self.boundaries = boundaries
+
+ self.buckets, self.num_samples_per_bucket = self._create_buckets()
+ self.total_size = sum(self.num_samples_per_bucket)
+ self.num_samples = self.total_size // self.num_replicas
+
+ def _create_buckets(self):
+ buckets = [[] for _ in range(len(self.boundaries) - 1)]
+ for i in range(len(self.lengths)):
+ length = self.lengths[i]
+ idx_bucket = self._bisect(length)
+ if idx_bucket != -1:
+ buckets[idx_bucket].append(i)
+
+ try:
+ for i in range(len(buckets) - 1, 0, -1):
+ if len(buckets[i]) == 0:
+ buckets.pop(i)
+ self.boundaries.pop(i + 1)
+ assert all(len(bucket) > 0 for bucket in buckets)
+            # If any bucket ends up empty (e.g. very little data), fall back to pruning it below.
+ except Exception as e:
+ print('Bucket warning ', e)
+ for i in range(len(buckets) - 1, -1, -1):
+ if len(buckets[i]) == 0:
+ buckets.pop(i)
+ self.boundaries.pop(i + 1)
+
+ num_samples_per_bucket = []
+ for i in range(len(buckets)):
+ len_bucket = len(buckets[i])
+ total_batch_size = self.num_replicas * self.batch_size
+ rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
+ num_samples_per_bucket.append(len_bucket + rem)
+ return buckets, num_samples_per_bucket
+
+ def __iter__(self):
+ # deterministically shuffle based on epoch
+ g = torch.Generator()
+ g.manual_seed(self.epoch)
+
+ indices = []
+ if self.shuffle:
+ for bucket in self.buckets:
+ indices.append(torch.randperm(len(bucket), generator=g).tolist())
+ else:
+ for bucket in self.buckets:
+ indices.append(list(range(len(bucket))))
+
+ batches = []
+ for i in range(len(self.buckets)):
+ bucket = self.buckets[i]
+ len_bucket = len(bucket)
+ ids_bucket = indices[i]
+ num_samples_bucket = self.num_samples_per_bucket[i]
+
+ # add extra samples to make it evenly divisible
+ rem = num_samples_bucket - len_bucket
+ ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
+
+ # subsample
+ ids_bucket = ids_bucket[self.rank::self.num_replicas]
+
+ # batching
+ for j in range(len(ids_bucket) // self.batch_size):
+ batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
+ batches.append(batch)
+
+ if self.shuffle:
+ batch_ids = torch.randperm(len(batches), generator=g).tolist()
+ batches = [batches[i] for i in batch_ids]
+ self.batches = batches
+
+ assert len(self.batches) * self.batch_size == self.num_samples
+ return iter(self.batches)
+
+ def _bisect(self, x, lo=0, hi=None):
+ if hi is None:
+ hi = len(self.boundaries) - 1
+
+ if hi > lo:
+ mid = (hi + lo) // 2
+ if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
+ return mid
+ elif x <= self.boundaries[mid]:
+ return self._bisect(x, lo, mid)
+ else:
+ return self._bisect(x, mid + 1, hi)
+ else:
+ return -1
+
+ def __len__(self):
+ return self.num_samples // self.batch_size
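+
+
+# Hypothetical usage sketch (mirrors how finetune_speaker_v2.py wires these classes together):
+#   dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data, hps['symbols'])
+#   sampler = DistributedBucketSampler(dataset, hps.train.batch_size,
+#                                      [32, 300, 400, 500, 600, 700, 800, 900, 1000],
+#                                      num_replicas=n_gpus, rank=rank, shuffle=True)
+#   loader = DataLoader(dataset, num_workers=2, shuffle=False, pin_memory=True,
+#                       collate_fn=TextAudioSpeakerCollate(), batch_sampler=sampler)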
diff --git a/finetune_speaker.json b/finetune_speaker.json
new file mode 100644
index 0000000000000000000000000000000000000000..68592f8fc2e6c0ee003c808de509acf7b787e0d0
--- /dev/null
+++ b/finetune_speaker.json
@@ -0,0 +1,144 @@
+{
+ "train": {
+ "log_interval": 10,
+ "eval_interval": 100,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0002,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 16,
+ "fp16_run": true,
+ "lr_decay": 0.999875,
+ "segment_size": 8192,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0
+ },
+ "data": {
+ "training_files": "final_annotation_train.txt",
+ "validation_files": "final_annotation_val.txt",
+ "text_cleaners": [
+ "chinese_cleaners"
+ ],
+ "max_wav_value": 32768.0,
+ "sampling_rate": 16000,
+ "filter_length": 1024,
+ "hop_length": 256,
+ "win_length": 1024,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": null,
+ "add_blank": true,
+ "n_speakers": 2,
+ "cleaned_text": true
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 256
+ },
+ "speakers": {
+ "yutou_1": 0,
+ "zhongli": 1
+ },
+ "symbols": [
+ "_",
+ "\uff0c",
+ "\u3002",
+ "\uff01",
+ "\uff1f",
+ "\u2014",
+ "\u2026",
+ "\u3105",
+ "\u3106",
+ "\u3107",
+ "\u3108",
+ "\u3109",
+ "\u310a",
+ "\u310b",
+ "\u310c",
+ "\u310d",
+ "\u310e",
+ "\u310f",
+ "\u3110",
+ "\u3111",
+ "\u3112",
+ "\u3113",
+ "\u3114",
+ "\u3115",
+ "\u3116",
+ "\u3117",
+ "\u3118",
+ "\u3119",
+ "\u311a",
+ "\u311b",
+ "\u311c",
+ "\u311d",
+ "\u311e",
+ "\u311f",
+ "\u3120",
+ "\u3121",
+ "\u3122",
+ "\u3123",
+ "\u3124",
+ "\u3125",
+ "\u3126",
+ "\u3127",
+ "\u3128",
+ "\u3129",
+ "\u02c9",
+ "\u02ca",
+ "\u02c7",
+ "\u02cb",
+ "\u02d9",
+ " "
+ ]
+}
\ No newline at end of file
diff --git a/finetune_speaker_v2.py b/finetune_speaker_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..85fa044c2fa8e05da688cf937963fc9f592f9f6c
--- /dev/null
+++ b/finetune_speaker_v2.py
@@ -0,0 +1,321 @@
+import os
+import json
+import argparse
+import itertools
+import math
+import torch
+from torch import nn, optim
+from torch.nn import functional as F
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard import SummaryWriter
+import torch.multiprocessing as mp
+import torch.distributed as dist
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.cuda.amp import autocast, GradScaler
+from tqdm import tqdm
+
+import librosa
+import logging
+
+logging.getLogger('numba').setLevel(logging.WARNING)
+
+import commons
+import utils
+from data_utils import (
+ TextAudioSpeakerLoader,
+ TextAudioSpeakerCollate,
+ DistributedBucketSampler
+)
+from models import (
+ SynthesizerTrn,
+ MultiPeriodDiscriminator,
+)
+from losses import (
+ generator_loss,
+ discriminator_loss,
+ feature_loss,
+ kl_loss
+)
+from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
+
+
+torch.backends.cudnn.benchmark = True
+global_step = 0
+
+
+def main():
+ """Assume Single Node Multi GPUs Training Only"""
+ assert torch.cuda.is_available(), "CPU training is not allowed."
+
+ n_gpus = torch.cuda.device_count()
+ os.environ['MASTER_ADDR'] = 'localhost'
+ os.environ['MASTER_PORT'] = '8000'
+
+ hps = utils.get_hparams()
+ mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
+
+
+def run(rank, n_gpus, hps):
+ global global_step
+ symbols = hps['symbols']
+ if rank == 0:
+ logger = utils.get_logger(hps.model_dir)
+ logger.info(hps)
+ utils.check_git_hash(hps.model_dir)
+ writer = SummaryWriter(log_dir=hps.model_dir)
+ writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
+
+    # Use the gloo backend on Windows, where PyTorch's NCCL backend is unavailable
+    dist.init_process_group(backend='gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
+ torch.manual_seed(hps.train.seed)
+ torch.cuda.set_device(rank)
+
+ train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data, symbols)
+ train_sampler = DistributedBucketSampler(
+ train_dataset,
+ hps.train.batch_size,
+ [32,300,400,500,600,700,800,900,1000],
+ num_replicas=n_gpus,
+ rank=rank,
+ shuffle=True)
+ collate_fn = TextAudioSpeakerCollate()
+ train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True,
+ collate_fn=collate_fn, batch_sampler=train_sampler)
+ # train_loader = DataLoader(train_dataset, batch_size=hps.train.batch_size, num_workers=2, shuffle=False, pin_memory=True,
+ # collate_fn=collate_fn)
+ if rank == 0:
+ eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, symbols)
+ eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
+ batch_size=hps.train.batch_size, pin_memory=True,
+ drop_last=False, collate_fn=collate_fn)
+
+ net_g = SynthesizerTrn(
+ len(symbols),
+ hps.data.filter_length // 2 + 1,
+ hps.train.segment_size // hps.data.hop_length,
+ n_speakers=hps.data.n_speakers,
+ **hps.model).cuda(rank)
+ net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
+
+ # load existing model
+ _, _, _, _ = utils.load_checkpoint("./pretrained_models/G_0.pth", net_g, None, drop_speaker_emb=hps.drop_speaker_embed)
+ _, _, _, _ = utils.load_checkpoint("./pretrained_models/D_0.pth", net_d, None)
+ epoch_str = 1
+ global_step = 0
+    # Fine-tune all parameters of both the generator and the discriminator.
+    # (Freezing everything except the speaker embedding is disabled here; to restore it,
+    # set requires_grad=False on the other parameters and keep net_g.emb_g.weight trainable.)
+    for p in net_g.parameters():
+        p.requires_grad = True
+    for p in net_d.parameters():
+        p.requires_grad = True
+ optim_g = torch.optim.AdamW(
+ net_g.parameters(),
+ hps.train.learning_rate,
+ betas=hps.train.betas,
+ eps=hps.train.eps)
+ optim_d = torch.optim.AdamW(
+ net_d.parameters(),
+ hps.train.learning_rate,
+ betas=hps.train.betas,
+ eps=hps.train.eps)
+ # optim_d = None
+ net_g = DDP(net_g, device_ids=[rank])
+ net_d = DDP(net_d, device_ids=[rank])
+
+ scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay)
+ scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay)
+
+ scaler = GradScaler(enabled=hps.train.fp16_run)
+
+ for epoch in range(epoch_str, hps.train.epochs + 1):
+ if rank==0:
+ train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
+ else:
+ train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
+ scheduler_g.step()
+ scheduler_d.step()
+
+
+def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
+ net_g, net_d = nets
+ optim_g, optim_d = optims
+ scheduler_g, scheduler_d = schedulers
+ train_loader, eval_loader = loaders
+ if writers is not None:
+ writer, writer_eval = writers
+
+ # train_loader.batch_sampler.set_epoch(epoch)
+ global global_step
+
+ net_g.train()
+ net_d.train()
+ for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(tqdm(train_loader)):
+ x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
+ spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
+ y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
+ speakers = speakers.cuda(rank, non_blocking=True)
+
+ with autocast(enabled=hps.train.fp16_run):
+ y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
+ (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, speakers)
+
+ mel = spec_to_mel_torch(
+ spec,
+ hps.data.filter_length,
+ hps.data.n_mel_channels,
+ hps.data.sampling_rate,
+ hps.data.mel_fmin,
+ hps.data.mel_fmax)
+ y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
+ y_hat_mel = mel_spectrogram_torch(
+ y_hat.squeeze(1),
+ hps.data.filter_length,
+ hps.data.n_mel_channels,
+ hps.data.sampling_rate,
+ hps.data.hop_length,
+ hps.data.win_length,
+ hps.data.mel_fmin,
+ hps.data.mel_fmax
+ )
+
+ y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
+
+ # Discriminator
+ y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
+ with autocast(enabled=False):
+ loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
+ loss_disc_all = loss_disc
+ optim_d.zero_grad()
+ scaler.scale(loss_disc_all).backward()
+ scaler.unscale_(optim_d)
+ grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
+ scaler.step(optim_d)
+
+ with autocast(enabled=hps.train.fp16_run):
+ # Generator
+ y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
+ with autocast(enabled=False):
+ loss_dur = torch.sum(l_length.float())
+ loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
+ loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
+
+ loss_fm = feature_loss(fmap_r, fmap_g)
+ loss_gen, losses_gen = generator_loss(y_d_hat_g)
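+                # Total generator objective below: adversarial + feature-matching + mel
+                # reconstruction (scaled by c_mel above) + duration + KL (scaled by c_kl above).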
+ loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
+ optim_g.zero_grad()
+ scaler.scale(loss_gen_all).backward()
+ scaler.unscale_(optim_g)
+ grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
+ scaler.step(optim_g)
+ scaler.update()
+
+ if rank==0:
+ if global_step % hps.train.log_interval == 0:
+ lr = optim_g.param_groups[0]['lr']
+ losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
+ logger.info('Train Epoch: {} [{:.0f}%]'.format(
+ epoch,
+ 100. * batch_idx / len(train_loader)))
+ logger.info([x.item() for x in losses] + [global_step, lr])
+
+ scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_g": grad_norm_g}
+ scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
+
+ scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
+ scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
+ scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
+ image_dict = {
+ "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
+ "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
+ "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
+ "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
+ }
+ utils.summarize(
+ writer=writer,
+ global_step=global_step,
+ images=image_dict,
+ scalars=scalar_dict)
+
+ if global_step % hps.train.eval_interval == 0:
+ evaluate(hps, net_g, eval_loader, writer_eval)
+ utils.save_checkpoint(net_g, None, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
+                utils.save_checkpoint(net_g, None, hps.train.learning_rate, epoch,
+                                      os.path.join(hps.model_dir, "G_latest.pth"))
+                # utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
+                # Keep the checkpoint directory small: delete the generator snapshot from 4000 steps ago.
+                old_g = os.path.join(hps.model_dir, "G_{}.pth".format(global_step-4000))
+ # old_d=os.path.join(hps.model_dir, "D_{}.pth".format(global_step-400))
+ if os.path.exists(old_g):
+ os.remove(old_g)
+ # if os.path.exists(old_d):
+ # os.remove(old_d)
+ global_step += 1
+ if epoch > hps.max_epochs:
+ print("Maximum epoch reached, closing training...")
+ exit()
+
+ if rank == 0:
+ logger.info('====> Epoch: {}'.format(epoch))
+
+
+def evaluate(hps, generator, eval_loader, writer_eval):
+ generator.eval()
+ with torch.no_grad():
+ for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(eval_loader):
+ x, x_lengths = x.cuda(0), x_lengths.cuda(0)
+ spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
+ y, y_lengths = y.cuda(0), y_lengths.cuda(0)
+ speakers = speakers.cuda(0)
+
+ # remove else
+ x = x[:1]
+ x_lengths = x_lengths[:1]
+ spec = spec[:1]
+ spec_lengths = spec_lengths[:1]
+ y = y[:1]
+ y_lengths = y_lengths[:1]
+ speakers = speakers[:1]
+ break
+ y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, max_len=1000)
+ y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
+
+ mel = spec_to_mel_torch(
+ spec,
+ hps.data.filter_length,
+ hps.data.n_mel_channels,
+ hps.data.sampling_rate,
+ hps.data.mel_fmin,
+ hps.data.mel_fmax)
+ y_hat_mel = mel_spectrogram_torch(
+ y_hat.squeeze(1).float(),
+ hps.data.filter_length,
+ hps.data.n_mel_channels,
+ hps.data.sampling_rate,
+ hps.data.hop_length,
+ hps.data.win_length,
+ hps.data.mel_fmin,
+ hps.data.mel_fmax
+ )
+ image_dict = {
+ "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
+ }
+ audio_dict = {
+ "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
+ }
+ if global_step == 0:
+ image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
+ audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
+
+ utils.summarize(
+ writer=writer_eval,
+ global_step=global_step,
+ images=image_dict,
+ audios=audio_dict,
+ audio_sampling_rate=hps.data.sampling_rate
+ )
+ generator.train()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/losses.py b/losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5
--- /dev/null
+++ b/losses.py
@@ -0,0 +1,61 @@
+import torch
+from torch.nn import functional as F
+
+import commons
+
+
+def feature_loss(fmap_r, fmap_g):
+ loss = 0
+ for dr, dg in zip(fmap_r, fmap_g):
+ for rl, gl in zip(dr, dg):
+ rl = rl.float().detach()
+ gl = gl.float()
+ loss += torch.mean(torch.abs(rl - gl))
+
+ return loss * 2
+
+
+def discriminator_loss(disc_real_outputs, disc_generated_outputs):
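+    # Least-squares GAN (LSGAN) discriminator objective: real outputs are pushed towards 1
+    # and generated outputs towards 0; per-discriminator losses are also returned for logging.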
+ loss = 0
+ r_losses = []
+ g_losses = []
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
+ dr = dr.float()
+ dg = dg.float()
+ r_loss = torch.mean((1-dr)**2)
+ g_loss = torch.mean(dg**2)
+ loss += (r_loss + g_loss)
+ r_losses.append(r_loss.item())
+ g_losses.append(g_loss.item())
+
+ return loss, r_losses, g_losses
+
+
+def generator_loss(disc_outputs):
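+    # LSGAN generator objective: discriminator outputs on generated audio are pushed towards 1.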
+ loss = 0
+ gen_losses = []
+ for dg in disc_outputs:
+ dg = dg.float()
+ l = torch.mean((1-dg)**2)
+ gen_losses.append(l)
+ loss += l
+
+ return loss, gen_losses
+
+
+def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
+ """
+ z_p, logs_q: [b, h, t_t]
+ m_p, logs_p: [b, h, t_t]
+ """
+ z_p = z_p.float()
+ logs_q = logs_q.float()
+ m_p = m_p.float()
+ logs_p = logs_p.float()
+ z_mask = z_mask.float()
+
+ kl = logs_p - logs_q - 0.5
+ kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
+ kl = torch.sum(kl * z_mask)
+ l = kl / torch.sum(z_mask)
+ return l
diff --git a/mel_processing.py b/mel_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..3614150259809983e776d3fed83021decca06a9c
--- /dev/null
+++ b/mel_processing.py
@@ -0,0 +1,112 @@
+import math
+import os
+import random
+import torch
+from torch import nn
+import torch.nn.functional as F
+import torch.utils.data
+import numpy as np
+import librosa
+import librosa.util as librosa_util
+from librosa.util import normalize, pad_center, tiny
+from scipy.signal import get_window
+from scipy.io.wavfile import read
+from librosa.filters import mel as librosa_mel_fn
+
+MAX_WAV_VALUE = 32768.0
+
+
+def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+ """
+ PARAMS
+ ------
+ C: compression factor
+ """
+ return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+def dynamic_range_decompression_torch(x, C=1):
+ """
+ PARAMS
+ ------
+ C: compression factor used to compress
+ """
+ return torch.exp(x) / C
+
+
+def spectral_normalize_torch(magnitudes):
+ output = dynamic_range_compression_torch(magnitudes)
+ return output
+
+
+def spectral_de_normalize_torch(magnitudes):
+ output = dynamic_range_decompression_torch(magnitudes)
+ return output
+
+
+mel_basis = {}
+hann_window = {}
+
+
+def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
+ if torch.min(y) < -1.:
+ print('min value is ', torch.min(y))
+ if torch.max(y) > 1.:
+ print('max value is ', torch.max(y))
+
+ global hann_window
+ dtype_device = str(y.dtype) + '_' + str(y.device)
+ wnsize_dtype_device = str(win_size) + '_' + dtype_device
+ if wnsize_dtype_device not in hann_window:
+ hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
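+    # Reflect-pad by (n_fft - hop_size) / 2 on each side so that, with center=False,
+    # the STFT yields roughly len(y) // hop_size frames, matching the hop-based length
+    # bookkeeping used elsewhere (e.g. spec_lengths and ids_slice * hop_length).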
+
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+ y = y.squeeze(1)
+
+ spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+ return spec
+
+
+def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
+ global mel_basis
+ dtype_device = str(spec.dtype) + '_' + str(spec.device)
+ fmax_dtype_device = str(fmax) + '_' + dtype_device
+ if fmax_dtype_device not in mel_basis:
+        # Keyword arguments keep this call compatible with librosa >= 0.10, which made them required.
+        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
+ mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
+ spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+ spec = spectral_normalize_torch(spec)
+ return spec
+
+
+def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+ if torch.min(y) < -1.:
+ print('min value is ', torch.min(y))
+ if torch.max(y) > 1.:
+ print('max value is ', torch.max(y))
+
+ global mel_basis, hann_window
+ dtype_device = str(y.dtype) + '_' + str(y.device)
+ fmax_dtype_device = str(fmax) + '_' + dtype_device
+ wnsize_dtype_device = str(win_size) + '_' + dtype_device
+ if fmax_dtype_device not in mel_basis:
+        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
+ mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
+ if wnsize_dtype_device not in hann_window:
+ hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+ y = y.squeeze(1)
+
+ spec = torch.stft(y.float(), n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+
+ spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+ spec = spectral_normalize_torch(spec)
+
+ return spec
diff --git a/models.py b/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..676d883f92711412da4b1f822704a75b65ae6196
--- /dev/null
+++ b/models.py
@@ -0,0 +1,533 @@
+import copy
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import commons
+import modules
+import attentions
+import monotonic_align
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from commons import init_weights, get_padding
+
+
+class StochasticDurationPredictor(nn.Module):
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
+ super().__init__()
+        filter_channels = in_channels  # this override should be removed in a future version.
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.log_flow = modules.Log()
+ self.flows = nn.ModuleList()
+ self.flows.append(modules.ElementwiseAffine(2))
+ for i in range(n_flows):
+ self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+ self.flows.append(modules.Flip())
+
+ self.post_pre = nn.Conv1d(1, filter_channels, 1)
+ self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
+ self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+ self.post_flows = nn.ModuleList()
+ self.post_flows.append(modules.ElementwiseAffine(2))
+ for i in range(4):
+ self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+ self.post_flows.append(modules.Flip())
+
+ self.pre = nn.Conv1d(in_channels, filter_channels, 1)
+ self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
+ self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
+
+ def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
+ x = torch.detach(x)
+ x = self.pre(x)
+ if g is not None:
+ g = torch.detach(g)
+ x = x + self.cond(g)
+ x = self.convs(x, x_mask)
+ x = self.proj(x) * x_mask
+
+ if not reverse:
+ flows = self.flows
+ assert w is not None
+
+ logdet_tot_q = 0
+ h_w = self.post_pre(w)
+ h_w = self.post_convs(h_w, x_mask)
+ h_w = self.post_proj(h_w) * x_mask
+ e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
+ z_q = e_q
+ for flow in self.post_flows:
+ z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
+ logdet_tot_q += logdet_q
+ z_u, z1 = torch.split(z_q, [1, 1], 1)
+ u = torch.sigmoid(z_u) * x_mask
+ z0 = (w - u) * x_mask
+ logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
+ logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
+
+ logdet_tot = 0
+ z0, logdet = self.log_flow(z0, x_mask)
+ logdet_tot += logdet
+ z = torch.cat([z0, z1], 1)
+ for flow in flows:
+ z, logdet = flow(z, x_mask, g=x, reverse=reverse)
+ logdet_tot = logdet_tot + logdet
+ nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
+ return nll + logq # [b]
+ else:
+ flows = list(reversed(self.flows))
+ flows = flows[:-2] + [flows[-1]] # remove a useless vflow
+ z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
+ for flow in flows:
+ z = flow(z, x_mask, g=x, reverse=reverse)
+ z0, z1 = torch.split(z, [1, 1], 1)
+ logw = z0
+ return logw
+
+
+class DurationPredictor(nn.Module):
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
+ super().__init__()
+
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.gin_channels = gin_channels
+
+ self.drop = nn.Dropout(p_dropout)
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
+ self.norm_1 = modules.LayerNorm(filter_channels)
+ self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
+ self.norm_2 = modules.LayerNorm(filter_channels)
+ self.proj = nn.Conv1d(filter_channels, 1, 1)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, in_channels, 1)
+
+ def forward(self, x, x_mask, g=None):
+ x = torch.detach(x)
+ if g is not None:
+ g = torch.detach(g)
+ x = x + self.cond(g)
+ x = self.conv_1(x * x_mask)
+ x = torch.relu(x)
+ x = self.norm_1(x)
+ x = self.drop(x)
+ x = self.conv_2(x * x_mask)
+ x = torch.relu(x)
+ x = self.norm_2(x)
+ x = self.drop(x)
+ x = self.proj(x * x_mask)
+ return x * x_mask
+
+
+class TextEncoder(nn.Module):
+ def __init__(self,
+ n_vocab,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout):
+ super().__init__()
+ self.n_vocab = n_vocab
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+
+ self.emb = nn.Embedding(n_vocab, hidden_channels)
+ nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
+
+ self.encoder = attentions.Encoder(
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths):
+ x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return x, m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+
+class PosteriorEncoder(nn.Module):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+
+class Generator(torch.nn.Module):
+ def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
+ super(Generator, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+ self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+ resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ self.ups.append(weight_norm(
+ ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
+ k, u, padding=(k-u)//2)))
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel//(2**(i+1))
+ for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ def forward(self, x, g=None):
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i*self.num_kernels+j](x)
+ else:
+ xs += self.resblocks[i*self.num_kernels+j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ print('Removing weight norm...')
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+        norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
+ ])
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+        norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ])
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminator, self).__init__()
+ periods = [2,3,5,7,11]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+
+class SynthesizerTrn(nn.Module):
+ """
+ Synthesizer for Training
+ """
+
+ def __init__(self,
+ n_vocab,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ n_speakers=0,
+ gin_channels=0,
+ use_sdp=True,
+ **kwargs):
+
+ super().__init__()
+ self.n_vocab = n_vocab
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.n_speakers = n_speakers
+ self.gin_channels = gin_channels
+
+ self.use_sdp = use_sdp
+
+ self.enc_p = TextEncoder(n_vocab,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+ self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
+ self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+ self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
+
+ if use_sdp:
+ self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
+ else:
+ self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
+
+ if n_speakers >= 1:
+ self.emb_g = nn.Embedding(n_speakers, gin_channels)
+
+ def forward(self, x, x_lengths, y, y_lengths, sid=None):
+
+ x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+ if self.n_speakers > 0:
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
+ else:
+ g = None
+
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+
+ with torch.no_grad():
+ # negative cross-entropy
+ s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
+ neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
+ neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
+ neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
+ neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
+ neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
+
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+ attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
+
+ w = attn.sum(2)
+ if self.use_sdp:
+ l_length = self.dp(x, x_mask, w, g=g)
+ l_length = l_length / torch.sum(x_mask)
+ else:
+ logw_ = torch.log(w + 1e-6) * x_mask
+ logw = self.dp(x, x_mask, g=g)
+ l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
+
+ # expand prior
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
+
+ z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
+ o = self.dec(z_slice, g=g)
+ return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
+ x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+ if self.n_speakers > 0:
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
+ else:
+ g = None
+
+ if self.use_sdp:
+ logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
+ else:
+ logw = self.dp(x, x_mask, g=g)
+ w = torch.exp(logw) * x_mask * length_scale
+ w_ceil = torch.ceil(w)
+ y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
+ y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+ attn = commons.generate_path(w_ceil, attn_mask)
+
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
+
+ z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
+ z = self.flow(z_p, y_mask, g=g, reverse=True)
+ o = self.dec((z * y_mask)[:,:,:max_len], g=g)
+ return o, attn, y_mask, (z, z_p, m_p, logs_p)
+
+ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
+        assert self.n_speakers > 0, "n_speakers has to be larger than 0."
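+        # Voice conversion path: encode the source audio conditioned on the source speaker,
+        # map it to the (approximately speaker-independent) prior space with the flow, then
+        # invert the flow conditioned on the target speaker and decode the waveform.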
+ g_src = self.emb_g(sid_src).unsqueeze(-1)
+ g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
+ z_p = self.flow(z, y_mask, g=g_src)
+ z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
+ o_hat = self.dec(z_hat * y_mask, g=g_tgt)
+ return o_hat, y_mask, (z, z_p, z_hat)
diff --git a/models_infer.py b/models_infer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b9bb82bf5831c5264f3e1e52b23e8e875f5fd9e
--- /dev/null
+++ b/models_infer.py
@@ -0,0 +1,402 @@
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import commons
+import modules
+import attentions
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from commons import init_weights, get_padding
+
+
+class StochasticDurationPredictor(nn.Module):
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
+ super().__init__()
+        filter_channels = in_channels  # this override should be removed in a future version.
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.log_flow = modules.Log()
+ self.flows = nn.ModuleList()
+ self.flows.append(modules.ElementwiseAffine(2))
+ for i in range(n_flows):
+ self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+ self.flows.append(modules.Flip())
+
+ self.post_pre = nn.Conv1d(1, filter_channels, 1)
+ self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
+ self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+ self.post_flows = nn.ModuleList()
+ self.post_flows.append(modules.ElementwiseAffine(2))
+ for i in range(4):
+ self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+ self.post_flows.append(modules.Flip())
+
+ self.pre = nn.Conv1d(in_channels, filter_channels, 1)
+ self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
+ self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
+
+ def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
+ x = torch.detach(x)
+ x = self.pre(x)
+ if g is not None:
+ g = torch.detach(g)
+ x = x + self.cond(g)
+ x = self.convs(x, x_mask)
+ x = self.proj(x) * x_mask
+
+ if not reverse:
+ flows = self.flows
+ assert w is not None
+
+ logdet_tot_q = 0
+ h_w = self.post_pre(w)
+ h_w = self.post_convs(h_w, x_mask)
+ h_w = self.post_proj(h_w) * x_mask
+ e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
+ z_q = e_q
+ for flow in self.post_flows:
+ z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
+ logdet_tot_q += logdet_q
+ z_u, z1 = torch.split(z_q, [1, 1], 1)
+ u = torch.sigmoid(z_u) * x_mask
+ z0 = (w - u) * x_mask
+ logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
+ logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
+
+ logdet_tot = 0
+ z0, logdet = self.log_flow(z0, x_mask)
+ logdet_tot += logdet
+ z = torch.cat([z0, z1], 1)
+ for flow in flows:
+ z, logdet = flow(z, x_mask, g=x, reverse=reverse)
+ logdet_tot = logdet_tot + logdet
+ nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
+ return nll + logq # [b]
+ else:
+ flows = list(reversed(self.flows))
+ flows = flows[:-2] + [flows[-1]] # remove a useless vflow
+ z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
+ for flow in flows:
+ z = flow(z, x_mask, g=x, reverse=reverse)
+ z0, z1 = torch.split(z, [1, 1], 1)
+ logw = z0
+ return logw
+
+
+class DurationPredictor(nn.Module):
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
+ super().__init__()
+
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.gin_channels = gin_channels
+
+ self.drop = nn.Dropout(p_dropout)
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
+ self.norm_1 = modules.LayerNorm(filter_channels)
+ self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
+ self.norm_2 = modules.LayerNorm(filter_channels)
+ self.proj = nn.Conv1d(filter_channels, 1, 1)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, in_channels, 1)
+
+ def forward(self, x, x_mask, g=None):
+ x = torch.detach(x)
+ if g is not None:
+ g = torch.detach(g)
+ x = x + self.cond(g)
+ x = self.conv_1(x * x_mask)
+ x = torch.relu(x)
+ x = self.norm_1(x)
+ x = self.drop(x)
+ x = self.conv_2(x * x_mask)
+ x = torch.relu(x)
+ x = self.norm_2(x)
+ x = self.drop(x)
+ x = self.proj(x * x_mask)
+ return x * x_mask
+
+
+class TextEncoder(nn.Module):
+ def __init__(self,
+ n_vocab,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout):
+ super().__init__()
+ self.n_vocab = n_vocab
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+
+ self.emb = nn.Embedding(n_vocab, hidden_channels)
+ nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
+
+ self.encoder = attentions.Encoder(
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths):
+ x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return x, m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+
+class PosteriorEncoder(nn.Module):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+
+class Generator(torch.nn.Module):
+ def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
+ super(Generator, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+ self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+ resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ self.ups.append(weight_norm(
+ ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
+ k, u, padding=(k-u)//2)))
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel//(2**(i+1))
+ for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ def forward(self, x, g=None):
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i*self.num_kernels+j](x)
+ else:
+ xs += self.resblocks[i*self.num_kernels+j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ print('Removing weight norm...')
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+
+class SynthesizerTrn(nn.Module):
+ """
+ Synthesizer for Training
+ """
+
+ def __init__(self,
+ n_vocab,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ n_speakers=0,
+ gin_channels=0,
+ use_sdp=True,
+ **kwargs):
+
+ super().__init__()
+ self.n_vocab = n_vocab
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.n_speakers = n_speakers
+ self.gin_channels = gin_channels
+
+ self.use_sdp = use_sdp
+
+ self.enc_p = TextEncoder(n_vocab,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+ self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
+ self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+ self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
+
+ if use_sdp:
+ self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
+ else:
+ self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
+
+        if n_speakers >= 1:  # match models.SynthesizerTrn, which also builds emb_g for a single speaker
+ self.emb_g = nn.Embedding(n_speakers, gin_channels)
+
+ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
+ x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+ if self.n_speakers > 0:
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
+ else:
+ g = None
+
+ if self.use_sdp:
+ logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
+ else:
+ logw = self.dp(x, x_mask, g=g)
+ w = torch.exp(logw) * x_mask * length_scale
+ w_ceil = torch.ceil(w)
+ y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
+ y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+ attn = commons.generate_path(w_ceil, attn_mask)
+
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
+
+ z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
+ z = self.flow(z_p, y_mask, g=g, reverse=True)
+ o = self.dec((z * y_mask)[:,:,:max_len], g=g)
+ return o, attn, y_mask, (z, z_p, m_p, logs_p)
+
+ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
+        assert self.n_speakers > 0, "n_speakers has to be larger than 0."
+ g_src = self.emb_g(sid_src).unsqueeze(-1)
+ g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
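+        # encode with the source speaker, map to the speaker-independent prior through
+        # the flow, then invert the flow under the target speaker before decoding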
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
+ z_p = self.flow(z, y_mask, g=g_src)
+ z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
+ o_hat = self.dec(z_hat * y_mask, g=g_tgt)
+ return o_hat, y_mask, (z, z_p, z_hat)
+
diff --git a/modules.py b/modules.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c7fd9cd6eb8b7e0ec0e08957e970744a374a924
--- /dev/null
+++ b/modules.py
@@ -0,0 +1,390 @@
+import copy
+import math
+import numpy as np
+import scipy
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm
+
+import commons
+from commons import init_weights, get_padding
+from transforms import piecewise_rational_quadratic_transform
+
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+ def __init__(self, channels, eps=1e-5):
+ super().__init__()
+ self.channels = channels
+ self.eps = eps
+
+ self.gamma = nn.Parameter(torch.ones(channels))
+ self.beta = nn.Parameter(torch.zeros(channels))
+
+ def forward(self, x):
+ x = x.transpose(1, -1)
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+ return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+ super().__init__()
+ self.in_channels = in_channels
+ self.hidden_channels = hidden_channels
+ self.out_channels = out_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+        assert n_layers > 1, "Number of layers should be larger than 1."
+
+ self.conv_layers = nn.ModuleList()
+ self.norm_layers = nn.ModuleList()
+ self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.relu_drop = nn.Sequential(
+ nn.ReLU(),
+ nn.Dropout(p_dropout))
+ for _ in range(n_layers-1):
+ self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+ self.proj.weight.data.zero_()
+ self.proj.bias.data.zero_()
+
+ def forward(self, x, x_mask):
+ x_org = x
+ for i in range(self.n_layers):
+ x = self.conv_layers[i](x * x_mask)
+ x = self.norm_layers[i](x)
+ x = self.relu_drop(x)
+ x = x_org + self.proj(x)
+ return x * x_mask
+
+
+class DDSConv(nn.Module):
+ """
+    Dilated and Depth-Separable Convolution
+ """
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
+ super().__init__()
+ self.channels = channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+
+ self.drop = nn.Dropout(p_dropout)
+ self.convs_sep = nn.ModuleList()
+ self.convs_1x1 = nn.ModuleList()
+ self.norms_1 = nn.ModuleList()
+ self.norms_2 = nn.ModuleList()
+ for i in range(n_layers):
+ dilation = kernel_size ** i
+ padding = (kernel_size * dilation - dilation) // 2
+ self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
+ groups=channels, dilation=dilation, padding=padding
+ ))
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+ self.norms_1.append(LayerNorm(channels))
+ self.norms_2.append(LayerNorm(channels))
+
+ def forward(self, x, x_mask, g=None):
+ if g is not None:
+ x = x + g
+ for i in range(self.n_layers):
+ y = self.convs_sep[i](x * x_mask)
+ y = self.norms_1[i](y)
+ y = F.gelu(y)
+ y = self.convs_1x1[i](y)
+ y = self.norms_2[i](y)
+ y = F.gelu(y)
+ y = self.drop(y)
+ x = x + y
+ return x * x_mask
+
+
+class WN(torch.nn.Module):
+ def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+ super(WN, self).__init__()
+ assert(kernel_size % 2 == 1)
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+ self.p_dropout = p_dropout
+
+ self.in_layers = torch.nn.ModuleList()
+ self.res_skip_layers = torch.nn.ModuleList()
+ self.drop = nn.Dropout(p_dropout)
+
+ if gin_channels != 0:
+ cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+ for i in range(n_layers):
+ dilation = dilation_rate ** i
+ padding = int((kernel_size * dilation - dilation) / 2)
+ in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
+ dilation=dilation, padding=padding)
+ in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+ self.in_layers.append(in_layer)
+
+ # last one is not necessary
+ if i < n_layers - 1:
+ res_skip_channels = 2 * hidden_channels
+ else:
+ res_skip_channels = hidden_channels
+
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+ res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+ self.res_skip_layers.append(res_skip_layer)
+
+ def forward(self, x, x_mask, g=None, **kwargs):
+ output = torch.zeros_like(x)
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+ if g is not None:
+ g = self.cond_layer(g)
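+            # cond_layer emits 2*hidden_channels activations per layer in a single pass;
+            # each layer slices out its own chunk below via cond_offset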
+
+ for i in range(self.n_layers):
+ x_in = self.in_layers[i](x)
+ if g is not None:
+ cond_offset = i * 2 * self.hidden_channels
+ g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
+ else:
+ g_l = torch.zeros_like(x_in)
+
+ acts = commons.fused_add_tanh_sigmoid_multiply(
+ x_in,
+ g_l,
+ n_channels_tensor)
+ acts = self.drop(acts)
+
+ res_skip_acts = self.res_skip_layers[i](acts)
+ if i < self.n_layers - 1:
+ res_acts = res_skip_acts[:,:self.hidden_channels,:]
+ x = (x + res_acts) * x_mask
+ output = output + res_skip_acts[:,self.hidden_channels:,:]
+ else:
+ output = output + res_skip_acts
+ return output * x_mask
+
+ def remove_weight_norm(self):
+ if self.gin_channels != 0:
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
+ for l in self.in_layers:
+ torch.nn.utils.remove_weight_norm(l)
+ for l in self.res_skip_layers:
+ torch.nn.utils.remove_weight_norm(l)
+
+
+class ResBlock1(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+ super(ResBlock1, self).__init__()
+ self.convs1 = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+ padding=get_padding(kernel_size, dilation[2])))
+ ])
+ self.convs1.apply(init_weights)
+
+ self.convs2 = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1)))
+ ])
+ self.convs2.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c1, c2 in zip(self.convs1, self.convs2):
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c1(xt)
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c2(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs1:
+ remove_weight_norm(l)
+ for l in self.convs2:
+ remove_weight_norm(l)
+
+
+class ResBlock2(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+ super(ResBlock2, self).__init__()
+ self.convs = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1])))
+ ])
+ self.convs.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c in self.convs:
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs:
+ remove_weight_norm(l)
+
+
+class Log(nn.Module):
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+ logdet = torch.sum(-y, [1, 2])
+ return y, logdet
+ else:
+ x = torch.exp(x) * x_mask
+ return x
+
+
+class Flip(nn.Module):
+ def forward(self, x, *args, reverse=False, **kwargs):
+ x = torch.flip(x, [1])
+ if not reverse:
+ logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+ return x, logdet
+ else:
+ return x
+
+
+class ElementwiseAffine(nn.Module):
+ def __init__(self, channels):
+ super().__init__()
+ self.channels = channels
+ self.m = nn.Parameter(torch.zeros(channels,1))
+ self.logs = nn.Parameter(torch.zeros(channels,1))
+
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = self.m + torch.exp(self.logs) * x
+ y = y * x_mask
+ logdet = torch.sum(self.logs * x_mask, [1,2])
+ return y, logdet
+ else:
+ x = (x - self.m) * torch.exp(-self.logs) * x_mask
+ return x
+
+
+class ResidualCouplingLayer(nn.Module):
+ def __init__(self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ p_dropout=0,
+ gin_channels=0,
+ mean_only=False):
+ assert channels % 2 == 0, "channels should be divisible by 2"
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.half_channels = channels // 2
+ self.mean_only = mean_only
+
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+ self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+ self.post.weight.data.zero_()
+ self.post.bias.data.zero_()
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ x0, x1 = torch.split(x, [self.half_channels]*2, 1)
+ h = self.pre(x0) * x_mask
+ h = self.enc(h, x_mask, g=g)
+ stats = self.post(h) * x_mask
+ if not self.mean_only:
+ m, logs = torch.split(stats, [self.half_channels]*2, 1)
+ else:
+ m = stats
+ logs = torch.zeros_like(m)
+
+ if not reverse:
+ x1 = m + x1 * torch.exp(logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ logdet = torch.sum(logs, [1,2])
+ return x, logdet
+ else:
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ return x
+
+
+class ConvFlow(nn.Module):
+ def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
+ super().__init__()
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.num_bins = num_bins
+ self.tail_bound = tail_bound
+ self.half_channels = in_channels // 2
+
+ self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+ self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
+ self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
+ self.proj.weight.data.zero_()
+ self.proj.bias.data.zero_()
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ x0, x1 = torch.split(x, [self.half_channels]*2, 1)
+ h = self.pre(x0)
+ h = self.convs(h, x_mask, g=g)
+ h = self.proj(h) * x_mask
+
+ b, c, t = x0.shape
+ h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
+
+ unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
+ unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
+ unnormalized_derivatives = h[..., 2 * self.num_bins:]
+
+ x1, logabsdet = piecewise_rational_quadratic_transform(x1,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=reverse,
+ tails='linear',
+ tail_bound=self.tail_bound
+ )
+
+ x = torch.cat([x0, x1], 1) * x_mask
+ logdet = torch.sum(logabsdet * x_mask, [1,2])
+ if not reverse:
+ return x, logdet
+ else:
+ return x
diff --git a/preprocess_v2.py b/preprocess_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..89018fa7ea8c41400d7348f635335cd0e58261f2
--- /dev/null
+++ b/preprocess_v2.py
@@ -0,0 +1,151 @@
+import os
+import argparse
+import json
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--add_auxiliary_data", type=bool, help="Whether to add extra data as fine-tuning helper")
+ parser.add_argument("--languages", default="CJE")
+ args = parser.parse_args()
+ if args.languages == "CJE":
+ langs = ["[ZH]", "[JA]", "[EN]"]
+ elif args.languages == "CJ":
+ langs = ["[ZH]", "[JA]"]
+ elif args.languages == "C":
+ langs = ["[ZH]"]
+ new_annos = []
+ # Source 1: transcribed short audios
+ if os.path.exists("short_character_anno.txt"):
+ with open("short_character_anno.txt", 'r', encoding='utf-8') as f:
+ short_character_anno = f.readlines()
+ new_annos += short_character_anno
+ # Source 2: transcribed long audio segments
+ if os.path.exists("./long_character_anno.txt"):
+ with open("./long_character_anno.txt", 'r', encoding='utf-8') as f:
+ long_character_anno = f.readlines()
+ new_annos += long_character_anno
+
+ # Get all speaker names
+ speakers = []
+ for line in new_annos:
+ path, speaker, text = line.split("|")
+ if speaker not in speakers:
+ speakers.append(speaker)
+ assert (len(speakers) != 0), "No audio file found. Please check your uploaded file structure."
+ # Source 3 (Optional): sampled audios as extra training helpers
+ if args.add_auxiliary_data:
+ with open("./sampled_audio4ft.txt", 'r', encoding='utf-8') as f:
+ old_annos = f.readlines()
+ # filter old_annos according to supported languages
+ filtered_old_annos = []
+ for line in old_annos:
+ for lang in langs:
+ if lang in line:
+ filtered_old_annos.append(line)
+ old_annos = filtered_old_annos
+ for line in old_annos:
+ path, speaker, text = line.split("|")
+ if speaker not in speakers:
+ speakers.append(speaker)
+ num_old_voices = len(old_annos)
+ num_new_voices = len(new_annos)
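+        # NOTE: below, each new annotation is duplicated cc_duplicate times so the
+        # target speakers' data is not drowned out by the larger auxiliary corpus.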
+        # STEP 1: balance number of new & old voices
+        cc_duplicate = num_old_voices // num_new_voices
+        if cc_duplicate == 0:
+            cc_duplicate = 1
+
+
+        # STEP 2: modify config file
+        with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
+            hps = json.load(f)
+
+        # assign ids to new speakers
+        speaker2id = {}
+        for i, speaker in enumerate(speakers):
+            speaker2id[speaker] = i
+        # modify n_speakers
+        hps['data']["n_speakers"] = len(speakers)
+        # overwrite speaker names
+        hps['speakers'] = speaker2id
+        hps['train']['log_interval'] = 10
+        hps['train']['eval_interval'] = 100
+        hps['train']['batch_size'] = 16
+        hps['data']['training_files'] = "final_annotation_train.txt"
+        hps['data']['validation_files'] = "final_annotation_val.txt"
+        # save modified config
+        with open("./configs/modified_finetune_speaker.json", 'w', encoding='utf-8') as f:
+            json.dump(hps, f, indent=2)
+
+        # STEP 3: clean annotations, replace speaker names with assigned speaker IDs
+        import text
+        cleaned_new_annos = []
+        for i, line in enumerate(new_annos):
+            path, speaker, txt = line.split("|")
+            if len(txt) > 150:
+                continue
+            cleaned_text = text._clean_text(txt, hps['data']['text_cleaners'])
+            cleaned_text += "\n" if not cleaned_text.endswith("\n") else ""
+            cleaned_new_annos.append(path + "|" + str(speaker2id[speaker]) + "|" + cleaned_text)
+        cleaned_old_annos = []
+        for i, line in enumerate(old_annos):
+            path, speaker, txt = line.split("|")
+            if len(txt) > 150:
+                continue
+            cleaned_text = text._clean_text(txt, hps['data']['text_cleaners'])
+            cleaned_text += "\n" if not cleaned_text.endswith("\n") else ""
+            cleaned_old_annos.append(path + "|" + str(speaker2id[speaker]) + "|" + cleaned_text)
+        # merge with old annotation
+        final_annos = cleaned_old_annos + cc_duplicate * cleaned_new_annos
+        # save annotation file
+        with open("./final_annotation_train.txt", 'w', encoding='utf-8') as f:
+            for line in final_annos:
+                f.write(line)
+        # save annotation file for validation
+        with open("./final_annotation_val.txt", 'w', encoding='utf-8') as f:
+            for line in cleaned_new_annos:
+                f.write(line)
+        print("finished")
+ else:
+ # Do not add extra helper data
+ # STEP 1: modify config file
+ with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
+ hps = json.load(f)
+
+ # assign ids to new speakers
+ speaker2id = {}
+ for i, speaker in enumerate(speakers):
+ speaker2id[speaker] = i
+ # modify n_speakers
+ hps['data']["n_speakers"] = len(speakers)
+ # overwrite speaker names
+ hps['speakers'] = speaker2id
+ hps['train']['log_interval'] = 10
+ hps['train']['eval_interval'] = 100
+ hps['train']['batch_size'] = 16
+ hps['data']['training_files'] = "final_annotation_train.txt"
+ hps['data']['validation_files'] = "final_annotation_val.txt"
+ # save modified config
+ with open("./configs/modified_finetune_speaker.json", 'w', encoding='utf-8') as f:
+ json.dump(hps, f, indent=2)
+
+ # STEP 2: clean annotations, replace speaker names with assigned speaker IDs
+ import text
+
+ cleaned_new_annos = []
+ for i, line in enumerate(new_annos):
+ path, speaker, txt = line.split("|")
+ if len(txt) > 150:
+ continue
+ cleaned_text = text._clean_text(txt, hps['data']['text_cleaners']).replace("[ZH]", "")
+ cleaned_text += "\n" if not cleaned_text.endswith("\n") else ""
+ cleaned_new_annos.append(path + "|" + str(speaker2id[speaker]) + "|" + cleaned_text)
+
+ final_annos = cleaned_new_annos
+ # save annotation file
+ with open("./final_annotation_train.txt", 'w', encoding='utf-8') as f:
+ for line in final_annos:
+ f.write(line)
+ # save annotation file for validation
+ with open("./final_annotation_val.txt", 'w', encoding='utf-8') as f:
+ for line in cleaned_new_annos:
+ f.write(line)
+ print("finished")
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8a0fe54b37af9c6072a7b7c3a6aa7790c7e75a9d
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,26 @@
+Cython==0.29.21
+librosa==0.9.2
+matplotlib==3.3.1
+scikit-learn==1.0.2
+scipy==1.5.2
+numpy==1.21.6
+tensorboard
+torch==1.13.1
+torchvision==0.14.1
+torchaudio==0.13.1
+unidecode
+pyopenjtalk==0.1.3
+jamo
+pypinyin
+jieba
+protobuf
+cn2an
+inflect
+eng_to_ipa
+ko_pron
+indic_transliteration==2.3.37
+num_thai==0.0.5
+opencc==1.1.1
+demucs
+openai-whisper
+gradio
diff --git a/scripts/denoise_audio.py b/scripts/denoise_audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc061c61bdd470ced61bc7fb448c94e2c5fd8b30
--- /dev/null
+++ b/scripts/denoise_audio.py
@@ -0,0 +1,22 @@
+import os
+import json
+import torchaudio
+raw_audio_dir = "./raw_audio/"
+denoise_audio_dir = "./denoised_audio/"
+filelist = list(os.walk(raw_audio_dir))[0][2]
+# 2023/4/21: Get the target sampling rate
+with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
+ hps = json.load(f)
+target_sr = hps['data']['sampling_rate']
+for file in filelist:
+ if file.endswith(".wav"):
+ os.system(f"demucs --two-stems=vocals {raw_audio_dir}{file}")
+for file in filelist:
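+    # demucs writes the isolated vocal track to ./separated/htdemucs/<filename>/vocals.wav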
+ file = file.replace(".wav", "")
+ wav, sr = torchaudio.load(f"./separated/htdemucs/{file}/vocals.wav", frame_offset=0, num_frames=-1, normalize=True,
+ channels_first=True)
+ # merge two channels into one
+ wav = wav.mean(dim=0).unsqueeze(0)
+ if sr != target_sr:
+ wav = torchaudio.transforms.Resample(orig_freq=sr, new_freq=target_sr)(wav)
+ torchaudio.save(denoise_audio_dir + file + ".wav", wav, target_sr, channels_first=True)
\ No newline at end of file
diff --git a/scripts/download_model.py b/scripts/download_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f1ab59aa549afdf107bf2ff97d48149a87da6f4
--- /dev/null
+++ b/scripts/download_model.py
@@ -0,0 +1,4 @@
+from google.colab import files
+files.download("./G_latest.pth")
+files.download("./finetune_speaker.json")
+files.download("./moegoe_config.json")
\ No newline at end of file
diff --git a/scripts/download_video.py b/scripts/download_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ab1a7f3698d373c3fff6119f1f222c061f4c20f
--- /dev/null
+++ b/scripts/download_video.py
@@ -0,0 +1,37 @@
+import os
+import random
+import shutil
+from concurrent.futures import ThreadPoolExecutor
+from google.colab import files
+
+basepath = os.getcwd()
+uploaded = files.upload()  # upload the file(s)
+for filename in uploaded.keys():
+ assert (filename.endswith(".txt")), "speaker-videolink info could only be .txt file!"
+ shutil.move(os.path.join(basepath, filename), os.path.join("./speaker_links.txt"))
+
+
+def generate_infos():
+ infos = []
+ with open("./speaker_links.txt", 'r', encoding='utf-8') as f:
+ lines = f.readlines()
+ for line in lines:
+ line = line.replace("\n", "").replace(" ", "")
+ if line == "":
+ continue
+ speaker, link = line.split("|")
+ filename = speaker + "_" + str(random.randint(0, 1000000))
+ infos.append({"link": link, "filename": filename})
+ return infos
+
+
+def download_video(info):
+ link = info["link"]
+ filename = info["filename"]
+ os.system(f"youtube-dl -f 0 {link} -o ./video_data/{filename}.mp4 --no-check-certificate")
+
+
+if __name__ == "__main__":
+ infos = generate_infos()
+ with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
+ executor.map(download_video, infos)
diff --git a/scripts/long_audio_transcribe.py b/scripts/long_audio_transcribe.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d0026c2d5061e0f0e113d7b6b9742ef4dcc09f7
--- /dev/null
+++ b/scripts/long_audio_transcribe.py
@@ -0,0 +1,75 @@
+from moviepy.editor import AudioFileClip
+import whisper
+import os
+import json
+import torchaudio
+import librosa
+import torch
+import argparse
+parent_dir = "./denoised_audio/"
+filelist = list(os.walk(parent_dir))[0][2]
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--languages", default="CJE")
+ parser.add_argument("--whisper_size", default="medium")
+ args = parser.parse_args()
+ if args.languages == "CJE":
+ lang2token = {
+ 'zh': "[ZH]",
+ 'ja': "[JA]",
+ "en": "[EN]",
+ }
+ elif args.languages == "CJ":
+ lang2token = {
+ 'zh': "[ZH]",
+ 'ja': "[JA]",
+ }
+ elif args.languages == "C":
+ lang2token = {
+ 'zh': "[ZH]",
+ }
+ assert(torch.cuda.is_available()), "Please enable GPU in order to run Whisper!"
+ with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
+ hps = json.load(f)
+ target_sr = hps['data']['sampling_rate']
+ model = whisper.load_model(args.whisper_size)
+ speaker_annos = []
+ for file in filelist:
+ print(f"transcribing {parent_dir + file}...\n")
+ options = dict(beam_size=5, best_of=5)
+ transcribe_options = dict(task="transcribe", **options)
+ result = model.transcribe(parent_dir + file, word_timestamps=True, **transcribe_options)
+ segments = result["segments"]
+ # result = model.transcribe(parent_dir + file)
+ lang = result['language']
+ if result['language'] not in list(lang2token.keys()):
+ print(f"{lang} not supported, ignoring...\n")
+ continue
+ # segment audio based on segment results
+ character_name = file.rstrip(".wav").split("_")[0]
+ code = file.rstrip(".wav").split("_")[1]
+ if not os.path.exists("./segmented_character_voice/" + character_name):
+ os.mkdir("./segmented_character_voice/" + character_name)
+ wav, sr = torchaudio.load(parent_dir + file, frame_offset=0, num_frames=-1, normalize=True,
+ channels_first=True)
+
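+        # cut the denoised audio into Whisper segments and save each one as a training clip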
+ for i, seg in enumerate(result['segments']):
+ start_time = seg['start']
+ end_time = seg['end']
+ text = seg['text']
+ text = lang2token[lang] + text.replace("\n", "") + lang2token[lang]
+ text = text + "\n"
+ wav_seg = wav[:, int(start_time*sr):int(end_time*sr)]
+ wav_seg_name = f"{character_name}_{code}_{i}.wav"
+ savepth = "./segmented_character_voice/" + character_name + "/" + wav_seg_name
+ speaker_annos.append(savepth + "|" + character_name + "|" + text)
+ print(f"Transcribed segment: {speaker_annos[-1]}")
+ # trimmed_wav_seg = librosa.effects.trim(wav_seg.squeeze().numpy())
+ # trimmed_wav_seg = torch.tensor(trimmed_wav_seg[0]).unsqueeze(0)
+ torchaudio.save(savepth, wav_seg, target_sr, channels_first=True)
+ if len(speaker_annos) == 0:
+ print("Warning: no long audios & videos found, this IS expected if you have only uploaded short audios")
+ print("this IS NOT expected if you have uploaded any long audios, videos or video links. Please check your file structure or make sure your audio/video language is supported.")
+ with open("./long_character_anno.txt", 'w', encoding='utf-8') as f:
+ for line in speaker_annos:
+ f.write(line)
diff --git a/scripts/rearrange_speaker.py b/scripts/rearrange_speaker.py
new file mode 100644
index 0000000000000000000000000000000000000000..de0f7545904cc088377c552cc6d9b058c5e9d342
--- /dev/null
+++ b/scripts/rearrange_speaker.py
@@ -0,0 +1,37 @@
+import torch
+import argparse
+import json
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model_dir", type=str, default="./OUTPUT_MODEL/G_latest.pth")
+ parser.add_argument("--config_dir", type=str, default="./configs/modified_finetune_speaker.json")
+ args = parser.parse_args()
+
+ model_sd = torch.load(args.model_dir, map_location='cpu')
+ with open(args.config_dir, 'r', encoding='utf-8') as f:
+ hps = json.load(f)
+
+ valid_speakers = list(hps['speakers'].keys())
+ if hps['data']['n_speakers'] > len(valid_speakers):
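+        # compact the speaker embedding table: keep only the rows of speakers present
+        # in the config and re-index them from 0 (256 is assumed to match gin_channels
+        # of the base model)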
+ new_emb_g = torch.zeros([len(valid_speakers), 256])
+ old_emb_g = model_sd['model']['emb_g.weight']
+ for i, speaker in enumerate(valid_speakers):
+ new_emb_g[i, :] = old_emb_g[hps['speakers'][speaker], :]
+ hps['speakers'][speaker] = i
+ hps['data']['n_speakers'] = len(valid_speakers)
+ model_sd['model']['emb_g.weight'] = new_emb_g
+ with open("./finetune_speaker.json", 'w', encoding='utf-8') as f:
+ json.dump(hps, f, indent=2)
+ torch.save(model_sd, "./G_latest.pth")
+ else:
+ with open("./finetune_speaker.json", 'w', encoding='utf-8') as f:
+ json.dump(hps, f, indent=2)
+ torch.save(model_sd, "./G_latest.pth")
+ # save another config file copy in MoeGoe format
+ hps['speakers'] = valid_speakers
+ with open("./moegoe_config.json", 'w', encoding='utf-8') as f:
+ json.dump(hps, f, indent=2)
+
+
+
diff --git a/scripts/resample.py b/scripts/resample.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ed44ff6036cb6cd6f89cfc8b14c7d97115f4df4
--- /dev/null
+++ b/scripts/resample.py
@@ -0,0 +1,20 @@
+import os
+import json
+import argparse
+import torchaudio
+
+
+def main():
+ with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
+ hps = json.load(f)
+ target_sr = hps['data']['sampling_rate']
+ filelist = list(os.walk("./sampled_audio4ft"))[0][2]
+ if target_sr != 22050:
+ for wavfile in filelist:
+ wav, sr = torchaudio.load("./sampled_audio4ft" + "/" + wavfile, frame_offset=0, num_frames=-1,
+ normalize=True, channels_first=True)
+ wav = torchaudio.transforms.Resample(orig_freq=sr, new_freq=target_sr)(wav)
+ torchaudio.save("./sampled_audio4ft" + "/" + wavfile, wav, target_sr, channels_first=True)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/scripts/short_audio_transcribe.py b/scripts/short_audio_transcribe.py
new file mode 100644
index 0000000000000000000000000000000000000000..c82c8ced1d7016d283c9b2c83c9cc3b0b88d0db4
--- /dev/null
+++ b/scripts/short_audio_transcribe.py
@@ -0,0 +1,121 @@
+import whisper
+import os
+import json
+import torchaudio
+import argparse
+import torch
+
+lang2token = {
+ 'zh': "[ZH]",
+ 'ja': "[JA]",
+ "en": "[EN]",
+ }
+def transcribe_one(audio_path):
+ # load audio and pad/trim it to fit 30 seconds
+ audio = whisper.load_audio(audio_path)
+ audio = whisper.pad_or_trim(audio)
+
+ # make log-Mel spectrogram and move to the same device as the model
+ mel = whisper.log_mel_spectrogram(audio).to(model.device)
+
+ # detect the spoken language
+ _, probs = model.detect_language(mel)
+ print(f"Detected language: {max(probs, key=probs.get)}")
+ lang = max(probs, key=probs.get)
+ # decode the audio
+ options = whisper.DecodingOptions(beam_size=5)
+ result = whisper.decode(model, mel, options)
+
+ # print the recognized text
+ print(result.text)
+ return lang, result.text
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--languages", default="CJE")
+ parser.add_argument("--whisper_size", default="medium")
+ args = parser.parse_args()
+ if args.languages == "CJE":
+ lang2token = {
+ 'zh': "[ZH]",
+ 'ja': "[JA]",
+ "en": "[EN]",
+ }
+ elif args.languages == "CJ":
+ lang2token = {
+ 'zh': "[ZH]",
+ 'ja': "[JA]",
+ }
+ elif args.languages == "C":
+ lang2token = {
+ 'zh': "[ZH]",
+ }
+ assert (torch.cuda.is_available()), "Please enable GPU in order to run Whisper!"
+ model = whisper.load_model(args.whisper_size)
+ parent_dir = "./custom_character_voice/"
+ speaker_names = list(os.walk(parent_dir))[0][1]
+ speaker_annos = []
+ total_files = sum([len(files) for r, d, files in os.walk(parent_dir)])
+ # resample audios
+ # 2023/4/21: Get the target sampling rate
+ with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
+ hps = json.load(f)
+ target_sr = hps['data']['sampling_rate']
+ processed_files = 0
+ for speaker in speaker_names:
+ for i, wavfile in enumerate(list(os.walk(parent_dir + speaker))[0][2]):
+ # try to load file as audio
+ if wavfile.startswith("processed_"):
+ continue
+ try:
+ wav, sr = torchaudio.load(parent_dir + speaker + "/" + wavfile, frame_offset=0, num_frames=-1, normalize=True,
+ channels_first=True)
+ wav = wav.mean(dim=0).unsqueeze(0)
+ if sr != target_sr:
+ wav = torchaudio.transforms.Resample(orig_freq=sr, new_freq=target_sr)(wav)
+                if wav.shape[1] / sr > 20:
+                    print(f"{wavfile} too long, ignoring\n")
+                    continue
+ save_path = parent_dir + speaker + "/" + f"processed_{i}.wav"
+ torchaudio.save(save_path, wav, target_sr, channels_first=True)
+ # transcribe text
+ lang, text = transcribe_one(save_path)
+ if lang not in list(lang2token.keys()):
+ print(f"{lang} not supported, ignoring\n")
+ continue
+ text = lang2token[lang] + text + lang2token[lang] + "\n"
+ speaker_annos.append(save_path + "|" + speaker + "|" + text)
+
+ processed_files += 1
+ print(f"Processed: {processed_files}/{total_files}")
+            except Exception:
+                # skip files that cannot be loaded or transcribed
+                continue
+
+ # # clean annotation
+ # import argparse
+ # import text
+ # from utils import load_filepaths_and_text
+ # for i, line in enumerate(speaker_annos):
+ # path, sid, txt = line.split("|")
+ # cleaned_text = text._clean_text(txt, ["cjke_cleaners2"])
+ # cleaned_text += "\n" if not cleaned_text.endswith("\n") else ""
+ # speaker_annos[i] = path + "|" + sid + "|" + cleaned_text
+ # write into annotation
+ if len(speaker_annos) == 0:
+ print("Warning: no short audios found, this IS expected if you have only uploaded long audios, videos or video links.")
+ print("this IS NOT expected if you have uploaded a zip file of short audios. Please check your file structure or make sure your audio language is supported.")
+ with open("short_character_anno.txt", 'w', encoding='utf-8') as f:
+ for line in speaker_annos:
+ f.write(line)
+
+ # import json
+ # # generate new config
+ # with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
+ # hps = json.load(f)
+ # # modify n_speakers
+ # hps['data']["n_speakers"] = 1000 + len(speaker2id)
+ # # add speaker names
+ # for speaker in speaker_names:
+ # hps['speakers'][speaker] = speaker2id[speaker]
+ # # save modified config
+ # with open("./configs/modified_finetune_speaker.json", 'w', encoding='utf-8') as f:
+ # json.dump(hps, f, indent=2)
+ # print("finished")
diff --git a/scripts/video2audio.py b/scripts/video2audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..db50a5c6b62c4c1faea5fefbb078f16aa9bb7fa3
--- /dev/null
+++ b/scripts/video2audio.py
@@ -0,0 +1,27 @@
+import os
+from concurrent.futures import ThreadPoolExecutor
+
+from moviepy.editor import AudioFileClip
+
+video_dir = "./video_data/"
+audio_dir = "./raw_audio/"
+filelist = list(os.walk(video_dir))[0][2]
+
+
+def generate_infos():
+ videos = []
+ for file in filelist:
+ if file.endswith(".mp4"):
+ videos.append(file)
+ return videos
+
+
+def clip_file(file):
+ my_audio_clip = AudioFileClip(video_dir + file)
+    # str.rstrip(".mp4") strips characters, not the suffix, and can mangle codes ending in "4"
+    my_audio_clip.write_audiofile(audio_dir + file[:-len(".mp4")] + ".wav")
+
+
+if __name__ == "__main__":
+ infos = generate_infos()
+ with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
+ executor.map(clip_file, infos)
diff --git a/scripts/voice_upload.py b/scripts/voice_upload.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c825a933a7970e17e57c381b59a5fc4e06ea569
--- /dev/null
+++ b/scripts/voice_upload.py
@@ -0,0 +1,28 @@
+from google.colab import files
+import shutil
+import os
+import argparse
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--type", type=str, required=True, help="type of file to upload")
+ args = parser.parse_args()
+ file_type = args.type
+
+ basepath = os.getcwd()
+    uploaded = files.upload()  # upload the file(s)
+ assert(file_type in ['zip', 'audio', 'video'])
+ if file_type == "zip":
+ upload_path = "./custom_character_voice/"
+ for filename in uploaded.keys():
+            # move the uploaded file to the target location
+ shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, "custom_character_voice.zip"))
+ elif file_type == "audio":
+ upload_path = "./raw_audio/"
+ for filename in uploaded.keys():
+            # move the uploaded file to the target location
+ shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, filename))
+ elif file_type == "video":
+ upload_path = "./video_data/"
+ for filename in uploaded.keys():
+            # move the uploaded file to the target location
+ shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, filename))
\ No newline at end of file
diff --git a/text/LICENSE b/text/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..4ad4ed1d5e34d95c8380768ec16405d789cc6de4
--- /dev/null
+++ b/text/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Keith Ito
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/text/__init__.py b/text/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..11e5586c347c3071a9d1aca0425d112f45402e85
--- /dev/null
+++ b/text/__init__.py
@@ -0,0 +1,60 @@
+""" from https://github.com/keithito/tacotron """
+from text import cleaners
+from text.symbols import symbols
+
+
+# Mappings from symbol to numeric ID and vice versa:
+_symbol_to_id = {s: i for i, s in enumerate(symbols)}
+_id_to_symbol = {i: s for i, s in enumerate(symbols)}
+
+
+def text_to_sequence(text, symbols, cleaner_names):
+ '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
+ Args:
+ text: string to convert to a sequence
+ cleaner_names: names of the cleaner functions to run the text through
+ Returns:
+ List of integers corresponding to the symbols in the text
+ '''
+ sequence = []
+ symbol_to_id = {s: i for i, s in enumerate(symbols)}
+ clean_text = _clean_text(text, cleaner_names)
+ print(clean_text)
+ print(f" length:{len(clean_text)}")
+ for symbol in clean_text:
+ if symbol not in symbol_to_id.keys():
+ continue
+ symbol_id = symbol_to_id[symbol]
+ sequence += [symbol_id]
+ print(f" length:{len(sequence)}")
+ return sequence
+
+
+def cleaned_text_to_sequence(cleaned_text, symbols):
+ '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
+ Args:
+ text: string to convert to a sequence
+ Returns:
+ List of integers corresponding to the symbols in the text
+ '''
+ symbol_to_id = {s: i for i, s in enumerate(symbols)}
+ sequence = [symbol_to_id[symbol] for symbol in cleaned_text if symbol in symbol_to_id.keys()]
+ return sequence
+
+
+def sequence_to_text(sequence):
+ '''Converts a sequence of IDs back to a string'''
+ result = ''
+ for symbol_id in sequence:
+ s = _id_to_symbol[symbol_id]
+ result += s
+ return result
+
+
+def _clean_text(text, cleaner_names):
+ for name in cleaner_names:
+ cleaner = getattr(cleaners, name)
+ if not cleaner:
+ raise Exception('Unknown cleaner: %s' % name)
+ text = cleaner(text)
+ return text
diff --git a/text/__pycache__/__init__.cpython-310.pyc b/text/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..431ec7cef077236fc88e0fea7b39333426464116
Binary files /dev/null and b/text/__pycache__/__init__.cpython-310.pyc differ
diff --git a/text/__pycache__/__init__.cpython-37.pyc b/text/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fefc959a48283e047f64dfd10b000d282fa8470a
Binary files /dev/null and b/text/__pycache__/__init__.cpython-37.pyc differ
diff --git a/text/__pycache__/cleaners.cpython-310.pyc b/text/__pycache__/cleaners.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b0ee05bce24bc721b0568efb3955914d2486cbc8
Binary files /dev/null and b/text/__pycache__/cleaners.cpython-310.pyc differ
diff --git a/text/__pycache__/cleaners.cpython-37.pyc b/text/__pycache__/cleaners.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8e957e607713b127c687d71add433256868f66c
Binary files /dev/null and b/text/__pycache__/cleaners.cpython-37.pyc differ
diff --git a/text/__pycache__/english.cpython-310.pyc b/text/__pycache__/english.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c2e0d0a154d556927312952299c25067220cd5fa
Binary files /dev/null and b/text/__pycache__/english.cpython-310.pyc differ
diff --git a/text/__pycache__/english.cpython-37.pyc b/text/__pycache__/english.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2321c25c4a9ca0008b76392986ac7d5b7859de95
Binary files /dev/null and b/text/__pycache__/english.cpython-37.pyc differ
diff --git a/text/__pycache__/japanese.cpython-310.pyc b/text/__pycache__/japanese.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2f20164853788666b42303505476309e2279e61
Binary files /dev/null and b/text/__pycache__/japanese.cpython-310.pyc differ
diff --git a/text/__pycache__/japanese.cpython-37.pyc b/text/__pycache__/japanese.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b555d5d3310eed956b44850695afa3e3d091f60
Binary files /dev/null and b/text/__pycache__/japanese.cpython-37.pyc differ
diff --git a/text/__pycache__/korean.cpython-310.pyc b/text/__pycache__/korean.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d90ba789da81261cb5f4122b538deffae5bed5c7
Binary files /dev/null and b/text/__pycache__/korean.cpython-310.pyc differ
diff --git a/text/__pycache__/korean.cpython-37.pyc b/text/__pycache__/korean.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9eb67ffcee7e16d875dd3c6c873c77e5abc5ad0
Binary files /dev/null and b/text/__pycache__/korean.cpython-37.pyc differ
diff --git a/text/__pycache__/mandarin.cpython-310.pyc b/text/__pycache__/mandarin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69f0c5147f56cfd71314e8adbcec6d863c9ad23d
Binary files /dev/null and b/text/__pycache__/mandarin.cpython-310.pyc differ
diff --git a/text/__pycache__/mandarin.cpython-37.pyc b/text/__pycache__/mandarin.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..400d5a254cf7de02d7c64ea34acb49b081c08f4d
Binary files /dev/null and b/text/__pycache__/mandarin.cpython-37.pyc differ
diff --git a/text/__pycache__/sanskrit.cpython-310.pyc b/text/__pycache__/sanskrit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91ac1aa7d111d996f63d0f0ba70d78e13f68cea5
Binary files /dev/null and b/text/__pycache__/sanskrit.cpython-310.pyc differ
diff --git a/text/__pycache__/sanskrit.cpython-37.pyc b/text/__pycache__/sanskrit.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..131f9ac8f7638c1168ce59dc10a49a9a20a062e7
Binary files /dev/null and b/text/__pycache__/sanskrit.cpython-37.pyc differ
diff --git a/text/__pycache__/symbols.cpython-310.pyc b/text/__pycache__/symbols.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b21261e5e4886e20025034a2c42fe64b810fa27
Binary files /dev/null and b/text/__pycache__/symbols.cpython-310.pyc differ
diff --git a/text/__pycache__/symbols.cpython-37.pyc b/text/__pycache__/symbols.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b094158529277d2768ed2609d5eccb196fdc222
Binary files /dev/null and b/text/__pycache__/symbols.cpython-37.pyc differ
diff --git a/text/__pycache__/thai.cpython-310.pyc b/text/__pycache__/thai.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8adea5757789bd459435cc101e75c5a71c10a8e1
Binary files /dev/null and b/text/__pycache__/thai.cpython-310.pyc differ
diff --git a/text/__pycache__/thai.cpython-37.pyc b/text/__pycache__/thai.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..903b8ba8a1b5b968ae015f7cc7fce3fb35b5fdc8
Binary files /dev/null and b/text/__pycache__/thai.cpython-37.pyc differ
diff --git a/text/cantonese.py b/text/cantonese.py
new file mode 100644
index 0000000000000000000000000000000000000000..b66d12138b81b70b86f18217d24a08fce76305c0
--- /dev/null
+++ b/text/cantonese.py
@@ -0,0 +1,59 @@
+import re
+import cn2an
+import opencc
+
+
+converter = opencc.OpenCC('jyutjyu')
+
+# List of (Latin alphabet, ipa) pairs:
+_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('A', 'ei˥'),
+ ('B', 'biː˥'),
+ ('C', 'siː˥'),
+ ('D', 'tiː˥'),
+ ('E', 'iː˥'),
+ ('F', 'e˥fuː˨˩'),
+ ('G', 'tsiː˥'),
+ ('H', 'ɪk̚˥tsʰyː˨˩'),
+ ('I', 'ɐi˥'),
+ ('J', 'tsei˥'),
+ ('K', 'kʰei˥'),
+ ('L', 'e˥llou˨˩'),
+ ('M', 'ɛːm˥'),
+ ('N', 'ɛːn˥'),
+ ('O', 'ou˥'),
+ ('P', 'pʰiː˥'),
+ ('Q', 'kʰiːu˥'),
+ ('R', 'aː˥lou˨˩'),
+ ('S', 'ɛː˥siː˨˩'),
+ ('T', 'tʰiː˥'),
+ ('U', 'juː˥'),
+ ('V', 'wiː˥'),
+ ('W', 'tʊk̚˥piː˥juː˥'),
+ ('X', 'ɪk̚˥siː˨˩'),
+ ('Y', 'waːi˥'),
+ ('Z', 'iː˨sɛːt̚˥')
+]]
+
+
+def number_to_cantonese(text):
+ return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text)
+
+
+def latin_to_ipa(text):
+ for regex, replacement in _latin_to_ipa:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def cantonese_to_ipa(text):
+ text = number_to_cantonese(text.upper())
+ text = converter.convert(text).replace('-','').replace('$',' ')
+ text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text)
+    text = re.sub(r'[、；：]', '，', text)
+    text = re.sub(r'\s*，\s*', ', ', text)
+    text = re.sub(r'\s*。\s*', '. ', text)
+    text = re.sub(r'\s*？\s*', '? ', text)
+    text = re.sub(r'\s*！\s*', '! ', text)
+ text = re.sub(r'\s*$', '', text)
+ return text
diff --git a/text/cleaners.py b/text/cleaners.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d1bbffb95d20bfacb5d86bf64d55c953c3d75e9
--- /dev/null
+++ b/text/cleaners.py
@@ -0,0 +1,129 @@
+import re
+from text.japanese import japanese_to_romaji_with_accent, japanese_to_ipa, japanese_to_ipa2, japanese_to_ipa3
+from text.korean import latin_to_hangul, number_to_hangul, divide_hangul, korean_to_lazy_ipa, korean_to_ipa
+from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo, chinese_to_romaji, chinese_to_lazy_ipa, chinese_to_ipa, chinese_to_ipa2
+from text.sanskrit import devanagari_to_ipa
+from text.english import english_to_lazy_ipa, english_to_ipa2, english_to_lazy_ipa2
+from text.thai import num_to_thai, latin_to_thai
+# from text.shanghainese import shanghainese_to_ipa
+# from text.cantonese import cantonese_to_ipa
+# from text.ngu_dialect import ngu_dialect_to_ipa
+
+
+def japanese_cleaners(text):
+ text = japanese_to_romaji_with_accent(text)
+ text = re.sub(r'([A-Za-z])$', r'\1.', text)
+ return text
+
+
+def japanese_cleaners2(text):
+ return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
+
+
+def korean_cleaners(text):
+ '''Pipeline for Korean text'''
+ text = latin_to_hangul(text)
+ text = number_to_hangul(text)
+ text = divide_hangul(text)
+ text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
+ return text
+
+
+def chinese_cleaners(text):
+ '''Pipeline for Chinese text'''
+ text = text.replace("[ZH]", "")
+ text = number_to_chinese(text)
+ text = chinese_to_bopomofo(text)
+ text = latin_to_bopomofo(text)
+ text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
+ return text
+
+
+def zh_ja_mixture_cleaners(text):
+ text = re.sub(r'\[ZH\](.*?)\[ZH\]',
+ lambda x: chinese_to_romaji(x.group(1))+' ', text)
+ text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
+ x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')+' ', text)
+ text = re.sub(r'\s+$', '', text)
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+ return text
+
+
+def sanskrit_cleaners(text):
+ text = text.replace('॥', '।').replace('ॐ', 'ओम्')
+ text = re.sub(r'([^।])$', r'\1।', text)
+ return text
+
+
+def cjks_cleaners(text):
+ text = re.sub(r'\[ZH\](.*?)\[ZH\]',
+ lambda x: chinese_to_lazy_ipa(x.group(1))+' ', text)
+ text = re.sub(r'\[JA\](.*?)\[JA\]',
+ lambda x: japanese_to_ipa(x.group(1))+' ', text)
+ text = re.sub(r'\[KO\](.*?)\[KO\]',
+ lambda x: korean_to_lazy_ipa(x.group(1))+' ', text)
+ text = re.sub(r'\[SA\](.*?)\[SA\]',
+ lambda x: devanagari_to_ipa(x.group(1))+' ', text)
+ text = re.sub(r'\[EN\](.*?)\[EN\]',
+ lambda x: english_to_lazy_ipa(x.group(1))+' ', text)
+ text = re.sub(r'\s+$', '', text)
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+ return text
+
+
+def cjke_cleaners(text):
+ text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
+ 'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')+' ', text)
+ text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
+ 'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz')+' ', text)
+ text = re.sub(r'\[KO\](.*?)\[KO\]',
+ lambda x: korean_to_ipa(x.group(1))+' ', text)
+ text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
+ 'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u')+' ', text)
+ text = re.sub(r'\s+$', '', text)
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+ return text
+
+
+def cjke_cleaners2(text):
+ text = re.sub(r'\[ZH\](.*?)\[ZH\]',
+ lambda x: chinese_to_ipa(x.group(1))+' ', text)
+ text = re.sub(r'\[JA\](.*?)\[JA\]',
+ lambda x: japanese_to_ipa2(x.group(1))+' ', text)
+ text = re.sub(r'\[KO\](.*?)\[KO\]',
+ lambda x: korean_to_ipa(x.group(1))+' ', text)
+ text = re.sub(r'\[EN\](.*?)\[EN\]',
+ lambda x: english_to_ipa2(x.group(1))+' ', text)
+ text = re.sub(r'\s+$', '', text)
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+ return text
+
+
+def thai_cleaners(text):
+ text = num_to_thai(text)
+ text = latin_to_thai(text)
+ return text
+
+
+# def shanghainese_cleaners(text):
+# text = shanghainese_to_ipa(text)
+# text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+# return text
+
+
+# def chinese_dialect_cleaners(text):
+# text = re.sub(r'\[ZH\](.*?)\[ZH\]',
+# lambda x: chinese_to_ipa2(x.group(1))+' ', text)
+# text = re.sub(r'\[JA\](.*?)\[JA\]',
+# lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text)
+# text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
+# '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text)
+# text = re.sub(r'\[GD\](.*?)\[GD\]',
+# lambda x: cantonese_to_ipa(x.group(1))+' ', text)
+# text = re.sub(r'\[EN\](.*?)\[EN\]',
+# lambda x: english_to_lazy_ipa2(x.group(1))+' ', text)
+# text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
+# 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text)
+# text = re.sub(r'\s+$', '', text)
+# text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+# return text
diff --git a/text/english.py b/text/english.py
new file mode 100644
index 0000000000000000000000000000000000000000..6817392ba8a9eb830351de89fb7afc5ad72f5e42
--- /dev/null
+++ b/text/english.py
@@ -0,0 +1,188 @@
+""" from https://github.com/keithito/tacotron """
+
+'''
+Cleaners are transformations that run over the input text at both training and eval time.
+
+Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
+hyperparameter. Some cleaners are English-specific. You'll typically want to use:
+ 1. "english_cleaners" for English text
+ 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
+ the Unidecode library (https://pypi.python.org/pypi/Unidecode)
+ 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
+ the symbols in symbols.py to match your data).
+'''
+
+
+# Regular expression matching whitespace:
+
+
+import re
+import inflect
+from unidecode import unidecode
+import eng_to_ipa as ipa
+_inflect = inflect.engine()
+_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
+_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
+_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
+_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
+_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
+_number_re = re.compile(r'[0-9]+')
+
+# List of (regular expression, replacement) pairs for abbreviations:
+_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('mrs', 'misess'),
+ ('mr', 'mister'),
+ ('dr', 'doctor'),
+ ('st', 'saint'),
+ ('co', 'company'),
+ ('jr', 'junior'),
+ ('maj', 'major'),
+ ('gen', 'general'),
+ ('drs', 'doctors'),
+ ('rev', 'reverend'),
+ ('lt', 'lieutenant'),
+ ('hon', 'honorable'),
+ ('sgt', 'sergeant'),
+ ('capt', 'captain'),
+ ('esq', 'esquire'),
+ ('ltd', 'limited'),
+ ('col', 'colonel'),
+ ('ft', 'fort'),
+]]
+
+
+# List of (ipa, lazy ipa) pairs:
+_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('r', 'ɹ'),
+ ('æ', 'e'),
+ ('ɑ', 'a'),
+ ('ɔ', 'o'),
+ ('ð', 'z'),
+ ('θ', 's'),
+ ('ɛ', 'e'),
+ ('ɪ', 'i'),
+ ('ʊ', 'u'),
+ ('ʒ', 'ʥ'),
+ ('ʤ', 'ʥ'),
+ ('ˈ', '↓'),
+]]
+
+# List of (ipa, lazy ipa2) pairs:
+_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('r', 'ɹ'),
+ ('ð', 'z'),
+ ('θ', 's'),
+ ('ʒ', 'ʑ'),
+ ('ʤ', 'dʑ'),
+ ('ˈ', '↓'),
+]]
+
+# List of (ipa, ipa2) pairs
+_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('r', 'ɹ'),
+ ('ʤ', 'dʒ'),
+ ('ʧ', 'tʃ')
+]]
+
+
+def expand_abbreviations(text):
+ for regex, replacement in _abbreviations:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def collapse_whitespace(text):
+ return re.sub(r'\s+', ' ', text)
+
+
+def _remove_commas(m):
+ return m.group(1).replace(',', '')
+
+
+def _expand_decimal_point(m):
+ return m.group(1).replace('.', ' point ')
+
+
+def _expand_dollars(m):
+ match = m.group(1)
+ parts = match.split('.')
+ if len(parts) > 2:
+ return match + ' dollars' # Unexpected format
+ dollars = int(parts[0]) if parts[0] else 0
+ cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
+ if dollars and cents:
+ dollar_unit = 'dollar' if dollars == 1 else 'dollars'
+ cent_unit = 'cent' if cents == 1 else 'cents'
+ return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
+ elif dollars:
+ dollar_unit = 'dollar' if dollars == 1 else 'dollars'
+ return '%s %s' % (dollars, dollar_unit)
+ elif cents:
+ cent_unit = 'cent' if cents == 1 else 'cents'
+ return '%s %s' % (cents, cent_unit)
+ else:
+ return 'zero dollars'
+
+
+def _expand_ordinal(m):
+ return _inflect.number_to_words(m.group(0))
+
+
+def _expand_number(m):
+ num = int(m.group(0))
+ if num > 1000 and num < 3000:
+ if num == 2000:
+ return 'two thousand'
+ elif num > 2000 and num < 2010:
+ return 'two thousand ' + _inflect.number_to_words(num % 100)
+ elif num % 100 == 0:
+ return _inflect.number_to_words(num // 100) + ' hundred'
+ else:
+ return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
+ else:
+ return _inflect.number_to_words(num, andword='')
+
+
+def normalize_numbers(text):
+ text = re.sub(_comma_number_re, _remove_commas, text)
+ text = re.sub(_pounds_re, r'\1 pounds', text)
+ text = re.sub(_dollars_re, _expand_dollars, text)
+ text = re.sub(_decimal_number_re, _expand_decimal_point, text)
+ text = re.sub(_ordinal_re, _expand_ordinal, text)
+ text = re.sub(_number_re, _expand_number, text)
+ return text
+
+
+def mark_dark_l(text):
+ return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text)
+
+
+def english_to_ipa(text):
+ text = unidecode(text).lower()
+ text = expand_abbreviations(text)
+ text = normalize_numbers(text)
+ phonemes = ipa.convert(text)
+ phonemes = collapse_whitespace(phonemes)
+ return phonemes
+
+
+def english_to_lazy_ipa(text):
+ text = english_to_ipa(text)
+ for regex, replacement in _lazy_ipa:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def english_to_ipa2(text):
+ text = english_to_ipa(text)
+ text = mark_dark_l(text)
+ for regex, replacement in _ipa_to_ipa2:
+ text = re.sub(regex, replacement, text)
+ return text.replace('...', '…')
+
+
+def english_to_lazy_ipa2(text):
+ text = english_to_ipa(text)
+ for regex, replacement in _lazy_ipa2:
+ text = re.sub(regex, replacement, text)
+ return text
diff --git a/text/japanese.py b/text/japanese.py
new file mode 100644
index 0000000000000000000000000000000000000000..375e4d50872d5c68ee57ca17470a2ca425425eba
--- /dev/null
+++ b/text/japanese.py
@@ -0,0 +1,153 @@
+import re
+from unidecode import unidecode
+import pyopenjtalk
+
+
+# Regular expression matching Japanese without punctuation marks:
+_japanese_characters = re.compile(
+ r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
+
+# Regular expression matching non-Japanese characters or punctuation marks:
+_japanese_marks = re.compile(
+ r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
+
+# List of (symbol, Japanese) pairs for marks:
+_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('%', 'パーセント')
+]]
+
+# List of (romaji, ipa) pairs for marks:
+_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('ts', 'ʦ'),
+ ('u', 'ɯ'),
+ ('j', 'ʥ'),
+ ('y', 'j'),
+ ('ni', 'n^i'),
+ ('nj', 'n^'),
+ ('hi', 'çi'),
+ ('hj', 'ç'),
+ ('f', 'ɸ'),
+ ('I', 'i*'),
+ ('U', 'ɯ*'),
+ ('r', 'ɾ')
+]]
+
+# List of (romaji, ipa2) pairs for marks:
+_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('u', 'ɯ'),
+ ('ʧ', 'tʃ'),
+ ('j', 'dʑ'),
+ ('y', 'j'),
+ ('ni', 'n^i'),
+ ('nj', 'n^'),
+ ('hi', 'çi'),
+ ('hj', 'ç'),
+ ('f', 'ɸ'),
+ ('I', 'i*'),
+ ('U', 'ɯ*'),
+ ('r', 'ɾ')
+]]
+
+# List of (consonant, sokuon) pairs:
+_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
+ (r'Q([↑↓]*[kg])', r'k#\1'),
+ (r'Q([↑↓]*[tdjʧ])', r't#\1'),
+ (r'Q([↑↓]*[sʃ])', r's\1'),
+ (r'Q([↑↓]*[pb])', r'p#\1')
+]]
+
+# List of (consonant, hatsuon) pairs:
+_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
+ (r'N([↑↓]*[pbm])', r'm\1'),
+ (r'N([↑↓]*[ʧʥj])', r'n^\1'),
+ (r'N([↑↓]*[tdn])', r'n\1'),
+ (r'N([↑↓]*[kg])', r'ŋ\1')
+]]
+
+
+def symbols_to_japanese(text):
+ for regex, replacement in _symbols_to_japanese:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def japanese_to_romaji_with_accent(text):
+ '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
+ text = symbols_to_japanese(text)
+ sentences = re.split(_japanese_marks, text)
+ marks = re.findall(_japanese_marks, text)
+ text = ''
+ for i, sentence in enumerate(sentences):
+ if re.match(_japanese_characters, sentence):
+ if text != '':
+ text += ' '
+ labels = pyopenjtalk.extract_fullcontext(sentence)
+ for n, label in enumerate(labels):
+ phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
+ if phoneme not in ['sil', 'pau']:
+                    text += phoneme.replace('ch', 'ʧ').replace('sh', 'ʃ').replace('cl', 'Q')
+ else:
+ continue
+ # n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
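+                # The /A: field of the HTS full-context label encodes pitch accent:
+                # a1 = offset of the current mora from the accent nucleus (0 at the nucleus),
+                # a2 = position of the current mora counted from the start of the accent phrase,
+                # a3 = position of the current mora counted from the end of the accent phrase.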
+ a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
+ a2 = int(re.search(r"\+(\d+)\+", label).group(1))
+ a3 = int(re.search(r"\+(\d+)/", label).group(1))
+ if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']:
+ a2_next = -1
+ else:
+ a2_next = int(
+ re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
+ # Accent phrase boundary
+ if a3 == 1 and a2_next == 1:
+ text += ' '
+ # Falling
+ elif a1 == 0 and a2_next == a2 + 1:
+ text += '↓'
+ # Rising
+ elif a2 == 1 and a2_next == 2:
+ text += '↑'
+ if i < len(marks):
+ text += unidecode(marks[i]).replace(' ', '')
+ return text
+
+
+def get_real_sokuon(text):
+ for regex, replacement in _real_sokuon:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def get_real_hatsuon(text):
+ for regex, replacement in _real_hatsuon:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def japanese_to_ipa(text):
+ text = japanese_to_romaji_with_accent(text).replace('...', '…')
+ text = re.sub(
+ r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
+ text = get_real_sokuon(text)
+ text = get_real_hatsuon(text)
+ for regex, replacement in _romaji_to_ipa:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def japanese_to_ipa2(text):
+ text = japanese_to_romaji_with_accent(text).replace('...', '…')
+ text = get_real_sokuon(text)
+ text = get_real_hatsuon(text)
+ for regex, replacement in _romaji_to_ipa2:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def japanese_to_ipa3(text):
+ text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace(
+ 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a')
+ text = re.sub(
+ r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
+ text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text)
+ return text
diff --git a/text/korean.py b/text/korean.py
new file mode 100644
index 0000000000000000000000000000000000000000..edee07429a450c55e3d8e246997faaa1e0b89cc9
--- /dev/null
+++ b/text/korean.py
@@ -0,0 +1,210 @@
+import re
+from jamo import h2j, j2hcj
+import ko_pron
+
+
+# This is a list of Korean classifiers preceded by pure Korean numerals.
+_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
+
+# List of (hangul, hangul divided) pairs:
+_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('ㄳ', 'ㄱㅅ'),
+ ('ㄵ', 'ㄴㅈ'),
+ ('ㄶ', 'ㄴㅎ'),
+ ('ㄺ', 'ㄹㄱ'),
+ ('ㄻ', 'ㄹㅁ'),
+ ('ㄼ', 'ㄹㅂ'),
+ ('ㄽ', 'ㄹㅅ'),
+ ('ㄾ', 'ㄹㅌ'),
+ ('ㄿ', 'ㄹㅍ'),
+ ('ㅀ', 'ㄹㅎ'),
+ ('ㅄ', 'ㅂㅅ'),
+ ('ㅘ', 'ㅗㅏ'),
+ ('ㅙ', 'ㅗㅐ'),
+ ('ㅚ', 'ㅗㅣ'),
+ ('ㅝ', 'ㅜㅓ'),
+ ('ㅞ', 'ㅜㅔ'),
+ ('ㅟ', 'ㅜㅣ'),
+ ('ㅢ', 'ㅡㅣ'),
+ ('ㅑ', 'ㅣㅏ'),
+ ('ㅒ', 'ㅣㅐ'),
+ ('ㅕ', 'ㅣㅓ'),
+ ('ㅖ', 'ㅣㅔ'),
+ ('ㅛ', 'ㅣㅗ'),
+ ('ㅠ', 'ㅣㅜ')
+]]
+
+# List of (Latin alphabet, hangul) pairs:
+_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('a', '에이'),
+ ('b', '비'),
+ ('c', '시'),
+ ('d', '디'),
+ ('e', '이'),
+ ('f', '에프'),
+ ('g', '지'),
+ ('h', '에이치'),
+ ('i', '아이'),
+ ('j', '제이'),
+ ('k', '케이'),
+ ('l', '엘'),
+ ('m', '엠'),
+ ('n', '엔'),
+ ('o', '오'),
+ ('p', '피'),
+ ('q', '큐'),
+ ('r', '아르'),
+ ('s', '에스'),
+ ('t', '티'),
+ ('u', '유'),
+ ('v', '브이'),
+ ('w', '더블유'),
+ ('x', '엑스'),
+ ('y', '와이'),
+ ('z', '제트')
+]]
+
+# List of (ipa, lazy ipa) pairs:
+_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('t͡ɕ','ʧ'),
+ ('d͡ʑ','ʥ'),
+ ('ɲ','n^'),
+ ('ɕ','ʃ'),
+ ('ʷ','w'),
+ ('ɭ','l`'),
+ ('ʎ','ɾ'),
+ ('ɣ','ŋ'),
+ ('ɰ','ɯ'),
+ ('ʝ','j'),
+ ('ʌ','ə'),
+ ('ɡ','g'),
+ ('\u031a','#'),
+ ('\u0348','='),
+ ('\u031e',''),
+ ('\u0320',''),
+ ('\u0339','')
+]]
+
+
+def latin_to_hangul(text):
+ for regex, replacement in _latin_to_hangul:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def divide_hangul(text):
+ text = j2hcj(h2j(text))
+ for regex, replacement in _hangul_divided:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def hangul_number(num, sino=True):
+ '''Reference https://github.com/Kyubyong/g2pK'''
+ num = re.sub(',', '', num)
+
+ if num == '0':
+ return '영'
+ if not sino and num == '20':
+ return '스무'
+
+ digits = '123456789'
+ names = '일이삼사오육칠팔구'
+ digit2name = {d: n for d, n in zip(digits, names)}
+
+ modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
+ decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
+ digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
+ digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
+
+ spelledout = []
+ for i, digit in enumerate(num):
+ i = len(num) - i - 1
+ if sino:
+ if i == 0:
+ name = digit2name.get(digit, '')
+ elif i == 1:
+ name = digit2name.get(digit, '') + '십'
+ name = name.replace('일십', '십')
+ else:
+ if i == 0:
+ name = digit2mod.get(digit, '')
+ elif i == 1:
+ name = digit2dec.get(digit, '')
+ if digit == '0':
+ if i % 4 == 0:
+ last_three = spelledout[-min(3, len(spelledout)):]
+ if ''.join(last_three) == '':
+ spelledout.append('')
+ continue
+ else:
+ spelledout.append('')
+ continue
+ if i == 2:
+ name = digit2name.get(digit, '') + '백'
+ name = name.replace('일백', '백')
+ elif i == 3:
+ name = digit2name.get(digit, '') + '천'
+ name = name.replace('일천', '천')
+ elif i == 4:
+ name = digit2name.get(digit, '') + '만'
+ name = name.replace('일만', '만')
+ elif i == 5:
+ name = digit2name.get(digit, '') + '십'
+ name = name.replace('일십', '십')
+ elif i == 6:
+ name = digit2name.get(digit, '') + '백'
+ name = name.replace('일백', '백')
+ elif i == 7:
+ name = digit2name.get(digit, '') + '천'
+ name = name.replace('일천', '천')
+ elif i == 8:
+ name = digit2name.get(digit, '') + '억'
+ elif i == 9:
+ name = digit2name.get(digit, '') + '십'
+ elif i == 10:
+ name = digit2name.get(digit, '') + '백'
+ elif i == 11:
+ name = digit2name.get(digit, '') + '천'
+ elif i == 12:
+ name = digit2name.get(digit, '') + '조'
+ elif i == 13:
+ name = digit2name.get(digit, '') + '십'
+ elif i == 14:
+ name = digit2name.get(digit, '') + '백'
+ elif i == 15:
+ name = digit2name.get(digit, '') + '천'
+ spelledout.append(name)
+ return ''.join(elem for elem in spelledout)
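+
+# Illustrative examples:
+#   hangul_number('20', sino=True) -> '이십', hangul_number('20', sino=False) -> '스무'
+#   hangul_number('2023', sino=True) -> '이천이십삼'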
+
+
+def number_to_hangul(text):
+ '''Reference https://github.com/Kyubyong/g2pK'''
+ tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
+ for token in tokens:
+ num, classifier = token
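+        # Native Korean numerals before counting classifiers, Sino-Korean numerals otherwise.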
+ if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
+ spelledout = hangul_number(num, sino=False)
+ else:
+ spelledout = hangul_number(num, sino=True)
+ text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
+ # digit by digit for remaining digits
+ digits = '0123456789'
+ names = '영일이삼사오육칠팔구'
+ for d, n in zip(digits, names):
+ text = text.replace(d, n)
+ return text
+
+
+def korean_to_lazy_ipa(text):
+ text = latin_to_hangul(text)
+ text = number_to_hangul(text)
+ text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text)
+ for regex, replacement in _ipa_to_lazy_ipa:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def korean_to_ipa(text):
+ text = korean_to_lazy_ipa(text)
+ return text.replace('ʧ','tʃ').replace('ʥ','dʑ')
diff --git a/text/mandarin.py b/text/mandarin.py
new file mode 100644
index 0000000000000000000000000000000000000000..162e1b912dabec4b448ccd3d00d56306f82ce076
--- /dev/null
+++ b/text/mandarin.py
@@ -0,0 +1,326 @@
+import os
+import sys
+import re
+from pypinyin import lazy_pinyin, BOPOMOFO
+import jieba
+import cn2an
+import logging
+
+
+# List of (Latin alphabet, bopomofo) pairs:
+_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('a', 'ㄟˉ'),
+ ('b', 'ㄅㄧˋ'),
+ ('c', 'ㄙㄧˉ'),
+ ('d', 'ㄉㄧˋ'),
+ ('e', 'ㄧˋ'),
+ ('f', 'ㄝˊㄈㄨˋ'),
+ ('g', 'ㄐㄧˋ'),
+ ('h', 'ㄝˇㄑㄩˋ'),
+ ('i', 'ㄞˋ'),
+ ('j', 'ㄐㄟˋ'),
+ ('k', 'ㄎㄟˋ'),
+ ('l', 'ㄝˊㄛˋ'),
+ ('m', 'ㄝˊㄇㄨˋ'),
+ ('n', 'ㄣˉ'),
+ ('o', 'ㄡˉ'),
+ ('p', 'ㄆㄧˉ'),
+ ('q', 'ㄎㄧㄡˉ'),
+ ('r', 'ㄚˋ'),
+ ('s', 'ㄝˊㄙˋ'),
+ ('t', 'ㄊㄧˋ'),
+ ('u', 'ㄧㄡˉ'),
+ ('v', 'ㄨㄧˉ'),
+ ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
+ ('x', 'ㄝˉㄎㄨˋㄙˋ'),
+ ('y', 'ㄨㄞˋ'),
+ ('z', 'ㄗㄟˋ')
+]]
+
+# List of (bopomofo, romaji) pairs:
+_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('ㄅㄛ', 'p⁼wo'),
+ ('ㄆㄛ', 'pʰwo'),
+ ('ㄇㄛ', 'mwo'),
+ ('ㄈㄛ', 'fwo'),
+ ('ㄅ', 'p⁼'),
+ ('ㄆ', 'pʰ'),
+ ('ㄇ', 'm'),
+ ('ㄈ', 'f'),
+ ('ㄉ', 't⁼'),
+ ('ㄊ', 'tʰ'),
+ ('ㄋ', 'n'),
+ ('ㄌ', 'l'),
+ ('ㄍ', 'k⁼'),
+ ('ㄎ', 'kʰ'),
+ ('ㄏ', 'h'),
+ ('ㄐ', 'ʧ⁼'),
+ ('ㄑ', 'ʧʰ'),
+ ('ㄒ', 'ʃ'),
+ ('ㄓ', 'ʦ`⁼'),
+ ('ㄔ', 'ʦ`ʰ'),
+ ('ㄕ', 's`'),
+ ('ㄖ', 'ɹ`'),
+ ('ㄗ', 'ʦ⁼'),
+ ('ㄘ', 'ʦʰ'),
+ ('ㄙ', 's'),
+ ('ㄚ', 'a'),
+ ('ㄛ', 'o'),
+ ('ㄜ', 'ə'),
+ ('ㄝ', 'e'),
+ ('ㄞ', 'ai'),
+ ('ㄟ', 'ei'),
+ ('ㄠ', 'au'),
+ ('ㄡ', 'ou'),
+ ('ㄧㄢ', 'yeNN'),
+ ('ㄢ', 'aNN'),
+ ('ㄧㄣ', 'iNN'),
+ ('ㄣ', 'əNN'),
+ ('ㄤ', 'aNg'),
+ ('ㄧㄥ', 'iNg'),
+ ('ㄨㄥ', 'uNg'),
+ ('ㄩㄥ', 'yuNg'),
+ ('ㄥ', 'əNg'),
+ ('ㄦ', 'əɻ'),
+ ('ㄧ', 'i'),
+ ('ㄨ', 'u'),
+ ('ㄩ', 'ɥ'),
+ ('ˉ', '→'),
+ ('ˊ', '↑'),
+ ('ˇ', '↓↑'),
+ ('ˋ', '↓'),
+ ('˙', ''),
+ (',', ','),
+ ('。', '.'),
+ ('!', '!'),
+ ('?', '?'),
+ ('—', '-')
+]]
+
+# List of (romaji, ipa) pairs:
+_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('ʃy', 'ʃ'),
+ ('ʧʰy', 'ʧʰ'),
+ ('ʧ⁼y', 'ʧ⁼'),
+ ('NN', 'n'),
+ ('Ng', 'ŋ'),
+ ('y', 'j'),
+ ('h', 'x')
+]]
+
+# List of (bopomofo, ipa) pairs:
+_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('ㄅㄛ', 'p⁼wo'),
+ ('ㄆㄛ', 'pʰwo'),
+ ('ㄇㄛ', 'mwo'),
+ ('ㄈㄛ', 'fwo'),
+ ('ㄅ', 'p⁼'),
+ ('ㄆ', 'pʰ'),
+ ('ㄇ', 'm'),
+ ('ㄈ', 'f'),
+ ('ㄉ', 't⁼'),
+ ('ㄊ', 'tʰ'),
+ ('ㄋ', 'n'),
+ ('ㄌ', 'l'),
+ ('ㄍ', 'k⁼'),
+ ('ㄎ', 'kʰ'),
+ ('ㄏ', 'x'),
+ ('ㄐ', 'tʃ⁼'),
+ ('ㄑ', 'tʃʰ'),
+ ('ㄒ', 'ʃ'),
+ ('ㄓ', 'ts`⁼'),
+ ('ㄔ', 'ts`ʰ'),
+ ('ㄕ', 's`'),
+ ('ㄖ', 'ɹ`'),
+ ('ㄗ', 'ts⁼'),
+ ('ㄘ', 'tsʰ'),
+ ('ㄙ', 's'),
+ ('ㄚ', 'a'),
+ ('ㄛ', 'o'),
+ ('ㄜ', 'ə'),
+ ('ㄝ', 'ɛ'),
+ ('ㄞ', 'aɪ'),
+ ('ㄟ', 'eɪ'),
+ ('ㄠ', 'ɑʊ'),
+ ('ㄡ', 'oʊ'),
+ ('ㄧㄢ', 'jɛn'),
+ ('ㄩㄢ', 'ɥæn'),
+ ('ㄢ', 'an'),
+ ('ㄧㄣ', 'in'),
+ ('ㄩㄣ', 'ɥn'),
+ ('ㄣ', 'ən'),
+ ('ㄤ', 'ɑŋ'),
+ ('ㄧㄥ', 'iŋ'),
+ ('ㄨㄥ', 'ʊŋ'),
+ ('ㄩㄥ', 'jʊŋ'),
+ ('ㄥ', 'əŋ'),
+ ('ㄦ', 'əɻ'),
+ ('ㄧ', 'i'),
+ ('ㄨ', 'u'),
+ ('ㄩ', 'ɥ'),
+ ('ˉ', '→'),
+ ('ˊ', '↑'),
+ ('ˇ', '↓↑'),
+ ('ˋ', '↓'),
+ ('˙', ''),
+ (',', ','),
+ ('。', '.'),
+ ('!', '!'),
+ ('?', '?'),
+ ('—', '-')
+]]
+
+# List of (bopomofo, ipa2) pairs:
+_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('ㄅㄛ', 'pwo'),
+ ('ㄆㄛ', 'pʰwo'),
+ ('ㄇㄛ', 'mwo'),
+ ('ㄈㄛ', 'fwo'),
+ ('ㄅ', 'p'),
+ ('ㄆ', 'pʰ'),
+ ('ㄇ', 'm'),
+ ('ㄈ', 'f'),
+ ('ㄉ', 't'),
+ ('ㄊ', 'tʰ'),
+ ('ㄋ', 'n'),
+ ('ㄌ', 'l'),
+ ('ㄍ', 'k'),
+ ('ㄎ', 'kʰ'),
+ ('ㄏ', 'h'),
+ ('ㄐ', 'tɕ'),
+ ('ㄑ', 'tɕʰ'),
+ ('ㄒ', 'ɕ'),
+ ('ㄓ', 'tʂ'),
+ ('ㄔ', 'tʂʰ'),
+ ('ㄕ', 'ʂ'),
+ ('ㄖ', 'ɻ'),
+ ('ㄗ', 'ts'),
+ ('ㄘ', 'tsʰ'),
+ ('ㄙ', 's'),
+ ('ㄚ', 'a'),
+ ('ㄛ', 'o'),
+ ('ㄜ', 'ɤ'),
+ ('ㄝ', 'ɛ'),
+ ('ㄞ', 'aɪ'),
+ ('ㄟ', 'eɪ'),
+ ('ㄠ', 'ɑʊ'),
+ ('ㄡ', 'oʊ'),
+ ('ㄧㄢ', 'jɛn'),
+ ('ㄩㄢ', 'yæn'),
+ ('ㄢ', 'an'),
+ ('ㄧㄣ', 'in'),
+ ('ㄩㄣ', 'yn'),
+ ('ㄣ', 'ən'),
+ ('ㄤ', 'ɑŋ'),
+ ('ㄧㄥ', 'iŋ'),
+ ('ㄨㄥ', 'ʊŋ'),
+ ('ㄩㄥ', 'jʊŋ'),
+ ('ㄥ', 'ɤŋ'),
+ ('ㄦ', 'əɻ'),
+ ('ㄧ', 'i'),
+ ('ㄨ', 'u'),
+ ('ㄩ', 'y'),
+ ('ˉ', '˥'),
+ ('ˊ', '˧˥'),
+ ('ˇ', '˨˩˦'),
+ ('ˋ', '˥˩'),
+ ('˙', ''),
+ (',', ','),
+ ('。', '.'),
+ ('!', '!'),
+ ('?', '?'),
+ ('—', '-')
+]]
+
+
+def number_to_chinese(text):
+ numbers = re.findall(r'\d+(?:\.?\d+)?', text)
+ for number in numbers:
+ text = text.replace(number, cn2an.an2cn(number), 1)
+ return text
+
+
+def chinese_to_bopomofo(text):
+ text = text.replace('、', ',').replace(';', ',').replace(':', ',')
+ words = jieba.lcut(text, cut_all=False)
+ text = ''
+ for word in words:
+ bopomofos = lazy_pinyin(word, BOPOMOFO)
+ if not re.search('[\u4e00-\u9fff]', word):
+ text += word
+ continue
+ for i in range(len(bopomofos)):
+ bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i])
+ if text != '':
+ text += ' '
+ text += ''.join(bopomofos)
+ return text
+
+
+def latin_to_bopomofo(text):
+ for regex, replacement in _latin_to_bopomofo:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def bopomofo_to_romaji(text):
+ for regex, replacement in _bopomofo_to_romaji:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def bopomofo_to_ipa(text):
+ for regex, replacement in _bopomofo_to_ipa:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def bopomofo_to_ipa2(text):
+ for regex, replacement in _bopomofo_to_ipa2:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def chinese_to_romaji(text):
+ text = number_to_chinese(text)
+ text = chinese_to_bopomofo(text)
+ text = latin_to_bopomofo(text)
+ text = bopomofo_to_romaji(text)
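+    # Post-processing: turn medial i/u into glides (y/w) and append an explicit
+    # apical-vowel symbol (ɹ` after retroflexes, ɹ after dentals) when a sibilant
+    # initial stands alone before a tone mark or the end of a syllable.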
+ text = re.sub('i([aoe])', r'y\1', text)
+ text = re.sub('u([aoəe])', r'w\1', text)
+ text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
+ r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
+ text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
+ return text
+
+
+def chinese_to_lazy_ipa(text):
+ text = chinese_to_romaji(text)
+ for regex, replacement in _romaji_to_ipa:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def chinese_to_ipa(text):
+ text = number_to_chinese(text)
+ text = chinese_to_bopomofo(text)
+ text = latin_to_bopomofo(text)
+ text = bopomofo_to_ipa(text)
+ text = re.sub('i([aoe])', r'j\1', text)
+ text = re.sub('u([aoəe])', r'w\1', text)
+ text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
+ r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
+ text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
+ return text
+
+
+def chinese_to_ipa2(text):
+ text = number_to_chinese(text)
+ text = chinese_to_bopomofo(text)
+ text = latin_to_bopomofo(text)
+ text = bopomofo_to_ipa2(text)
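+    # ʅ and ɿ are the sinological symbols for the apical vowels that follow
+    # retroflex and dental sibilants respectively; the ˥˩-style tone letters come
+    # from the _bopomofo_to_ipa2 table above.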
+ text = re.sub(r'i([aoe])', r'j\1', text)
+ text = re.sub(r'u([aoəe])', r'w\1', text)
+ text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text)
+ text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text)
+ return text
diff --git a/text/ngu_dialect.py b/text/ngu_dialect.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce3e12bbf0469426872eed5f681985d3e1be9b26
--- /dev/null
+++ b/text/ngu_dialect.py
@@ -0,0 +1,30 @@
+import re
+import opencc
+
+
+dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou',
+ 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing',
+ 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang',
+ 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan',
+ 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen',
+ 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'}
+
+converters = {}
+
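+# The dialect names above refer to OpenCC conversion configs that ship with a
+# custom (NGU-dialect) build of OpenCC rather than the stock package; any config
+# missing from the installed build is simply skipped.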
+for dialect in dialects.values():
+ try:
+ converters[dialect] = opencc.OpenCC(dialect)
+    except Exception:
+ pass
+
+
+def ngu_dialect_to_ipa(text, dialect):
+ dialect = dialects[dialect]
+ text = converters[dialect].convert(text).replace('-','').replace('$',' ')
+ text = re.sub(r'[、;:]', ',', text)
+ text = re.sub(r'\s*,\s*', ', ', text)
+ text = re.sub(r'\s*。\s*', '. ', text)
+ text = re.sub(r'\s*?\s*', '? ', text)
+ text = re.sub(r'\s*!\s*', '! ', text)
+ text = re.sub(r'\s*$', '', text)
+ return text
diff --git a/text/sanskrit.py b/text/sanskrit.py
new file mode 100644
index 0000000000000000000000000000000000000000..0223aaac384a2f850f5bc20651fc18eb964607d0
--- /dev/null
+++ b/text/sanskrit.py
@@ -0,0 +1,62 @@
+import re
+from indic_transliteration import sanscript
+
+
+# List of (iast, ipa) pairs:
+_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('a', 'ə'),
+ ('ā', 'aː'),
+ ('ī', 'iː'),
+ ('ū', 'uː'),
+ ('ṛ', 'ɹ`'),
+ ('ṝ', 'ɹ`ː'),
+ ('ḷ', 'l`'),
+ ('ḹ', 'l`ː'),
+ ('e', 'eː'),
+ ('o', 'oː'),
+ ('k', 'k⁼'),
+ ('k⁼h', 'kʰ'),
+ ('g', 'g⁼'),
+ ('g⁼h', 'gʰ'),
+ ('ṅ', 'ŋ'),
+ ('c', 'ʧ⁼'),
+ ('ʧ⁼h', 'ʧʰ'),
+ ('j', 'ʥ⁼'),
+ ('ʥ⁼h', 'ʥʰ'),
+ ('ñ', 'n^'),
+ ('ṭ', 't`⁼'),
+ ('t`⁼h', 't`ʰ'),
+ ('ḍ', 'd`⁼'),
+ ('d`⁼h', 'd`ʰ'),
+ ('ṇ', 'n`'),
+ ('t', 't⁼'),
+ ('t⁼h', 'tʰ'),
+ ('d', 'd⁼'),
+ ('d⁼h', 'dʰ'),
+ ('p', 'p⁼'),
+ ('p⁼h', 'pʰ'),
+ ('b', 'b⁼'),
+ ('b⁼h', 'bʰ'),
+ ('y', 'j'),
+ ('ś', 'ʃ'),
+ ('ṣ', 's`'),
+ ('r', 'ɾ'),
+ ('l̤', 'l`'),
+ ('h', 'ɦ'),
+ ("'", ''),
+ ('~', '^'),
+ ('ṃ', '^')
+]]
+
+
+def devanagari_to_ipa(text):
+ text = text.replace('ॐ', 'ओम्')
+ text = re.sub(r'\s*।\s*$', '.', text)
+ text = re.sub(r'\s*।\s*', ', ', text)
+ text = re.sub(r'\s*॥', '.', text)
+ text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST)
+ for regex, replacement in _iast_to_ipa:
+ text = re.sub(regex, replacement, text)
+    text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)[:-1] + 'h' + x.group(1) + '*', text)
+ return text
diff --git a/text/shanghainese.py b/text/shanghainese.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb29c24a08d2e406e8399cf7bc9fe5cb43cb9c61
--- /dev/null
+++ b/text/shanghainese.py
@@ -0,0 +1,64 @@
+import re
+import cn2an
+import opencc
+
+
+converter = opencc.OpenCC('zaonhe')
+
+# List of (Latin alphabet, ipa) pairs:
+_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('A', 'ᴇ'),
+ ('B', 'bi'),
+ ('C', 'si'),
+ ('D', 'di'),
+ ('E', 'i'),
+ ('F', 'ᴇf'),
+ ('G', 'dʑi'),
+ ('H', 'ᴇtɕʰ'),
+ ('I', 'ᴀi'),
+ ('J', 'dʑᴇ'),
+ ('K', 'kʰᴇ'),
+ ('L', 'ᴇl'),
+ ('M', 'ᴇm'),
+ ('N', 'ᴇn'),
+ ('O', 'o'),
+ ('P', 'pʰi'),
+ ('Q', 'kʰiu'),
+ ('R', 'ᴀl'),
+ ('S', 'ᴇs'),
+ ('T', 'tʰi'),
+ ('U', 'ɦiu'),
+ ('V', 'vi'),
+ ('W', 'dᴀbɤliu'),
+ ('X', 'ᴇks'),
+ ('Y', 'uᴀi'),
+ ('Z', 'zᴇ')
+]]
+
+
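+# Spell out an Arabic number with Shanghainese readings: '一十' becomes '十',
+# '二十' becomes '廿', and '二' becomes '两' everywhere except immediately after
+# a bare '十' or '廿' (e.g. 十二, 廿二), where it is restored to '二'.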
+def _number_to_shanghainese(num):
+ num = cn2an.an2cn(num).replace('一十','十').replace('二十', '廿').replace('二', '两')
+ return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num)
+
+
+def number_to_shanghainese(text):
+ return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text)
+
+
+def latin_to_ipa(text):
+ for regex, replacement in _latin_to_ipa:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def shanghainese_to_ipa(text):
+ text = number_to_shanghainese(text.upper())
+ text = converter.convert(text).replace('-','').replace('$',' ')
+ text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text)
+ text = re.sub(r'[、;:]', ',', text)
+ text = re.sub(r'\s*,\s*', ', ', text)
+ text = re.sub(r'\s*。\s*', '. ', text)
+ text = re.sub(r'\s*?\s*', '? ', text)
+ text = re.sub(r'\s*!\s*', '! ', text)
+ text = re.sub(r'\s*$', '', text)
+ return text
diff --git a/text/symbols.py b/text/symbols.py
new file mode 100644
index 0000000000000000000000000000000000000000..789e9df25d3d93d1976ef22d15d77f51d170ed00
--- /dev/null
+++ b/text/symbols.py
@@ -0,0 +1,76 @@
+'''
+Defines the set of symbols used in text input to the model.
+'''
+
+# japanese_cleaners
+# _pad = '_'
+# _punctuation = ',.!?-'
+# _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
+
+
+'''# japanese_cleaners2
+_pad = '_'
+_punctuation = ',.!?-~…'
+_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
+'''
+
+
+'''# korean_cleaners
+_pad = '_'
+_punctuation = ',.!?…~'
+_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
+'''
+
+'''# chinese_cleaners
+_pad = '_'
+_punctuation = ',。!?—…'
+_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
+'''
+
+# # zh_ja_mixture_cleaners
+# _pad = '_'
+# _punctuation = ',.!?-~…'
+# _letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
+
+
+'''# sanskrit_cleaners
+_pad = '_'
+_punctuation = '।'
+_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ '
+'''
+
+'''# cjks_cleaners
+_pad = '_'
+_punctuation = ',.!?-~…'
+_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ '
+'''
+
+'''# thai_cleaners
+_pad = '_'
+_punctuation = '.!? '
+_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์'
+'''
+
+# # cjke_cleaners2
+_pad = '_'
+_punctuation = ',.!?-~…'
+_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ '
+
+
+'''# shanghainese_cleaners
+_pad = '_'
+_punctuation = ',.!?…'
+_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 '
+'''
+
+'''# chinese_dialect_cleaners
+_pad = '_'
+_punctuation = ',.!?~…─'
+_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ '
+'''
+
+# Export all symbols:
+symbols = [_pad] + list(_punctuation) + list(_letters)
+
+# Special symbol ids
+SPACE_ID = symbols.index(" ")
diff --git a/text/thai.py b/text/thai.py
new file mode 100644
index 0000000000000000000000000000000000000000..998207c01a85c710a46db1ec8b62c39c2d94bc84
--- /dev/null
+++ b/text/thai.py
@@ -0,0 +1,44 @@
+import re
+from num_thai.thainumbers import NumThai
+
+
+num = NumThai()
+
+# List of (Latin alphabet, Thai) pairs:
+_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('a', 'เอ'),
+ ('b','บี'),
+ ('c','ซี'),
+ ('d','ดี'),
+ ('e','อี'),
+ ('f','เอฟ'),
+ ('g','จี'),
+ ('h','เอช'),
+ ('i','ไอ'),
+ ('j','เจ'),
+ ('k','เค'),
+ ('l','แอล'),
+ ('m','เอ็ม'),
+ ('n','เอ็น'),
+ ('o','โอ'),
+ ('p','พี'),
+ ('q','คิว'),
+ ('r','แอร์'),
+ ('s','เอส'),
+ ('t','ที'),
+ ('u','ยู'),
+ ('v','วี'),
+ ('w','ดับเบิลยู'),
+ ('x','เอ็กซ์'),
+ ('y','วาย'),
+ ('z','ซี')
+]]
+
+
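+# Spell out Arabic numerals (including comma-grouped and decimal forms) as Thai
+# number words via num_thai; the exact wording depends on the num_thai package.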
+def num_to_thai(text):
+ return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text)
+
+def latin_to_thai(text):
+ for regex, replacement in _latin_to_thai:
+ text = re.sub(regex, replacement, text)
+ return text
diff --git a/transforms.py b/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..4793d67ca5a5630e0ffe0f9fb29445c949e64dae
--- /dev/null
+++ b/transforms.py
@@ -0,0 +1,193 @@
+import torch
+from torch.nn import functional as F
+
+import numpy as np
+
+
+DEFAULT_MIN_BIN_WIDTH = 1e-3
+DEFAULT_MIN_BIN_HEIGHT = 1e-3
+DEFAULT_MIN_DERIVATIVE = 1e-3
+
+
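+# Monotonic piecewise rational-quadratic spline transform (Durkan et al. 2019,
+# "Neural Spline Flows"). Returns the transformed inputs together with the
+# element-wise log|det J|. With tails='linear', `unnormalized_derivatives` is
+# expected to have num_bins - 1 entries in its last dimension (it is padded to
+# num_bins + 1 inside unconstrained_rational_quadratic_spline).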
+def piecewise_rational_quadratic_transform(inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ tails=None,
+ tail_bound=1.,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
+
+ if tails is None:
+ spline_fn = rational_quadratic_spline
+ spline_kwargs = {}
+ else:
+ spline_fn = unconstrained_rational_quadratic_spline
+ spline_kwargs = {
+ 'tails': tails,
+ 'tail_bound': tail_bound
+ }
+
+ outputs, logabsdet = spline_fn(
+ inputs=inputs,
+ unnormalized_widths=unnormalized_widths,
+ unnormalized_heights=unnormalized_heights,
+ unnormalized_derivatives=unnormalized_derivatives,
+ inverse=inverse,
+ min_bin_width=min_bin_width,
+ min_bin_height=min_bin_height,
+ min_derivative=min_derivative,
+ **spline_kwargs
+ )
+ return outputs, logabsdet
+
+
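+# For each input value, return the index of the bin (delimited by bin_locations
+# along the last dimension) that contains it.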
+def searchsorted(bin_locations, inputs, eps=1e-6):
+ bin_locations[..., -1] += eps
+ return torch.sum(
+ inputs[..., None] >= bin_locations,
+ dim=-1
+ ) - 1
+
+
+def unconstrained_rational_quadratic_spline(inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ tails='linear',
+ tail_bound=1.,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
+ inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
+ outside_interval_mask = ~inside_interval_mask
+
+ outputs = torch.zeros_like(inputs)
+ logabsdet = torch.zeros_like(inputs)
+
+ if tails == 'linear':
+ unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
+ constant = np.log(np.exp(1 - min_derivative) - 1)
+ unnormalized_derivatives[..., 0] = constant
+ unnormalized_derivatives[..., -1] = constant
+
+ outputs[outside_interval_mask] = inputs[outside_interval_mask]
+ logabsdet[outside_interval_mask] = 0
+ else:
+ raise RuntimeError('{} tails are not implemented.'.format(tails))
+
+ outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
+ inputs=inputs[inside_interval_mask],
+ unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
+ unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
+ unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
+ inverse=inverse,
+ left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
+ min_bin_width=min_bin_width,
+ min_bin_height=min_bin_height,
+ min_derivative=min_derivative
+ )
+
+ return outputs, logabsdet
+
+def rational_quadratic_spline(inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ left=0., right=1., bottom=0., top=1.,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
+ if torch.min(inputs) < left or torch.max(inputs) > right:
+ raise ValueError('Input to a transform is not within its domain')
+
+ num_bins = unnormalized_widths.shape[-1]
+
+ if min_bin_width * num_bins > 1.0:
+ raise ValueError('Minimal bin width too large for the number of bins')
+ if min_bin_height * num_bins > 1.0:
+ raise ValueError('Minimal bin height too large for the number of bins')
+
+ widths = F.softmax(unnormalized_widths, dim=-1)
+ widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
+ cumwidths = torch.cumsum(widths, dim=-1)
+ cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
+ cumwidths = (right - left) * cumwidths + left
+ cumwidths[..., 0] = left
+ cumwidths[..., -1] = right
+ widths = cumwidths[..., 1:] - cumwidths[..., :-1]
+
+ derivatives = min_derivative + F.softplus(unnormalized_derivatives)
+
+ heights = F.softmax(unnormalized_heights, dim=-1)
+ heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
+ cumheights = torch.cumsum(heights, dim=-1)
+ cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
+ cumheights = (top - bottom) * cumheights + bottom
+ cumheights[..., 0] = bottom
+ cumheights[..., -1] = top
+ heights = cumheights[..., 1:] - cumheights[..., :-1]
+
+ if inverse:
+ bin_idx = searchsorted(cumheights, inputs)[..., None]
+ else:
+ bin_idx = searchsorted(cumwidths, inputs)[..., None]
+
+ input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
+ input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
+
+ input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
+ delta = heights / widths
+ input_delta = delta.gather(-1, bin_idx)[..., 0]
+
+ input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
+ input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
+
+ input_heights = heights.gather(-1, bin_idx)[..., 0]
+
+ if inverse:
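+        # Invert the rational-quadratic map by solving a quadratic a·θ² + b·θ + c = 0
+        # for the normalised position θ inside the bin, then map θ back to the input range.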
+ a = (((inputs - input_cumheights) * (input_derivatives
+ + input_derivatives_plus_one
+ - 2 * input_delta)
+ + input_heights * (input_delta - input_derivatives)))
+ b = (input_heights * input_derivatives
+ - (inputs - input_cumheights) * (input_derivatives
+ + input_derivatives_plus_one
+ - 2 * input_delta))
+ c = - input_delta * (inputs - input_cumheights)
+
+ discriminant = b.pow(2) - 4 * a * c
+ assert (discriminant >= 0).all()
+
+ root = (2 * c) / (-b - torch.sqrt(discriminant))
+ outputs = root * input_bin_widths + input_cumwidths
+
+ theta_one_minus_theta = root * (1 - root)
+ denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+ * theta_one_minus_theta)
+ derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
+ + 2 * input_delta * theta_one_minus_theta
+ + input_derivatives * (1 - root).pow(2))
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+ return outputs, -logabsdet
+ else:
+ theta = (inputs - input_cumwidths) / input_bin_widths
+ theta_one_minus_theta = theta * (1 - theta)
+
+ numerator = input_heights * (input_delta * theta.pow(2)
+ + input_derivatives * theta_one_minus_theta)
+ denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+ * theta_one_minus_theta)
+ outputs = input_cumheights + numerator / denominator
+
+ derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
+ + 2 * input_delta * theta_one_minus_theta
+ + input_derivatives * (1 - theta).pow(2))
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+ return outputs, logabsdet
diff --git a/utils.py b/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9be920642581ae69f4a4c96795e8382c4f11b50b
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,400 @@
+import os
+import glob
+import sys
+import argparse
+import logging
+import json
+import subprocess
+import numpy as np
+from scipy.io.wavfile import read
+import torch
+import regex as re
+
+MATPLOTLIB_FLAG = False
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+logger = logging
+
+
+
+zh_pattern = re.compile(r'[\u4e00-\u9fa5]')
+en_pattern = re.compile(r'[a-zA-Z]')
+jp_pattern = re.compile(r'[\u3040-\u30ff\u31f0-\u31ff]')
+kr_pattern = re.compile(r'[\uac00-\ud7af\u1100-\u11ff\u3130-\u318f\ua960-\ua97f]')
+num_pattern=re.compile(r'[0-9]')
+comma=r"(?<=[.。!!??;;,,、::'\"‘“”’()()《》「」~——])" #向前匹配但固定长度
+tags={'ZH':'[ZH]','EN':'[EN]','JP':'[JA]','KR':'[KR]'}
+
+def tag_cjke(text):
+    '''Tag Chinese/English/Japanese/Korean text with language markers. A regex alone cannot
+    separate Chinese from Japanese, so the text is first split into sentences and Japanese
+    is detected per sentence, which covers most cases.'''
+    sentences = re.split(r"([.。!!??;;,,、::'\"‘“”’()()【】《》「」~——]+ *(?![0-9]))", text)  # split into sentences; don't split on decimal points
+ sentences.append("")
+ sentences = ["".join(i) for i in zip(sentences[0::2],sentences[1::2])]
+ # print(sentences)
+ prev_lang=None
+ tagged_text = ""
+ for s in sentences:
+        # skip sentences that consist only of punctuation
+ nu = re.sub(r'[\s\p{P}]+', '', s, flags=re.U).strip()
+ if len(nu)==0:
+ continue
+ s = re.sub(r'[()()《》「」【】‘“”’]+', '', s)
+ jp=re.findall(jp_pattern, s)
+        # if the sentence contains any kana, treat it as Japanese
+ if len(jp)>0:
+ prev_lang,tagged_jke=tag_jke(s,prev_lang)
+ tagged_text +=tagged_jke
+ else:
+ prev_lang,tagged_cke=tag_cke(s,prev_lang)
+ tagged_text +=tagged_cke
+ return tagged_text
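+
+# Illustrative example:
+#   tag_cjke('你好, hello') -> '[ZH]你好, [ZH][EN]hello[EN]'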
+
+def tag_jke(text,prev_sentence=None):
+    '''Tag English/Japanese/Korean text.'''
+    # initialise tagging state
+    tagged_text = ""
+    prev_lang = None
+    tagged = 0
+    # walk through the text character by character
+    for char in text:
+        # decide which language the current character belongs to
+ if jp_pattern.match(char):
+ lang = "JP"
+ elif zh_pattern.match(char):
+ lang = "JP"
+ elif kr_pattern.match(char):
+ lang = "KR"
+ elif en_pattern.match(char):
+ lang = "EN"
+ # elif num_pattern.match(char):
+ # lang = prev_sentence
+ else:
+ lang = None
+ tagged_text += char
+ continue
+        # when the language changes, close the previous tag and open a new one
+        if lang != prev_lang:
+            tagged = 1
+            if prev_lang is None:  # start of the sentence
+                tagged_text = tags[lang] + tagged_text
+            else:
+                tagged_text = tagged_text + tags[prev_lang] + tags[lang]
+
+            # remember the current language
+            prev_lang = lang
+
+        # append the current character to the tagged text
+        tagged_text += char
+
+    # close the tag of the last language seen
+ if prev_lang:
+ tagged_text += tags[prev_lang]
+ if not tagged:
+ prev_lang=prev_sentence
+ tagged_text =tags[prev_lang]+tagged_text+tags[prev_lang]
+
+ return prev_lang,tagged_text
+
+def tag_cke(text,prev_sentence=None):
+    '''Tag Chinese/English/Korean text.'''
+    # initialise tagging state
+    tagged_text = ""
+    prev_lang = None
+    # whether any character has actually been tagged
+    tagged = 0
+
+    # walk through the text character by character
+    for char in text:
+        # decide which language the current character belongs to
+ if zh_pattern.match(char):
+ lang = "ZH"
+ elif kr_pattern.match(char):
+ lang = "KR"
+ elif en_pattern.match(char):
+ lang = "EN"
+ # elif num_pattern.match(char):
+ # lang = prev_sentence
+ else:
+            # skip characters outside the tracked scripts
+ lang = None
+ tagged_text += char
+ continue
+
+        # when the language changes, close the previous tag and open a new one
+        if lang != prev_lang:
+            tagged = 1
+            if prev_lang is None:  # start of the sentence
+                tagged_text = tags[lang] + tagged_text
+            else:
+                tagged_text = tagged_text + tags[prev_lang] + tags[lang]
+
+            # remember the current language
+            prev_lang = lang
+
+        # append the current character to the tagged text
+        tagged_text += char
+
+    # close the tag of the last language seen
+    if prev_lang:
+        tagged_text += tags[prev_lang]
+    # if nothing was tagged, inherit the language of the previous sentence
+    if tagged == 0:
+        prev_lang = prev_sentence
+        tagged_text = tags[prev_lang] + tagged_text + tags[prev_lang]
+ return prev_lang,tagged_text
+
+
+
+def load_checkpoint(checkpoint_path, model, optimizer=None, drop_speaker_emb=False):
+ assert os.path.isfile(checkpoint_path)
+ checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
+ iteration = checkpoint_dict['iteration']
+ learning_rate = checkpoint_dict['learning_rate']
+ if optimizer is not None:
+ optimizer.load_state_dict(checkpoint_dict['optimizer'])
+ saved_state_dict = checkpoint_dict['model']
+ if hasattr(model, 'module'):
+ state_dict = model.module.state_dict()
+ else:
+ state_dict = model.state_dict()
+ new_state_dict = {}
+ for k, v in state_dict.items():
+ try:
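+            # The speaker-embedding table (emb_g.weight) may have a different
+            # number of rows than the checkpoint: copy only the overlapping rows,
+            # or keep the freshly initialised table when drop_speaker_emb is set.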
+ if k == 'emb_g.weight':
+ if drop_speaker_emb:
+ new_state_dict[k] = v
+ continue
+ v[:saved_state_dict[k].shape[0], :] = saved_state_dict[k]
+ new_state_dict[k] = v
+ else:
+ new_state_dict[k] = saved_state_dict[k]
+        except Exception:
+ logger.info("%s is not in the checkpoint" % k)
+ new_state_dict[k] = v
+ if hasattr(model, 'module'):
+ model.module.load_state_dict(new_state_dict)
+ else:
+ model.load_state_dict(new_state_dict)
+ logger.info("Loaded checkpoint '{}' (iteration {})".format(
+ checkpoint_path, iteration))
+ return model, optimizer, learning_rate, iteration
+
+
+def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
+ logger.info("Saving model and optimizer state at iteration {} to {}".format(
+ iteration, checkpoint_path))
+ if hasattr(model, 'module'):
+ state_dict = model.module.state_dict()
+ else:
+ state_dict = model.state_dict()
+ torch.save({'model': state_dict,
+ 'iteration': iteration,
+ 'optimizer': optimizer.state_dict() if optimizer is not None else None,
+ 'learning_rate': learning_rate}, checkpoint_path)
+
+
+def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
+ for k, v in scalars.items():
+ writer.add_scalar(k, v, global_step)
+ for k, v in histograms.items():
+ writer.add_histogram(k, v, global_step)
+ for k, v in images.items():
+ writer.add_image(k, v, global_step, dataformats='HWC')
+ for k, v in audios.items():
+ writer.add_audio(k, v, global_step, audio_sampling_rate)
+
+
+def latest_checkpoint_path(dir_path, regex="G_*.pth"):
+ f_list = glob.glob(os.path.join(dir_path, regex))
+ f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
+ x = f_list[-1]
+ print(x)
+ return x
+
+
+def plot_spectrogram_to_numpy(spectrogram):
+ global MATPLOTLIB_FLAG
+ if not MATPLOTLIB_FLAG:
+ import matplotlib
+ matplotlib.use("Agg")
+ MATPLOTLIB_FLAG = True
+ mpl_logger = logging.getLogger('matplotlib')
+ mpl_logger.setLevel(logging.WARNING)
+ import matplotlib.pylab as plt
+ import numpy as np
+
+ fig, ax = plt.subplots(figsize=(10, 2))
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower",
+ interpolation='none')
+ plt.colorbar(im, ax=ax)
+ plt.xlabel("Frames")
+ plt.ylabel("Channels")
+ plt.tight_layout()
+
+ fig.canvas.draw()
+    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close()
+ return data
+
+
+def plot_alignment_to_numpy(alignment, info=None):
+ global MATPLOTLIB_FLAG
+ if not MATPLOTLIB_FLAG:
+ import matplotlib
+ matplotlib.use("Agg")
+ MATPLOTLIB_FLAG = True
+ mpl_logger = logging.getLogger('matplotlib')
+ mpl_logger.setLevel(logging.WARNING)
+ import matplotlib.pylab as plt
+ import numpy as np
+
+ fig, ax = plt.subplots(figsize=(6, 4))
+ im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
+ interpolation='none')
+ fig.colorbar(im, ax=ax)
+ xlabel = 'Decoder timestep'
+ if info is not None:
+ xlabel += '\n\n' + info
+ plt.xlabel(xlabel)
+ plt.ylabel('Encoder timestep')
+ plt.tight_layout()
+
+ fig.canvas.draw()
+    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close()
+ return data
+
+
+def load_wav_to_torch(full_path):
+ sampling_rate, data = read(full_path)
+ return torch.FloatTensor(data.astype(np.float32)), sampling_rate
+
+
+def load_filepaths_and_text(filename, split="|"):
+ with open(filename, encoding='utf-8') as f:
+ filepaths_and_text = [line.strip().split(split) for line in f]
+ return filepaths_and_text
+
+
+def get_hparams(init=True):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-c', '--config', type=str, default="./configs/modified_finetune_speaker.json",
+ help='JSON file for configuration')
+ parser.add_argument('-m', '--model', type=str, default="pretrained_models",
+ help='Model name')
+ parser.add_argument('-n', '--max_epochs', type=int, default=50,
+ help='finetune epochs')
+ parser.add_argument('--drop_speaker_embed', type=bool, default=False, help='whether to drop existing characters')
+
+ args = parser.parse_args()
+ model_dir = os.path.join("./", args.model)
+
+ if not os.path.exists(model_dir):
+ os.makedirs(model_dir)
+
+ config_path = args.config
+ config_save_path = os.path.join(model_dir, "config.json")
+ if init:
+ with open(config_path, "r") as f:
+ data = f.read()
+ with open(config_save_path, "w") as f:
+ f.write(data)
+ else:
+ with open(config_save_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ hparams.model_dir = model_dir
+ hparams.max_epochs = args.max_epochs
+ hparams.drop_speaker_embed = args.drop_speaker_embed
+ return hparams
+
+
+def get_hparams_from_dir(model_dir):
+ config_save_path = os.path.join(model_dir, "config.json")
+ with open(config_save_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ hparams.model_dir = model_dir
+ return hparams
+
+
+def get_hparams_from_file(config_path):
+ with open(config_path, "r", encoding="utf-8") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ return hparams
+
+
+def check_git_hash(model_dir):
+ source_dir = os.path.dirname(os.path.realpath(__file__))
+ if not os.path.exists(os.path.join(source_dir, ".git")):
+ logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
+ source_dir
+ ))
+ return
+
+ cur_hash = subprocess.getoutput("git rev-parse HEAD")
+
+ path = os.path.join(model_dir, "githash")
+ if os.path.exists(path):
+ saved_hash = open(path).read()
+ if saved_hash != cur_hash:
+ logger.warn("git hash values are different. {}(saved) != {}(current)".format(
+ saved_hash[:8], cur_hash[:8]))
+ else:
+ open(path, "w").write(cur_hash)
+
+
+def get_logger(model_dir, filename="train.log"):
+ global logger
+ logger = logging.getLogger(os.path.basename(model_dir))
+ logger.setLevel(logging.DEBUG)
+
+ formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
+ if not os.path.exists(model_dir):
+ os.makedirs(model_dir)
+ h = logging.FileHandler(os.path.join(model_dir, filename))
+ h.setLevel(logging.DEBUG)
+ h.setFormatter(formatter)
+ logger.addHandler(h)
+ return logger
+
+
+class HParams():
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ if type(v) == dict:
+ v = HParams(**v)
+ self[k] = v
+
+ def keys(self):
+ return self.__dict__.keys()
+
+ def items(self):
+ return self.__dict__.items()
+
+ def values(self):
+ return self.__dict__.values()
+
+ def __len__(self):
+ return len(self.__dict__)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __setitem__(self, key, value):
+ return setattr(self, key, value)
+
+ def __contains__(self, key):
+ return key in self.__dict__
+
+ def __repr__(self):
+ return self.__dict__.__repr__()
\ No newline at end of file