
Merge branch 'pr/agrinh/457-2' into dev
erogol committed Apr 28, 2021
2 parents 79663bc + 7ea9bc6 commit ed1de4e
Showing 9 changed files with 54 additions and 34 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
@@ -43,7 +43,7 @@ jobs:
       run: python3 -m pip install --upgrade pip
     - name: Install TTS
       run: |
-        python3 -m pip install .
+        python3 -m pip install .[all]
         python3 setup.py egg_info
     - name: Lint check
       run: |
8 changes: 6 additions & 2 deletions Makefile
@@ -1,5 +1,5 @@
 .DEFAULT_GOAL := help
-.PHONY: test deps style lint install help
+.PHONY: test system-deps dev-deps deps style lint install help
 
 help:
 	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
@@ -10,6 +10,10 @@ system-deps: ## install linux system deps
 	sudo apt-get install -y espeak-ng
 	sudo apt-get install -y libsndfile1-dev
 
+dev-deps: ## install development deps
+	pip install -r requirements.dev.txt
+	pip install -r requirements.tf.txt
+
 deps: ## install 🐸 requirements.
 	pip install -r requirements.txt
@@ -25,4 +29,4 @@ lint: ## run pylint linter.
 	pylint ${target_dirs}
 
 install: ## install 🐸 TTS for development.
-	pip install -e .
+	pip install -e .[all]
8 changes: 7 additions & 1 deletion README.md
@@ -111,11 +111,17 @@ If you are only interested in [synthesizing speech](https://github.com/coqui-ai/
 pip install TTS
 ```
 
+By default this only installs the requirements for PyTorch. To install the tensorflow dependencies as well, use the `tf` extra.
+
+```bash
+pip install TTS[tf]
+```
+
 If you plan to code or train models, clone 🐸TTS and install it locally.
 
 ```bash
 git clone https://github.com/coqui-ai/TTS
-pip install -e .
+pip install -e .[all,dev,notebooks,tf] # Select the relevant extras
 ```
 
 We use ```espeak-ng``` to convert graphemes to phonemes. You might need to install it separately.
11 changes: 4 additions & 7 deletions TTS/speaker_encoder/utils/prepare_voxceleb.py
@@ -27,10 +27,8 @@

 import pandas
 import soundfile as sf
-import tensorflow as tf
 from absl import logging
 
-gfile = tf.compat.v1.gfile
 
 SUBSETS = {
     "vox1_dev_wav": [
@@ -73,8 +71,7 @@ def download_and_extract(directory, subset, urls):
         subset: subset name of the corpus.
         urls: the list of urls to download the data file.
     """
-    if not gfile.Exists(directory):
-        gfile.MakeDirs(directory)
+    os.makedirs(directory, exist_ok=True)
 
     try:
         for url in urls:
@@ -107,7 +104,7 @@ def download_and_extract(directory, subset, urls):
         extract_path_ori = os.path.join(directory, zfile.infolist()[0].filename)
         subprocess.call("mv %s %s" % (extract_path_ori, extract_path), shell=True)
     finally:
-        # gfile.Remove(zip_filepath)
+        # os.remove(zip_filepath)
         pass


@@ -160,7 +157,7 @@ def convert_audio_and_make_label(input_dir, subset, output_dir, output_file):

     files = []
     # Convert all AAC file into WAV format. At the same time, generate the csv
-    for root, _, filenames in gfile.Walk(source_dir):
+    for root, _, filenames in os.walk(source_dir):
         for filename in filenames:
             name, ext = os.path.splitext(filename)
             if ext.lower() == ".wav":
@@ -172,7 +169,7 @@ def convert_audio_and_make_label(input_dir, subset, output_dir, output_file):
                 # Convert AAC to WAV.
                 aac_file = os.path.join(root, filename)
                 wav_file = aac_file + ".wav"
-                if not gfile.Exists(wav_file):
+                if not os.path.exists(wav_file):
                     if not decode_aac_with_ffmpeg(aac_file, wav_file):
                         raise RuntimeError("Audio decoding failed.")
                 else:
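Across these hunks the change is a one-for-one swap of TensorFlow's `tf.compat.v1.gfile` helpers for their standard-library counterparts, which is what lets the script drop `import tensorflow as tf` entirely. A minimal sketch of the mapping (the helper functions below are illustrative, not part of the diff):

```python
import os

# gfile.Exists(path)    ->  os.path.exists(path)
# gfile.MakeDirs(path)  ->  os.makedirs(path, exist_ok=True)
# gfile.Walk(root)      ->  os.walk(root)
# gfile.Remove(path)    ->  os.remove(path)

def ensure_dir(directory):
    # Illustrative: exist_ok=True absorbs the old "if not gfile.Exists(...)"
    # guard, since the call succeeds whether or not the directory exists.
    os.makedirs(directory, exist_ok=True)

def list_wavs(source_dir):
    # Illustrative: os.walk yields (root, dirs, files) tuples with the same
    # shape as gfile.Walk, so the loop body in the diff is otherwise unchanged.
    wavs = []
    for root, _, filenames in os.walk(source_dir):
        for filename in filenames:
            if os.path.splitext(filename)[1].lower() == ".wav":
                wavs.append(os.path.join(root, filename))
    return wavs
```

The swap works because gfile mirrors the `os`/`os.path` semantics for local paths; only remote filesystems (e.g. GCS) would behave differently, and this script touches local files alone.
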
5 changes: 5 additions & 0 deletions requirements.dev.txt
@@ -0,0 +1,5 @@
+black
+coverage
+isort
+nose
+pylint==2.7.4
2 changes: 2 additions & 0 deletions requirements.notebooks.txt
@@ -0,0 +1,2 @@
+bokeh==1.4.0
+numba==0.48
1 change: 1 addition & 0 deletions requirements.tf.txt
@@ -0,0 +1 @@
+tensorflow==2.3.1
37 changes: 14 additions & 23 deletions requirements.txt
@@ -1,29 +1,20 @@
-torch>=1.7
-tensorflow==2.3.1
-numpy==1.18.5
-scipy>=0.19.0
-numba==0.48
+cython
+flask
+gdown
+inflect
+jieba
 librosa==0.7.2
+matplotlib
+numpy==1.18.5
+pandas
 phonemizer>=2.2.0
-unidecode==0.4.20
 pypinyin
-jieba
-tensorboardX
-matplotlib
-Pillow
-flask
-tqdm
-inflect
-bokeh==1.4.0
 pysbd
+pyyaml
+scipy>=0.19.0
 soundfile
-gdown
+tensorboardX
+torch>=1.7
+tqdm
 umap-learn==0.4.6
-cython
-pyyaml
-# quality and style
-nose
-coverage
-black
-isort
-pylint==2.7.4
+unidecode==0.4.20
14 changes: 14 additions & 0 deletions setup.py
@@ -48,6 +48,14 @@ def pip_install(package_name):


 requirements = open(os.path.join(cwd, 'requirements.txt'), 'r').readlines()
+with open(os.path.join(cwd, 'requirements.notebooks.txt'), 'r') as f:
+    requirements_notebooks = f.readlines()
+with open(os.path.join(cwd, 'requirements.dev.txt'), 'r') as f:
+    requirements_dev = f.readlines()
+with open(os.path.join(cwd, 'requirements.tf.txt'), 'r') as f:
+    requirements_tf = f.readlines()
+requirements_all = requirements_dev + requirements_notebooks + requirements_tf
+
 with open('README.md', "r", encoding="utf-8") as readme_file:
     README = readme_file.read()

@@ -82,6 +90,12 @@ def pip_install(package_name):
         # 'build_ext': build_ext
     },
     install_requires=requirements,
+    extras_require={
+        "all": requirements_all,
+        "dev": requirements_dev,
+        "notebooks": requirements_notebooks,
+        "tf": requirements_tf,
+    },
     python_requires='>=3.6.0, <3.9',
     entry_points={
         'console_scripts': [
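
Taken together, these hunks map each new requirements file onto a pip extra, with `all` defined as the union of the optional sets. A self-contained sketch of the mechanism (abbreviated; the project's real `setup()` call carries many more arguments, and `read_requirements` is an illustrative helper, not code from the diff):

```python
import os
from setuptools import setup

cwd = os.path.dirname(os.path.abspath(__file__))

def read_requirements(filename):
    # Illustrative helper. readlines() keeps trailing newlines; setuptools
    # accepts them, which is why the diff can pass the lists through as-is.
    with open(os.path.join(cwd, filename), "r") as f:
        return f.readlines()

requirements_dev = read_requirements("requirements.dev.txt")
requirements_notebooks = read_requirements("requirements.notebooks.txt")
requirements_tf = read_requirements("requirements.tf.txt")

setup(
    name="TTS",
    install_requires=read_requirements("requirements.txt"),  # always installed
    extras_require={
        # "all" is simply the union of the three optional dependency sets
        "all": requirements_dev + requirements_notebooks + requirements_tf,
        "dev": requirements_dev,
        "notebooks": requirements_notebooks,
        "tf": requirements_tf,
    },
)
```

With this in place, `pip install TTS[tf]` resolves the `tf` extra to requirements.tf.txt, and `pip install -e .[all]`, as the updated CI workflow and Makefile now do, pulls in every optional dependency at once.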
