# !pip install torch==1.11.0
# !pip install onnxruntime==1.15.1
# !pip install piper-phonemize==1.1.0
import json
import math
import time
from pathlib import Path
from enum import Enum
import os
import numpy as np
import onnxruntime
import hashlib
from typing import List
from wavfile import write as write_wav  # local helper; expects (path, rate, data), matching scipy.io.wavfile.write
from piper_phonemize import phonemize_codepoints, phonemize_espeak, tashkeel_run
# Length scales passed to the model: values above 1 stretch the audio
# (slower speech), values below 1 compress it (faster speech).
SPEED_VALUES = {
    "very_slow": 1.5,
    "slow": 1.2,
    "normal": 1,
    "fast": 0.6,
    "very_fast": 0.4,
}
SAMPLE_RATE = 22050   # output sample rate, Hz
NOISE_SCALE_W = 0.8   # duration-prediction noise (VITS noise_scale_w)
NOISE_SCALE = 0.667   # synthesis noise (VITS noise_scale)
PAD = "_"  # padding (0)
BOS = "^"  # beginning of sentence
EOS = "$"  # end of sentence
def text_to_speech(text: str, speed: str, model_name: str, hash_of_text: str) -> str:
    """Main entry point: synthesize `text` to audio/<hash_of_text>.wav and return the path."""
    speed = speed.strip()
    length_scale = float(SPEED_VALUES[speed])
    output_dir = os.path.join(os.getcwd(), "audio")
    os.makedirs(output_dir, exist_ok=True)
    sess_options = onnxruntime.SessionOptions()
    model = onnxruntime.InferenceSession(model_name, sess_options=sess_options)
    config = load_config(model_name)
    text = text.strip()
    phonemes_list = phonemize(config, text)
    phoneme_ids = []
    for phonemes in phonemes_list:
        phoneme_ids.append(phonemes_to_ids(config, phonemes))
    speaker_id = None
    # Join the per-sentence id lists, inserting three PAD ids as a short pause.
    phoneme_ids_flatten = []
    for i in phoneme_ids:
        phoneme_ids_flatten += i + [0, 0, 0]
    text_array = np.expand_dims(np.array(phoneme_ids_flatten, dtype=np.int64), 0)
    text_lengths = np.array([text_array.shape[1]], dtype=np.int64)
    scales = np.array(
        [NOISE_SCALE, length_scale, NOISE_SCALE_W],
        dtype=np.float32,
    )
    sid = None
    if speaker_id is not None:
        sid = np.array([speaker_id], dtype=np.int64)
    start_time = time.perf_counter()
    audio = model.run(
        None,
        {
            "input": text_array,
            "input_lengths": text_lengths,
            "scales": scales,
            "sid": sid,
        },
    )[0].squeeze((0, 1))
    # audio = denoise(audio, bias_spec, 10)
    audio = audio_float_to_int16(audio.squeeze())
    end_time = time.perf_counter()
    audio_duration_sec = audio.shape[-1] / SAMPLE_RATE
    infer_sec = end_time - start_time
    # real_time_factor = (
    #     infer_sec / audio_duration_sec if audio_duration_sec > 0 else 0.0
    # )
    output_path = os.path.join(output_dir, f"{hash_of_text}.wav")
    write_wav(output_path, SAMPLE_RATE, audio)
    return output_path
def audio_float_to_int16(
    audio: np.ndarray, max_wav_value: float = 32767.0
) -> np.ndarray:
    """Normalize audio and convert to int16 range"""
    audio_norm = audio * (max_wav_value / max(0.01, np.max(np.abs(audio))))
    audio_norm = np.clip(audio_norm, -max_wav_value, max_wav_value)
    audio_norm = audio_norm.astype("int16")
    return audio_norm
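

# Example of the conversion above: a full-scale float signal maps onto the
# full int16 range, and the max(0.01, ...) floor avoids division by zero on
# silent input:
#   audio_float_to_int16(np.array([0.0, 1.0, -1.0]))
#   -> array([     0,  32767, -32767], dtype=int16)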
class PhonemeType(str, Enum):
    ESPEAK = "espeak"
    TEXT = "text"
def phonemize(config, text: str) -> List[List[str]]:
    """Text to phonemes grouped by sentence."""
    if config["phoneme_type"] == PhonemeType.ESPEAK:
        if config["espeak"]["voice"] == "ar":
            # Arabic diacritization
            # https://github.com/mush42/libtashkeel/
            text = tashkeel_run(text)
        return phonemize_espeak(text, config["espeak"]["voice"])
    if config["phoneme_type"] == PhonemeType.TEXT:
        return phonemize_codepoints(text)
    raise ValueError(f'Unexpected phoneme type: {config["phoneme_type"]}')
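

# Sketch of the output shape (the actual symbols depend on espeak-ng and the
# configured voice, so this is illustrative only):
#   phonemize(config, "Hello world. Goodbye.")
#   -> one phoneme list per sentence, e.g. [["h", "ə", ...], ["ɡ", "ʊ", ...]]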
def phonemes_to_ids(config, phonemes: List[str]) -> List[int]:
    """Phonemes to ids."""
    id_map = config["phoneme_id_map"]
    ids: List[int] = list(id_map[BOS])
    for phoneme in phonemes:
        if phoneme not in id_map:
            print(f"Missing phoneme from id map: {phoneme}")
            continue
        ids.extend(id_map[phoneme])
        ids.extend(id_map[PAD])
    ids.extend(id_map[EOS])
    return ids
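

# Layout sketch with a hypothetical id_map {"^": [1], "$": [2], "_": [0], "a": [14]}:
#   phonemes_to_ids(config, ["a"]) -> [1, 14, 0, 2]
# i.e. BOS, then each phoneme id followed by a PAD id, then EOS.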
def load_config(model):
    """Load the model's JSON config, expected at "<model>.json" next to the ONNX file."""
    with open(f"{model}.json", "r") as file:
        config = json.load(file)
    return config
def denoise(
    audio: np.ndarray, bias_spec: np.ndarray, denoiser_strength: float
) -> np.ndarray:
    """Spectral-subtraction denoiser. Expects batched audio of shape (batch, samples)."""
    audio_spec, audio_angles = transform(audio)
    # Tile the bias spectrogram along time so it covers the whole utterance.
    a = bias_spec.shape[-1]
    b = audio_spec.shape[-1]
    repeats = max(1, math.ceil(b / a))
    bias_spec_repeat = np.repeat(bias_spec, repeats, axis=-1)[..., :b]
    audio_spec_denoised = audio_spec - (bias_spec_repeat * denoiser_strength)
    audio_spec_denoised = np.clip(audio_spec_denoised, a_min=0.0, a_max=None)
    audio_denoised = inverse(audio_spec_denoised, audio_angles)
    return audio_denoised
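

# A hedged sketch of where `bias_spec` could come from. `denoise` is currently
# unused above; in WaveGlow-style denoisers (which this resembles) the bias is
# the magnitude spectrogram of audio synthesized from near-silent input,
# truncated to a single time frame. `bias_audio` is an assumption here, not
# something this module produces.
def compute_bias_spec(bias_audio: np.ndarray) -> np.ndarray:
    bias_spec, _ = transform(bias_audio[None, :])  # (1, n_freq, n_frames)
    return bias_spec[:, :, 0][:, :, None]          # keep only the first frame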
def stft(x, fft_size, hopsamp):
    """Compute and return the STFT of the supplied time domain signal x.
    Args:
        x (1-dim Numpy array): A time domain signal.
        fft_size (int): FFT size. A power of 2 is fastest, but any size works.
        hopsamp (int): The hop size, in samples.
    Returns:
        The STFT. The rows are the time slices and columns are the frequency bins.
    """
    window = np.hanning(fft_size)
    fft_size = int(fft_size)
    hopsamp = int(hopsamp)
    return np.array(
        [
            np.fft.rfft(window * x[i : i + fft_size])
            for i in range(0, len(x) - fft_size, hopsamp)
        ]
    )
def istft(X, fft_size, hopsamp):
    """Invert a STFT into a time domain signal.
    Args:
        X (2-dim Numpy array): Input spectrogram. The rows are the time slices and columns are the frequency bins.
        fft_size (int): FFT size, in samples.
        hopsamp (int): The hop size, in samples.
    Returns:
        The inverse STFT.
    """
    fft_size = int(fft_size)
    hopsamp = int(hopsamp)
    window = np.hanning(fft_size)
    time_slices = X.shape[0]
    len_samples = int(time_slices * hopsamp + fft_size)
    x = np.zeros(len_samples)
    # Overlap-add each windowed frame back into the time domain.
    for n, i in enumerate(range(0, len(x) - fft_size, hopsamp)):
        x[i : i + fft_size] += window * np.real(np.fft.irfft(X[n]))
    return x
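

# Round-trip sketch (toy signal, not part of the pipeline). Because a Hann
# window is applied in both the analysis and synthesis passes, overlap-add at
# a quarter-frame hop reconstructs the signal scaled by a constant gain of
# roughly 1.5 away from the edges:
#   x = np.random.randn(22050)
#   x_rec = istft(stft(x, 1024, 256), 1024, 256)
#   # x_rec[:len(x)] tracks ~1.5 * x in the interior (lengths differ slightly)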
def inverse(magnitude, phase):
    """Rebuild complex spectra from magnitude/phase and invert each batch item."""
    # magnitude * cos(phase) is the real part; magnitude * sin(phase) the imaginary part.
    recombine_magnitude_phase = np.concatenate(
        [magnitude * np.cos(phase), magnitude * np.sin(phase)], axis=1
    )
    x_org = recombine_magnitude_phase
    n_b, n_f, n_t = x_org.shape  # pylint: disable=unpacking-non-sequence
    x = np.empty([n_b, n_f // 2, n_t], dtype=np.complex64)
    x.real = x_org[:, : n_f // 2]
    x.imag = x_org[:, n_f // 2 :]
    inverse_transform = []
    for y in x:
        y_ = istft(y.T, fft_size=1024, hopsamp=256)
        inverse_transform.append(y_[None, :])
    inverse_transform = np.concatenate(inverse_transform, 0)
    return inverse_transform
def transform(input_data):
    """STFT each signal in a batch; return (magnitude, phase), each of shape (batch, n_freq, n_frames)."""
    x = input_data
    real_part = []
    imag_part = []
    for y in x:
        y_ = stft(y, fft_size=1024, hopsamp=256).T
        real_part.append(y_.real[None, :, :])  # pylint: disable=unsubscriptable-object
        imag_part.append(y_.imag[None, :, :])  # pylint: disable=unsubscriptable-object
    real_part = np.concatenate(real_part, 0)
    imag_part = np.concatenate(imag_part, 0)
    magnitude = np.sqrt(real_part**2 + imag_part**2)
    phase = np.arctan2(imag_part, real_part)
    return magnitude, phase
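

# Hedged usage sketch. The model filename below is an assumption: any
# Piper-style ONNX voice with a matching "<name>.onnx.json" config should
# work. The hash is computed here with the hashlib import above, the same way
# a caller presumably would.
if __name__ == "__main__":
    sample_text = "Hello from the text to speech demo."
    text_hash = hashlib.md5(sample_text.encode("utf-8")).hexdigest()
    wav_path = text_to_speech(
        text=sample_text,
        speed="normal",
        model_name="en_US-example.onnx",  # hypothetical model file
        hash_of_text=text_hash,
    )
    print(f"Wrote {wav_path}")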