utils.py (forked from andabi/deep-voice-conversion)
#!/usr/bin/python2
# -*- coding: utf-8 -*-
from __future__ import print_function

import librosa
import numpy as np
from scipy import signal


def spectrogram2wav(mag, n_fft, win_length, hop_length, num_iters, phase_angle=None, length=None):
    """Griffin-Lim reconstruction: recover a waveform from a magnitude
    spectrogram by iteratively re-estimating the phase."""
    assert num_iters > 0
    if phase_angle is None:
        # Start from a random phase when none is provided.
        phase_angle = np.pi * np.random.rand(*mag.shape)
    spec = mag * np.exp(1.j * phase_angle)
    for i in range(num_iters):
        wav = librosa.istft(spec, win_length=win_length, hop_length=hop_length, length=length)
        if i != num_iters - 1:
            # Take the phase of the re-analyzed waveform and pair it with the
            # target magnitude for the next iteration.
            spec = librosa.stft(wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length)
            _, phase = librosa.magphase(spec)
            phase_angle = np.angle(phase)
            spec = mag * np.exp(1.j * phase_angle)
    return wav
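

# Example added for illustration (not part of the original module): a minimal
# sketch of calling spectrogram2wav() on a synthetic tone. The STFT settings
# (n_fft=512, win_length=400, hop_length=160) and the 16 kHz sample rate are
# arbitrary example values, not this project's configuration.
def _demo_spectrogram2wav():
    sr = 16000
    t = np.arange(sr) / float(sr)
    y = np.sin(2 * np.pi * 440.0 * t).astype(np.float32)  # 1 s, 440 Hz tone
    mag = np.abs(librosa.stft(y, n_fft=512, win_length=400, hop_length=160))
    return spectrogram2wav(mag, n_fft=512, win_length=400, hop_length=160,
                           num_iters=30, length=len(y))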


def preemphasis(x, coeff=0.97):
    """First-order pre-emphasis filter: y[n] = x[n] - coeff * x[n - 1]."""
    return signal.lfilter([1, -coeff], [1], x)


def inv_preemphasis(x, coeff=0.97):
    """Inverse of preemphasis(): undo the pre-emphasis filter."""
    return signal.lfilter([1], [1, -coeff], x)
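

# Example added for illustration (not part of the original module): applying
# preemphasis() and then inv_preemphasis() is a (near-)exact round trip, since
# the second filter is the IIR inverse of the first. The 16000-sample input is
# an arbitrary example.
def _demo_preemphasis_roundtrip():
    x = np.random.randn(16000)
    residual = x - inv_preemphasis(preemphasis(x))
    return np.max(np.abs(residual))  # should be close to zero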


def wav_random_crop(wav, sr, duration):
    """Randomly crop `duration` seconds from a 1-D (mono) or 2-D
    (channels x samples) waveform sampled at `sr` Hz."""
    assert wav.ndim <= 2
    target_len = sr * duration
    wav_len = wav.shape[-1]
    start = np.random.choice(range(np.maximum(1, wav_len - target_len)), 1)[0]
    end = start + target_len
    if wav.ndim == 1:
        wav = wav[start:end]
    else:
        wav = wav[:, start:end]
    return wav
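

# Example added for illustration (not part of the original module): crop a
# random 2-second window from 5 seconds of mono noise. The 16 kHz sample rate
# and the duration are example values only.
def _demo_wav_random_crop():
    sr, duration = 16000, 2
    wav = np.random.randn(sr * 5).astype(np.float32)
    crop = wav_random_crop(wav, sr, duration)
    assert crop.shape[-1] == sr * duration
    return crop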