---
# config_vocals_segm_models.yaml
# Configuration for a vocals/other source-separation model built on
# segmentation_models.pytorch encoders ("segm_models" architecture).
# Audio front-end / STFT parameters shared by the dataset pipeline and model.
audio:
  chunk_size: 261632  # samples per audio chunk (~5.9 s at 44100 Hz)
  dim_f: 4096  # frequency bins kept from the spectrogram (n_fft/2 = 4096) -- presumably a crop; confirm against the STFT code
  dim_t: 512  # time frames per chunk
  hop_length: 512  # STFT hop size in samples
  n_fft: 8192  # STFT size
  num_channels: 2  # stereo input
  sample_rate: 44100  # Hz
  min_mean_abs: 0.001  # NOTE(review): looks like a silence threshold (min mean |amplitude| for a chunk to be used) -- confirm in the dataset code
# Separator architecture: a segmentation_models.pytorch encoder/decoder pair
# applied to the spectrogram.
model:
  encoder_name: tu-maxvit_large_tf_512 # look here for possibilities: https://github.com/qubvel/segmentation_models.pytorch#encoders-
  decoder_type: unet # unet, fpn
  act: gelu  # activation used in the model head
  num_channels: 128  # NOTE(review): presumably internal channel width -- verify in the model code
  num_subbands: 8  # NOTE(review): presumably splits the spectrogram into sub-bands before the encoder -- verify in the model code
# Multi-resolution STFT loss settings. Field names match auraloss
# MultiResolutionSTFTLoss -- presumably passed as kwargs to its constructor;
# confirm in the training code.
loss_multistft:
  # One loss resolution per (fft_size, hop_size, win_length) triple; the three
  # lists must therefore have equal length.
  fft_sizes:
  - 1024
  - 2048
  - 4096
  hop_sizes:
  - 512
  - 1024
  - 2048
  win_lengths:
  - 1024
  - 2048
  - 4096
  window: "hann_window"
  scale: "mel"  # compare magnitudes on a mel scale with n_bins bands
  n_bins: 128
  sample_rate: 44100  # must match audio.sample_rate above
  perceptual_weighting: true
  # Weights of the individual loss terms:
  w_sc: 1.0       # spectral convergence
  w_log_mag: 1.0  # log-magnitude distance
  w_lin_mag: 0.0  # linear-magnitude distance (disabled)
  w_phs: 0.0      # phase distance (disabled)
  mag_distance: "L1"  # distance metric for the magnitude terms
# Training hyperparameters.
training:
  batch_size: 8
  gradient_accumulation_steps: 1  # 1 = no accumulation
  grad_clip: 0  # NOTE(review): 0 presumably disables gradient clipping -- confirm in the train loop
  # Output stems the model predicts:
  instruments:
  - vocals
  - other
  lr: 5.0e-05
  patience: 2  # epochs without improvement before the LR is multiplied by reduce_factor -- presumably a ReduceLROnPlateau-style scheduler; confirm
  reduce_factor: 0.95
  target_instrument: null  # null = train on all instruments listed above
  num_epochs: 1000
  num_steps: 2000  # NOTE(review): presumably optimizer steps per epoch -- confirm
  q: 0.95  # NOTE(review): presumably the quantile used by coarse_loss_clip below -- confirm
  coarse_loss_clip: true
  ema_momentum: 0.999  # NOTE(review): presumably EMA of model weights for evaluation -- confirm
  optimizer: adamw
  other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
  use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
# Data augmentation applied to training stems.
augmentations:
  enable: true # enable or disable all augmentations (to fast disable if needed)
  loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
  loudness_min: 0.5
  loudness_max: 1.5
  mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
  # NOTE(review): the !!python/tuple tag requires PyYAML's full/unsafe loader;
  # yaml.safe_load raises ConstructorError on it. Confirm which loader the
  # project uses before touching this -- a plain list would be safe-loadable
  # but may break a consumer that expects a tuple.
  mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
  - 0.2
  - 0.02
  mixup_loudness_min: 0.5
  mixup_loudness_max: 1.5
# Inference-time settings.
inference:
  batch_size: 1
  dim_t: 512  # time frames per inference window (matches audio.dim_t)
  num_overlap: 4  # NOTE(review): presumably the number of overlapping windows blended per position (higher = slower, smoother) -- confirm