Plachta committed on
Commit
fb9893d
1 Parent(s): b8b54b5

Upload 2 files

Browse files
Files changed (2) hide show
  1. config.yml +56 -0
  2. pytorch_model.bin +3 -0
config.yml ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ log_dir: "Models/run_redecoder_wavenet"
2
+ save_freq: 1
3
+ log_interval: 10
4
+ save_interval: 1000
5
+ device: "cuda"
6
+ epochs: 1000 # number of epochs for first stage training (pre-training)
7
+ batch_size: 4
8
+ batch_length: 100 # maximum duration of audio in a batch (in seconds)
9
+ max_len: 80 # maximum number of frames
10
+ pretrained_model: ""
11
+ pretrained_encoder: "./temp_ckpt.pth"
12
+ load_only_params: False # set to true if do not want to load epoch numbers and optimizer parameters
13
+
14
+ F0_path: "modules/JDC/bst.t7"
15
+
16
+ data_params:
17
+ train_data: "./data/train.txt"
18
+ val_data: "./data/val.txt"
19
+ root_path: "./data/"
20
+
21
+ preprocess_params:
22
+ sr: 24000
23
+ spect_params:
24
+ n_fft: 2048
25
+ win_length: 1200
26
+ hop_length: 300
27
+
28
+ model_params:
29
+ encoder_causal: True
30
+ decoder_causal: False
31
+ encoder_lstm: 2
32
+ decoder_lstm: 0
33
+ n_c_codebooks: 2
34
+ n_p_codebooks: 1
35
+ timbre_norm: True
36
+ separate_prosody_encoder: True
37
+ encoder_type: 'wavenet' # should be one of mamba, transformer or wavenet
38
+ wavenet_embed_dim: 512
39
+ mamba_embed_dim: 768
40
+ prob_random_mask_prosody: 1.0
41
+ prob_random_mask_content: [0.0, 1.0]
42
+
43
+ DAC:
44
+ encoder_dim: 64
45
+ encoder_rates: [2, 5, 5, 6]
46
+ decoder_dim: 1536
47
+ decoder_rates: [ 6, 5, 5, 2 ]
48
+ sr: 24000
49
+
50
+ loss_params:
51
+ base_lr: 0.0001
52
+ discriminator_iter_start: 2000
53
+ lambda_spk: 1.0
54
+ lambda_mel: 45
55
+ lambda_f0: 1.0
56
+ lambda_uv: 1.0
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c37f88e294cea2879a816805a400ef938d532732fb3fec7533521790e7b289b1
3
+ size 637487702