  #!/bin/bash
  
# This script is based on run_tdnn_1a.sh.
# This setup uses online pitch features to train the neural network.
# It requires an online_pitch.conf in the conf dir.
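# A minimal online_pitch.conf is just the pitch-extraction options, e.g.
# (a sketch -- assuming 16 kHz audio as in AISHELL; adjust to your corpus):
#   --sample-frequency=16000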
  
  set -e
  
  # configs for 'chain'
  affix=
  stage=0
  train_stage=-10
  get_egs_stage=-10
  dir=exp/chain/tdnn_2a  # Note: _sp will get added to this
  decode_iter=
  
  # training options
  num_epochs=4
  initial_effective_lrate=0.001
  final_effective_lrate=0.0001
  max_param_change=2.0
  final_layer_normalize_target=0.5
  num_jobs_initial=2
  num_jobs_final=12
  minibatch_size=128
  frames_per_eg=150,110,90
  remove_egs=true
  common_egs_dir=
  xent_regularize=0.1
  
  # End configuration section.
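# All of the configuration variables above can be overridden from the
# command line via utils/parse_options.sh, e.g. (hypothetical invocation):
#   local/chain/tuning/run_tdnn_2a.sh --stage 8 --num-epochs 6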
  echo "$0 $@"  # Print the command line for logging
  
  . ./cmd.sh
  . ./path.sh
  . ./utils/parse_options.sh
  
  if ! cuda-compiled; then
    cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, then configure and make on a machine
where "nvcc" is installed.
  EOF
  fi
  
  # The iVector-extraction and feature-dumping parts are the same as the standard
  # nnet3 setup, and you can skip them by setting "--stage 8" if you have already
  # run those things.
  
  dir=${dir}${affix:+_$affix}_sp
  train_set=train_sp
  ali_dir=exp/tri5a_sp_ali
  treedir=exp/chain/tri6_7d_tree_sp
  lang=data/lang_chain
  
  
# If we are using the speed-perturbed data we need to generate alignments
# for it; run_ivector_common.sh below takes care of this, along with the
# feature and iVector extraction.
  local/nnet3/run_ivector_common.sh --stage $stage --online true || exit 1;
  
  if [ $stage -le 7 ]; then
    # Get the alignments as lattices (gives the LF-MMI training more freedom).
    # use the same num-jobs as the alignments
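  # (Lattice rather than single-best alignments let the numerator of the
  # LF-MMI objective sum over alternative alignments.)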
    nj=$(cat $ali_dir/num_jobs) || exit 1;
    steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" data/$train_set \
      data/lang exp/tri5a exp/tri5a_sp_lats
    rm exp/tri5a_sp_lats/fsts.*.gz # save space
  fi
  
  if [ $stage -le 8 ]; then
  # Create a version of the lang/ directory that has one state per phone in
  # the topo file.  [Note: it really has two states; the first occurs exactly
  # once, the second has zero or more repeats.]
    rm -rf $lang
    cp -r data/lang $lang
    silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
    nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
  # Use our special topology... note that we may have to tune this
  # topology later on.
    steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
  fi
  
  if [ $stage -le 9 ]; then
  # Build a tree using our new topology.  This is the step that differs
  # critically from other (non-chain) recipes.
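  # The "5000" below is the target number of tree leaves (context-dependent
  # pdfs), and --frame-subsampling-factor 3 matches the 3x-reduced output
  # frame rate of chain models.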
    steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
        --context-opts "--context-width=2 --central-position=1" \
        --cmd "$train_cmd" 5000 data/$train_set $lang $ali_dir $treedir
  fi
  
  if [ $stage -le 10 ]; then
    echo "$0: creating neural net configs using the xconfig parser";
  
  num_targets=$(tree-info $treedir/tree | grep num-pdfs | awk '{print $2}')
    learning_rate_factor=$(echo "print (0.5/$xent_regularize)" | python)
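  # With the default xent_regularize=0.1 this evaluates to 0.5/0.1 = 5.0,
  # i.e. the xent output layer learns 5x faster than the rest of the network.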
  
    mkdir -p $dir/configs
    cat <<EOF > $dir/configs/network.xconfig
    input dim=100 name=ivector
    input dim=43 name=input
  
  # Please note that it is important to have an input layer with name=input
  # as the layer immediately preceding the fixed-affine-layer, to enable
  # the use of the short notation for the descriptor.
    fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
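  # The descriptor above splices frames t-1, t and t+1 and appends the
  # iVector; ReplaceIndex(ivector, t, 0) pins the iVector's time index to 0,
  # so one iVector is shared across the frames of a chunk.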
  
    # the first splicing is moved before the lda layer, so no splicing here
    relu-batchnorm-layer name=tdnn1 dim=625
    relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1) dim=625
    relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=625
    relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=625
    relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=625
    relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=625
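  # (Total splicing context: 1+1+1+3+3+3 = 12 frames on each side of an
  # output frame.)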
  
    ## adding the layers for chain branch
    relu-batchnorm-layer name=prefinal-chain input=tdnn6 dim=625 target-rms=0.5
    output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5
  
    # adding the layers for xent branch
  # This block prints the configs for a separate output that will be
  # trained with a cross-entropy objective in the 'chain' models; this
  # has the effect of regularizing the hidden parts of the model.  We use
  # 0.5 / xent_regularize as the learning-rate factor; this makes the xent
  # final layer learn at a rate independent of the regularization
  # constant, and the 0.5 was tuned so as to make the relative progress
  # similar in the xent and regular final layers.
    relu-batchnorm-layer name=prefinal-xent input=tdnn6 dim=625 target-rms=0.5
    output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
  
  EOF
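  # xconfig_to_configs.py expands the xconfig into the actual nnet3 config
  # files (e.g. init.config, ref.config and final.config) under $dir/configs.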
    steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
  fi
  
  if [ $stage -le 11 ]; then
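  # On JHU's CLSP grid, spread the egs over several file systems; this is
  # a no-op on other hosts.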
    if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
      utils/create_split_dir.pl \
   /export/b0{5,6,7,8}/$USER/kaldi-data/egs/aishell-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
    fi
  
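  # The chain.* options below are the standard chain-recipe settings: xent
  # regularization via a second output, a small l2 penalty on the network
  # output, and "leaky-hmm" smoothing of the denominator computation.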
    steps/nnet3/chain/train.py --stage $train_stage \
      --cmd "$decode_cmd" \
      --feat.online-ivector-dir exp/nnet3/ivectors_${train_set} \
      --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
      --chain.xent-regularize $xent_regularize \
      --chain.leaky-hmm-coefficient 0.1 \
      --chain.l2-regularize 0.00005 \
      --chain.apply-deriv-weights false \
      --chain.lm-opts="--num-extra-lm-states=2000" \
      --egs.dir "$common_egs_dir" \
      --egs.stage $get_egs_stage \
      --egs.opts "--frames-overlap-per-eg 0" \
      --egs.chunk-width $frames_per_eg \
      --trainer.num-chunk-per-minibatch $minibatch_size \
      --trainer.frames-per-iter 1500000 \
      --trainer.num-epochs $num_epochs \
      --trainer.optimization.num-jobs-initial $num_jobs_initial \
      --trainer.optimization.num-jobs-final $num_jobs_final \
      --trainer.optimization.initial-effective-lrate $initial_effective_lrate \
      --trainer.optimization.final-effective-lrate $final_effective_lrate \
      --trainer.max-param-change $max_param_change \
      --cleanup.remove-egs $remove_egs \
      --feat-dir data/${train_set}_hires_online \
      --tree-dir $treedir \
      --lat-dir exp/tri5a_sp_lats \
      --dir $dir  || exit 1;
  fi
  
  if [ $stage -le 12 ]; then
  # Note: it might appear that this data/lang_test directory is mismatched
  # with the model (and it is, as far as the 'topo' is concerned), but
  # mkgraph.sh doesn't read the 'topo' from the lang directory, so this is OK.
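  # --self-loop-scale 1.0 is the standard setting for chain-model graphs
  # (the self-loop transition probabilities are used unscaled).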
    utils/mkgraph.sh --self-loop-scale 1.0 data/lang_test $dir $dir/graph
  fi
  
  graph_dir=$dir/graph
  if [ $stage -le 13 ]; then
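  # Chain models are decoded with acoustic scale 1.0; --post-decode-acwt 10.0
  # scales the lattice acoustic costs so that the usual LM-weight range
  # (around 10) applies at scoring time.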
    for test_set in dev test; do
      steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
        --nj 10 --cmd "$decode_cmd" \
        --online-ivector-dir exp/nnet3/ivectors_$test_set \
        $graph_dir data/${test_set}_hires_online $dir/decode_${test_set} || exit 1;
    done
  fi
  
  if [ $stage -le 14 ]; then
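  # Bundle the final model, the iVector extractor and the feature
  # configuration into ${dir}_online for online decoding; --add-pitch true
  # adds online pitch to match the training features.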
    steps/online/nnet3/prepare_online_decoding.sh --mfcc-config conf/mfcc_hires.conf \
      --add-pitch true \
      $lang exp/nnet3/extractor "$dir" ${dir}_online || exit 1;
  fi
  
  dir=${dir}_online
  if [ $stage -le 15 ]; then
    for test_set in dev test; do
      steps/online/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
        --nj 10 --cmd "$decode_cmd" \
        --config conf/decode.config \
        $graph_dir data/${test_set}_hires_online $dir/decode_${test_set} || exit 1;
    done
  fi
  
  if [ $stage -le 16 ]; then
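  # Same as above, but with --per-utt true the iVectors are estimated per
  # utterance rather than per speaker (no adaptation carried across a
  # speaker's utterances).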
    for test_set in dev test; do
      steps/online/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
        --nj 10 --cmd "$decode_cmd" --per-utt true \
        --config conf/decode.config \
        $graph_dir data/${test_set}_hires_online $dir/decode_${test_set}_per_utt || exit 1;
    done
  fi
  
  exit;