#!/bin/bash
  
  # 1c is as 1b but using batchnorm instead of renorm
  # 1b is as 1a but using --proportional-shrink=60.0
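
# The only change from 1b is in the xconfig below: each relu-renorm-layer
# becomes a relu-batchnorm-layer.  For example, the 1b version of the first
# hidden layer (a sketch; see run_tdnn_1b.sh for the actual file) was:
#   relu-renorm-layer name=tdnn1 dim=512
# whereas 1c uses:
#   relu-batchnorm-layer name=tdnn1 dim=512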
  
  # local/chain/compare_wer.sh exp/chain/tdnn1a_sp exp/chain/tdnn1b_sp
  # System                tdnn1a_sp tdnn1b_sp
  #WER dev93 (tgpr)                7.87      7.24
  #WER dev93 (tg)                  7.61      6.95
  #WER dev93 (big-dict,tgpr)       5.71      5.19
  #WER dev93 (big-dict,fg)         5.10      4.52
  #WER eval92 (tgpr)               5.23      5.09
  #WER eval92 (tg)                 4.87      4.64
  #WER eval92 (big-dict,tgpr)      3.24      2.91
  #WER eval92 (big-dict,fg)        2.71      2.39
  # Final train prob        -0.0414   -0.0570
  # Final valid prob        -0.0634   -0.0680
  # Final train prob (xent)   -0.8216   -0.9587
  # Final valid prob (xent)   -0.9208   -1.0039
  
  
  # steps/info/chain_dir_info.pl exp/chain/tdnn1b_sp
  # exp/chain/tdnn1b_sp: num-iters=102 nj=2..5 num-params=7.6M dim=40+100->2889 combine=-0.066->-0.063 xent:train/valid[67,101,final]=(-1.12,-0.979,-0.959/-1.13,-1.03,-1.00) logprob:train/valid[67,101,final]=(-0.071,-0.058,-0.057/-0.077,-0.069,-0.068)
  
  set -e -o pipefail
  
  # First the options that are passed through to run_ivector_common.sh
  # (some of which are also used in this script directly).
  stage=0
  nj=30
  train_set=train_si284
  test_sets="test_dev93 test_eval92"
  gmm=tri4b        # this is the source gmm-dir that we'll use for alignments; it
                   # should have alignments for the specified training data.
  num_threads_ubm=32
  nnet3_affix=       # affix for exp dirs, e.g. it was _cleaned in tedlium.
  
  # Options which are not passed through to run_ivector_common.sh
affix=1c   # affix for the TDNN directory, e.g. "1a" or "1b", in case we change the configuration.
  common_egs_dir=
  reporting_email=
  
# TDNN/chain options
  train_stage=-10
  xent_regularize=0.1
  
  # training chunk-options
  chunk_width=140,100,160
  # we don't need extra left/right context for TDNN systems.
  chunk_left_context=0
  chunk_right_context=0
  
  # training options
  srand=0
  remove_egs=true
  
# decode options
  test_online_decoding=false  # if true, it will run the last decoding stage.
  
  # End configuration section.
  echo "$0 $@"  # Print the command line for logging
  
  
  . ./cmd.sh
  . ./path.sh
  . ./utils/parse_options.sh
  
  
if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
  
  local/nnet3/run_ivector_common.sh \
    --stage $stage --nj $nj \
    --train-set $train_set --gmm $gmm \
    --num-threads-ubm $num_threads_ubm \
    --nnet3-affix "$nnet3_affix"
  
  
  
  gmm_dir=exp/${gmm}
  ali_dir=exp/${gmm}_ali_${train_set}_sp
  lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_lats
  dir=exp/chain${nnet3_affix}/tdnn${affix}_sp
  train_data_dir=data/${train_set}_sp_hires
  train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires
  lores_train_data_dir=data/${train_set}_sp
  
  # note: you don't necessarily have to change the treedir name
  # each time you do a new experiment-- only if you change the
  # configuration in a way that affects the tree.
  tree_dir=exp/chain${nnet3_affix}/tree_a_sp
  # the 'lang' directory is created by this script.
  # If you create such a directory with a non-standard topology
  # you should probably name it differently.
  lang=data/lang_chain
  
for f in $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
    $lores_train_data_dir/feats.scp $gmm_dir/final.mdl \
    $ali_dir/ali.1.gz; do
  [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done
  
  
  if [ $stage -le 12 ]; then
    echo "$0: creating lang directory $lang with chain-type topology"
    # Create a version of the lang/ directory that has one state per phone in the
  # topo file.  [Note: it really has two states; the first one occurs exactly
  # once, the second one has zero or more repeats.]
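  # For example, a phone can then be traversed in a single frame (just the
  # first state), or extended arbitrarily by repeating the second state;
  # this is what lets the chain model operate at a reduced frame rate.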
    if [ -d $lang ]; then
      if [ $lang/L.fst -nt data/lang/L.fst ]; then
        echo "$0: $lang already exists, not overwriting it; continuing"
      else
        echo "$0: $lang already exists and seems to be older than data/lang..."
        echo " ... not sure what to do.  Exiting."
        exit 1;
      fi
    else
      cp -r data/lang $lang
      silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
      nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
    # Use our special topology... note that later on we may have to tune this
    # topology.
      steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
    fi
  fi
  
  if [ $stage -le 13 ]; then
  # Get the alignments as lattices (this gives the chain training more freedom).
  # Note: the --nj here need not match the $nj used for the alignments.
  steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \
      data/lang $gmm_dir $lat_dir
    rm $lat_dir/fsts.*.gz # save space
  fi
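
# To sanity-check the lattices generated above, one could e.g. print the first
# one in text form (assuming $lat_dir/lat.1.gz exists):
# lattice-copy "ark:gunzip -c $lat_dir/lat.1.gz |" ark,t:- | head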
  
  if [ $stage -le 14 ]; then
    # Build a tree using our new topology.  We know we have alignments for the
    # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
    # those.  The num-leaves is always somewhat less than the num-leaves from
    # the GMM baseline.
  if [ -f $tree_dir/final.mdl ]; then
    echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
    exit 1;
  fi
    steps/nnet3/chain/build_tree.sh \
      --frame-subsampling-factor 3 \
      --context-opts "--context-width=2 --central-position=1" \
      --cmd "$train_cmd" 3500 ${lores_train_data_dir} \
      $lang $ali_dir $tree_dir
  fi
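
# The number of leaves actually obtained (typically somewhat below the 3500
# requested above) can be checked with:
# tree-info $tree_dir/tree | grep num-pdfs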
  
  
  if [ $stage -le 15 ]; then
    mkdir -p $dir
    echo "$0: creating neural net configs using the xconfig parser";
  
  num_targets=$(tree-info $tree_dir/tree | grep num-pdfs | awk '{print $2}')
    learning_rate_factor=$(echo "print (0.5/$xent_regularize)" | python)
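  # (with the default xent_regularize=0.1 this evaluates to 0.5 / 0.1 = 5.0)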
  
    mkdir -p $dir/configs
    cat <<EOF > $dir/configs/network.xconfig
    input dim=100 name=ivector
    input dim=40 name=input
  
  # Please note that it is important to have the input layer with the name=input,
  # as the layer immediately preceding the fixed-affine-layer, to enable the use
  # of the short notation for the descriptor.
    fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
  
    # the first splicing is moved before the lda layer, so no splicing here
    relu-batchnorm-layer name=tdnn1 dim=512
    relu-batchnorm-layer name=tdnn2 dim=512 input=Append(-1,0,1)
    relu-batchnorm-layer name=tdnn3 dim=512 input=Append(-1,0,1)
    relu-batchnorm-layer name=tdnn4 dim=512 input=Append(-3,0,3)
    relu-batchnorm-layer name=tdnn5 dim=512 input=Append(-3,0,3)
    relu-batchnorm-layer name=tdnn6 dim=512 input=Append(-6,-3,0)
  
    ## adding the layers for chain branch
    relu-batchnorm-layer name=prefinal-chain dim=512 target-rms=0.5
    output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5
  
  # adding the layers for the xent branch
  # This block prints the configs for a separate output that will be
  # trained with a cross-entropy objective in the 'chain' models... this
  # has the effect of regularizing the hidden parts of the model.  We use
  # 0.5 / xent_regularize as the learning-rate factor; this is suitable
  # because it means the xent final layer learns at a rate independent of
  # the regularization constant, and the 0.5 was tuned so as to make the
  # relative progress similar in the xent and regular final layers.
    relu-batchnorm-layer name=prefinal-xent input=tdnn6 dim=512 target-rms=0.5
    output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
    steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
  fi
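
# The expanded configs written by xconfig_to_configs.py (e.g.
# $dir/configs/final.config) can be inspected to see the exact components
# that will be instantiated.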
  
  
  if [ $stage -le 16 ]; then
    if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
      utils/create_split_dir.pl \
       /export/b0{3,4,5,6}/$USER/kaldi-data/egs/wsj-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
    fi
  
    steps/nnet3/chain/train.py --stage=$train_stage \
      --cmd="$decode_cmd" \
      --feat.online-ivector-dir=$train_ivector_dir \
      --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
    --chain.xent-regularize=$xent_regularize \
      --chain.leaky-hmm-coefficient=0.1 \
      --chain.l2-regularize=0.00005 \
      --chain.apply-deriv-weights=false \
      --chain.lm-opts="--num-extra-lm-states=2000" \
      --trainer.srand=$srand \
      --trainer.max-param-change=2.0 \
      --trainer.num-epochs=4 \
      --trainer.frames-per-iter=3000000 \
      --trainer.optimization.num-jobs-initial=2 \
      --trainer.optimization.num-jobs-final=5 \
      --trainer.optimization.initial-effective-lrate=0.001 \
      --trainer.optimization.final-effective-lrate=0.0001 \
      --trainer.optimization.shrink-value=1.0 \
      --trainer.optimization.proportional-shrink=60.0 \
      --trainer.num-chunk-per-minibatch=256,128,64 \
      --trainer.optimization.momentum=0.0 \
      --egs.chunk-width=$chunk_width \
      --egs.chunk-left-context=0 \
      --egs.chunk-right-context=0 \
      --egs.chunk-left-context-initial=0 \
      --egs.chunk-right-context-final=0 \
      --egs.dir="$common_egs_dir" \
      --egs.opts="--frames-overlap-per-eg 0" \
      --cleanup.remove-egs=$remove_egs \
      --use-gpu=true \
      --reporting.email="$reporting_email" \
      --feat-dir=$train_data_dir \
      --tree-dir=$tree_dir \
      --lat-dir=$lat_dir \
      --dir=$dir  || exit 1;
  fi
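
# After training, a one-line summary like the chain_dir_info.pl output near
# the top of this file can be produced with:
# steps/info/chain_dir_info.pl $dir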
  
  if [ $stage -le 17 ]; then
    # The reason we are using data/lang here, instead of $lang, is just to
    # emphasize that it's not actually important to give mkgraph.sh the
    # lang directory with the matched topology (since it gets the
    # topology file from the model).  So you could give it a different
    # lang directory, one that contained a wordlist and LM of your choice,
    # as long as phones.txt was compatible.
  
    utils/lang/check_phones_compatible.sh \
      data/lang_test_tgpr/phones.txt $lang/phones.txt
    utils/mkgraph.sh \
      --self-loop-scale 1.0 data/lang_test_tgpr \
      $tree_dir $tree_dir/graph_tgpr || exit 1;
  
    utils/lang/check_phones_compatible.sh \
      data/lang_test_bd_tgpr/phones.txt $lang/phones.txt
    utils/mkgraph.sh \
      --self-loop-scale 1.0 data/lang_test_bd_tgpr \
      $tree_dir $tree_dir/graph_bd_tgpr || exit 1;
  fi
  
  if [ $stage -le 18 ]; then
    frames_per_chunk=$(echo $chunk_width | cut -d, -f1)
    rm $dir/.error 2>/dev/null || true
  
    for data in $test_sets; do
      (
        data_affix=$(echo $data | sed s/test_//)
        nspk=$(wc -l <data/${data}_hires/spk2utt)
        for lmtype in tgpr bd_tgpr; do
          steps/nnet3/decode.sh \
            --acwt 1.0 --post-decode-acwt 10.0 \
            --extra-left-context 0 --extra-right-context 0 \
            --extra-left-context-initial 0 \
            --extra-right-context-final 0 \
            --frames-per-chunk $frames_per_chunk \
            --nj $nspk --cmd "$decode_cmd"  --num-threads 4 \
            --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \
            $tree_dir/graph_${lmtype} data/${data}_hires ${dir}/decode_${lmtype}_${data_affix} || exit 1
        done
        steps/lmrescore.sh \
          --self-loop-scale 1.0 \
          --cmd "$decode_cmd" data/lang_test_{tgpr,tg} \
          data/${data}_hires ${dir}/decode_{tgpr,tg}_${data_affix} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_bd_{tgpr,fgconst} \
        data/${data}_hires ${dir}/decode_bd_tgpr_${data_affix}{,_fg} || exit 1
      ) || touch $dir/.error &
    done
    wait
    [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
  fi
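
# WERs can then be compared against earlier tuning runs with, e.g.:
# local/chain/compare_wer.sh exp/chain/tdnn1b_sp $dir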
  
  # Not testing the 'looped' decoding separately, because for
  # TDNN systems it would give exactly the same results as the
  # normal decoding.
  
  if $test_online_decoding && [ $stage -le 19 ]; then
    # note: if the features change (e.g. you add pitch features), you will have to
    # change the options of the following command line.
    steps/online/nnet3/prepare_online_decoding.sh \
      --mfcc-config conf/mfcc_hires.conf \
      $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online
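
  # The ${dir}_online directory prepared above bundles the model, the i-vector
  # extractor and the feature config; it is what the online decoding below
  # (and binaries such as online2-wav-nnet3-latgen-faster) consume.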
  
    rm $dir/.error 2>/dev/null || true
  
    for data in $test_sets; do
      (
        data_affix=$(echo $data | sed s/test_//)
        nspk=$(wc -l <data/${data}_hires/spk2utt)
      # note: we just give it "data/${data}" as it only uses the wav.scp; the
      # feature type does not matter.
        for lmtype in tgpr bd_tgpr; do
          steps/online/nnet3/decode.sh \
            --acwt 1.0 --post-decode-acwt 10.0 \
            --nj $nspk --cmd "$decode_cmd" \
            $tree_dir/graph_${lmtype} data/${data} ${dir}_online/decode_${lmtype}_${data_affix} || exit 1
        done
        steps/lmrescore.sh \
          --self-loop-scale 1.0 \
          --cmd "$decode_cmd" data/lang_test_{tgpr,tg} \
          data/${data}_hires ${dir}_online/decode_{tgpr,tg}_${data_affix} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_bd_{tgpr,fgconst} \
        data/${data}_hires ${dir}_online/decode_bd_tgpr_${data_affix}{,_fg} || exit 1
      ) || touch $dir/.error &
    done
    wait
    [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
  fi
  
  
  exit 0;