  #!/bin/bash
  
  
  # run_tdnn_lstm_1a.sh is a TDNN+LSTM system.  Compare with the TDNN
  # system in run_tdnn_1a.sh.  Configuration is similar to
  # the same-named script run_tdnn_lstm_1a.sh in
  # egs/tedlium/s5_r2/local/nnet3/tuning.
  
  # It's a little better than the TDNN-only script on dev93, a little
  # worse on eval92.
  
  # steps/info/nnet3_dir_info.pl exp/nnet3/tdnn_lstm1a_sp
  # exp/nnet3/tdnn_lstm1a_sp: num-iters=102 nj=3..10 num-params=8.8M dim=40+100->3413 combine=-0.55->-0.54 loglike:train/valid[67,101,combined]=(-0.63,-0.55,-0.55/-0.71,-0.63,-0.63) accuracy:train/valid[67,101,combined]=(0.80,0.82,0.82/0.76,0.78,0.78)
  
  
  
  # local/nnet3/compare_wer.sh --looped --online exp/nnet3/tdnn1a_sp exp/nnet3/tdnn_lstm1a_sp 2>/dev/null
  # System                tdnn1a_sp tdnn_lstm1a_sp
  #WER dev93 (tgpr)                9.18      8.54
  #             [looped:]                    8.54
  #             [online:]                    8.57
  #WER dev93 (tg)                  8.59      8.25
  #             [looped:]                    8.21
  #             [online:]                    8.34
  #WER dev93 (big-dict,tgpr)       6.45      6.24
  #             [looped:]                    6.28
  #             [online:]                    6.40
  #WER dev93 (big-dict,fg)         5.83      5.70
  #             [looped:]                    5.70
  #             [online:]                    5.77
  #WER eval92 (tgpr)               6.15      6.52
  #             [looped:]                    6.45
  #             [online:]                    6.56
  #WER eval92 (tg)                 5.55      6.13
  #             [looped:]                    6.08
  #             [online:]                    6.24
  #WER eval92 (big-dict,tgpr)      3.58      3.88
  #             [looped:]                    3.93
  #             [online:]                    3.88
  #WER eval92 (big-dict,fg)        2.98      3.38
  #             [looped:]                    3.47
  #             [online:]                    3.53
  # Final train prob        -0.7200   -0.5492
  # Final valid prob        -0.8834   -0.6343
  # Final train acc          0.7762    0.8154
  # Final valid acc          0.7301    0.7849
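
  # Example invocation (hypothetical values; run from egs/wsj/s5 after the GMM
  # stages of run.sh have produced the tri4b model and alignments):
  #   local/nnet3/tuning/run_tdnn_lstm_1a.sh --stage 0 --nj 30 \
  #     --train-set train_si284 --gmm tri4b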
  
  
  set -e -o pipefail
  
  # First the options that are passed through to run_ivector_common.sh
  # (some of which are also used in this script directly).
  stage=0
  nj=30
  train_set=train_si284
  test_sets="test_dev93 test_eval92"
  gmm=tri4b        # this is the source gmm-dir that we'll use for alignments; it
                   # should have alignments for the specified training data.
  num_threads_ubm=32
  nnet3_affix=       # affix for exp dirs, e.g. it was _cleaned in tedlium.
  
  # Options which are not passed through to run_ivector_common.sh
  affix=1a   # affix for the TDNN+LSTM directory, e.g. "1a" or "1b", in case we change the configuration.
  common_egs_dir=
  reporting_email=
  
  # LSTM options
  train_stage=-10
  label_delay=5
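  # label_delay shifts the supervision labels 5 frames into the future (it is
  # passed to the output layer as output-delay below); this gives the
  # unidirectional LSTMs a little future context before committing to a label.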
  
  # training chunk-options
  chunk_width=40,30,20
  chunk_left_context=40
  chunk_right_context=0
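  # Note: chunk_width is a comma-separated list of allowed chunk sizes for the
  # training examples; the decoding stages below use only its first value
  # (extracted with "cut -d, -f1") as --frames-per-chunk.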
  
  # training options
  srand=0
  remove_egs=true
  
  # decode options
  test_online_decoding=false  # if true, it will run the last decoding stage.
  
  . ./cmd.sh
  . ./path.sh
  . ./utils/parse_options.sh
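
  # Any of the variables defined above can be overridden on the command line
  # via utils/parse_options.sh, e.g. (hypothetical values):
  #   local/nnet3/tuning/run_tdnn_lstm_1a.sh --stage 13 --train-stage 50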
  
  if ! cuda-compiled; then
    cat <<EOF && exit 1
  This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
  If you want to use GPUs (and have them), go to src/, and configure and make on a machine
  where "nvcc" is installed.
  EOF
  fi
  
  local/nnet3/run_ivector_common.sh \
    --stage $stage --nj $nj \
    --train-set $train_set --gmm $gmm \
    --num-threads-ubm $num_threads_ubm \
    --nnet3-affix "$nnet3_affix"
  
  
  
  gmm_dir=exp/${gmm}
  ali_dir=exp/${gmm}_ali_${train_set}_sp
  lang=data/lang
  dir=exp/nnet3${nnet3_affix}/tdnn_lstm${affix}_sp
  train_data_dir=data/${train_set}_sp_hires
  train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires
  
  for f in $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
      $gmm_dir/{graph_tgpr,graph_bd_tgpr}/HCLG.fst \
      $ali_dir/ali.1.gz $gmm_dir/final.mdl; do
    [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
  done
  
  if [ $stage -le 12 ]; then
    mkdir -p $dir
    echo "$0: creating neural net configs using the xconfig parser";
  
    num_targets=$(tree-info $ali_dir/tree |grep num-pdfs|awk '{print $2}')
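    # (tree-info prints the tree's properties; num-pdfs is the number of
    # context-dependent HMM states, which becomes the output dimension of the
    # network below.)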
  
    mkdir -p $dir/configs
    cat <<EOF > $dir/configs/network.xconfig
    input dim=100 name=ivector
    input dim=40 name=input
  
  # Please note that it is important to have an input layer with name=input
  # as the layer immediately preceding the fixed-affine-layer, to enable
  # the use of short notation for the descriptor.
    fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
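  # (Append(-2,-1,0,1,2) splices together frames t-2 through t+2 of the input,
  # and ReplaceIndex(ivector, t, 0) uses the i-vector from t=0 on every frame,
  # so this LDA-like fixed affine transform sees 5*40 + 100 = 300 input dims.)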
  
    # the first splicing is moved before the lda layer, so no splicing here
    relu-renorm-layer name=tdnn1 dim=520
    relu-renorm-layer name=tdnn2 dim=520 input=Append(-1,0,1)
    fast-lstmp-layer name=lstm1 cell-dim=520 recurrent-projection-dim=130 non-recurrent-projection-dim=130 decay-time=20 delay=-3
    relu-renorm-layer name=tdnn3 dim=520 input=Append(-3,0,3)
    relu-renorm-layer name=tdnn4 dim=520 input=Append(-3,0,3)
    fast-lstmp-layer name=lstm2 cell-dim=520 recurrent-projection-dim=130 non-recurrent-projection-dim=130 decay-time=20 delay=-3
    relu-renorm-layer name=tdnn5 dim=520 input=Append(-3,0,3)
    relu-renorm-layer name=tdnn6 dim=520 input=Append(-3,0,3)
    fast-lstmp-layer name=lstm3 cell-dim=520 recurrent-projection-dim=130 non-recurrent-projection-dim=130 decay-time=20 delay=-3
  
    output-layer name=output input=lstm3 output-delay=$label_delay dim=$num_targets max-change=1.5
  
  EOF
    steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
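    # xconfig_to_configs.py writes the compiled configs (e.g. final.config)
    # into $dir/configs/; inspecting them is a good way to check the network
    # topology before training.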
  fi
  
  
  if [ $stage -le 13 ]; then
    if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
      utils/create_split_dir.pl \
     /export/b0{3,4,5,6}/$USER/kaldi-data/egs/wsj-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
    fi
  
    steps/nnet3/train_rnn.py --stage=$train_stage \
      --cmd="$decode_cmd" \
      --feat.online-ivector-dir=$train_ivector_dir \
      --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
      --trainer.srand=$srand \
      --trainer.max-param-change=2.0 \
      --trainer.num-epochs=6 \
      --trainer.deriv-truncate-margin=10 \
      --trainer.samples-per-iter=20000 \
      --trainer.optimization.num-jobs-initial=3 \
      --trainer.optimization.num-jobs-final=10 \
      --trainer.optimization.initial-effective-lrate=0.0003 \
      --trainer.optimization.final-effective-lrate=0.00003 \
      --trainer.optimization.shrink-value=0.99 \
      --trainer.rnn.num-chunk-per-minibatch=128,64 \
      --trainer.optimization.momentum=0.5 \
      --egs.chunk-width=$chunk_width \
      --egs.chunk-left-context=$chunk_left_context \
      --egs.chunk-right-context=$chunk_right_context \
      --egs.chunk-left-context-initial=0 \
      --egs.chunk-right-context-final=0 \
      --egs.dir="$common_egs_dir" \
      --cleanup.remove-egs=$remove_egs \
      --use-gpu=true \
      --feat-dir=$train_data_dir \
      --ali-dir=$ali_dir \
      --lang=$lang \
      --reporting.email="$reporting_email" \
      --dir=$dir  || exit 1;
  fi
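
  # While training runs, progress can be checked with something like (the
  # plotting script assumes matplotlib is available):
  #   steps/nnet3/report/generate_plots.py $dir $dir/report
  #   grep -H Overall $dir/log/compute_prob_valid.*.log | tail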
  
  if [ $stage -le 14 ]; then
    frames_per_chunk=$(echo $chunk_width | cut -d, -f1)
    rm $dir/.error 2>/dev/null || true
  
    for data in $test_sets; do
      (
        data_affix=$(echo $data | sed s/test_//)
        nj=$(wc -l <data/${data}_hires/spk2utt)
        for lmtype in tgpr bd_tgpr; do
          graph_dir=$gmm_dir/graph_${lmtype}
          steps/nnet3/decode.sh \
            --extra-left-context $chunk_left_context \
            --extra-right-context $chunk_right_context \
            --extra-left-context-initial 0 \
            --extra-right-context-final 0 \
            --frames-per-chunk $frames_per_chunk \
            --nj $nj --cmd "$decode_cmd"  --num-threads 4 \
            --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \
            $graph_dir data/${data}_hires ${dir}/decode_${lmtype}_${data_affix} || exit 1
        done
        steps/lmrescore.sh --cmd "$decode_cmd" data/lang_test_{tgpr,tg} \
          data/${data}_hires ${dir}/decode_{tgpr,tg}_${data_affix} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_bd_{tgpr,fgconst} \
        data/${data}_hires ${dir}/decode_bd_tgpr_${data_affix}{,_fg} || exit 1
      ) || touch $dir/.error &
    done
    wait
    [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
  fi
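
  # After this stage you can summarize the WERs with something like:
  #   for d in $dir/decode_*; do grep WER $d/wer_* | utils/best_wer.sh; done
  # or use local/nnet3/compare_wer.sh as shown at the top of this script.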
  
  if [ $stage -le 15 ]; then
    # 'looped' decoding.
    # Note: you should NOT run this decoding step for setups that have
    # bidirectional recurrence, like BLSTMs; it doesn't make sense and will
    # give bad results.  We haven't written a parallel version of this
    # program yet, so it will take a bit longer, as the --num-threads option
    # is not supported.  We just hardcode the --frames-per-chunk option, as
    # it doesn't have to match any value used in training and won't affect
    # the results (unlike regular decoding).
    rm $dir/.error 2>/dev/null || true
  
    for data in $test_sets; do
      (
        data_affix=$(echo $data | sed s/test_//)
        nj=$(wc -l <data/${data}_hires/spk2utt)
        for lmtype in tgpr bd_tgpr; do
          graph_dir=$gmm_dir/graph_${lmtype}
          steps/nnet3/decode_looped.sh \
            --frames-per-chunk 30 \
            --nj $nj --cmd "$decode_cmd" \
            --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \
            $graph_dir data/${data}_hires ${dir}/decode_looped_${lmtype}_${data_affix} || exit 1
        done
        steps/lmrescore.sh --cmd "$decode_cmd" data/lang_test_{tgpr,tg} \
          data/${data}_hires ${dir}/decode_looped_{tgpr,tg}_${data_affix} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_bd_{tgpr,fgconst} \
        data/${data}_hires ${dir}/decode_looped_bd_tgpr_${data_affix}{,_fg} || exit 1
      ) || touch $dir/.error &
    done
    wait
    [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
  fi
  
  if $test_online_decoding && [ $stage -le 16 ]; then
    # note: if the features change (e.g. you add pitch features), you will have to
    # change the options of the following command line.
    steps/online/nnet3/prepare_online_decoding.sh \
      --mfcc-config conf/mfcc_hires.conf \
      $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online
  
    rm $dir/.error 2>/dev/null || true
  
    for data in $test_sets; do
      (
        data_affix=$(echo $data | sed s/test_//)
        nj=$(wc -l <data/${data}_hires/spk2utt)
      # Note: we just give it "data/${data}", since it only uses the wav.scp;
      # the feature type does not matter.
        for lmtype in tgpr bd_tgpr; do
          graph_dir=$gmm_dir/graph_${lmtype}
          steps/online/nnet3/decode.sh \
            --nj $nj --cmd "$decode_cmd" \
            $graph_dir data/${data} ${dir}_online/decode_${lmtype}_${data_affix} || exit 1
        done
        steps/lmrescore.sh --cmd "$decode_cmd" data/lang_test_{tgpr,tg} \
          data/${data}_hires ${dir}_online/decode_{tgpr,tg}_${data_affix} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_bd_{tgpr,fgconst} \
        data/${data}_hires ${dir}_online/decode_bd_tgpr_${data_affix}{,_fg} || exit 1
      ) || touch $dir/.error &
    done
    wait
    [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
  fi
  
  
  
  exit 0;