#!/bin/bash

set -e

. ./cmd.sh
. ./path.sh

stage=0
train_stage=451  # resumes nnet training from this iteration; use -10 for a full run.
use_gpu=true
rescore=true
common_egs_dir=  # you can set this to use previously dumped egs.

. ./utils/parse_options.sh
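
# Example invocations (sketches; pick stage numbers matching where you stopped):
#   local/online/run_nnet2_ms.sh --stage 7                    # skip straight to decoding prep
#   local/online/run_nnet2_ms.sh --stage 6 --train-stage 451  # resume nnet training mid-way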
  
  
# By default use_gpu=true, since training would be far too slow otherwise.

if $use_gpu; then
  if ! cuda-compiled; then
    cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
  fi
  parallel_opts="--gpu 1"
  num_threads=1
  minibatch_size=512
else
  # CPU fallback: more threads and a smaller minibatch; expect this to be very slow.
  num_threads=16
  minibatch_size=128
  parallel_opts="--num-threads $num_threads"
fi

dir=exp/nnet2_online/nnet_ms_a
mkdir -p exp/nnet2_online
  
  
# Stages 1 through 5 are done in run_nnet2_common.sh,
# so it can be shared with other similar scripts.
local/online/run_nnet2_common.sh --stage $stage
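# After this, the stages below expect the hires training data
# (data/train_nodup_hires), the iVector extractor (exp/nnet2_online/extractor)
# and the training iVectors (exp/nnet2_online/ivectors_train) to exist.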
  
if [ $stage -le 6 ]; then
  if [[ $(hostname -f) == *.clsp.jhu.edu ]]; then
    utils/create_split_dir.pl /export/b0{6,7,8,9}/${USER}/kaldi-data/egs/fisher_swbd/s5/$dir/egs/storage $dir/egs/storage
  fi

  # Because we have a lot of data here and we don't want the training to take
  # too long, we reduce the number of epochs from the default to 6.  If dumping
  # egs to disk becomes a bottleneck, an option like "--io-opts '--max-jobs-run 12'"
  # would allow more than the default number of jobs to write egs at once;
  # that is OK here since the egs are split across four filesystems for speed.
  
  
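  # A note on --splice-indexes below: "layer0/-2:-1:0:1:2" splices frames
  # t-2..t+2 at the input, "layer3/-3:3" splices offsets -3 and +3 at layer 3,
  # and so on, so deeper layers see progressively wider temporal context
  # (the multi-splice idea behind train_multisplice_accel2.sh).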
  steps/nnet2/train_multisplice_accel2.sh --stage $train_stage \
    --feat-type raw \
    --splice-indexes "layer0/-2:-1:0:1:2 layer1/-1:2 layer3/-3:3 layer4/-7:2" \
    --num-epochs 6 \
    --num-hidden-layers 6 \
    --num-jobs-initial 3 --num-jobs-final 18 \
    --online-ivector-dir exp/nnet2_online/ivectors_train \
    --cmvn-opts "--norm-means=false --norm-vars=false" \
    --num-threads "$num_threads" \
    --minibatch-size "$minibatch_size" \
    --parallel-opts "$parallel_opts" \
    --mix-up 12000 \
    --initial-effective-lrate 0.0015 --final-effective-lrate 0.00015 \
    --cmd "$decode_cmd" \
    --egs-dir "$common_egs_dir" \
    --pnorm-input-dim 4000 \
    --pnorm-output-dim 400 \
    data/train_nodup_hires data/lang exp/tri5a $dir || exit 1;
fi
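# The trained model ends up in $dir (final.mdl etc., assuming the usual
# train_multisplice_accel2.sh output layout); stage 7 converts it for online use.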
  
if [ $stage -le 7 ]; then
  steps/online/nnet2/prepare_online_decoding.sh --mfcc-config conf/mfcc_hires.conf \
    data/lang exp/nnet2_online/extractor "$dir" ${dir}_online || exit 1;
fi
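# ${dir}_online now bundles the model with the MFCC and iVector extraction
# configuration, so the online decoding below can work directly from the audio.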
  
if [ $stage -le 8 ]; then
  for test in eval2000 rt03; do
    # do the actual online decoding with iVectors, carrying info forward from
    # previous utterances of the same speaker.
    steps/online/nnet2/decode.sh --config conf/decode.config --cmd "$decode_cmd" --nj 30 \
      exp/tri5a/graph_fsh_sw1_tg data/$test ${dir}_online/decode_${test}_fsh_sw1_tg || exit 1;

    # rescore with the 4-gram LM.
    if $rescore; then
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_fsh_sw1_{tg,fg} data/${test} \
        ${dir}_online/decode_${test}_fsh_sw1_{tg,fg}
    fi
  done
fi
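# To summarize WERs once scoring has finished, the usual Kaldi one-liner works
# (sketch; assumes sclite-style scoring output under score_*/):
#   for x in ${dir}_online/decode_*; do grep Sum $x/score_*/*.sys | utils/best_wer.sh; done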
  
if [ $stage -le 9 ]; then
  for test in eval2000 rt03; do
    # this version of the decoding treats each utterance separately
    # without carrying forward speaker information.
    steps/online/nnet2/decode.sh --config conf/decode.config --cmd "$decode_cmd" --nj 30 \
      --per-utt true \
      exp/tri5a/graph_fsh_sw1_tg data/$test ${dir}_online/decode_${test}_utt_fsh_sw1_tg || exit 1;

    # rescore with the 4-gram LM.
    if $rescore; then
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_fsh_sw1_{tg,fg} data/${test} \
        ${dir}_online/decode_${test}_utt_fsh_sw1_{tg,fg}
    fi
  done
fi
  
if [ $stage -le 10 ]; then
  for test in eval2000 rt03; do
    # this version of the decoding treats each utterance separately
    # without carrying forward speaker information, but looks to the end
    # of the utterance while computing the iVector.
    steps/online/nnet2/decode.sh --config conf/decode.config --cmd "$decode_cmd" --nj 30 \
      --per-utt true --online false \
      exp/tri5a/graph_fsh_sw1_tg data/$test ${dir}_online/decode_${test}_utt_offline_fsh_sw1_tg || exit 1;

    # rescore with the 4-gram LM.
    if $rescore; then
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_fsh_sw1_{tg,fg} data/${test} \
        ${dir}_online/decode_${test}_utt_offline_fsh_sw1_{tg,fg}
    fi
  done
fi
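# Comparing the three sets of decode directories shows the effect of carrying
# speaker information forward (stage 8), per-utterance online iVectors
# (stage 9), and per-utterance iVectors estimated from the whole utterance
# (stage 10).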
  
exit 0;