run_lstm_1a.sh
#!/bin/bash
# started from the tedlium recipe with a few edits
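# All of the option variables below can be overridden from the command line
# via utils/parse_options.sh, e.g. (hypothetical invocation):
#   ./run_lstm_1a.sh --stage 12 --train-stage -10 --affix 1a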
set -e -o pipefail -u
# First the options that are passed through to run_ivector_common.sh
# (some of which are also used in this script directly).
stage=0
nj=30
decode_nj=30
min_seg_len=1.55
train_set=train
gmm=tri2b  # this is the source gmm-dir for the data-type of interest; it
           # should have alignments for the specified training data.
num_threads_ubm=32
nnet3_affix=_cleaned # cleanup affix for exp dirs, e.g. _cleaned
# Options which are not passed through to run_ivector_common.sh
affix=
common_egs_dir=
reporting_email=
# LSTM options
train_stage=-10
splice_indexes="-2,-1,0,1,2 0 0"
lstm_delay=" -1 -2 -3 "
label_delay=5
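# label_delay=5 shifts the targets 5 frames into the future, giving the
# unidirectional LSTM some effective right context without a backward layer.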
num_lstm_layers=3
cell_dim=1024
hidden_dim=1024
recurrent_projection_dim=256
non_recurrent_projection_dim=256
chunk_width=20
chunk_left_context=40
chunk_right_context=0
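# Each training chunk is chunk_width frames wide; chunk_left_context extra
# frames on the left warm up the recurrent state before the chunk is scored.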
max_param_change=2.0
# training options
srand=0
num_epochs=6
initial_effective_lrate=0.0003
final_effective_lrate=0.00003
num_jobs_initial=2
num_jobs_final=3
momentum=0.5
num_chunk_per_minibatch=100
samples_per_iter=20000
remove_egs=true
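# num_jobs_initial/final set how many parallel training jobs run (and are
# model-averaged) at the start and end of training; train_rnn.py scales the
# actual learning rate by the current number of jobs to match the
# "effective" rates given above.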
# decode options
extra_left_context=
extra_right_context=
frames_per_chunk=
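# If left empty, these default to the corresponding training values in stage 14.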
. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh
if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
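# Stages below 12 (speed perturbation, hires MFCC extraction, combining short
# segments up to at least $min_seg_len seconds, UBM and i-vector extractor
# training) are handled inside local/nnet3/run_ivector_common.sh.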
local/nnet3/run_ivector_common.sh --stage $stage \
                                  --nj $nj \
                                  --min-seg-len $min_seg_len \
                                  --train-set $train_set \
                                  --gmm $gmm \
                                  --num-threads-ubm $num_threads_ubm \
                                  --nnet3-affix "$nnet3_affix"
gmm_dir=exp/${gmm}
graph_dir=$gmm_dir/graph
ali_dir=exp/${gmm}_ali_${train_set}_sp_comb
dir=exp/nnet3${nnet3_affix}/lstm${affix:+_$affix}
if [ $label_delay -gt 0 ]; then dir=${dir}_ld$label_delay; fi
dir=${dir}_sp
train_data_dir=data/${train_set}_sp_hires_comb
train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires_comb
for f in $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
    $graph_dir/HCLG.fst $ali_dir/ali.1.gz $gmm_dir/final.mdl; do
  [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done
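# Stage 12: generate the network config files for the (projected) LSTM.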
if [ $stage -le 12 ]; then
  echo "$0: creating neural net configs"
  config_extra_opts=()
  [ ! -z "$lstm_delay" ] && config_extra_opts+=(--lstm-delay "$lstm_delay")
  steps/nnet3/lstm/make_configs.py "${config_extra_opts[@]}" \
    --feat-dir $train_data_dir \
    --ivector-dir $train_ivector_dir \
    --ali-dir $ali_dir \
    --num-lstm-layers $num_lstm_layers \
    --splice-indexes "$splice_indexes" \
    --cell-dim $cell_dim \
    --hidden-dim $hidden_dim \
    --recurrent-projection-dim $recurrent_projection_dim \
    --non-recurrent-projection-dim $non_recurrent_projection_dim \
    --label-delay $label_delay \
    --self-repair-scale-nonlinearity 0.00001 \
    $dir/configs || exit 1;
fi
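# Stage 13: train the LSTM.  The hostname check below spreads the egs
# (training examples) over several disks when running on the CLSP grid;
# elsewhere the egs are simply written under $dir/egs.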
if [ $stage -le 13 ]; then
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
    utils/create_split_dir.pl \
      /export/b0{3,4,5,6}/$USER/kaldi-data/egs/tedlium-$(date +'%m_%d_%H_%M')/s5_r2/$dir/egs/storage $dir/egs/storage
  fi

  steps/nnet3/train_rnn.py --stage=$train_stage \
    --cmd="$decode_cmd" \
    --feat.online-ivector-dir=$train_ivector_dir \
    --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
    --trainer.srand=$srand \
    --trainer.num-epochs=$num_epochs \
    --trainer.samples-per-iter=$samples_per_iter \
    --trainer.optimization.num-jobs-initial=$num_jobs_initial \
    --trainer.optimization.num-jobs-final=$num_jobs_final \
    --trainer.optimization.initial-effective-lrate=$initial_effective_lrate \
    --trainer.optimization.final-effective-lrate=$final_effective_lrate \
    --trainer.optimization.shrink-value=0.99 \
    --trainer.rnn.num-chunk-per-minibatch=$num_chunk_per_minibatch \
    --trainer.optimization.momentum=$momentum \
    --egs.chunk-width=$chunk_width \
    --egs.chunk-left-context=$chunk_left_context \
    --egs.chunk-right-context=$chunk_right_context \
    --egs.dir="$common_egs_dir" \
    --cleanup.remove-egs=$remove_egs \
    --cleanup.preserve-model-interval=1 \
    --use-gpu=true \
    --feat-dir=$train_data_dir \
    --ali-dir=$ali_dir \
    --lang=data/lang \
    --reporting.email="$reporting_email" \
    --dir=$dir || exit 1;
fi
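# Stage 14: decode the test set with the training chunk sizes/contexts, then
# rescore the lattices with the const-arpa LM in data/lang_rescore.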
if [ $stage -le 14 ]; then
  [ -z "$extra_left_context" ] && extra_left_context=$chunk_left_context;
  [ -z "$extra_right_context" ] && extra_right_context=$chunk_right_context;
  [ -z "$frames_per_chunk" ] && frames_per_chunk=$chunk_width;
  steps/nnet3/decode.sh --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \
    --extra-left-context $extra_left_context \
    --extra-right-context $extra_right_context \
    --frames-per-chunk "$frames_per_chunk" \
    --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_test_hires \
    ${graph_dir} data/test_hires ${dir}/decode_test || exit 1
  steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" data/lang data/lang_rescore \
    data/test_hires ${dir}/decode_test ${dir}/decode_test_rescore || exit 1
fi
exit 0;