#!/bin/bash
# This is pnorm neural net training on top of adapted 40-dimensional features.
# The variables below are defaults; each one can be overridden on the command
# line (e.g. --train-stage 10 --dir exp/nnet5d_gpu_b) via utils/parse_options.sh.
train_stage=-100
temp_dir= # e.g. --temp-dir /export/m1-02/dpovey/kaldi-dan2/egs/wsj/s5/
parallel_opts="--gpu 1" # This is suitable for the CLSP network, you'll likely have to change it.
dir=exp/nnet5d_gpu
# Note: since we multiplied the num-jobs by 1/4, we halved the
# learning rate, relative to run_5c.sh
# cmd.sh defines $decode_cmd (and friends); parse_options.sh must be sourced
# AFTER the defaults above so it can overwrite them from the command line.
. ./cmd.sh
. utils/parse_options.sh
(
# If a temp dir was supplied (typically fast local/scratch disk), create the
# (large) training-example "egs" directory there and symlink it into $dir so
# the examples don't fill up the main experiment filesystem.
if [ -n "$temp_dir" ] && [ ! -e "$dir/egs" ]; then
  mkdir -p "$dir"
  mkdir -p "$temp_dir/$dir/egs"
  ln -s "$temp_dir/$dir/egs" "$dir/"
fi

# Train the pnorm network on SI-284 data, using alignments from tri4b.
# NOTE: --egs-dir is "$dir/egs" (it was previously hard-coded to
# exp/nnet5d_gpu/egs, which silently broke runs that override --dir).
steps/nnet2/train_pnorm.sh --stage "$train_stage" \
  --num-jobs-nnet 4 --num-threads 1 --parallel-opts "$parallel_opts" \
  --egs-dir "$dir/egs" \
  --mix-up 8000 \
  --initial-learning-rate 0.02 --final-learning-rate 0.002 \
  --num-hidden-layers 4 \
  --pnorm-input-dim 2000 --pnorm-output-dim 400 \
  --cmd "$decode_cmd" \
  --p 2 \
  data/train_si284 data/lang exp/tri4b_ali_si284 "$dir" || exit 1

# Decode dev93/eval92 with the pruned-trigram (tgpr) and big-dictionary
# (bd_tgpr) graphs, reusing the per-speaker transforms from the corresponding
# tri4b decodes (--transform-dir). Decodes are best-effort: a failure in one
# does not stop the others (matching the original behavior).
steps/nnet2/decode.sh --cmd "$decode_cmd" --nj 10 \
  --transform-dir exp/tri4b/decode_tgpr_dev93 \
  exp/tri4b/graph_tgpr data/test_dev93 "$dir/decode_tgpr_dev93"
steps/nnet2/decode.sh --cmd "$decode_cmd" --nj 8 \
  --transform-dir exp/tri4b/decode_tgpr_eval92 \
  exp/tri4b/graph_tgpr data/test_eval92 "$dir/decode_tgpr_eval92"
steps/nnet2/decode.sh --cmd "$decode_cmd" --nj 10 \
  --transform-dir exp/tri4b/decode_bd_tgpr_dev93 \
  exp/tri4b/graph_bd_tgpr data/test_dev93 "$dir/decode_bd_tgpr_dev93"
steps/nnet2/decode.sh --cmd "$decode_cmd" --nj 8 \
  --transform-dir exp/tri4b/decode_bd_tgpr_eval92 \
  exp/tri4b/graph_bd_tgpr data/test_eval92 "$dir/decode_bd_tgpr_eval92"
)