#!/bin/bash
# run_cnn_1c.sh: "1c" uses dropout, with fewer but larger layers.
# steps/info/nnet3_dir_info.pl exp/cnn1c_cifar10
# exp/cnn1c_cifar10: num-iters=60 nj=1..2 num-params=4.3M dim=96->10 combine=-0.00->-0.00 loglike:train/valid[39,59,final]=(-0.08,-0.01,-0.00/-0.71,-0.79,-2.09) accuracy:train/valid[39,59,final]=(0.98,1.00,1.00/0.78,0.78,0.78)
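# (How to read the line above: log-likelihood and accuracy are reported as
# train/valid pairs at iterations 39, 59 and for the final combined model.
# Train accuracy reaches 1.00 while valid accuracy stays at 0.78, so the
# model still overfits the CIFAR-10 training set despite the dropout.)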
# -e exits immediately if any command fails; -u makes unset variables an
# error; pipefail propagates failures through pipelines.
set -euo pipefail
# training options
stage=0
train_stage=-10
dataset=cifar10
srand=0
reporting_email=
affix=1c
# End configuration section.
echo "$0 $@" # Print the command line for logging
. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh
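
# Example invocation (the script path is a guess based on the usual Kaldi
# cifar/v1 layout; any variable defined above can be overridden as a
# command-line --option thanks to parse_options.sh):
#   local/nnet3/run_cnn_1c.sh --stage 0 --affix 1c --dataset cifar10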
if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi
with CUDA.  If you want to use GPUs (and have them), go to src/, then run
configure and make on a machine where "nvcc" is installed.
EOF
fi
dir=exp/cnn${affix}_${dataset}
egs=exp/${dataset}_egs
if [ ! -d $egs ]; then
  echo "$0: expected directory $egs to exist.  Run the get_egs.sh commands in"
  echo "  run.sh before running this script."
  exit 1
fi
# check that the expected files are in the egs directory.
for f in $egs/egs.1.ark $egs/train_diagnostic.egs $egs/valid_diagnostic.egs $egs/combine.egs \
         $egs/info/feat_dim $egs/info/left_context $egs/info/right_context \
         $egs/info/output_dim; do
  if [ ! -e $f ]; then
    echo "$0: expected file $f to exist."
    exit 1
  fi
done
mkdir -p $dir/log
if [ $stage -le 1 ]; then
  mkdir -p $dir/configs
  echo "$0: creating neural net configs using the xconfig parser"
  num_targets=$(cat $egs/info/output_dim)

  # Note: we hardcode in the CNN config that we are dealing with 32x32 color
  # images, i.e. a 96-dimensional input per time step (32 height x 3 colors).
  common1="required-time-offsets=0 height-offsets=-1,0,1 num-filters-out=32"
  common2="required-time-offsets=0 height-offsets=-1,0,1 num-filters-out=64"

  cat <<EOF > $dir/configs/network.xconfig
  input dim=96 name=input
  conv-relu-layer name=cnn1 height-in=32 height-out=32 time-offsets=-1,0,1 $common1
  conv-relu-dropout-layer name=cnn2 height-in=32 height-out=16 time-offsets=-1,0,1 dropout-proportion=0.25 $common1 height-subsample-out=2
  conv-relu-layer name=cnn3 height-in=16 height-out=16 time-offsets=-1,0,1 $common2
  conv-relu-dropout-layer name=cnn4 height-in=16 height-out=8 time-offsets=-1,0,1 dropout-proportion=0.25 $common2 height-subsample-out=2
  relu-dropout-layer name=fully_connected1 input=Append(0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30) dropout-proportion=0.5 dim=512
  output-layer name=output dim=$num_targets
EOF
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
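
# A reading aid (not part of the recipe): the 96-dim input is 32 (height) x 3
# (color channels), with the image's 32-pixel width treated as the time axis.
# cnn2 and cnn4 each halve the height (32 -> 16 -> 8) via height-subsample-out=2,
# so cnn4 emits 8 x 64 = 512 dims per time frame; Append(0,2,...,30) then
# splices every second time frame, giving fully_connected1 a 16 x 512 = 8192-dim
# input.  That one 8192x512 affine layer alone accounts for roughly 4.2M of the
# 4.3M parameters reported in the header.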
if [ $stage -le 2 ]; then
  steps/nnet3/train_raw_dnn.py --stage=$train_stage \
    --cmd="$train_cmd" \
    --trainer.srand=$srand \
    --trainer.max-param-change=2.0 \
    --trainer.num-epochs=30 \
    --egs.frames-per-eg=1 \
    --trainer.optimization.num-jobs-initial=1 \
    --trainer.optimization.num-jobs-final=2 \
    --trainer.optimization.initial-effective-lrate=0.003 \
    --trainer.optimization.final-effective-lrate=0.0003 \
    --trainer.optimization.minibatch-size=256,128,64 \
    --trainer.shuffle-buffer-size=2000 \
    --egs.dir="$egs" \
    --use-gpu=true \
    --reporting.email="$reporting_email" \
    --dir=$dir || exit 1;
fi
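
# Once training finishes, the command shown in the header summarizes the model
# and its train/valid diagnostics:
#   steps/info/nnet3_dir_info.pl $dir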
exit 0;