  #!/bin/bash
  
  
  # aug_1a is as 1a but with data augmentation
  # accuracy 79.5% (1a had accuracy 69%)
  
  # steps/info/nnet3_dir_info.pl exp/cnn_aug_1a_cifar10
  # exp/cnn_aug_1a_cifar10: num-iters=60 nj=1..2 num-params=0.2M dim=96->10 combine=-0.61->-0.58 loglike:train/valid[39,59,final]=(-0.60,-0.49,-0.57/-0.68,-0.60,-0.67) accuracy:train/valid[39,59,final]=(0.79,0.83,0.81/0.76,0.79,0.77)
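  # (How to read that summary: the bracketed [39,59,final] indices are training
  # iterations, each a/b pair is the value on the train-diagnostic / valid-diagnostic
  # examples, and 'final' refers to the final, combined model.)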
  
  # Exit immediately if any command fails (-e), treat unset variables as errors
  # (-u), and make a pipeline fail if any command in it fails (pipefail).
  set -euo pipefail
  
  
  
  # training options
  stage=0
  train_stage=-10
  dataset=cifar10
  srand=0
  reporting_email=
  affix=_aug_1a
  
  
  # End configuration section.
  echo "$0 $@"  # Print the command line for logging
  
  . ./cmd.sh
  . ./path.sh
  . ./utils/parse_options.sh
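  # Any of the options in the configuration section above can be overridden on
  # the command line thanks to utils/parse_options.sh; a hypothetical invocation:
  #   local/nnet3/tuning/run_cnn_aug_1a.sh --stage 2 --affix _aug_1b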
  
  if ! cuda-compiled; then
    cat <<EOF && exit 1
  This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
  If you want to use GPUs (and have them), go to src/, and configure and make on a machine
  where "nvcc" is installed.
  EOF
  fi
  
  
  
  dir=exp/cnn${affix}_${dataset}
  
  egs=exp/${dataset}_egs
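  # With the defaults above these become exp/cnn_aug_1a_cifar10 and exp/cifar10_egs.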
  
  if [ ! -d $egs ]; then
    echo "$0: expected directory $egs to exist.  Run the get_egs.sh commands in the"
    echo "    run.sh before this script."
    exit 1
  fi
  
  # check that the expected files are in the egs directory.
  
  for f in $egs/egs.1.ark $egs/train_diagnostic.egs $egs/valid_diagnostic.egs $egs/combine.egs \
           $egs/info/feat_dim $egs/info/left_context $egs/info/right_context \
           $egs/info/output_dim; do
    if [ ! -e $f ]; then
      echo "$0: expected file $f to exist."
      exit 1;
    fi
  done
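  # egs.1.ark contains the training examples; train_diagnostic.egs, valid_diagnostic.egs
  # and combine.egs are small held-out sets used for the train/valid diagnostics and for
  # the final model combination; the info/ files describe the example geometry (for
  # CIFAR-10 we expect feat_dim=96 and output_dim=10).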
  
  
  mkdir -p $dir/log
  
  
  if [ $stage -le 1 ]; then
    mkdir -p $dir
    echo "$0: creating neural net configs using the xconfig parser";
  
    num_targets=$(cat $egs/info/output_dim)
  
    # Note: the CNN config below hardcodes the assumption that we are dealing
    # with 32x32 color (3-channel) images.
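    # (Each image is stored as a Kaldi feature matrix in which the 32 image
    # columns play the role of the "time" axis, and each frame holds
    # 32 (height) * 3 (colors) = 96 values, hence 'input dim=96' below.)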
  
    common="required-time-offsets=0 height-offsets=-1,0,1 num-filters-out=32"
  
    mkdir -p $dir/configs
    cat <<EOF > $dir/configs/network.xconfig
    input dim=96 name=input
    conv-relu-batchnorm-layer name=cnn1 height-in=32 height-out=32 time-offsets=-1,0,1 $common
    conv-relu-batchnorm-layer name=cnn2 height-in=32 height-out=32 time-offsets=-1,0,1 $common
    conv-relu-batchnorm-layer name=cnn3 height-in=32 height-out=32 time-offsets=-1,0,1 $common
    conv-relu-batchnorm-layer name=cnn4 height-in=32 height-out=16 time-offsets=-1,0,1 $common height-subsample-out=2
    conv-relu-batchnorm-layer name=cnn5 height-in=16 height-out=16 time-offsets=-2,0,2 $common
    conv-relu-batchnorm-layer name=cnn6 height-in=16 height-out=16 time-offsets=-2,0,2 $common
    conv-relu-batchnorm-layer name=cnn7 height-in=16 height-out=8  time-offsets=-2,0,2 $common height-subsample-out=2
    conv-relu-batchnorm-layer name=cnn8 height-in=8 height-out=8   time-offsets=-4,0,4 $common
    conv-relu-batchnorm-layer name=cnn9 height-in=8 height-out=8   time-offsets=-4,0,4 $common
    conv-relu-batchnorm-layer name=cnn10 height-in=8 height-out=4   time-offsets=-4,0,4 $common height-subsample-out=2
    conv-relu-batchnorm-layer name=cnn11 height-in=4 height-out=4   time-offsets=-8,0,8 $common
    conv-relu-batchnorm-layer name=cnn12 height-in=4 height-out=4   time-offsets=-8,0,8 $common
    relu-batchnorm-layer name=fully_connected1 input=Append(0,8,16,24) dim=128
    relu-batchnorm-layer name=fully_connected2 dim=256
    output-layer name=output dim=$num_targets
  EOF
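    # A note on the layers above: cnn12's output has height 4 with 32 filters
    # (128 values per time step), and Append(0,8,16,24) splices that output at
    # four horizontal (time) positions, so fully_connected1 sees 4 * 128 = 512
    # inputs.  xconfig_to_configs.py below expands this xconfig into the actual
    # nnet3 config files under $dir/configs that the training script reads.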
    steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
  fi
  
  
  if [ $stage -le 2 ]; then
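    # The augmentation opts randomly flip each training image horizontally with
    # probability 0.5 and apply small random horizontal/vertical shifts (the 0.1
    # is presumably a fraction of the image size); this augmentation is what
    # distinguishes this run from plain 1a.  Training runs for 30 epochs, with
    # the effective learning rate decaying from 3e-4 to 3e-5 and the number of
    # parallel jobs growing from 1 to 2.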
  
    steps/nnet3/train_raw_dnn.py --stage=$train_stage \
      --cmd="$train_cmd" \
      --image.augmentation-opts="--horizontal-flip-prob=0.5 --horizontal-shift=0.1 --vertical-shift=0.1" \
      --trainer.srand=$srand \
      --trainer.max-param-change=2.0 \
      --trainer.num-epochs=30 \
      --egs.frames-per-eg=1 \
      --trainer.optimization.num-jobs-initial=1 \
      --trainer.optimization.num-jobs-final=2 \
      --trainer.optimization.initial-effective-lrate=0.0003 \
      --trainer.optimization.final-effective-lrate=0.00003 \
      --trainer.optimization.minibatch-size=256,128,64 \
      --trainer.shuffle-buffer-size=2000 \
      --egs.dir="$egs" \
      --use-gpu=true \
      --reporting.email="$reporting_email" \
      --dir=$dir  || exit 1;
  fi
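
  # After training, the model can be summarized as in the header comment, e.g.:
  #   steps/info/nnet3_dir_info.pl $dir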
  
  
  exit 0;