Scripts/steps/.svn/text-base/align_nnet.sh.svn-base
  #!/bin/bash
  # Copyright 2012  Brno University of Technology (Author: Karel Vesely)
  # Apache 2.0
  
# Computes training alignments using an MLP (nnet1) acoustic model.

# Note: unlike steps/align_si.sh, this script does not support the
# "--use-graphs true" option; the training graphs are always compiled
# on the fly from the transcripts in <data-dir>.
  
  
# Begin configuration section.
nj=4
cmd=run.pl
scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
beam=10        # initial beam for alignment
retry_beam=40  # wider beam used to retry utterances that fail to align
use_gpu_id=-1  # disable gpu
  # End configuration options.
  
  echo "$0 $@"  # Print the command line for logging
  
  [ -f path.sh ] && . ./path.sh # source the path.
  . parse_options.sh || exit 1;
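# (parse_options.sh consumes "--name value" pairs from the command line and
# sets the like-named shell variables declared in the configuration section.)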
  
  if [ $# != 4 ]; then
     echo "usage: steps/align_si.sh <data-dir> <lang-dir> <src-dir> <align-dir>"
     echo "e.g.:  steps/align_si.sh data/train data/lang exp/tri1 exp/tri1_ali"
     echo "main options (for others, see top of script file)"
     echo "  --config <config-file>                           # config containing options"
     echo "  --nj <nj>                                        # number of parallel jobs"
     echo "  --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
     exit 1;
  fi
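
# Example with options (illustrative paths; <src-dir> must contain final.nnet,
# final.feature_transform and ali_train_pdf.counts from nnet1 training):
#   steps/align_nnet.sh --nj 8 --cmd "run.pl" data/train data/lang exp/tri1_nnet exp/tri1_nnet_ali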
  
  data=$1
  lang=$2
  srcdir=$3
  dir=$4
  
  oov=`cat $lang/oov.int` || exit 1;
  mkdir -p $dir/log
  echo $nj > $dir/num_jobs
  sdata=$data/split$nj
  [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
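# (split_data.sh populates $data/split$nj/{1..$nj} with per-job subsets of
# feats.scp, utt2spk, text, etc.; the JOB placeholders below index into these.)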
  
  cp $srcdir/{tree,final.mdl} $dir || exit 1;
  
# Get the files we will need
nnet=$srcdir/final.nnet;
[ ! -f "$nnet" ] && echo "Error: nnet '$nnet' does not exist!" && exit 1;

class_frame_counts=$srcdir/ali_train_pdf.counts
[ ! -f "$class_frame_counts" ] && echo "Error: class_frame_counts '$class_frame_counts' does not exist!" && exit 1;
  
  feature_transform=$srcdir/final.feature_transform
  if [ ! -f $feature_transform ]; then
    echo "Missing feature_transform '$feature_transform'"
    exit 1
  fi
  
model=$dir/final.mdl
[ ! -f "$model" ] && echo "Error: transition model '$model' does not exist!" && exit 1;
  
  ###
  ### Prepare feature pipeline (same as for decoding)
  ###
  # Create the feature stream:
  feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |"
  # Optionally add cmvn
  if [ -f $srcdir/norm_vars ]; then
    norm_vars=$(cat $srcdir/norm_vars 2>/dev/null)
    [ ! -f $sdata/1/cmvn.scp ] && echo "$0: cannot find cmvn stats $sdata/1/cmvn.scp" && exit 1
    feats="$feats apply-cmvn --norm-vars=$norm_vars --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |"
  fi
  # Optionally add deltas
  if [ -f $srcdir/delta_order ]; then
    delta_order=$(cat $srcdir/delta_order)
    feats="$feats add-deltas --delta-order=$delta_order ark:- ark:- |"
  fi
  
  # Finally add feature_transform and the MLP
  feats="$feats nnet-forward --feature-transform=$feature_transform --no-softmax=true --class-frame-counts=$class_frame_counts --use-gpu-id=$use_gpu_id $nnet ark:- ark:- |"
  ###
  ###
  ###
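# For illustration, with CMVN and deltas enabled the pipeline for JOB=1
# expands to roughly (a sketch; the actual options are read from $srcdir):
#   copy-feats scp:$sdata/1/feats.scp ark:- | \
#   apply-cmvn --norm-vars=false --utt2spk=ark:$sdata/1/utt2spk scp:$sdata/1/cmvn.scp ark:- ark:- | \
#   add-deltas --delta-order=2 ark:- ark:- | \
#   nnet-forward --feature-transform=$feature_transform --no-softmax=true \
#     --class-frame-counts=$class_frame_counts --use-gpu-id=-1 $nnet ark:- ark:-
# With --no-softmax=true and --class-frame-counts set, nnet-forward emits
# pseudo log-likelihoods (log-posteriors minus log-priors), which is what
# align-compiled-mapped expects in place of GMM log-likelihoods.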
   
  echo "$0: aligning data in $data using model from $srcdir, putting alignments in $dir"
  
  tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
# We could just use align-mapped here, but it is less efficient, as it
# compiles the training graphs one by one.
  $cmd JOB=1:$nj $dir/log/align.JOB.log \
    compile-train-graphs $dir/tree $dir/final.mdl  $lang/L.fst "$tra" ark:- \| \
    align-compiled-mapped $scale_opts --beam=$beam --retry-beam=$retry_beam $dir/final.mdl ark:- \
      "$feats" "ark,t:|gzip -c >$dir/ali.JOB.gz" || exit 1;
  
  echo "$0: done aligning data."