#!/bin/bash

# local/online/run_nnet2_common.sh
# this script contains some common (shared) parts of the run_nnet*.sh scripts.

stage=0

set -e
. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh
  
  
if [ $stage -le 1 ]; then
  # Create high-resolution MFCC features (with 40 cepstra instead of 13).
  # this shows how you can split across multiple file-systems.  we'll split the
  # MFCC dir across multiple locations.  You might want to be careful here, if you
  # have multiple copies of Kaldi checked out and run the same recipe, not to let
  # them overwrite each other.
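  # For reference, a high-resolution MFCC config typically looks roughly like
  # the sketch below; check conf/mfcc_hires.conf in this recipe for the actual
  # values (the ones listed here are the common defaults, not copied from it):
  #   --use-energy=false
  #   --sample-frequency=16000
  #   --num-mel-bins=40
  #   --num-ceps=40
  #   --low-freq=20
  #   --high-freq=-400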
  mfccdir=mfcc
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $mfccdir/storage ]; then
    utils/create_split_dir.pl /export/b0{1,2,3,4}/$USER/kaldi-data/egs/librispeech-$(date +'%m_%d_%H_%M')/s5/$mfccdir/storage $mfccdir/storage
  fi
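  # What create_split_dir.pl does, roughly: it creates the directories given on
  # the command line and populates $mfccdir/storage with numbered symlinks
  # (storage/1, storage/2, ...) pointing into them, so that make_mfcc.sh can
  # spread its feature archives across the different file-systems.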
  
  for datadir in train_960 test_clean test_other dev_clean dev_other; do
    utils/copy_data_dir.sh data/$datadir data/${datadir}_hires
    steps/make_mfcc.sh --nj 70 --mfcc-config conf/mfcc_hires.conf \
      --cmd "$train_cmd" data/${datadir}_hires exp/make_hires/$datadir $mfccdir || exit 1;
    steps/compute_cmvn_stats.sh data/${datadir}_hires exp/make_hires/$datadir $mfccdir || exit 1;
  done
  
  # now create some data subsets.
  # mixed is the clean+other data.
  # 30k is 1/10 of the data (around 100 hours), 60k is 1/5th of it (around 200 hours).
  utils/subset_data_dir.sh data/train_960_hires 30000 data/train_mixed_hires_30k
  utils/subset_data_dir.sh data/train_960_hires 60000 data/train_mixed_hires_60k
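
  # Optional sanity check (illustrative, not part of the original recipe): each
  # subset should contain exactly the requested number of utterances, e.g.
  #   wc -l data/train_mixed_hires_30k/utt2spk   # expect 30000
  #   wc -l data/train_mixed_hires_60k/utt2spk   # expect 60000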
fi
  
if [ $stage -le 2 ]; then
  # We need to build a small system just because we need the LDA+MLLT transform
  # to train the diag-UBM on top of.  We align a subset of training data for
  # this purpose.
  utils/subset_data_dir.sh --utt-list <(awk '{print $1}' data/train_mixed_hires_30k/utt2spk) \
    data/train_960 data/train_960_30k
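
  # (The awk command just lists the utterance IDs of the 30k hires subset, so
  # data/train_960_30k covers exactly the same utterances but with the original
  # low-resolution features that the tri6b alignment below expects.)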
  
  steps/align_fmllr.sh --nj 40 --cmd "$train_cmd" \
    data/train_960_30k data/lang exp/tri6b exp/nnet2_online/tri6b_ali_30k
fi
  
if [ $stage -le 3 ]; then
  # Train a small system just for its LDA+MLLT transform.  We use --num-iters 13
  # because after we get the transform (12th iter is the last), any further
  # training is pointless.
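  # (In the command below, 5000 and 10000 are train_lda_mllt.sh's usual
  # positional arguments: the number of tree leaves and the total number of
  # Gaussians.)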
  steps/train_lda_mllt.sh --cmd "$train_cmd" --num-iters 13 \
    --realign-iters "" \
    --splice-opts "--left-context=3 --right-context=3" \
    5000 10000 data/train_mixed_hires_30k data/lang \
    exp/nnet2_online/tri6b_ali_30k exp/nnet2_online/tri7b
fi
  
  
if [ $stage -le 4 ]; then
  mkdir -p exp/nnet2_online
  # To train a diagonal UBM we don't need very much data, so use a small subset
  # (actually, it's not that small: still around 100 hours).
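  # (In the command below, 512 is the number of Gaussians in the diagonal UBM;
  # exp/nnet2_online/tri7b is only consulted for the LDA+MLLT feature transform
  # trained in the previous stage.)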
  steps/online/nnet2/train_diag_ubm.sh --cmd "$train_cmd" --nj 30 --num-frames 700000 \
    data/train_mixed_hires_30k 512 exp/nnet2_online/tri7b exp/nnet2_online/diag_ubm
fi
  
if [ $stage -le 5 ]; then
  # iVector extractors can in general be sensitive to the amount of data, but
  # this one has a fairly small dim (defaults to 100), so we don't use all of
  # the data; we use just the 60k subset (about one fifth of it, or 200 hours).
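  # (The iVector dimension is a configuration option of the extractor-training
  # script; in standard Kaldi it is exposed as --ivector-dim and defaults to
  # 100, which is what is used here.)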
  steps/online/nnet2/train_ivector_extractor.sh --cmd "$train_cmd" --nj 10 \
    data/train_mixed_hires_60k exp/nnet2_online/diag_ubm exp/nnet2_online/extractor || exit 1;
fi
  
if [ $stage -le 6 ]; then
  ivectordir=exp/nnet2_online/ivectors_train_960_hires
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $ivectordir/storage ]; then
    utils/create_split_dir.pl /export/b0{1,2,3,4}/$USER/kaldi-data/egs/librispeech-$(date +'%m_%d_%H_%M')/s5/$ivectordir/storage $ivectordir/storage
  fi
  # We extract iVectors on all the train data, which will be what we train the
  # system on.  With --utts-per-spk-max 2, the script pairs the utterances into
  # twos, and treats each of these pairs as one speaker.  Note that these are
  # extracted 'online'.

  # having a larger number of speakers is helpful for generalization, and to
  # handle per-utterance decoding well (the iVector starts at zero).
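  # Illustrative example with made-up IDs (the exact pseudo-speaker naming is
  # up to the script): a spk2utt entry like
  #   spkA  uttA1 uttA2 uttA3 uttA4 uttA5
  # ends up split into pseudo-speakers of at most two utterances, e.g.
  #   spkA-1  uttA1 uttA2
  #   spkA-2  uttA3 uttA4
  #   spkA-3  uttA5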
  steps/online/nnet2/copy_data_dir.sh --utts-per-spk-max 2 data/train_960_hires data/train_960_hires_max2
    
  steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj 60 \
    data/train_960_hires_max2 exp/nnet2_online/extractor $ivectordir || exit 1;
fi
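
# The run_nnet*.sh scripts that call this one then point their training stage
# at these iVectors; in the standard Kaldi nnet2-online recipes that is done by
# passing something like
#   --online-ivector-dir exp/nnet2_online/ivectors_train_960_hires
# to the neural-net training script.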
  
  
exit 0;