extract_ivectors_dnn.sh
4.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
#!/bin/bash
# Copyright 2013 Daniel Povey
# 2014-2015 David Snyder
# 2015 Johns Hopkins University (Author: Daniel Garcia-Romero)
# 2015 Johns Hopkins University (Author: Daniel Povey)
# 2016-2017 Go-Vivace Inc. (Author: Mousmita Sarma)
# Apache 2.0.
# This script extracts iVectors for a set of utterances, given
# a trained iVector extractor and a DNN model used to compute the
# frame-level posteriors.
# Begin configuration section.
nj=5 # Number of parallel extraction jobs.
cmd="run.pl" # Job dispatcher: run.pl for local execution, queue.pl for a grid.
stage=0 # Controls partial reruns; stages below this value are skipped.
min_post=0.025 # Minimum posterior to use (posteriors below this are pruned out)
posterior_scale=1.0 # This scale helps to control for successive features being highly
# correlated. E.g. try 0.1 or 0.3.
use_gpu=true # Run the DNN forward pass on a GPU (requires CUDA-compiled Kaldi).
chunk_size=256 # Number of frames fed to the DNN per forward pass.
nnet_job_opt= # Extra scheduler options for the DNN jobs (e.g. GPU requests).
# End configuration section.
echo "$0 $@" # Print the command line for logging
if [ -f path.sh ]; then . ./path.sh; fi
. parse_options.sh || exit 1;
# Print usage and exit unless exactly the five required positional
# arguments remain after option parsing.  The advertised options match
# the configuration variables defined at the top of this script
# (parse_options.sh rejects options with no corresponding variable).
if [ $# != 5 ]; then
  echo "Usage: $0 <extractor-dir> <dnn-model> <data-language-id> <data-dnn> <ivectors-dir>"
  echo " e.g.: $0 exp/extractor_dnn exp/nnet2_online/nnet_ms_a/final.mdl data/lre07 data/lre07_dnn exp/ivectors_lre07"
  echo "main options (for others, see top of script file)"
  echo "  --config <config-file>                           # config containing options"
  echo "  --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
  echo "  --nj <n|5>                                       # Number of jobs"
  echo "  --stage <stage|0>                                # To control partial reruns"
  echo "  --min-post <min-post|0.025>                      # Pruning threshold for posteriors"
  echo "  --posterior-scale <scale|1.0>                    # Scale on posteriors; helps control for"
  echo "                                                   # successive frames being correlated"
  echo "  --use-gpu <true/false>                           # Use GPU to extract DNN posteriors"
  echo "  --nnet-job-opt <option|''>                       # Options for the DNN jobs which add to or"
  echo "                                                   # replace those specified by --cmd"
  echo "  --chunk-size <n|256>                             # Number of frames processed at a time by the DNN"
  exit 1;
fi
# Positional arguments (count validated above).
srcdir=$1    # iVector extractor directory (must contain final.ie, final.ubm).
nnet=$2      # DNN model used to compute frame posteriors.
data=$3      # Data dir with language-id features (feats.scp, vad.scp).
data_dnn=$4  # Data dir with DNN-input features (feats.scp).
dir=$5       # Output directory for the extracted iVectors.

# Configure GPU vs CPU operation for the DNN forward pass.
gpu_opt=""
if $use_gpu; then
  nnet_job_opt="$nnet_job_opt --gpu 1"
  gpu_opt="--use-gpu=yes"
  if ! cuda-compiled; then
    echo "$0: WARNING: you are trying to use the GPU but you have not compiled"
    echo " for CUDA. If you have GPUs and have nvcc installed, go to src/"
    echo " and do ./configure; make"
    exit 1
  fi
else
  echo "$0: without using a GPU this will be slow."
  gpu_opt="--use-gpu=no"
fi

# Check all required input files up front -- including the VAD decisions
# and the DNN-input features used later in the pipelines -- so we fail
# early instead of partway through extraction.
for f in $srcdir/final.ie $srcdir/final.ubm $data/feats.scp $data/vad.scp $data_dnn/feats.scp ; do
  [ ! -f $f ] && echo "No such file $f" && exit 1;
done
# Set various variables.
mkdir -p $dir/log # Per-job logs are written here.
# Split both data directories into $nj pieces so each parallel job reads
# its own subset ($sdata/JOB and $sdata_dnn/JOB in the pipelines below).
sdata=$data/split$nj;
utils/split_data.sh $data $nj || exit 1;
sdata_dnn=$data_dnn/split$nj;
utils/split_data.sh $data_dnn $nj || exit 1;
# Set up language recognition features: sliding-window CMVN, then
# shifted-delta-cepstra, then keep only voiced frames per the VAD marks.
# The literal token JOB is substituted with the job index via sed below.
feats="ark,s,cs:apply-cmvn-sliding --norm-vars=false --center=true --cmn-window=300 scp:$sdata/JOB/feats.scp ark:- | add-deltas-sdc ark:- ark:- | select-voiced-frames ark:- scp,s,cs:$sdata/JOB/vad.scp ark:- |"
# Set up the DNN input features (sliding-window CMVN only; voiced-frame
# selection is applied to the DNN output in the extraction pipeline so
# the posterior stream lines up with the filtered feats above).
nnet_feats="ark,s,cs:apply-cmvn-sliding --center=true scp:$sdata_dnn/JOB/feats.scp ark:- |"
if [ $stage -le 0 ]; then
  echo "$0: extracting iVectors"
  # Launch one background job per split.  Each job runs: DNN forward pass
  # producing log-posteriors -> keep voiced frames -> prune (--min-post)
  # and scale posteriors -> iVector extraction against the language-
  # recognition features, writing ivector.$g.{ark,scp}.
  pids=()
  for g in $(seq $nj); do
    $cmd $nnet_job_opt $dir/log/extract_ivectors.$g.log \
      nnet-am-compute $gpu_opt --apply-log=true --chunk-size=${chunk_size} \
      $nnet "`echo $nnet_feats | sed s/JOB/$g/g`" ark:- \
      \| select-voiced-frames ark:- scp,s,cs:$sdata/$g/vad.scp ark:- \
      \| logprob-to-post --min-post=$min_post ark:- ark:- \| \
      scale-post ark:- $posterior_scale ark:- \| \
      ivector-extract --verbose=2 $srcdir/final.ie \
      "`echo $feats | sed s/JOB/$g/g`" ark,s,cs:- \
      ark,scp,t:$dir/ivector.$g.ark,$dir/ivector.$g.scp &
    pids+=($!)
  done
  # NOTE: 'cmd ... || exit 1 &' would only exit the background subshell,
  # so instead collect each job's exit status explicitly via wait and
  # abort if any extraction job failed.
  failed=0
  for pid in "${pids[@]}"; do
    wait $pid || failed=1
  done
  if [ $failed -ne 0 ]; then
    echo "$0: error extracting iVectors; see logs in $dir/log"
    exit 1
  fi
fi
if [ $stage -le 1 ]; then
  echo "$0: combining iVectors across jobs"
  # Concatenate the per-job scp lists, in job order, into one master list.
  job=1
  while [ $job -le $nj ]; do
    cat $dir/ivector.$job.scp
    job=$((job+1))
  done > $dir/ivector.scp || exit 1;
fi