extract_ivectors.sh
#!/bin/bash
# Copyright     2013  Daniel Povey
#               2014  David Snyder
# Apache 2.0.

# This script extracts iVectors for a set of utterances, given
# features and a trained iVector extractor.
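#
# The checks below expect <data> to contain feats.scp, vad.scp and utt2lang,
# and <extractor-dir> to contain final.ie (the iVector extractor) and
# final.ubm (the full-covariance UBM).  The script writes per-utterance
# iVectors to <ivector-dir>/ivector.scp and length-normalized per-language
# mean iVectors to <ivector-dir>/lang_ivector.scp.
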
# Begin configuration section.
nj=30
cmd="run.pl"
stage=0
num_gselect=20 # Gaussian-selection using diagonal model: number of Gaussians to select
min_post=0.025 # Minimum posterior to use (posteriors below this are pruned out)
posterior_scale=1.0 # This scale helps to control for successive features being highly
                    # correlated.  E.g. try 0.1 or 0.3.
# End configuration section.
echo "$0 $@" # Print the command line for logging
if [ -f path.sh ]; then . ./path.sh; fi
. parse_options.sh || exit 1;
if [ $# != 3 ]; then
  echo "Usage: $0 <extractor-dir> <data> <ivector-dir>"
  echo " e.g.: $0 exp/extractor_2048_male data/train_male exp/ivectors_male"
  echo "main options (for others, see top of script file)"
  echo "  --config <config-file>                           # config containing options"
  echo "  --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
  echo "  --nj <n|30>                                      # Number of parallel jobs"
  echo "  --stage <stage|0>                                # To control partial reruns"
  echo "  --num-gselect <n|20>                             # Number of Gaussians to select using"
  echo "                                                   # diagonal model."
  echo "  --min-post <min-post|0.025>                      # Pruning threshold for posteriors"
  exit 1;
fi
srcdir=$1
data=$2
dir=$3
for f in $srcdir/final.ie $srcdir/final.ubm $data/feats.scp $data/vad.scp $data/utt2lang; do
[ ! -f $f ] && echo "No such file $f" && exit 1;
done
# Set various variables.
mkdir -p $dir/log
sdata=$data/split$nj;
utils/split_data.sh $data $nj || exit 1;
## Set up features.
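# The pipeline below applies sliding-window cepstral mean normalization,
# appends shifted delta cepstra (SDC) to the features, and then keeps only
# the frames marked as speech by the VAD decisions in vad.scp.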
feats="ark,s,cs:apply-cmvn-sliding --norm-vars=false --center=true --cmn-window=300 scp:$sdata/JOB/feats.scp ark:- | add-deltas-sdc ark:- ark:- | select-voiced-frames ark:- scp,s,cs:$sdata/JOB/vad.scp ark:- |"
if [ $stage -le 0 ]; then
  echo "$0: extracting iVectors"
  dubm="fgmm-global-to-gmm $srcdir/final.ubm -|"
  $cmd JOB=1:$nj $dir/log/extract_ivectors.JOB.log \
    gmm-gselect --n=$num_gselect "$dubm" "$feats" ark:- \| \
    fgmm-global-gselect-to-post --min-post=$min_post $srcdir/final.ubm "$feats" \
      ark,s,cs:- ark:- \| scale-post ark:- $posterior_scale ark:- \| \
    ivector-extract --verbose=2 $srcdir/final.ie "$feats" ark,s,cs:- \
      ark,scp,t:$dir/ivector.JOB.ark,$dir/ivector.JOB.scp || exit 1;
fi
if [ $stage -le 1 ]; then
  echo "$0: combining iVectors across jobs"
  for j in $(seq $nj); do cat $dir/ivector.$j.scp; done >$dir/ivector.scp || exit 1;
fi
if [ $stage -le 2 ]; then
  # Be careful here: the language-level iVectors are now length-normalized,
  # even if they are otherwise the same as the utterance-level ones.
  echo "$0: computing mean of iVectors for each language and length-normalizing"
  $cmd $dir/log/lang_mean.log \
    ivector-normalize-length scp:$dir/ivector.scp ark:- \| \
    ivector-mean "ark:utils/utt2spk_to_spk2utt.pl $data/utt2lang|" ark:- ark:- ark,t:$dir/num_utts.ark \| \
    ivector-normalize-length ark:- ark,scp:$dir/lang_ivector.ark,$dir/lang_ivector.scp || exit 1;
fi