  #!/bin/bash
  
  # Copyright 2016  Vincent Nguyen
  #           2016  Johns Hopkins University (author: Daniel Povey)
  #           2017  Ashish Arora
  #           2017  Hossein Hadian
  # Apache 2.0
  #
  # This script trains a LM on the YOMDLE+Extra training transcriptions.
# It is based on the example scripts distributed with PocoLM.

# It checks that pocolm is installed and, if it is not, exits with
# instructions for installing it.
  
  set -e
  stage=0
  dir=data/local/local_lm
  data_dir=data
  extra_lm=download/extra_lm.txt
  order=3
  
  echo "$0 $@"  # Print the command line for logging
  . ./utils/parse_options.sh || exit 1;
  
  lm_dir=${dir}/data
  
  
  mkdir -p $dir
  . ./path.sh || exit 1; # for KALDI_ROOT
  export PATH=$KALDI_ROOT/tools/pocolm/scripts:$PATH
  ( # First make sure the pocolm toolkit is installed.
   cd $KALDI_ROOT/tools || exit 1;
   if [ -d pocolm ]; then
     echo Not installing the pocolm toolkit since it is already there.
   else
     echo "$0: Please install the PocoLM toolkit with: "
     echo " cd ../../../tools; extras/install_pocolm.sh; cd -"
     exit 1;
   fi
  ) || exit 1;
  
  bypass_metaparam_optim_opt=
# If you want to bypass the metaparameter optimization steps and use specific
# metaparameters instead, un-comment the following line and set the numbers to
# appropriate values.  You can find the values in the output log of a previous
# train_lm.py run.  (The number of values depends on the n-gram order and the
# data sources; the example numbers mentioned elsewhere are for a 4-gram model
# with min-counts.)  The dev perplexity should be close to that of the
# non-bypassed model.
#bypass_metaparam_optim_opt=
# Note: to use such parameters, you may need to remove the .done files, to make
# sure make_lm_dir.py is re-run and only the 3-gram model is trained:
  #for order in 3; do
  #rm -f ${lm_dir}/${num_word}_${order}.pocolm/.done
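# A sketch of what a bypass setting could look like; the flag name follows the
# usage in other pocolm-based Kaldi recipes, and the numbers below are
# placeholders rather than values tuned for this data (take the real ones from
# the log of a previous non-bypassed run):
#bypass_metaparam_optim_opt="--bypass-metaparameter-optimization=0.091,0.867,0.753,0.275,0.100,0.018,0.902"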
  
  if [ $stage -le 0 ]; then
    mkdir -p ${dir}/data
    mkdir -p ${dir}/data/text
  
    echo "$0: Getting the Data sources"
  
    rm ${dir}/data/text/* 2>/dev/null || true
  
    cat ${extra_lm} | local/bidi.py | utils/lang/bpe/prepend_words.py --encoding 'utf-8' | python3 utils/lang/bpe/apply_bpe.py -c $data_dir/train/bpe.out | sed 's/@@//g' > ${dir}/data/text/extra_lm.txt
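  # The pipeline above puts the extra LM text into (roughly) the same form as
  # the training transcriptions: local/bidi.py reorders the right-to-left Farsi
  # text, prepend_words.py marks word boundaries, apply_bpe.py splits words
  # into the BPE units learned in $data_dir/train/bpe.out, and the final sed
  # strips the "@@" continuation markers that apply_bpe.py inserts.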
    
    # Note: the name 'dev' is treated specially by pocolm, it automatically
    # becomes the dev set.
    nr=`cat $data_dir/train/text | wc -l`
    nr_dev=$(($nr / 10 ))
    nr_train=$(( $nr - $nr_dev ))
  
    # use the training data as an additional data source.
    # we can later fold the dev data into this.
    head -n $nr_train $data_dir/train/text | cut -d " " -f 2- >  ${dir}/data/text/train.txt
    tail -n $nr_dev $data_dir/train/text | cut -d " " -f 2- > ${dir}/data/text/dev.txt
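  # ${dir}/data/text now contains train.txt, extra_lm.txt and dev.txt; pocolm
  # treats train.txt and extra_lm.txt as separate data sources (with their own
  # interpolation weights) and holds out dev.txt for metaparameter tuning.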
  
    # for reporting perplexities, we'll use the "real" dev set.
    # (the validation data is used as ${dir}/data/text/dev.txt to work
    # out interpolation weights.)
    # note, we can't put it in ${dir}/data/text/, because then pocolm would use
    # it as one of the data sources.
    cut -d " " -f 2-  < $data_dir/test/text  > ${dir}/data/real_dev_set.txt
  
  # get the wordlist from the training and extra LM text
  cat ${dir}/data/text/{train,extra_lm}.txt | tr '[:space:]' '[\n*]' | grep -v "^\s*$" | sort | uniq -c | sort -bnr > ${dir}/data/word_count
  #cat ${dir}/data/text/extra_fa.txt | tr '[:space:]' '[\n*]' | grep -v "^\s*$" | sort | uniq -c | sort -bnr > ${dir}/data/word_count
    cat ${dir}/data/word_count | awk '{print $2}' > ${dir}/data/wordlist
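  # word_count has one "<count> <word>" line per word, most frequent first;
  # wordlist keeps just the words and is passed to train_lm.py in the next
  # stage.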
  fi
  
  if [ $stage -le 1 ]; then
  # decide on the vocabulary.
  # Note: we pass --wordlist below because we already determined a word-list
  # above; without one, you would let train_lm.py choose the vocabulary
  # instead.
  # Note: if you train more than one order, want to limit the vocabulary size,
  # or need to restrict the maximum memory used by 'sort', train_lm.py has
  # options for that.
    echo "$0: training the unpruned LM"
    min_counts='extra_lm=10 train=1'
    wordlist=${dir}/data/wordlist
  
    lm_name="`basename ${wordlist}`_${order}"
    if [ -n "${min_counts}" ]; then
      lm_name+="_`echo ${min_counts} | tr -s "[:blank:]" "_" | tr "=" "-"`"
    fi
    unpruned_lm_dir=${lm_dir}/${lm_name}.pocolm
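  # With the defaults above this resolves to
  # ${lm_dir}/wordlist_3_extra_lm-10_train-1.pocolm .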
    train_lm.py  --wordlist=${wordlist} --num-splits=30 --warm-start-ratio=1 \
                 --min-counts="$min_counts" \
                 --limit-unk-history=true \
                 ${bypass_metaparam_optim_opt} \
                 ${dir}/data/text ${order} ${lm_dir}/work ${unpruned_lm_dir}
  
    get_data_prob.py ${dir}/data/real_dev_set.txt ${unpruned_lm_dir} 2>&1 | grep -F '[perplexity'
  
    mkdir -p ${dir}/data/arpa
    format_arpa_lm.py ${unpruned_lm_dir} | gzip -c > ${dir}/data/arpa/${order}gram_unpruned.arpa.gz
  fi