Scripts/04_train_mono_LIA.sh
  #!/bin/sh
  
  EXPE_DIR=$1
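# Minimal argument guard (a sketch, not in the original script): fail early
# if the experiment directory is missing or does not exist, rather than
# copying into the wrong place below.
if [ -z "$EXPE_DIR" ] || [ ! -d "$EXPE_DIR" ]; then
  echo "usage: $0 <experiment-dir>" >&2
  exit 1
fi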
  
  . ../LIA_kaldiUtils/path.sh
  . ../LIA_kaldiUtils/cmd.sh
  #echo $PATH
  LM_DIR=$EXPE_DIR/LANGUAGE_MODEL/
  AM_DIR=$EXPE_DIR/ACOUSTIC_MODEL/
  AM_DATA=$EXPE_DIR/ac_Data/
  LM_DATA=$EXPE_DIR/ling_Data/
  
FORK=4   # number of parallel jobs, passed to Kaldi scripts via --nj
  
  
# Stage the acoustic and linguistic data into the model directories.
cp $AM_DATA/* $AM_DIR
cp $LM_DATA/text $AM_DIR
cp -R $LM_DATA/* $LM_DIR
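
# Optional sanity check (a sketch; assumes the standard Kaldi utils/ layout):
# utils/validate_data_dir.sh verifies that a data directory is internally
# consistent (utt2spk, text, wav.scp, ...). --no-feats skips the feature
# check, since features may not have been extracted yet at this stage.
#utils/validate_data_dir.sh --no-feats $AM_DIR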
  
# Flat start and monophone training, with delta-delta features.
# This script applies cepstral mean normalization (per speaker).
echo "steps/train_mono.sh --nj $FORK --cmd \"$train_cmd\" $AM_DIR $LM_DATA $AM_DIR/mono"
##___ to be re-enabled ___## steps/train_mono.sh --nj $FORK --cmd "$train_cmd" $AM_DIR $LM_DATA $AM_DIR/mono || exit 1;
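
# Once training is re-enabled, a quick sanity check on the resulting model
# (assumes the Kaldi binaries are on PATH via path.sh sourced above):
#gmm-info $AM_DIR/mono/final.mdl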
  
  # This script creates a fully expanded decoding graph (HCLG) that represents
  # all the language-model, pronunciation dictionary (lexicon), context-dependency,
  # and HMM structure in our model.  The output is a Finite State Transducer
  # that has word-ids on the output, and pdf-ids on the input (these are indexes
  # that resolve to Gaussian Mixture Models).
# The --mono option is for monophone models (the default assumes a 3-phone
# context window, i.e. context-dependent triphones).
  
  echo "=====> utils/mkgraph.sh --mono $LM_DATA  $AM_DIR/mono $AM_DIR/mono/graph"
  ##___ a remettre ____ #utils/mkgraph.sh --mono $LM_DIR  $AM_DIR/mono $AM_DIR/mono/graph
  #utils/mkgraph.sh --mono $LM_DATA  $AM_DIR/mono $AM_DIR/mono/graph
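
# A hedged check of the compiled graph (fstinfo ships with OpenFst, which
# Kaldi builds against); it prints state/arc counts for HCLG.fst:
#fstinfo $AM_DIR/mono/graph/HCLG.fst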
  
  
  echo "=====> steps/decode.sh --nj $FORK --cmd "$decode_cmd" --config $CONF_DIR/decode.config  $AM_DIR/mono/graph  $EXPE_DIR/TEST/ $AM_DIR/mono/decode"
  decode.sh --nj $FORK --cmd "$decode_cmd" --config $CONF_DIR/decode.config  $AM_DIR/mono/graph  $EXPE_DIR/TEST/ac_Data $AM_DIR/mono/decode
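
# After decoding, the usual Kaldi idiom to report the best WER across the
# scored LM weights (utils/best_wer.sh is part of the standard recipe):
#grep WER $AM_DIR/mono/decode/wer_* | utils/best_wer.sh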