nnet-trnopts.h
3.61 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
// nnet/nnet-trnopts.h
// Copyright 2013 Brno University of Technology (Author: Karel Vesely)
// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
#ifndef KALDI_NNET_NNET_TRNOPTS_H_
#define KALDI_NNET_NNET_TRNOPTS_H_
#include "base/kaldi-common.h"
#include "itf/options-itf.h"
namespace kaldi {
namespace nnet1 {
/// Hyper-parameters shared by the nnet1 SGD training tools:
/// learning rate, momentum, and L1/L2 regularization strengths.
struct NnetTrainOptions {
  // option declaration
  BaseFloat learn_rate;  ///< SGD learning rate.
  BaseFloat momentum;    ///< Momentum coefficient (0.0 disables momentum).
  BaseFloat l2_penalty;  ///< L2 penalty (weight decay).
  BaseFloat l1_penalty;  ///< L1 penalty (promotes sparsity).

  // default values
  NnetTrainOptions():
    learn_rate(0.008),
    momentum(0.0),
    l2_penalty(0.0),
    l1_penalty(0.0)
  { }

  /// Register the options with a command-line option parser.
  void Register(OptionsItf *opts) {
    opts->Register("learn-rate", &learn_rate, "Learning rate");
    opts->Register("momentum", &momentum, "Momentum");
    opts->Register("l2-penalty", &l2_penalty, "L2 penalty (weight decay)");
    opts->Register("l1-penalty", &l1_penalty, "L1 penalty (promote sparsity)");
  }

  /// Print the option values, for debugging.
  /// Fix: a space was missing between each option name and its value,
  /// which produced unreadable output such as "learn_rate0.008".
  friend std::ostream& operator<<(std::ostream& os,
                                  const NnetTrainOptions& opts) {
    os << "NnetTrainOptions : "
       << "learn_rate " << opts.learn_rate << ", "
       << "momentum " << opts.momentum << ", "
       << "l2_penalty " << opts.l2_penalty << ", "
       << "l1_penalty " << opts.l1_penalty;
    return os;
  }
};
/// Hyper-parameters for RBM (Contrastive Divergence) pre-training,
/// including a linear momentum-increase schedule.
struct RbmTrainOptions {
  // option declaration
  BaseFloat learn_rate;        ///< CD-1 learning rate.
  BaseFloat momentum;          ///< Initial momentum of the linear schedule.
  BaseFloat momentum_max;      ///< Final momentum of the linear schedule.
  int32 momentum_steps;        ///< Number of momentum-increase steps.
  int32 momentum_step_period;  ///< Datapoints per momentum-increase step.
  BaseFloat l2_penalty;        ///< L2 penalty (weight decay).

  // default values
  RbmTrainOptions():
    learn_rate(0.4),
    momentum(0.5),
    momentum_max(0.9),
    momentum_steps(40),
    momentum_step_period(500000),
    // 500000 * 40 = 55h of linear increase of momentum
    l2_penalty(0.0002)
  { }

  /// Register the options with a command-line option parser.
  void Register(OptionsItf *opts) {
    opts->Register("learn-rate", &learn_rate, "Learning rate");
    opts->Register("momentum", &momentum,
                   "Initial momentum for linear scheduling");
    opts->Register("momentum-max", &momentum_max,
                   "Final momentum for linear scheduling");
    opts->Register("momentum-steps", &momentum_steps,
                   "Number of steps of linear momentum scheduling");
    opts->Register("momentum-step-period", &momentum_step_period,
                   "Number of datapoints per single momentum increase step");
    opts->Register("l2-penalty", &l2_penalty,
                   "L2 penalty (weight decay, increases mixing-rate)");
  }

  /// Print the option values, for debugging.
  /// Fix: a space was missing between each option name and its value,
  /// which produced unreadable output such as "momentum0.5".
  friend std::ostream& operator<<(std::ostream& os,
                                  const RbmTrainOptions& opts) {
    os << "RbmTrainOptions : "
       << "learn_rate " << opts.learn_rate << ", "
       << "momentum " << opts.momentum << ", "
       << "momentum_max " << opts.momentum_max << ", "
       << "momentum_steps " << opts.momentum_steps << ", "
       << "momentum_step_period " << opts.momentum_step_period << ", "
       << "l2_penalty " << opts.l2_penalty;
    return os;
  }
};  // struct RbmTrainOptions
} // namespace nnet1
} // namespace kaldi
#endif // KALDI_NNET_NNET_TRNOPTS_H_