// lat/determinize-lattice-pruned.cc
// Copyright 2009-2012 Microsoft Corporation
// 2012-2013 Johns Hopkins University (Author: Daniel Povey)
// 2014 Guoguo Chen
// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include <climits>
#include "fstext/determinize-lattice.h" // for LatticeStringRepository
#include "fstext/fstext-utils.h"
#include "lat/lattice-functions.h" // for PruneLattice
#include "lat/minimize-lattice.h" // for minimization
#include "lat/push-lattice.h" // for minimization
#include "lat/determinize-lattice-pruned.h"
namespace fst {
// class LatticeDeterminizerPruned is templated on the same types that
// CompactLatticeWeight is templated on: the base weight (Weight), typically
// LatticeWeightTpl<float> etc. but could also be e.g. TropicalWeight, and the
// IntType, typically int32, used for the output symbols in the compact
// representation of strings [note: the output symbols would usually be
// p.d.f. id's in the anticipated use of this code]. It has a special requirement
// on the Weight type: that there should be a Compare function on the weights
// such that Compare(w1, w2) returns -1 if w1 < w2, 0 if w1 == w2, and +1 if w1 >
// w2. This requires that there be a total order on the weights.
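// A minimal usage sketch (illustrative only; it relies just on the public
// interface defined below, with typical template arguments being
// kaldi::LatticeWeight and kaldi::int32):
//   LatticeDeterminizerPruned<Weight, IntType> det(ifst, beam, opts);
//   double effective_beam;
//   bool within_beam = det.Determinize(&effective_beam);
//   det.Output(&compact_ofst);  // or det.Output(&ofst) for the non-compact form.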
template<class Weight, class IntType> class LatticeDeterminizerPruned {
public:
// Output to Gallic acceptor (so the strings go on weights, and there is a 1-1 correspondence
// between our states and the states in ofst). If destroy == true, release memory as we go
// (but we cannot output again).
typedef CompactLatticeWeightTpl<Weight, IntType> CompactWeight;
typedef ArcTpl<CompactWeight> CompactArc; // arc in compact, acceptor form of lattice
typedef ArcTpl<Weight> Arc; // arc in non-compact version of lattice
// Output to standard FST with CompactWeightTpl<Weight> as its weight type (the
// weight stores the original output-symbol strings). If destroy == true,
// release memory as we go (but we cannot output again).
void Output(MutableFst<CompactArc> *ofst, bool destroy = true) {
KALDI_ASSERT(determinized_);
typedef typename Arc::StateId StateId;
StateId nStates = static_cast<StateId>(output_states_.size());
if (destroy)
FreeMostMemory();
ofst->DeleteStates();
ofst->SetStart(kNoStateId);
if (nStates == 0) {
return;
}
for (StateId s = 0;s < nStates;s++) {
OutputStateId news = ofst->AddState();
KALDI_ASSERT(news == s);
}
ofst->SetStart(0);
// now process transitions.
for (StateId this_state_id = 0; this_state_id < nStates; this_state_id++) {
OutputState &this_state = *(output_states_[this_state_id]);
vector<TempArc> &this_vec(this_state.arcs);
typename vector<TempArc>::const_iterator iter = this_vec.begin(), end = this_vec.end();
for (;iter != end; ++iter) {
const TempArc &temp_arc(*iter);
CompactArc new_arc;
vector<Label> olabel_seq;
repository_.ConvertToVector(temp_arc.string, &olabel_seq);
CompactWeight weight(temp_arc.weight, olabel_seq);
if (temp_arc.nextstate == kNoStateId) { // is really final weight.
ofst->SetFinal(this_state_id, weight);
} else { // is really an arc.
new_arc.nextstate = temp_arc.nextstate;
new_arc.ilabel = temp_arc.ilabel;
new_arc.olabel = temp_arc.ilabel; // acceptor. input == output.
new_arc.weight = weight; // includes string and weight.
ofst->AddArc(this_state_id, new_arc);
}
}
// Free up memory. Do this inside the loop as ofst is also allocating memory,
// and we want to reduce the maximum amount ever allocated.
if (destroy) { vector<TempArc> temp; temp.swap(this_vec); }
}
if (destroy) {
FreeOutputStates();
repository_.Destroy();
}
}
// Output to standard FST with Weight as its weight type. We will create extra
// states to handle sequences of symbols on the output. If destroy == true,
// release memory as we go (but we cannot output again).
void Output(MutableFst<Arc> *ofst, bool destroy = true) {
// Outputs to standard fst.
OutputStateId nStates = static_cast<OutputStateId>(output_states_.size());
ofst->DeleteStates();
if (nStates == 0) {
ofst->SetStart(kNoStateId);
return;
}
if (destroy)
FreeMostMemory();
// Add basic states-- but we will add extra ones to account for strings on output.
for (OutputStateId s = 0; s< nStates;s++) {
OutputStateId news = ofst->AddState();
KALDI_ASSERT(news == s);
}
ofst->SetStart(0);
for (OutputStateId this_state_id = 0; this_state_id < nStates; this_state_id++) {
OutputState &this_state = *(output_states_[this_state_id]);
vector<TempArc> &this_vec(this_state.arcs);
typename vector<TempArc>::const_iterator iter = this_vec.begin(), end = this_vec.end();
for (; iter != end; ++iter) {
const TempArc &temp_arc(*iter);
vector<Label> seq;
repository_.ConvertToVector(temp_arc.string, &seq);
if (temp_arc.nextstate == kNoStateId) { // Really a final weight.
// Make a sequence of states going to a final state, with the strings
// as labels. Put the weight on the first arc.
OutputStateId cur_state = this_state_id;
for (size_t i = 0; i < seq.size(); i++) {
OutputStateId next_state = ofst->AddState();
Arc arc;
arc.nextstate = next_state;
arc.weight = (i == 0 ? temp_arc.weight : Weight::One());
arc.ilabel = 0; // epsilon.
arc.olabel = seq[i];
ofst->AddArc(cur_state, arc);
cur_state = next_state;
}
ofst->SetFinal(cur_state, (seq.size() == 0 ? temp_arc.weight : Weight::One()));
} else { // Really an arc.
OutputStateId cur_state = this_state_id;
// Have to be careful with this integer comparison (i+1 < seq.size()) because unsigned.
// i < seq.size()-1 could fail for zero-length sequences.
for (size_t i = 0; i+1 < seq.size();i++) {
// for all but the last element of seq, create new state.
OutputStateId next_state = ofst->AddState();
Arc arc;
arc.nextstate = next_state;
arc.weight = (i == 0 ? temp_arc.weight : Weight::One());
arc.ilabel = (i == 0 ? temp_arc.ilabel : 0); // put ilabel on first element of seq.
arc.olabel = seq[i];
ofst->AddArc(cur_state, arc);
cur_state = next_state;
}
// Add the final arc in the sequence.
Arc arc;
arc.nextstate = temp_arc.nextstate;
arc.weight = (seq.size() <= 1 ? temp_arc.weight : Weight::One());
arc.ilabel = (seq.size() <= 1 ? temp_arc.ilabel : 0);
arc.olabel = (seq.size() > 0 ? seq.back() : 0);
ofst->AddArc(cur_state, arc);
}
}
// Free up memory. Do this inside the loop as ofst is also allocating memory
if (destroy) { vector<TempArc> temp; temp.swap(this_vec); }
}
if (destroy) {
FreeOutputStates();
repository_.Destroy();
}
}
// Initializer. After initializing the object you will typically
// call Determinize() and then call one of the Output functions.
// Note: ifst.Copy() will generally do a
// shallow copy. We do it like this for memory safety, rather than
// keeping a reference or pointer to ifst_.
LatticeDeterminizerPruned(const ExpandedFst<Arc> &ifst,
double beam,
DeterminizeLatticePrunedOptions opts):
num_arcs_(0), num_elems_(0), ifst_(ifst.Copy()), beam_(beam), opts_(opts),
equal_(opts_.delta), determinized_(false),
minimal_hash_(3, hasher_, equal_), initial_hash_(3, hasher_, equal_) {
KALDI_ASSERT(Weight::Properties() & kIdempotent); // this algorithm won't
// work correctly otherwise.
}
void FreeOutputStates() {
for (size_t i = 0; i < output_states_.size(); i++)
delete output_states_[i];
vector<OutputState*> temp;
temp.swap(output_states_);
}
// frees all memory except the info (in output_states_[ ]->arcs)
// that we need to output the FST.
void FreeMostMemory() {
if (ifst_) {
delete ifst_;
ifst_ = NULL;
}
{ MinimalSubsetHash tmp; tmp.swap(minimal_hash_); }
for (size_t i = 0; i < output_states_.size(); i++) {
vector<Element> empty_subset;
empty_subset.swap(output_states_[i]->minimal_subset);
}
for (typename InitialSubsetHash::iterator iter = initial_hash_.begin();
iter != initial_hash_.end(); ++iter)
delete iter->first;
{ InitialSubsetHash tmp; tmp.swap(initial_hash_); }
for (size_t i = 0; i < output_states_.size(); i++) {
vector<Element> tmp;
tmp.swap(output_states_[i]->minimal_subset);
}
{ vector<char> tmp; tmp.swap(isymbol_or_final_); }
{ // Free up the queue. I'm not sure how to make sure all
// the memory is really freed (no swap() function)... doesn't really
// matter much though.
while (!queue_.empty()) {
Task *t = queue_.top();
delete t;
queue_.pop();
}
}
{ vector<pair<Label, Element> > tmp; tmp.swap(all_elems_tmp_); }
}
~LatticeDeterminizerPruned() {
FreeMostMemory();
FreeOutputStates();
// rest is deleted by destructors.
}
void RebuildRepository() { // rebuild the string repository,
// freeing stuff we don't need.. we call this when memory usage
// passes a supplied threshold. We need to accumulate all the
// strings we need the repository to "remember", then tell it
// to clean the repository.
std::vector<StringId> needed_strings;
for (size_t i = 0; i < output_states_.size(); i++) {
AddStrings(output_states_[i]->minimal_subset, &needed_strings);
for (size_t j = 0; j < output_states_[i]->arcs.size(); j++)
needed_strings.push_back(output_states_[i]->arcs[j].string);
}
{ // the queue doesn't allow us access to the underlying vector,
// so we have to resort to a temporary collection.
std::vector<Task*> tasks;
while (!queue_.empty()) {
Task *task = queue_.top();
queue_.pop();
tasks.push_back(task);
AddStrings(task->subset, &needed_strings);
}
for (size_t i = 0; i < tasks.size(); i++)
queue_.push(tasks[i]);
}
// the following loop covers strings present in initial_hash_.
for (typename InitialSubsetHash::const_iterator
iter = initial_hash_.begin();
iter != initial_hash_.end(); ++iter) {
const vector<Element> &vec = *(iter->first);
Element elem = iter->second;
AddStrings(vec, &needed_strings);
needed_strings.push_back(elem.string);
}
std::sort(needed_strings.begin(), needed_strings.end());
needed_strings.erase(std::unique(needed_strings.begin(),
needed_strings.end()),
needed_strings.end()); // uniq the strings.
KALDI_LOG << "Rebuilding repository.";
repository_.Rebuild(needed_strings);
}
bool CheckMemoryUsage() {
int32 repo_size = repository_.MemSize(),
arcs_size = num_arcs_ * sizeof(TempArc),
elems_size = num_elems_ * sizeof(Element),
total_size = repo_size + arcs_size + elems_size;
if (opts_.max_mem > 0 && total_size > opts_.max_mem) { // We passed the memory threshold.
// This is usually due to the repository getting large, so we
// clean this out.
RebuildRepository();
int32 new_repo_size = repository_.MemSize(),
new_total_size = new_repo_size + arcs_size + elems_size;
KALDI_VLOG(2) << "Rebuilt repository in determinize-lattice: repository shrank from "
<< repo_size << " to " << new_repo_size << " bytes (approximately)";
if (new_total_size > static_cast<int32>(opts_.max_mem * 0.8)) {
// Rebuilding didn't help enough-- we need a margin to stop
// having to rebuild too often. We'll just return to the user at
// this point, with a partial lattice that's pruned tighter than
// the specified beam. Here we figure out what the effective
// beam was.
double effective_beam = beam_;
if (!queue_.empty()) { // Note: queue should probably not be empty; we're
// just being paranoid here.
Task *task = queue_.top();
double total_weight = backward_costs_[ifst_->Start()]; // best weight of FST.
effective_beam = task->priority_cost - total_weight;
}
KALDI_WARN << "Did not reach requested beam in determinize-lattice: "
<< "size exceeds maximum " << opts_.max_mem
<< " bytes; (repo,arcs,elems) = (" << repo_size << ","
<< arcs_size << "," << elems_size
<< "), after rebuilding, repo size was " << new_repo_size
<< ", effective beam was " << effective_beam
<< " vs. requested beam " << beam_;
return false;
}
}
return true;
}
bool Determinize(double *effective_beam) {
KALDI_ASSERT(!determinized_);
// This determinizes the input fst but leaves it in the "special format"
// in "output_arcs_". Must be called after Initialize(). To get the
// output, call one of the Output routines.
InitializeDeterminization(); // some start-up tasks.
while (!queue_.empty()) {
Task *task = queue_.top();
// Note: the queue contains only tasks that are "within the beam".
// We also have to check whether we have reached one of the user-specified
// maximums, of estimated memory, arcs, or states. The condition for
// ending is:
// num-states is more than user specified, OR
// num-arcs is more than user specified, OR
// memory passed a user-specified threshold and cleanup failed
// to get it below that threshold.
size_t num_states = output_states_.size();
if ((opts_.max_states > 0 && num_states > opts_.max_states) ||
(opts_.max_arcs > 0 && num_arcs_ > opts_.max_arcs) ||
(num_states % 10 == 0 && !CheckMemoryUsage())) { // note: at some point
// it was num_states % 100, not num_states % 10, but I encountered an example
// where memory was exhausted before we reached state #100.
KALDI_VLOG(1) << "Lattice determinization terminated but not "
<< " because of lattice-beam. (#states, #arcs) is ( "
<< output_states_.size() << ", " << num_arcs_
<< " ), versus limits ( " << opts_.max_states << ", "
<< opts_.max_arcs << " ) (else, may be memory limit).";
break;
// we terminate the determinization here-- whatever we already expanded is
// what we'll return... because we expanded stuff in order of total
// (forward-backward) weight, the stuff we returned first is the most
// important.
}
queue_.pop();
ProcessTransition(task->state, task->label, &(task->subset));
delete task;
}
determinized_ = true;
if (effective_beam != NULL) {
if (queue_.empty()) *effective_beam = beam_;
else
*effective_beam = queue_.top()->priority_cost -
backward_costs_[ifst_->Start()];
}
return (queue_.empty()); // return success if queue was empty, i.e. we processed
// all tasks and did not break out of the loop early due to reaching a memory,
// arc or state limit.
}
private:
typedef typename Arc::Label Label;
typedef typename Arc::StateId StateId; // use this when we don't know if it's input or output.
typedef typename Arc::StateId InputStateId; // state in the input FST.
typedef typename Arc::StateId OutputStateId; // same as above but distinguish
// states in output Fst.
typedef LatticeStringRepository<IntType> StringRepositoryType;
typedef const typename StringRepositoryType::Entry* StringId;
// Element of a subset [of original states]
struct Element {
StateId state; // use StateId as this is usually InputStateId but in one case
// OutputStateId.
StringId string;
Weight weight;
bool operator != (const Element &other) const {
return (state != other.state || string != other.string ||
weight != other.weight);
}
// This operator is only intended for the priority_queue in the function
// EpsilonClosure().
bool operator > (const Element &other) const {
return state > other.state;
}
// This operator is only intended to support sorting in EpsilonClosure()
bool operator < (const Element &other) const {
return state < other.state;
}
};
// Arcs in the format we temporarily create in this class (a representation, essentially of
// a Gallic Fst).
struct TempArc {
Label ilabel;
StringId string; // Look it up in the StringRepository, it's a sequence of Labels.
OutputStateId nextstate; // or kNoState for final weights.
Weight weight;
};
// Hashing function used in hash of subsets.
// A subset is a pointer to vector<Element>.
// The Elements are in sorted order on state id, and without repeated states.
// Because the order of Elements is fixed, we can use a hashing function that is
// order-dependent. However the weights are not included in the hashing function--
// we hash subsets that differ only in weight to the same key. This is not optimal
// in terms of the O(N) performance but typically if we have a lot of determinized
// states that differ only in weight then the input probably was pathological in some way,
// or even non-determinizable.
// We don't quantize the weights, in order to avoid inexactness in simple cases.
// Instead we apply the delta when comparing subsets for equality, and allow a small
// difference.
class SubsetKey {
public:
size_t operator ()(const vector<Element> * subset) const { // hashes only the state and string.
size_t hash = 0, factor = 1;
for (typename vector<Element>::const_iterator iter= subset->begin(); iter != subset->end(); ++iter) {
hash *= factor;
hash += iter->state + reinterpret_cast<size_t>(iter->string);
factor *= 23531; // these numbers are primes.
}
return hash;
}
};
// This is the equality operator on subsets. It checks for exact match on state-id
// and string, and approximate match on weights.
class SubsetEqual {
public:
bool operator ()(const vector<Element> * s1, const vector<Element> * s2) const {
size_t sz = s1->size();
KALDI_ASSERT(sz>=0);
if (sz != s2->size()) return false;
typename vector<Element>::const_iterator iter1 = s1->begin(),
iter1_end = s1->end(), iter2=s2->begin();
for (; iter1 < iter1_end; ++iter1, ++iter2) {
if (iter1->state != iter2->state ||
iter1->string != iter2->string ||
! ApproxEqual(iter1->weight, iter2->weight, delta_)) return false;
}
return true;
}
float delta_;
SubsetEqual(float delta): delta_(delta) {}
SubsetEqual(): delta_(kDelta) {}
};
// Operator that says whether two Elements have the same states.
// Used only for debug.
class SubsetEqualStates {
public:
bool operator ()(const vector<Element> * s1, const vector<Element> * s2) const {
size_t sz = s1->size();
KALDI_ASSERT(sz>=0);
if (sz != s2->size()) return false;
typename vector<Element>::const_iterator iter1 = s1->begin(),
iter1_end = s1->end(), iter2=s2->begin();
for (; iter1 < iter1_end; ++iter1, ++iter2) {
if (iter1->state != iter2->state) return false;
}
return true;
}
};
// Define the hash type we use to map subsets (in minimal
// representation) to OutputStateId.
typedef unordered_map<const vector<Element>*, OutputStateId,
SubsetKey, SubsetEqual> MinimalSubsetHash;
// Define the hash type we use to map subsets (in initial
// representation) to OutputStateId, together with an
// extra weight. [note: we interpret the Element.state in here
// as an OutputStateId even though it's declared as InputStateId;
// these types are the same anyway].
typedef unordered_map<const vector<Element>*, Element,
SubsetKey, SubsetEqual> InitialSubsetHash;
// converts the representation of the subset from canonical (all states) to
// minimal (only states with nonzero input labels on arcs leaving them, and
// final states). Output is not necessarily normalized, even if input_subset was.
void ConvertToMinimal(vector<Element> *subset) {
KALDI_ASSERT(!subset->empty());
typename vector<Element>::iterator cur_in = subset->begin(),
cur_out = subset->begin(), end = subset->end();
while (cur_in != end) {
if(IsIsymbolOrFinal(cur_in->state)) { // keep it...
*cur_out = *cur_in;
cur_out++;
}
cur_in++;
}
subset->resize(cur_out - subset->begin());
}
// Takes a minimal, normalized subset, and converts it to an OutputStateId.
// Involves a hash lookup, and possibly adding a new OutputStateId.
// If it creates a new OutputStateId, it creates a new record for it, works
// out its final-weight, and puts stuff on the queue relating to its
// transitions.
OutputStateId MinimalToStateId(const vector<Element> &subset,
const double forward_cost) {
typename MinimalSubsetHash::const_iterator iter
= minimal_hash_.find(&subset);
if (iter != minimal_hash_.end()) { // Found a matching subset.
OutputStateId state_id = iter->second;
const OutputState &state = *(output_states_[state_id]);
// Below is just a check that the algorithm is working...
if (forward_cost < state.forward_cost - 0.1) {
// for large weights, this check could fail due to roundoff.
KALDI_WARN << "New cost is less (check the difference is small) "
<< forward_cost << ", "
<< state.forward_cost;
}
return state_id;
}
OutputStateId state_id = static_cast<OutputStateId>(output_states_.size());
OutputState *new_state = new OutputState(subset, forward_cost);
minimal_hash_[&(new_state->minimal_subset)] = state_id;
output_states_.push_back(new_state);
num_elems_ += subset.size();
// Note: in the previous algorithm, we pushed the new state-id onto the queue
// at this point. Here, the queue is handled elsewhere, and we directly process
// the state (which results in stuff getting added to the queue).
ProcessFinal(state_id); // will work out the final-prob.
ProcessTransitions(state_id); // will process transitions and add stuff to the queue.
return state_id;
}
// Given a normalized initial subset of elements (i.e. before epsilon closure),
// compute the corresponding output-state.
OutputStateId InitialToStateId(const vector<Element> &subset_in,
double forward_cost,
Weight *remaining_weight,
StringId *common_prefix) {
typename InitialSubsetHash::const_iterator iter
= initial_hash_.find(&subset_in);
if (iter != initial_hash_.end()) { // Found a matching subset.
const Element &elem = iter->second;
*remaining_weight = elem.weight;
*common_prefix = elem.string;
if (elem.weight == Weight::Zero())
KALDI_WARN << "Zero weight!";
return elem.state;
}
// else no matching subset-- have to work it out.
vector<Element> subset(subset_in);
// Follow through epsilons. Will add no duplicate states. note: after
// EpsilonClosure, it is the same as "canonical" subset, except not
// normalized (actually we never compute the normalized canonical subset,
// only the normalized minimal one).
EpsilonClosure(&subset); // follow epsilons.
ConvertToMinimal(&subset); // remove all but emitting and final states.
Element elem; // will be used to store remaining weight and string, and
// OutputStateId, in initial_hash_;
NormalizeSubset(&subset, &elem.weight, &elem.string); // normalize subset; put
// common string and weight in "elem". The subset is now a minimal,
// normalized subset.
forward_cost += ConvertToCost(elem.weight);
OutputStateId ans = MinimalToStateId(subset, forward_cost);
*remaining_weight = elem.weight;
*common_prefix = elem.string;
if (elem.weight == Weight::Zero())
KALDI_WARN << "Zero weight!";
// Before returning "ans", add the initial subset to the hash,
// so that we can bypass the epsilon-closure etc., next time
// we process the same initial subset.
vector<Element> *initial_subset_ptr = new vector<Element>(subset_in);
elem.state = ans;
initial_hash_[initial_subset_ptr] = elem;
num_elems_ += initial_subset_ptr->size(); // keep track of memory usage.
return ans;
}
// returns the Compare value (-1 if a < b, 0 if a == b, 1 if a > b) according
// to the ordering we defined on strings for the CompactLatticeWeightTpl.
// see function
// inline int Compare (const CompactLatticeWeightTpl<WeightType,IntType> &w1,
// const CompactLatticeWeightTpl<WeightType,IntType> &w2)
// in lattice-weight.h.
// this is the same as that, but optimized for our data structures.
inline int Compare(const Weight &a_w, StringId a_str,
const Weight &b_w, StringId b_str) const {
int weight_comp = fst::Compare(a_w, b_w);
if (weight_comp != 0) return weight_comp;
// now comparing strings.
if (a_str == b_str) return 0;
vector<IntType> a_vec, b_vec;
repository_.ConvertToVector(a_str, &a_vec);
repository_.ConvertToVector(b_str, &b_vec);
// First compare their lengths.
int a_len = a_vec.size(), b_len = b_vec.size();
// use opposite order on the string lengths (c.f. Compare in
// lattice-weight.h)
if (a_len > b_len) return -1;
else if (a_len < b_len) return 1;
for(int i = 0; i < a_len; i++) {
if (a_vec[i] < b_vec[i]) return -1;
else if (a_vec[i] > b_vec[i]) return 1;
}
KALDI_ASSERT(0); // because we checked if a_str == b_str above, shouldn't reach here
return 0;
}
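// For example, with equal weights: Compare() of the string [5 7] against [5]
// returns -1, because longer strings order as "less" here; and [5 7] against
// [5 9] returns -1 because 7 < 9 at the first differing position.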
// This function computes epsilon closure of subset of states by following epsilon links.
// Called by InitialToStateId and Initialize.
// Has no side effects except on the string repository. The "output_subset" is not
// necessarily normalized (in the sense of there being no common substring), unless
// input_subset was.
void EpsilonClosure(vector<Element> *subset) {
// at input, subset must have only one example of each StateId. [will still
// be so at output]. This function follows input-epsilons, and augments the
// subset accordingly.
std::priority_queue<Element, vector<Element>, greater<Element> > queue;
unordered_map<InputStateId, Element> cur_subset;
typedef typename unordered_map<InputStateId, Element>::iterator MapIter;
typedef typename vector<Element>::const_iterator VecIter;
for (VecIter iter = subset->begin(); iter != subset->end(); ++iter) {
queue.push(*iter);
cur_subset[iter->state] = *iter;
}
// find whether input fst is known to be sorted on input label.
bool sorted = ((ifst_->Properties(kILabelSorted, false) & kILabelSorted) != 0);
bool replaced_elems = false; // relates to an optimization, see below.
int counter = 0; // stops infinite loops here for non-lattice-determinizable input
// (e.g. input with negative-cost epsilon loops); useful in testing.
while (queue.size() != 0) {
Element elem = queue.top();
queue.pop();
// The next if-statement is a kind of optimization. It's to prevent us
// unnecessarily repeating the processing of a state. "cur_subset" always
// contains only one Element with a particular state. The issue is that
// whenever we modify the Element corresponding to that state in "cur_subset",
// both the new (optimal) and old (less-optimal) Element will still be in
// "queue". The next if-statement stops us from wasting compute by
// processing the old Element.
if (replaced_elems && cur_subset[elem.state] != elem)
continue;
if (opts_.max_loop > 0 && counter++ > opts_.max_loop) {
KALDI_ERR << "Lattice determinization aborted since looped more than "
<< opts_.max_loop << " times during epsilon closure.";
}
for (ArcIterator<ExpandedFst<Arc> > aiter(*ifst_, elem.state); !aiter.Done(); aiter.Next()) {
const Arc &arc = aiter.Value();
if (sorted && arc.ilabel != 0) break; // Break from the loop: due to sorting there will be no
// more transitions with epsilons as input labels.
if (arc.ilabel == 0
&& arc.weight != Weight::Zero()) { // Epsilon transition.
Element next_elem;
next_elem.state = arc.nextstate;
next_elem.weight = Times(elem.weight, arc.weight);
// next_elem.string is not set up yet... create it only
// when we know we need it (this is an optimization)
MapIter iter = cur_subset.find(next_elem.state);
if (iter == cur_subset.end()) {
// was no such StateId: insert and add to queue.
next_elem.string = (arc.olabel == 0 ? elem.string :
repository_.Successor(elem.string, arc.olabel));
cur_subset[next_elem.state] = next_elem;
queue.push(next_elem);
} else {
// was not inserted because one already there. In normal
// determinization we'd add the weights. Here, we find which one
// has the better weight, and keep its corresponding string.
int comp = fst::Compare(next_elem.weight, iter->second.weight);
if (comp == 0) { // A tie on weights. This should be a rare case;
// we don't optimize for it.
next_elem.string = (arc.olabel == 0 ? elem.string :
repository_.Successor(elem.string,
arc.olabel));
comp = Compare(next_elem.weight, next_elem.string,
iter->second.weight, iter->second.string);
}
if(comp == 1) { // next_elem is better, so use its (weight, string)
next_elem.string = (arc.olabel == 0 ? elem.string :
repository_.Successor(elem.string, arc.olabel));
iter->second.string = next_elem.string;
iter->second.weight = next_elem.weight;
queue.push(next_elem);
replaced_elems = true;
}
// else it is the same or worse, so use original one.
}
}
}
}
{ // copy cur_subset to subset.
subset->clear();
subset->reserve(cur_subset.size());
MapIter iter = cur_subset.begin(), end = cur_subset.end();
for (; iter != end; ++iter) subset->push_back(iter->second);
// sort by state ID, because the subset hash function is order-dependent (see SubsetKey).
std::sort(subset->begin(), subset->end());
}
}
// This function works out the final-weight of the determinized state.
// called by ProcessSubset.
// Has no side effects except on the variable repository_, and
// output_states_[output_state_id].arcs
void ProcessFinal(OutputStateId output_state_id) {
OutputState &state = *(output_states_[output_state_id]);
const vector<Element> &minimal_subset = state.minimal_subset;
// processes final-weights for this subset. state.minimal_subset may be
// empty if the graph is not connected/trimmed, I think, so don't check
// that it's nonempty.
StringId final_string = repository_.EmptyString(); // set it to keep the
// compiler happy; if it doesn't get set in the loop, we won't use the value anyway.
Weight final_weight = Weight::Zero();
bool is_final = false;
typename vector<Element>::const_iterator iter = minimal_subset.begin(), end = minimal_subset.end();
for (; iter != end; ++iter) {
const Element &elem = *iter;
Weight this_final_weight = Times(elem.weight, ifst_->Final(elem.state));
StringId this_final_string = elem.string;
if (this_final_weight != Weight::Zero() &&
(!is_final || Compare(this_final_weight, this_final_string,
final_weight, final_string) == 1)) { // the new
// (weight, string) pair is more in semiring than our current
// one.
is_final = true;
final_weight = this_final_weight;
final_string = this_final_string;
}
}
if (is_final &&
ConvertToCost(final_weight) + state.forward_cost <= cutoff_) {
// store final weights in TempArc structure, just like a transition.
// Note: we only store the final-weight if it's inside the pruning beam, hence
// the stuff with Compare.
TempArc temp_arc;
temp_arc.ilabel = 0;
temp_arc.nextstate = kNoStateId; // special marker meaning "final weight".
temp_arc.string = final_string;
temp_arc.weight = final_weight;
state.arcs.push_back(temp_arc);
num_arcs_++;
}
}
// NormalizeSubset normalizes the subset "elems" by
// removing any common string prefix (putting it in common_str),
// and dividing by the total weight (putting it in tot_weight).
void NormalizeSubset(vector<Element> *elems,
Weight *tot_weight,
StringId *common_str) {
if(elems->empty()) { // just set common_str, tot_weight
// to defaults and return...
KALDI_WARN << "empty subset";
*common_str = repository_.EmptyString();
*tot_weight = Weight::Zero();
return;
}
size_t size = elems->size();
vector<IntType> common_prefix;
repository_.ConvertToVector((*elems)[0].string, &common_prefix);
Weight weight = (*elems)[0].weight;
for(size_t i = 1; i < size; i++) {
weight = Plus(weight, (*elems)[i].weight);
repository_.ReduceToCommonPrefix((*elems)[i].string, &common_prefix);
}
KALDI_ASSERT(weight != Weight::Zero()); // we made sure to ignore arcs with zero
// weights on them, so we shouldn't have zero here.
size_t prefix_len = common_prefix.size();
for(size_t i = 0; i < size; i++) {
(*elems)[i].weight = Divide((*elems)[i].weight, weight, DIVIDE_LEFT);
(*elems)[i].string =
repository_.RemovePrefix((*elems)[i].string, prefix_len);
}
*common_str = repository_.ConvertFromVector(common_prefix);
*tot_weight = weight;
}
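// An illustrative example (treating each weight as a single tropical cost,
// which is a simplification of the LatticeWeight case): if *elems holds
// {(s1, "a b c", cost 3.0), (s2, "a b", cost 1.0)}, NormalizeSubset sets
// *tot_weight to cost 1.0 and *common_str to "a b", leaving
// {(s1, "c", cost 2.0), (s2, "", cost 0.0)}.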
// Take a subset of Elements that is sorted on state, and
// merge any Elements that have the same state (taking the best
// (weight, string) pair in the semiring).
void MakeSubsetUnique(vector<Element> *subset) {
typedef typename vector<Element>::iterator IterType;
// This KALDI_ASSERT is designed to fail (usually) if the subset is not sorted on
// state.
KALDI_ASSERT(subset->size() < 2 || (*subset)[0].state <= (*subset)[1].state);
IterType cur_in = subset->begin(), cur_out = cur_in, end = subset->end();
size_t num_out = 0;
// Merge elements with same state-id
while (cur_in != end) { // while we have more elements to process.
// At this point, cur_out points to location of next place we want to put an element,
// cur_in points to location of next element we want to process.
if (cur_in != cur_out) *cur_out = *cur_in;
cur_in++;
while (cur_in != end && cur_in->state == cur_out->state) {
if (Compare(cur_in->weight, cur_in->string,
cur_out->weight, cur_out->string) == 1) {
// if *cur_in > *cur_out in semiring, then take *cur_in.
cur_out->string = cur_in->string;
cur_out->weight = cur_in->weight;
}
cur_in++;
}
cur_out++;
num_out++;
}
subset->resize(num_out);
}
// ProcessTransition was called from "ProcessTransitions" in the non-pruned
// code, but now we in effect put the calls to ProcessTransition on a priority
// queue, and it now gets called directly from Determinize(). This function
// processes a transition from state "ostate_id". The set "subset" of Elements
// represents a set of next-states with associated weights and strings, each
// one arising from an arc from some state in a determinized-state; the
// next-states are unique (there is only one Element associated with each).
void ProcessTransition(OutputStateId ostate_id, Label ilabel, vector<Element> *subset) {
double forward_cost = output_states_[ostate_id]->forward_cost;
StringId common_str;
Weight tot_weight;
NormalizeSubset(subset, &tot_weight, &common_str);
forward_cost += ConvertToCost(tot_weight);
OutputStateId nextstate;
{
Weight next_tot_weight;
StringId next_common_str;
nextstate = InitialToStateId(*subset,
forward_cost,
&next_tot_weight,
&next_common_str);
common_str = repository_.Concatenate(common_str, next_common_str);
tot_weight = Times(tot_weight, next_tot_weight);
}
// Now add an arc to the next state (would have been created if necessary by
// InitialToStateId).
TempArc temp_arc;
temp_arc.ilabel = ilabel;
temp_arc.nextstate = nextstate;
temp_arc.string = common_str;
temp_arc.weight = tot_weight;
output_states_[ostate_id]->arcs.push_back(temp_arc); // record the arc.
num_arcs_++;
}
// "less than" operator for pair<Label, Element>. Used in ProcessTransitions.
// Lexicographical order, which only compares the state when ordering the
// "Element" member of the pair.
class PairComparator {
public:
inline bool operator () (const pair<Label, Element> &p1, const pair<Label, Element> &p2) {
if (p1.first < p2.first) return true;
else if (p1.first > p2.first) return false;
else {
return p1.second.state < p2.second.state;
}
}
};
// ProcessTransitions processes emitting transitions (transitions with
// ilabels) out of this subset of states. It actually only creates records
// ("Task") that get added to the queue. The transitions will be processed in
// priority order from Determinize(). This function does not consider final
// states. Partitions the emitting transitions up by ilabel (by sorting on
// ilabel), and for each unique ilabel, it creates a Task record that contains
// the information we need to process the transition.
void ProcessTransitions(OutputStateId output_state_id) {
const vector<Element> &minimal_subset = output_states_[output_state_id]->minimal_subset;
// it's possible that minimal_subset could be empty if there are
// unreachable parts of the graph, so don't check that it's nonempty.
vector<pair<Label, Element> > &all_elems(all_elems_tmp_); // use class member
// to avoid memory allocation/deallocation.
{
// Push back into "all_elems", elements corresponding to all
// non-epsilon-input transitions out of all states in "minimal_subset".
typename vector<Element>::const_iterator iter = minimal_subset.begin(), end = minimal_subset.end();
for (;iter != end; ++iter) {
const Element &elem = *iter;
for (ArcIterator<ExpandedFst<Arc> > aiter(*ifst_, elem.state); ! aiter.Done(); aiter.Next()) {
const Arc &arc = aiter.Value();
if (arc.ilabel != 0
&& arc.weight != Weight::Zero()) { // Non-epsilon transition -- ignore epsilons here.
pair<Label, Element> this_pr;
this_pr.first = arc.ilabel;
Element &next_elem(this_pr.second);
next_elem.state = arc.nextstate;
next_elem.weight = Times(elem.weight, arc.weight);
if (arc.olabel == 0) // output epsilon
next_elem.string = elem.string;
else
next_elem.string = repository_.Successor(elem.string, arc.olabel);
all_elems.push_back(this_pr);
}
}
}
}
PairComparator pc;
std::sort(all_elems.begin(), all_elems.end(), pc);
// now sorted first on input label, then on state.
typedef typename vector<pair<Label, Element> >::const_iterator PairIter;
PairIter cur = all_elems.begin(), end = all_elems.end();
while (cur != end) {
// The old code (non-pruned) called ProcessTransition; here, instead,
// we'll put the calls into a priority queue.
Task *task = new Task;
// Process ranges that share the same input symbol.
Label ilabel = cur->first;
task->state = output_state_id;
task->priority_cost = std::numeric_limits<double>::infinity();
task->label = ilabel;
while (cur != end && cur->first == ilabel) {
task->subset.push_back(cur->second);
const Element &element = cur->second;
// Note: we'll later include the term "forward_cost" in the
// priority_cost.
task->priority_cost = std::min(task->priority_cost,
ConvertToCost(element.weight) +
backward_costs_[element.state]);
cur++;
}
// After the command below, the "priority_cost" is a value comparable to
// the total-weight of the input FST, like a total-path weight... of
// course, it will typically be less (in the semiring) than that.
// note: we represent it just as a double.
task->priority_cost += output_states_[output_state_id]->forward_cost;
if (task->priority_cost > cutoff_) {
// This task would never get done as it's past the pruning cutoff.
delete task;
} else {
MakeSubsetUnique(&(task->subset)); // remove duplicate Elements with the same state.
queue_.push(task); // Push the task onto the queue. The queue keeps it
// in prioritized order, so we always process the one with the "best"
// weight (highest in the semiring).
{ // this is a check.
double best_cost = backward_costs_[ifst_->Start()],
tolerance = 0.01 + 1.0e-04 * std::abs(best_cost);
if (task->priority_cost < best_cost - tolerance) {
KALDI_WARN << "Cost below best cost was encountered:"
<< task->priority_cost << " < " << best_cost;
}
}
}
}
all_elems.clear(); // as it's a reference to a class variable; we want it to stay
// empty.
}
bool IsIsymbolOrFinal(InputStateId state) { // returns true if this state
// of the input FST either is final or has a non-epsilon ilabel on an arc out of it.
// Uses the vector isymbol_or_final_ as a cache for this info.
KALDI_ASSERT(state >= 0);
if (isymbol_or_final_.size() <= state)
isymbol_or_final_.resize(state+1, static_cast<char>(OSF_UNKNOWN));
if (isymbol_or_final_[state] == static_cast<char>(OSF_NO))
return false;
else if (isymbol_or_final_[state] == static_cast<char>(OSF_YES))
return true;
// else work it out...
isymbol_or_final_[state] = static_cast<char>(OSF_NO);
if (ifst_->Final(state) != Weight::Zero())
isymbol_or_final_[state] = static_cast<char>(OSF_YES);
for (ArcIterator<ExpandedFst<Arc> > aiter(*ifst_, state);
!aiter.Done();
aiter.Next()) {
const Arc &arc = aiter.Value();
if (arc.ilabel != 0 && arc.weight != Weight::Zero()) {
isymbol_or_final_[state] = static_cast<char>(OSF_YES);
return true;
}
}
return IsIsymbolOrFinal(state); // will only recurse once.
}
void ComputeBackwardWeight() {
// Sets up the backward_costs_ array, and the cutoff_ variable.
KALDI_ASSERT(beam_ > 0);
// Only handle the topologically sorted case.
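// Because ifst_ is topologically sorted, a single reverse sweep suffices:
//   backward_costs_[s] = min( cost(Final(s)),
//                             min over arcs a from s of
//                               cost(a.weight) + backward_costs_[a.nextstate] ).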
backward_costs_.resize(ifst_->NumStates());
for (StateId s = ifst_->NumStates() - 1; s >= 0; s--) {
double &cost = backward_costs_[s];
cost = ConvertToCost(ifst_->Final(s));
for (ArcIterator<ExpandedFst<Arc> > aiter(*ifst_, s);
!aiter.Done(); aiter.Next()) {
const Arc &arc = aiter.Value();
cost = std::min(cost,
ConvertToCost(arc.weight) + backward_costs_[arc.nextstate]);
}
}
if (ifst_->Start() == kNoStateId) return; // we'll be returning
// an empty FST.
double best_cost = backward_costs_[ifst_->Start()];
if (best_cost == numeric_limits<double>::infinity())
KALDI_WARN << "Total weight of input lattice is zero.";
cutoff_ = best_cost + beam_;
}
void InitializeDeterminization() {
// We insist that the input lattice be topologically sorted. This is not a
// fundamental limitation of the algorithm (which in principle should be
// applicable to even cyclic FSTs), but it helps us more efficiently
// compute the backward_costs_ array. There may be some other reason we
// require this, that escapes me at the moment.
KALDI_ASSERT(ifst_->Properties(kTopSorted, true) != 0);
ComputeBackwardWeight();
#if !(__GNUC__ == 4 && __GNUC_MINOR__ == 0)
if(ifst_->Properties(kExpanded, false) != 0) { // if we know the number of
// states in ifst_, it might be a bit more efficient
// to pre-size the hashes so we're not constantly rebuilding them.
StateId num_states =
down_cast<const ExpandedFst<Arc>*, const Fst<Arc> >(ifst_)->NumStates();
minimal_hash_.rehash(num_states/2 + 3);
initial_hash_.rehash(num_states/2 + 3);
}
#endif
InputStateId start_id = ifst_->Start();
if (start_id != kNoStateId) {
/* Create determinized-state corresponding to the start state....
Unlike all the other states, we don't "normalize" the representation
of this determinized-state before we put it into minimal_hash_. This is actually
what we want, as otherwise we'd have problems dealing with any extra weight
and string and might have to create a "super-initial" state which would make
the output nondeterministic. Normalization is only needed to make the
determinized output more minimal anyway; it's not needed for correctness.
Note, we don't put anything in the initial_hash_. The initial_hash_ is only
a lookaside buffer anyway, so this isn't a problem-- it will get populated
later if it needs to be.
*/
vector<Element> subset(1);
subset[0].state = start_id;
subset[0].weight = Weight::One();
subset[0].string = repository_.EmptyString(); // Id of empty sequence.
EpsilonClosure(&subset); // follow through epsilon-input links
ConvertToMinimal(&subset); // remove all but final states and
// states with input-labels on arcs out of them.
// Weight::One() is the "forward-weight" of this determinized state...
// i.e. the minimal cost from the start of the determinized FST to this
// state [One() because it's the start state].
OutputState *initial_state = new OutputState(subset, 0);
KALDI_ASSERT(output_states_.empty());
output_states_.push_back(initial_state);
num_elems_ += subset.size();
OutputStateId initial_state_id = 0;
minimal_hash_[&(initial_state->minimal_subset)] = initial_state_id;
ProcessFinal(initial_state_id);
ProcessTransitions(initial_state_id); // this will add tasks to
// the queue, which we'll start processing in Determinize().
}
}
KALDI_DISALLOW_COPY_AND_ASSIGN(LatticeDeterminizerPruned);
struct OutputState {
vector<Element> minimal_subset;
vector<TempArc> arcs; // arcs out of the state-- those that have been processed.
// Note: the final-weight is included here with kNoStateId as the state id. We
// always process the final-weight regardless of the beam; when producing the
// output we may have to ignore some of these.
double forward_cost; // Represents minimal cost from start-state
// to this state. Used in prioritization of tasks, and pruning.
// Note: we know this minimal cost from when we first create the OutputState;
// this is because of the priority-queue we use, that ensures that the
// "best" path into the state will be expanded first.
OutputState(const vector<Element> &minimal_subset,
double forward_cost): minimal_subset(minimal_subset),
forward_cost(forward_cost) { }
};
vector<OutputState*> output_states_; // All the info about the output states.
int num_arcs_; // keep track of memory usage: number of arcs in output_states_[ ]->arcs
int num_elems_; // keep track of memory usage: number of elems in output_states_ and
// the keys of initial_hash_
const ExpandedFst<Arc> *ifst_;
std::vector<double> backward_costs_; // This vector stores, for every state in ifst_,
// the minimal cost to the end-state (i.e. the sum of weights; they are guaranteed to
// have "take-the-minimum" semantics). We get the double from the ConvertToCost()
// function on the lattice weights.
double beam_;
double cutoff_; // beam plus total-weight of input (and note, the weight is
// guaranteed to be "tropical-like", so the sum does represent a min-cost).
DeterminizeLatticePrunedOptions opts_;
SubsetKey hasher_; // object that computes keys-- has no data members.
SubsetEqual equal_; // object that compares subsets-- only data member is delta_.
bool determinized_; // set to true when user called Determinize(); used to make
// sure this object is used correctly.
MinimalSubsetHash minimal_hash_; // hash from Subset to OutputStateId. Subset is "minimal
// representation" (only include final and states and states with
// nonzero ilabel on arc out of them. Owns the pointers
// in its keys.
InitialSubsetHash initial_hash_; // hash from Subset to Element, which
// represents the OutputStateId together
// with an extra weight and string. Subset
// is "initial representation". The extra
// weight and string is needed because after
// we convert to minimal representation and
// normalize, there may be an extra weight
// and string. Owns the pointers
// in its keys.
struct Task {
OutputStateId state; // State from which we're processing the transition.
Label label; // Label on the transition we're processing out of this state.
vector<Element> subset; // Weighted subset of states (with strings)-- not normalized.
double priority_cost; // Cost used in deciding priority of tasks. Note:
// we assume there is a ConvertToCost() function that converts the semiring to double.
};
struct TaskCompare {
inline int operator() (const Task *t1, const Task *t2) {
// view this like operator <, which is the default template parameter
// to std::priority_queue.
// returns true if t1 is worse than t2.
return (t1->priority_cost > t2->priority_cost);
}
};
// This priority queue contains "Task"s to be processed; these correspond
// to transitions out of determinized states. We process these in priority
// order according to the best weight of any path passing through these
// determinized states... it's possible to work this out.
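// Note: TaskCompare plays the role of "less-than" here, where "less" means
// worse (higher priority_cost), so queue_.top() is always the task with the
// smallest, i.e. best, priority_cost.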
std::priority_queue<Task*, vector<Task*>, TaskCompare> queue_;
vector<pair<Label, Element> > all_elems_tmp_; // temporary vector used in ProcessTransitions.
enum IsymbolOrFinal { OSF_UNKNOWN = 0, OSF_NO = 1, OSF_YES = 2 };
vector<char> isymbol_or_final_; // A kind of cache; it says whether
// each state is (emitting or final) where emitting means it has at least one
// non-epsilon ilabel on an arc out of it. Only accessed by IsIsymbolOrFinal().
LatticeStringRepository<IntType> repository_; // defines a compact and fast way of
// storing sequences of labels.
void AddStrings(const vector<Element> &vec,
vector<StringId> *needed_strings) {
for (typename std::vector<Element>::const_iterator iter = vec.begin();
iter != vec.end(); ++iter)
needed_strings->push_back(iter->string);
}
};
// normally Weight would be LatticeWeight<float> (which has two floats),
// or possibly TropicalWeightTpl<float>, and IntType would be int32.
// Caution: there are two versions of the function DeterminizeLatticePruned,
// with identical code but different output FST types.
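// A hypothetical call site (the variable names here are purely illustrative):
// given a topologically sorted, state-level kaldi::Lattice 'lat',
//   fst::DeterminizeLatticePrunedOptions opts;
//   kaldi::CompactLattice clat;
//   bool within_beam = fst::DeterminizeLatticePruned(lat, 8.0, &clat, opts);
// A false return means a size or memory limit was hit, so the output is
// pruned to a narrower effective beam than requested.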
template<class Weight, class IntType>
bool DeterminizeLatticePruned(
const ExpandedFst<ArcTpl<Weight> >&ifst,
double beam,
MutableFst<ArcTpl<CompactLatticeWeightTpl<Weight, IntType> > >*ofst,
DeterminizeLatticePrunedOptions opts) {
ofst->SetInputSymbols(ifst.InputSymbols());
ofst->SetOutputSymbols(ifst.OutputSymbols());
if (ifst.NumStates() == 0) {
ofst->DeleteStates();
return true;
}
KALDI_ASSERT(opts.retry_cutoff >= 0.0 && opts.retry_cutoff < 1.0);
int32 max_num_iters = 10; // avoid the potential for infinite loops if
// retrying.
VectorFst<ArcTpl<Weight> > temp_fst;
for (int32 iter = 0; iter < max_num_iters; iter++) {
LatticeDeterminizerPruned<Weight, IntType> det(iter == 0 ? ifst : temp_fst,
beam, opts);
double effective_beam;
bool ans = det.Determinize(&effective_beam);
// if it returns false it will typically still produce reasonable output,
// just with a narrower beam than "beam". If the user specifies an infinite
// beam we don't do this beam-narrowing.
if (effective_beam >= beam * opts.retry_cutoff ||
beam == std::numeric_limits<double>::infinity() ||
iter + 1 == max_num_iters) {
det.Output(ofst);
return ans;
} else {
// The code below to set "beam" is a heuristic.
// If effective_beam is very small, we want to reduce by a lot.
// But never change the beam by more than a factor of two.
if (effective_beam < 0.0) effective_beam = 0.0;
double new_beam = beam * sqrt(effective_beam / beam);
if (new_beam < 0.5 * beam) new_beam = 0.5 * beam;
beam = new_beam;
if (iter == 0) temp_fst = ifst;
kaldi::PruneLattice(beam, &temp_fst);
KALDI_LOG << "Pruned state-level lattice with beam " << beam
<< " and retrying determinization with that beam.";
}
}
return false; // Suppress compiler warning; this code is unreachable.
}
// normally Weight would be LatticeWeight<float> (which has two floats),
// or possibly TropicalWeightTpl<float>, and IntType would be int32.
// Caution: there are two versions of the function DeterminizeLatticePruned,
// with identical code but different output FST types.
template<class Weight>
bool DeterminizeLatticePruned(const ExpandedFst<ArcTpl<Weight> > &ifst,
double beam,
MutableFst<ArcTpl<Weight> > *ofst,
DeterminizeLatticePrunedOptions opts) {
typedef int32 IntType;
ofst->SetInputSymbols(ifst.InputSymbols());
ofst->SetOutputSymbols(ifst.OutputSymbols());
KALDI_ASSERT(opts.retry_cutoff >= 0.0 && opts.retry_cutoff < 1.0);
if (ifst.NumStates() == 0) {
ofst->DeleteStates();
return true;
}
int32 max_num_iters = 10; // avoid the potential for infinite loops if
// retrying.
VectorFst<ArcTpl<Weight> > temp_fst;
for (int32 iter = 0; iter < max_num_iters; iter++) {
LatticeDeterminizerPruned<Weight, IntType> det(iter == 0 ? ifst : temp_fst,
beam, opts);
double effective_beam;
bool ans = det.Determinize(&effective_beam);
// if it returns false it will typically still
// produce reasonable output, just with a
// narrower beam than "beam".
if (effective_beam >= beam * opts.retry_cutoff ||
iter + 1 == max_num_iters) {
det.Output(ofst);
return ans;
} else {
// The code below to set "beam" is a heuristic.
// If effective_beam is very small, we want to reduce by a lot.
// But never change the beam by more than a factor of two.
if (effective_beam < 0)
effective_beam = 0;
double new_beam = beam * sqrt(effective_beam / beam);
if (new_beam < 0.5 * beam) new_beam = 0.5 * beam;
KALDI_WARN << "Effective beam " << effective_beam << " was less than beam "
<< beam << " * cutoff " << opts.retry_cutoff << ", pruning raw "
<< "lattice with new beam " << new_beam << " and retrying.";
beam = new_beam;
if (iter == 0) temp_fst = ifst;
kaldi::PruneLattice(beam, &temp_fst);
}
}
return false; // Suppress compiler warning; this code is unreachable.
}
template<class Weight>
typename ArcTpl<Weight>::Label DeterminizeLatticeInsertPhones(
const kaldi::TransitionModel &trans_model,
MutableFst<ArcTpl<Weight> > *fst) {
// Define some types.
typedef ArcTpl<Weight> Arc;
typedef typename Arc::StateId StateId;
typedef typename Arc::Label Label;
// Work out the first phone symbol.  This is really part of the phone
// insertion step, so we compute it here and make it the return value of
// DeterminizeLatticeInsertPhones().
Label first_phone_label = HighestNumberedInputSymbol(*fst) + 1;
// Insert phones here.
for (StateIterator<MutableFst<Arc> > siter(*fst);
!siter.Done(); siter.Next()) {
StateId state = siter.Value();
if (state == fst->Start())
continue;
for (MutableArcIterator<MutableFst<Arc> > aiter(fst, state);
!aiter.Done(); aiter.Next()) {
Arc arc = aiter.Value();
// Note: the words are on the input symbol side and transition-ids are on
// the output symbol side.
if ((arc.olabel != 0)
&& (trans_model.TransitionIdToHmmState(arc.olabel) == 0)
&& (!trans_model.IsSelfLoop(arc.olabel))) {
Label phone =
static_cast<Label>(trans_model.TransitionIdToPhone(arc.olabel));
// Skips <eps>.
KALDI_ASSERT(phone != 0);
if (arc.ilabel == 0) {
// If there is no word on the arc, insert the phone directly.
arc.ilabel = first_phone_label + phone;
} else {
// Otherwise, add an additional arc.
StateId additional_state = fst->AddState();
StateId next_state = arc.nextstate;
arc.nextstate = additional_state;
fst->AddArc(additional_state,
Arc(first_phone_label + phone, 0,
Weight::One(), next_state));
}
}
aiter.SetValue(arc);
}
}
return first_phone_label;
}
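// Illustration of the transformation above (arcs shown as ilabel:olabel, with
// hypothetical values: first_phone_label = 1000, a phone-initial transition-id
// "tid" belonging to phone 3, and word label "w"):
//
//   no word on the arc:   s --(0:tid)--> t    becomes   s --(1003:tid)--> t
//   word on the arc:      s --(w:tid)--> t    becomes   s --(w:tid)--> n
//                                                       n --(1003:0)--> t
//
// where "n" is a newly added state and the arc out of "n" has weight
// Weight::One().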
template<class Weight>
void DeterminizeLatticeDeletePhones(
typename ArcTpl<Weight>::Label first_phone_label,
MutableFst<ArcTpl<Weight> > *fst) {
// Define some types.
typedef ArcTpl<Weight> Arc;
typedef typename Arc::StateId StateId;
typedef typename Arc::Label Label;
// Delete phones here.
for (StateIterator<MutableFst<Arc> > siter(*fst);
!siter.Done(); siter.Next()) {
StateId state = siter.Value();
for (MutableArcIterator<MutableFst<Arc> > aiter(fst, state);
!aiter.Done(); aiter.Next()) {
Arc arc = aiter.Value();
if (arc.ilabel >= first_phone_label)
arc.ilabel = 0;
aiter.SetValue(arc);
}
}
}
// instantiate for type LatticeWeight
template
void DeterminizeLatticeDeletePhones(
ArcTpl<kaldi::LatticeWeight>::Label first_phone_label,
MutableFst<ArcTpl<kaldi::LatticeWeight> > *fst);
/** This function does a first pass of determinization with phone symbols
inserted at phone boundaries. It uses a transition model to work out the
transition-id-to-phone map. First, phones are inserted into the word-level
lattice. Second, determinization is applied on top of the phone + word
lattice. Finally, the inserted phones are removed, converting the lattice back
to a word-level lattice. The output lattice of this pass is not deterministic,
since we remove the phone symbols as a last step. It is supposed to be
followed by another pass of determinization at the word level. It could also
be useful for some other applications such as fMLLR estimation, confidence
estimation, discriminative training, etc.
*/
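// Schematically (arcs shown as ilabel:olabel, with hypothetical word label "w",
// transition-id "tid" and inserted phone label "p"):
//
//   input word lattice:   w:tid arcs
//   after insertion:      w:tid and p:0 arcs, one "p" at each phone start
//   determinize:          operates on the combined phone + word input sequences
//   after deletion:       every "p" is reset to epsilon, leaving w:tid arcs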
template<class Weight, class IntType>
bool DeterminizeLatticePhonePrunedFirstPass(
const kaldi::TransitionModel &trans_model,
double beam,
MutableFst<ArcTpl<Weight> > *fst,
const DeterminizeLatticePrunedOptions &opts) {
// First, insert the phones.
typename ArcTpl<Weight>::Label first_phone_label =
DeterminizeLatticeInsertPhones(trans_model, fst);
TopSort(fst);
// Second, do determinization with phone inserted.
bool ans = DeterminizeLatticePruned<Weight>(*fst, beam, fst, opts);
// Finally, remove the inserted phones.
DeterminizeLatticeDeletePhones(first_phone_label, fst);
TopSort(fst);
return ans;
}
// "Destructive" version of DeterminizeLatticePhonePruned() where the input
// lattice might be modified.
template<class Weight, class IntType>
bool DeterminizeLatticePhonePruned(
const kaldi::TransitionModel &trans_model,
MutableFst<ArcTpl<Weight> > *ifst,
double beam,
MutableFst<ArcTpl<CompactLatticeWeightTpl<Weight, IntType> > > *ofst,
DeterminizeLatticePhonePrunedOptions opts) {
// Return status.
bool ans = true;
// At least one of opts.phone_determinize and opts.word_determinize must be
// set, otherwise calling this function does not make sense.
if (!opts.phone_determinize && !opts.word_determinize) {
KALDI_WARN << "Both --phone-determinize and --word-determinize are set to "
<< "false, copying lattice without determinization.";
// We are expecting the words on the input side.
ConvertLattice<Weight, IntType>(*ifst, ofst, false);
return ans;
}
// Determinization options.
DeterminizeLatticePrunedOptions det_opts;
det_opts.delta = opts.delta;
det_opts.max_mem = opts.max_mem;
// If --phone-determinize is true, do the determinization on phone + word
// lattices.
if (opts.phone_determinize) {
KALDI_VLOG(3) << "Doing first pass of determinization on phone + word "
<< "lattices.";
ans = DeterminizeLatticePhonePrunedFirstPass<Weight, IntType>(
trans_model, beam, ifst, det_opts) && ans;
// If --word-determinize is false, we've finished the job and return here.
if (!opts.word_determinize) {
// We are expecting the words on the input side.
ConvertLattice<Weight, IntType>(*ifst, ofst, false);
return ans;
}
}
// If --word-determinize is true, do the determinization on word lattices.
if (opts.word_determinize) {
KALDI_VLOG(3) << "Doing second pass of determinization on word lattices.";
ans = DeterminizeLatticePruned<Weight, IntType>(
*ifst, beam, ofst, det_opts) && ans;
}
// If --minimize is true, push and minimize after determinization.
if (opts.minimize) {
KALDI_VLOG(3) << "Pushing and minimizing on word lattices.";
ans = PushCompactLatticeStrings<Weight, IntType>(ofst) && ans;
ans = PushCompactLatticeWeights<Weight, IntType>(ofst) && ans;
ans = MinimizeCompactLattice<Weight, IntType>(ofst) && ans;
}
return ans;
}
// Normal version of DeterminizeLatticePhonePruned(), where the input lattice
// is kept unchanged.
template<class Weight, class IntType>
bool DeterminizeLatticePhonePruned(
const kaldi::TransitionModel &trans_model,
const ExpandedFst<ArcTpl<Weight> > &ifst,
double beam,
MutableFst<ArcTpl<CompactLatticeWeightTpl<Weight, IntType> > > *ofst,
DeterminizeLatticePhonePrunedOptions opts) {
VectorFst<ArcTpl<Weight> > temp_fst(ifst);
return DeterminizeLatticePhonePruned(trans_model, &temp_fst,
beam, ofst, opts);
}
bool DeterminizeLatticePhonePrunedWrapper(
const kaldi::TransitionModel &trans_model,
MutableFst<kaldi::LatticeArc> *ifst,
double beam,
MutableFst<kaldi::CompactLatticeArc> *ofst,
DeterminizeLatticePhonePrunedOptions opts) {
bool ans = true;
Invert(ifst);
if (ifst->Properties(fst::kTopSorted, true) == 0) {
if (!TopSort(ifst)) {
// Cannot topologically sort the lattice -- determinization will fail.
KALDI_ERR << "Topological sorting of state-level lattice failed (probably"
<< " your lexicon has empty words or your LM has epsilon cycles"
<< ").";
}
}
ILabelCompare<kaldi::LatticeArc> ilabel_comp;
ArcSort(ifst, ilabel_comp);
ans = DeterminizeLatticePhonePruned<kaldi::LatticeWeight, kaldi::int32>(
trans_model, ifst, beam, ofst, opts);
Connect(ofst);
return ans;
}
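// Typical use of the wrapper above (a minimal sketch; hypothetical caller code,
// e.g. inside a decoding or lattice post-processing binary):
//
//   kaldi::TransitionModel trans_model;   // read from the acoustic model.
//   VectorFst<kaldi::LatticeArc> lat;     // raw state-level lattice from the
//                                         // decoder; the wrapper inverts it so
//                                         // that words end up on the input side.
//   VectorFst<kaldi::CompactLatticeArc> clat;
//   DeterminizeLatticePhonePrunedOptions phone_opts;
//   bool ok = DeterminizeLatticePhonePrunedWrapper(trans_model, &lat, 8.0,
//                                                  &clat, phone_opts);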
// Instantiate the templates for the types we might need.
// Note: there are actually four templates, each of which
// we instantiate for a single type.
template
bool DeterminizeLatticePruned<kaldi::LatticeWeight>(
const ExpandedFst<kaldi::LatticeArc> &ifst,
double prune,
MutableFst<kaldi::CompactLatticeArc> *ofst,
DeterminizeLatticePrunedOptions opts);
template
bool DeterminizeLatticePruned<kaldi::LatticeWeight>(
const ExpandedFst<kaldi::LatticeArc> &ifst,
double prune,
MutableFst<kaldi::LatticeArc> *ofst,
DeterminizeLatticePrunedOptions opts);
template
bool DeterminizeLatticePhonePruned<kaldi::LatticeWeight, kaldi::int32>(
const kaldi::TransitionModel &trans_model,
const ExpandedFst<kaldi::LatticeArc> &ifst,
double prune,
MutableFst<kaldi::CompactLatticeArc> *ofst,
DeterminizeLatticePhonePrunedOptions opts);
template
bool DeterminizeLatticePhonePruned<kaldi::LatticeWeight, kaldi::int32>(
const kaldi::TransitionModel &trans_model,
MutableFst<kaldi::LatticeArc> *ifst,
double prune,
MutableFst<kaldi::CompactLatticeArc> *ofst,
DeterminizeLatticePhonePrunedOptions opts);
}