LDA/04d-mmf_dsae.py
  
# coding: utf-8

# In[2]:

# Imports
import gensim
from scipy import sparse
import itertools
from sklearn import preprocessing
from keras.models import Sequential
from keras.optimizers import SGD, Adam
from mlp import *
import mlp
import sklearn.metrics
import shelve
import pickle
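
# NB: the print statements below are Python 2 syntax, and Adam(lr=...) /
# SGD(lr=...) follow the pre-2.3 Keras optimizer API, so a matching
# Python 2 / old-Keras environment is assumed.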
from utils import *
import sys
import os
import json

# In[4]:
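
# Expected invocation, inferred from the sys.argv reads below (the shelve
# file is produced by an earlier step of the pipeline):
#   python 04d-mmf_dsae.py <in_dir> <infer_model.shelve> <config.json>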
  
infer_model = shelve.open("{}".format(sys.argv[2]))
in_dir = sys.argv[1]
# ['ASR', 'TRS', 'LABEL']

# In[6]:
json_conf = json.load(open(sys.argv[3]))

dsae_conf = json_conf["dsae"]

hidden_size = dsae_conf["hidden_size"]
input_activation = dsae_conf["input_activation"]
output_activation = dsae_conf["output_activation"]
loss = dsae_conf["loss"]
epochs = dsae_conf["epochs"]
batch_size = dsae_conf["batch"]
patience = dsae_conf["patience"]
do_do = dsae_conf["do"]
# If "sgd" is a dict, build the optimizer from its name and learning rate;
# otherwise pass the value through unchanged (e.g. an optimizer name string).
# Other settings left from experimentation: SGD(lr=0.00001, nesterov=False),
# 'rmsprop', SGD(lr=0.001, momentum=0.9, nesterov=True).
try:
    if dsae_conf["sgd"]["name"] == "adam":
        sgd = Adam(lr=dsae_conf["sgd"]["lr"])
    elif dsae_conf["sgd"]["name"] == "sgd":
        sgd = SGD(lr=dsae_conf["sgd"]["lr"])
except (KeyError, TypeError):
    sgd = dsae_conf["sgd"]

trans_conf = json_conf["dsae"]["transform"]
trans_hidden_size = trans_conf["hidden_size"]
trans_input_activation = trans_conf["input_activation"]
trans_output_activation = trans_conf["output_activation"]
trans_loss = trans_conf["loss"]
trans_epochs = trans_conf["epochs"]
trans_batch_size = trans_conf["batch"]
trans_patience = trans_conf["patience"]
trans_do = trans_conf["do"]
try:
    if trans_conf["sgd"]["name"] == "adam":
        trans_sgd = Adam(lr=trans_conf["sgd"]["lr"])
    elif trans_conf["sgd"]["name"] == "sgd":
        trans_sgd = SGD(lr=trans_conf["sgd"]["lr"])
except (KeyError, TypeError):
    trans_sgd = trans_conf["sgd"]


mlp_conf = json_conf["mlp"]
mlp_h = mlp_conf["hidden_size"]
mlp_loss = mlp_conf["loss"]
mlp_dropouts = mlp_conf["do"]
mlp_epochs = mlp_conf["epochs"]
mlp_batch_size = mlp_conf["batch"]
mlp_input_activation = mlp_conf["input_activation"]
mlp_output_activation = mlp_conf["output_activation"]
try:
    if mlp_conf["sgd"]["name"] == "adam":
        mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"])
    elif mlp_conf["sgd"]["name"] == "sgd":
        mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"])
except (KeyError, TypeError):
    mlp_sgd = mlp_conf["sgd"]

  
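# Illustrative config.json shape matching the reads above (all values here
# are hypothetical examples, not the settings of the original experiments):
#   {
#     "name": "exp1",
#     "dsae": {
#       "hidden_size": [512, 256], "input_activation": "tanh",
#       "output_activation": "sigmoid", "loss": "mse", "epochs": 100,
#       "batch": 32, "patience": 10, "do": [0.25, 0.25],
#       "sgd": {"name": "adam", "lr": 0.0001},
#       "transform": {"hidden_size": [256], "input_activation": "tanh",
#                     "output_activation": "sigmoid", "loss": "mse",
#                     "epochs": 100, "batch": 32, "patience": 10,
#                     "do": [0.25], "sgd": "adam"}
#     },
#     "mlp": {"hidden_size": [128], "loss": "categorical_crossentropy",
#             "do": [0.5], "epochs": 50, "batch": 32,
#             "input_activation": "relu", "output_activation": "softmax",
#             "sgd": "adam"}
#   }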
name = json_conf["name"]
try:
    os.mkdir("{}/{}".format(in_dir, name))
except OSError:
    pass  # output directory already exists
db = shelve.open("{}/{}/ae_model.shelve".format(in_dir, name), writeback=True)
  
keys = ["ASR", "TRS"]


db["DSAE"] = {}
db["DSAEFT"] = {}

# Stage 1: train the deep stacked auto-encoder (DSAE) on the ASR topic-space
# features, keeping every layer's encodings and weights.
mod = "ASR"
res_tuple_ASR = train_ae(infer_model["LDA"][mod]["TRAIN"],
                         infer_model["LDA"][mod]["DEV"],
                         infer_model["LDA"][mod]["TEST"],
                         hidden_size, dropouts=do_do,
                         patience=patience, sgd=sgd,
                         input_activation=input_activation,
                         output_activation=output_activation, loss=loss, epochs=epochs,
                         batch_size=batch_size, verbose=0, get_weights=True)
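# Assumed shape of the train_ae return value (inferred from its use here and
# in the fine-tuning block below): res_tuple[0] holds, per hidden layer, the
# (train, dev, test) encodings; res_tuple[1] holds the learned layer weights.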
# Probe each DSAE level with an MLP classifier trained against the LABEL data.
mlp_res_list = []
for layer in res_tuple_ASR[0]:
    mlp_res_list.append(train_mlp(layer[0], infer_model["LABEL"][mod]["TRAIN"],
                                  layer[1], infer_model["LABEL"][mod]["DEV"],
                                  layer[2], infer_model["LABEL"][mod]["TEST"],
                                  mlp_h, loss=mlp_loss, dropouts=mlp_dropouts,
                                  sgd=mlp_sgd, epochs=mlp_epochs,
                                  output_activation=mlp_output_activation,
                                  input_activation=mlp_input_activation,
                                  batch_size=mlp_batch_size, fit_verbose=0))

db["DSAE"][mod] = mlp_res_list
mod = "TRS"
# Stage 2: train the same DSAE architecture on the TRS (manual transcription)
# features, and probe its levels with the same MLP.
res_tuple_TRS = train_ae(infer_model["LDA"][mod]["TRAIN"],
                         infer_model["LDA"][mod]["DEV"],
                         infer_model["LDA"][mod]["TEST"],
                         hidden_size, dropouts=do_do,
                         sgd=sgd, input_activation=input_activation,
                         output_activation=output_activation, loss=loss, epochs=epochs,
                         batch_size=batch_size, patience=patience,
                         verbose=0, get_weights=True)

mlp_res_list = []
for layer in res_tuple_TRS[0]:
    mlp_res_list.append(train_mlp(layer[0], infer_model["LABEL"][mod]["TRAIN"],
                                  layer[1], infer_model["LABEL"][mod]["DEV"],
                                  layer[2], infer_model["LABEL"][mod]["TEST"],
                                  mlp_h, loss=mlp_loss, dropouts=mlp_dropouts,
                                  sgd=mlp_sgd, epochs=mlp_epochs,
                                  output_activation=mlp_output_activation,
                                  input_activation=mlp_input_activation,
                                  batch_size=mlp_batch_size, fit_verbose=0))

db["DSAE"][mod] = mlp_res_list


transfert = []

print "get transfer weights"
# Stage 3: for each auto-encoder level, learn a transfer auto-encoder that
# maps the ASR encodings onto the corresponding TRS encodings.
for asr_pred, trs_pred in zip(res_tuple_ASR[0], res_tuple_TRS[0]):
    # print "ASR", [x.shape for x in asr_pred]
    # print "TRS", [x.shape for x in trs_pred]
    # print "TRANS SGD", trans_sgd
    transfert.append(train_ae(asr_pred[0],
                              asr_pred[1],
                              asr_pred[2],
                              trans_hidden_size,
                              dropouts=trans_do,
                              y_train=trs_pred[0],
                              y_dev=trs_pred[1],
                              y_test=trs_pred[2],
                              patience=trans_patience, sgd=trans_sgd,
                              input_activation=trans_input_activation,
                              output_activation=trans_output_activation,
                              loss=trans_loss,
                              epochs=trans_epochs,
                              batch_size=trans_batch_size, verbose=0, get_weights=True))
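
# Each transfert entry is assumed to unpack as (per-level encodings, weights):
# the loop below consumes the encodings, and Wtr further down keeps the
# weights.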
mod = "ASR"
mlp_res_bylvl = []
print "MLP on transfert"
for level, w in transfert:
    mlp_res_list = []
    for layer in level:
        mlp_res_list.append(train_mlp(layer[0], infer_model["LABEL"][mod]["TRAIN"],
                                      layer[1], infer_model["LABEL"][mod]["DEV"],
                                      layer[2], infer_model["LABEL"][mod]["TEST"],
                                      mlp_h, loss=mlp_loss, dropouts=mlp_dropouts,
                                      sgd=mlp_sgd, epochs=mlp_epochs,
                                      output_activation=mlp_output_activation,
                                      input_activation=mlp_input_activation,
                                      batch_size=mlp_batch_size, fit_verbose=0))
    mlp_res_bylvl.append(mlp_res_list)
db["DSAE"]["transfert"] = mlp_res_bylvl


# Stage 4: fine-tune using the pretrained ASR (WA), transfer (Wtr) and
# TRS (WT) weights.
print "FT"
WA = res_tuple_ASR[1]
# print "WA", len(WA), [len(x) for x in WA]
WT = res_tuple_TRS[1]
# print "WT", len(WT), [len(x) for x in WT]
Wtr = [x[1] for x in transfert]
# print "Wtr", len(Wtr), [len(x) for x in Wtr], [len(x[1]) for x in Wtr]
  
ft_res = ft_dsae(infer_model["LDA"]["ASR"]["TRAIN"],
                 infer_model["LDA"]["ASR"]["DEV"],
                 infer_model["LDA"]["ASR"]["TEST"],
                 y_train=infer_model["LDA"]["TRS"]["TRAIN"],
                 y_dev=infer_model["LDA"]["TRS"]["DEV"],
                 y_test=infer_model["LDA"]["TRS"]["TEST"],
                 ae_hidden=hidden_size,
                 transfer_hidden=trans_hidden_size,
                 start_weights=WA,
                 transfer_weights=Wtr,
                 end_weights=WT,
                 input_activation=input_activation,
                 output_activation=output_activation,
                 ae_dropouts=do_do,
                 transfer_do=trans_do,
                 sgd=sgd,
                 loss=loss,
                 patience=patience,
                 batch_size=batch_size,
                 epochs=epochs)
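# ft_dsae (imported via utils/mlp) presumably rebuilds the full
# ASR -> transfer -> TRS stack from the weights above and fine-tunes it
# end-to-end; that reading is inferred from the argument names, as the
# function is not defined in this file.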
mlps_by_lvls = []
for level in ft_res:
    mlp_res_list = []
    for layer in level:
        mlp_res_list.append(train_mlp(layer[0], infer_model["LABEL"][mod]["TRAIN"],
                                      layer[1], infer_model["LABEL"][mod]["DEV"],
                                      layer[2], infer_model["LABEL"][mod]["TEST"],
                                      mlp_h, loss=mlp_loss, dropouts=mlp_dropouts,
                                      sgd=mlp_sgd, epochs=mlp_epochs,
                                      output_activation=mlp_output_activation,
                                      input_activation=mlp_input_activation,
                                      batch_size=mlp_batch_size, fit_verbose=0))
    mlps_by_lvls.append(mlp_res_list)


db["DSAEFT"]["transfert"] = mlps_by_lvls

db.close()
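
# To read the stored results back later (illustrative; the key layout mirrors
# the writes above):
#   db = shelve.open("<in_dir>/<name>/ae_model.shelve")
#   db["DSAE"]["ASR"]          # per-layer MLP results on the ASR encodings
#   db["DSAE"]["transfert"]    # results on the transferred encodings
#   db["DSAEFT"]["transfert"]  # results after fine-tuning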