LDA/04b-mmf_mini_ae.py
  
  # coding: utf-8
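  # Usage sketch (inferred from the sys.argv reads below; the invocation itself is
  # an assumption, not documented in the file):
  #   python 04b-mmf_mini_ae.py <in_dir> <infer_model.shelve> <config.json>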
  
  # In[2]:
  
  # Import
  import gensim
  from scipy import sparse
  import itertools
  from sklearn import preprocessing
  from keras.models import Sequential
  from keras.optimizers import SGD, Adam
  from mlp import *
  import sklearn.metrics
  import shelve
  import pickle
  from utils import *
  import sys
  import os
  import json
  # In[4]:
  
  infer_model = shelve.open(sys.argv[2])  # shelve with the inferred LDA features and labels
  in_dir = sys.argv[1]                    # output directory
  #['ASR', 'TRS', 'LABEL']
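  # As used below, the shelve is expected to expose
  #   infer_model["LDA"][mod][split]   -> LDA topic features
  #   infer_model["LABEL"][mod][split] -> reference labels
  # for mod in {"ASR", "TRS"} and split in {"TRAIN", "DEV", "TEST"}.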
  # In[6]:
  json_conf = json.load(open(sys.argv[3]))
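  # Illustrative layout of the JSON config (keys mirror the reads below; the
  # values shown are assumptions, for illustration only):
  # {
  #   "name": "my_run",
  #   "ae":  {"hidden_size": [400, 100, 400], "input_activation": "tanh",
  #           "output_activation": "tanh", "loss": "mse", "epochs": 200,
  #           "batch": 8, "patience": 20, "do": 0.25,
  #           "sgd": {"name": "adam", "lr": 0.0001}},
  #   "mlp": {"hidden_size": [100], "input_activation": "relu",
  #           "output_activation": "softmax", "loss": "categorical_crossentropy",
  #           "epochs": 200, "batch": 8, "do": 0.25, "sgd": "rmsprop"}
  # }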
  ae_conf = json_conf["ae"]
  
  hidden_size = ae_conf["hidden_size"]
  input_activation = ae_conf["input_activation"]
  output_activation = ae_conf["output_activation"]
  loss = ae_conf["loss"]
  epochs = ae_conf["epochs"]
  batch = ae_conf["batch"]
  patience = ae_conf["patience"]
  do_do = ae_conf["do"]  # dropout configuration, passed to train_ae as dropouts=
  # "sgd" is either a dict like {"name": "adam", "lr": 0.001} or a plain Keras
  # optimizer name (e.g. "rmsprop"); in the latter case the raw value is kept.
  try:
      if ae_conf["sgd"]["name"] == "adam":
          sgd = Adam(lr=ae_conf["sgd"]["lr"])
      elif ae_conf["sgd"]["name"] == "sgd":
          sgd = SGD(lr=ae_conf["sgd"]["lr"])
  except (KeyError, TypeError):
      sgd = ae_conf["sgd"]
  
  mlp_conf = json_conf["mlp"]
  mlp_h = mlp_conf["hidden_size"]
  mlp_loss = mlp_conf["loss"]
  mlp_dropouts = mlp_conf["do"]
  mlp_epochs = mlp_conf["epochs"]
  mlp_batch_size = mlp_conf["batch"]
  mlp_input_activation = mlp_conf["input_activation"]
  mlp_output_activation = mlp_conf["output_activation"]

  # Same convention as above for the MLP optimizer.
  try:
      if mlp_conf["sgd"]["name"] == "adam":
          mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"])
      elif mlp_conf["sgd"]["name"] == "sgd":
          mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"])
  except (KeyError, TypeError):
      mlp_sgd = mlp_conf["sgd"]
  
  
  name = json_conf["name"]
  try:
      os.mkdir("{}/{}".format(in_dir, name))
  except OSError:
      # the output directory may already exist
      pass
  db = shelve.open("{}/{}/ae_model.shelve".format(in_dir,name),writeback=True)
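  # The shelve is filled below with three entries:
  #   db["LABEL"] : copy of the reference labels
  #   db["LDA"]   : MLP results on the raw LDA features, per modality
  #   db["AE"]    : MLP results on each auto-encoder layer, per modality, plus "SPE"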
  db["LABEL"]=infer_model["LABEL"]
  #
  keys = ["ASR","TRS"]
  
  db["AE"] = {}
  db["LDA"] = {}
  for mod in keys : 
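      # For each modality: (1) a baseline MLP trained directly on the LDA topic
      # features, then (2) a stacked auto-encoder on those features, with an MLP
      # probe trained on every hidden-layer representation.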
      db["LDA"][mod] = train_mlp(infer_model["LDA"][mod]["TRAIN"],infer_model["LABEL"][mod]["TRAIN"],
                              infer_model["LDA"][mod]["DEV"],infer_model["LABEL"][mod]["DEV"],
                              infer_model["LDA"][mod]["TEST"],infer_model["LABEL"][mod]["TEST"],
                              mlp_h ,sgd=mlp_sgd,
                              epochs=mlp_epochs,
                              batch_size=mlp_batch_size,
                              input_activation=mlp_input_activation,
                              output_activation=mlp_output_activation,
                              dropouts=mlp_dropouts,
                              fit_verbose=0)
  
      res=train_ae(infer_model["LDA"][mod]["TRAIN"],infer_model["LDA"][mod]["DEV"],infer_model["LDA"][mod]["TEST"],
                   hidden_size,patience = patience,sgd=sgd,
                   dropouts=do_do,input_activation=input_activation,output_activation=output_activation,
                   loss=loss,epochs=epochs,batch_size=batch,verbose=0)
      mlp_res_list=[]
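      # train_ae appears to yield, for each hidden layer, a (train, dev, test)
      # triple of encoded representations; each one is probed with an MLP classifier.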
      for layer in res:
          mlp_res_list.append(train_mlp(layer[0],infer_model['LABEL'][mod]["TRAIN"],
                                        layer[1],infer_model["LABEL"][mod]["DEV"],
                                        layer[2],infer_model["LABEL"][mod]["TEST"],
                                        mlp_h,loss=mlp_loss,dropouts=mlp_dropouts,sgd=mlp_sgd,epochs=mlp_epochs,
                                        output_activation=mlp_output_activation,
                                        input_activation=mlp_input_activation,
                                        batch_size=mlp_batch_size,fit_verbose=0))
      db["AE"][mod]=mlp_res_list
  
  mod = "ASR"
  mod2= "TRS"
  mlp_res_list=[]
  
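  # Cross-modal auto-encoder (stored under "SPE"): the encoder reads the ASR topic
  # features; the TRAIN reconstruction target stays on the ASR side while the DEV
  # and TEST targets come from TRS (presumably the manual transcriptions), pulling
  # the hidden layers towards the TRS representation.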
  res = train_ae(infer_model["LDA"][mod]["TRAIN"],
                  infer_model["LDA"][mod]["DEV"],
                  infer_model["LDA"][mod]["TEST"],
                  hidden_size,dropouts=do_do,patience = patience,
                  sgd=sgd,input_activation=input_activation,output_activation=output_activation,loss=loss,epochs=epochs,
                  batch_size=batch,
                  y_train=infer_model["LDA"][mod]["TRAIN"],
                  y_dev=infer_model["LDA"][mod2]["DEV"],
                  y_test=infer_model["LDA"][mod2]["TEST"])
  
  for layer in res:
      mlp_res_list.append(train_mlp(layer[0],infer_model["LABEL"][mod]["TRAIN"],
                                    layer[1],infer_model["LABEL"][mod]["DEV"],
                                    layer[2],infer_model["LABEL"][mod]["TEST"],
                                    mlp_h,loss=mlp_loss,dropouts=mlp_dropouts,sgd=mlp_sgd,epochs=mlp_epochs,
                                    output_activation=mlp_output_activation,
                                    input_activation=mlp_input_activation,  # match the MLP activation used in the loop above
                                    batch_size=mlp_batch_size,fit_verbose=0))
  
  db["AE"]["SPE"] = mlp_res_list
  
  db.sync()
  db.close()