Commit e5108393c82cba7f29adcc24d27a17368a398e7e

Authored by Killian
1 parent dcac9f70ba
Exists in master

Replace mlp.py and add the mmf_experience

Showing 13 changed files with 490 additions and 289 deletions

LDA/00-mmf_make_features.py
... ... @@ -11,14 +11,16 @@
11 11  
12 12 input_dir = sys.argv[1] # Top-level directory containing ASR and TRS
13 13 level = sys.argv[2] # desired LDA size ( -5)
  14 +output_dir = sys.argv[3]
14 15  
15 16 lb=LabelBinarizer()
16 17 #y_train=lb.fit_transform([utils.select(ligneid) for ligneid in origin_corps["LABEL"]["TRAIN"]])
17 18  
18 19  
19   -data = shelve.open("{}/mmf_{}.shelve".format(input_dir,level))
20   -data["LABEL"]= {"LDA":{}}
21   -for mod in ["ASR", "TRS" ]
  20 +data = shelve.open("{}/mmf_{}.shelve".format(output_dir,level),writeback=True)
  21 +data["LABEL"]= {}
  22 +data["LDA"] = {"ASR":{},"TRS":{}}
  23 +for mod in ["ASR", "TRS" ]:
22 24 train = pandas.read_table("{}/{}/train_{}.ssv".format(input_dir, mod, level), sep=" ", header=None )
23 25 dev = pandas.read_table("{}/{}/dev_{}.ssv".format(input_dir, mod, level), sep=" ", header=None )
24 26 test = pandas.read_table("{}/{}/test_{}.ssv".format(input_dir, mod, level), sep=" ", header=None )
... ... @@ -29,10 +31,12 @@
29 31 lb.fit(y_train)
30 32 data["LABEL"][mod]={"TRAIN":lb.transform(y_train),"DEV":lb.transform(y_dev), "TEST": lb.transform(y_test)}
31 33  
32   - data["LDA"][mod]={}
33   - data["LDA"][mod]["TRAIN"]=train.iloc[:,1:].values
34   - data["LDA"][mod]["DEV"]=dev.iloc[:,1:].values
35   - data["LDA"][mod]["TEST"]=test.iloc[:,1:].values
  34 + # data["LDA"][mod]={'ASR':[]}
  35 + print data["LDA"][mod]
  36 + print train.values
  37 + data["LDA"][mod]["TRAIN"]=train.iloc[:,1:-1].values
  38 + data["LDA"][mod]["DEV"]=dev.iloc[:,1:-1].values
  39 + data["LDA"][mod]["TEST"]=test.iloc[:,1:-1].values
36 40  
37 41 data.sync()
38 42 data.close()
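For reference, the shelve written above can be inspected as sketched below, a minimal example assuming one of the feature paths used by the run scripts further down; the commented shapes are placeholders:

import shelve

data = shelve.open("MM_features/data_w99/mmf_193.shelve")
print data["LDA"].keys()                   # ['ASR', 'TRS']
print data["LDA"]["ASR"]["TRAIN"].shape    # (n_docs, n_topic_features)
print data["LABEL"]["ASR"]["TRAIN"].shape  # one-hot labels from LabelBinarizer
data.close()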
LDA/04a-mmdf.py
... ... @@ -101,7 +101,7 @@
101 101 test_max = numpy.max(res[2])
102 102 out_db[key]=(res,(dev_best,test_best,test_max))
103 103 ress.append((key,dev_best,test_best,test_max))
104   -
  104 +print sys.argv[2]
105 105 for el in ress :
106 106 print el
107 107 out_db.close()
LDA/04b-mmf_mini_ae.py
... ... @@ -25,87 +25,71 @@
25 25 #['ASR', 'TRS', 'LABEL']
26 26 # In[6]:
27 27  
  28 +json_conf =json.load(open(sys.argv[3]))
  29 +ae_conf = json_conf["ae"]
28 30  
29   -hidden_size=[ 100 , 50, 100 ]
30   -input_activation="tanh"
31   -output_activation="tanh"
32   -loss="mse"
33   -epochs=1000
34   -batch=1
35   -patience=60
36   -do_do=[False]
37   -sgd = Adam(lr=0.000001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
  31 +hidden_size= ae_conf["hidden_size"]
  32 +input_activation=ae_conf["input_activation"]
  33 +output_activation=ae_conf["output_activation"]
  34 +loss=ae_conf["loss"]
  35 +epochs=ae_conf["epochs"]
  36 +batch=ae_conf["batch"]
  37 +patience=ae_conf["patience"]
  38 +do_do=ae_conf["do"]
  39 +try:
  40 + k = ae_conf["sgd"]
  41 + if ae_conf["sgd"]["name"] == "adam":
  42 + sgd = Adam(lr=ae_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
  43 + elif ae_conf["sgd"]["name"] == "sgd":
  44 + sgd = SGD(lr=ae_conf["sgd"]["lr"])
  45 +except:
  46 + sgd = ae_conf["sgd"]
38 47  
  48 +mlp_conf = json_conf["mlp"]
  49 +mlp_h = mlp_conf["hidden_size"]
  50 +mlp_loss = mlp_conf["loss"]
  51 +mlp_dropouts = mlp_conf["do"]
  52 +mlp_epochs = mlp_conf["epochs"]
  53 +mlp_batch_size = mlp_conf["batch"]
  54 +mlp_input_activation=mlp_conf["input_activation"]
  55 +mlp_output_activation=mlp_conf["output_activation"]
39 56  
  57 +try:
  58 + k = mlp_conf["sgd"]
  59 + if mlp_conf["sgd"]["name"] == "adam":
  60 + mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
  61 + elif mlp_conf["sgd"]["name"] == "sgd":
  62 + mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"])
  63 +except:
  64 + mlp_sgd = mlp_conf["sgd"]
40 65  
41   -mlp_h = [ 150 ,150 ,150 ]
42   -mlp_loss = "categorical_crossentropy"
43   -mlp_dropouts = []
44   -mlp_sgd = Adam(lr=0.0001)
45   -mlp_epochs = 2000
46   -mlp_batch_size = 8
47   -mlp_output_activation="softmax"
48 66  
49   -try :
50   - sgd_repr=sgd.get_config()["name"]
51   -except AttributeError :
52   - sgd_repr=sgd
53   -
54   -try :
55   - mlp_sgd_repr=mlp_sgd.get_config()["name"]
56   -except AttributeError :
57   - mlp_sgd_repr=mlp_sgd
58   -
59   -
60   -params={ "h1" : "_".join([ str(x) for x in hidden_size ]),
61   - "inside_activation" : input_activation,
62   - "output_activation" : output_activation,
63   - "do_dropout": "_".join([str(x) for x in do_do]),
64   - "loss" : loss,
65   - "epochs" : epochs ,
66   - "batch_size" : batch,
67   - "patience" : patience,
68   - "sgd" : sgd_repr,
69   - "mlp_h ": "_".join([str(x) for x in mlp_h]),
70   - "mlp_loss ": mlp_loss,
71   - "mlp_dropouts ": "_".join([str(x) for x in mlp_dropouts]),
72   - "mlp_sgd ": mlp_sgd_repr,
73   - "mlp_epochs ": mlp_epochs,
74   - "mlp_batch_size ": mlp_batch_size,
75   - "mlp_output" : mlp_output_activation
76   - }
77   -name = "_".join([ str(x) for x in params.values()])
  67 +name = json_conf["name"]
78 68 try:
79 69 os.mkdir("{}/{}".format(in_dir,name))
80 70 except:
81 71 pass
82 72 db = shelve.open("{}/{}/ae_model.shelve".format(in_dir,name),writeback=True)
83   -db["params"] = params
84 73 db["LABEL"]=infer_model["LABEL"]
85 74 #
86   -json.dump(params,
87   - open("{}/{}/ae_model.json".format(in_dir,name),"w"),
88   - indent=4)
89   -
90 75 keys = ["ASR","TRS"]
91 76  
92 77 db["AE"] = {}
93 78 db["LDA"] = {}
94 79 for mod in keys :
95   - print mod
96 80 db["LDA"][mod] = train_mlp(infer_model["LDA"][mod]["TRAIN"],infer_model["LABEL"][mod]["TRAIN"],
97 81 infer_model["LDA"][mod]["DEV"],infer_model["LABEL"][mod]["DEV"],
98 82 infer_model["LDA"][mod]["TEST"],infer_model["LABEL"][mod]["TEST"],
99 83 mlp_h ,sgd=mlp_sgd,
100 84 epochs=mlp_epochs,
101 85 batch_size=mlp_batch_size,
102   - input_activation=input_activation,
  86 + input_activation=mlp_input_activation,
103 87 output_activation=mlp_output_activation,
104 88 dropouts=mlp_dropouts,
105 89 fit_verbose=0)
106 90  
107 91 res=train_ae(infer_model["LDA"][mod]["TRAIN"],infer_model["LDA"][mod]["DEV"],infer_model["LDA"][mod]["TEST"],
108   - hidden_size,patience = params["patience"],sgd=sgd,
  92 + hidden_size,patience = patience,sgd=sgd,
109 93 dropouts=do_do,input_activation=input_activation,output_activation=output_activation,
110 94 loss=loss,epochs=epochs,batch_size=batch,verbose=0)
111 95 mlp_res_list=[]
... ... @@ -115,7 +99,7 @@
115 99 layer[2],infer_model["LABEL"][mod]["TEST"],
116 100 mlp_h,loss=mlp_loss,dropouts=mlp_dropouts,sgd=mlp_sgd,epochs=mlp_epochs,
117 101 output_activation=mlp_output_activation,
118   - input_activation=input_activation,
  102 + input_activation=mlp_input_activation,
119 103 batch_size=mlp_batch_size,fit_verbose=0))
120 104 db["AE"][mod]=mlp_res_list
121 105  
... ... @@ -126,7 +110,7 @@
126 110 res = train_ae(infer_model["LDA"][mod]["TRAIN"],
127 111 infer_model["LDA"][mod]["DEV"],
128 112 infer_model["LDA"][mod]["TEST"],
129   - hidden_size,dropouts=do_do,patience = params["patience"],
  113 + hidden_size,dropouts=do_do,patience = patience,
130 114 sgd=sgd,input_activation=input_activation,output_activation=output_activation,loss=loss,epochs=epochs,
131 115 batch_size=batch,
132 116 y_train=infer_model["LDA"][mod]["TRAIN"],
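All of the 04* scripts now read their hyper-parameters from a JSON file passed as sys.argv[3] instead of the hard-coded values removed above. A minimal sketch of a config for 04b-mmf_mini_ae.py follows; the keys mirror the accesses in the diff, the values are the old defaults used purely as placeholders, and the file name is hypothetical:

import json

conf = {
    "name": "mini_ae",  # also used as the output sub-directory
    "ae": {
        "hidden_size": [100, 50, 100],
        "input_activation": "tanh", "output_activation": "tanh",
        "loss": "mse", "epochs": 1000, "batch": 1, "patience": 60,
        "do": [False],
        "sgd": {"name": "adam", "lr": 0.000001},
    },
    "mlp": {
        "hidden_size": [150, 150, 150],
        "loss": "categorical_crossentropy", "do": [],
        "epochs": 2000, "batch": 8,
        "input_activation": "tanh", "output_activation": "softmax",
        "sgd": {"name": "adam", "lr": 0.0001},
    },
}
json.dump(conf, open("mini_ae.json", "w"), indent=4)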
LDA/04c-mmf_sae.py
... ... @@ -25,70 +25,70 @@
25 25 in_dir = sys.argv[1]
26 26 #['ASR', 'TRS', 'LABEL']
27 27 # In[6]:
  28 +json_conf =json.load(open(sys.argv[3]))
  29 +sae_conf = json_conf["sae"]
28 30  
  31 +hidden_size= sae_conf["hidden_size"]
  32 +input_activation=sae_conf["input_activation"]
  33 +output_activation=sae_conf["output_activation"]
  34 +loss=sae_conf["loss"]
  35 +epochs=sae_conf["epochs"]
  36 +batch=sae_conf["batch"]
  37 +patience=sae_conf["patience"]
  38 +do_do=sae_conf["do"]
29 39  
30   -hidden_size=[ 100, 80, 50 , 20 ]
31   -input_activation="relu"
32   -output_activation="relu"
33   -loss="mse"
34   -epochs=3000
35   -batch=1
36   -patience=20
37   -do_do=[ 0 ] * len(hidden_size)
38   -sgd = Adam(lr=0.0001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
39   -try :
40   - sgd_repr=sgd.get_config()["name"]
41   -except AttributeError :
42   - sgd_repr=sgd
  40 +try:
  41 + k = sae_conf["sgd"]
  42 + if sae_conf["sgd"]["name"] == "adam":
  43 + sgd = Adam(lr=sae_conf["sgd"]["lr"])
  44 + elif sae_conf["sgd"]["name"] == "sgd":
  45 + sgd = SGD(lr=sae_conf["sgd"]["lr"])
  46 +except :
  47 + sgd = sae_conf["sgd"]
43 48  
44   -params={ "h1" : "_".join([str(x) for x in hidden_size]),
45   - "inside_activation" : input_activation,
46   - "out_activation" : output_activation,
47   - "do_dropout": "_".join([str(x) for x in do_do]),
48   - "loss" : loss,
49   - "epochs" : epochs ,
50   - "batch_size" : batch,
51   - "patience" : patience,
52   - "sgd" : sgd_repr}
53   -name = "_".join([ str(x) for x in params.values()])
  49 +name = json_conf["name"]
54 50 try:
55   - os.mkdir("{}/SAE_{}".format(in_dir,name))
  51 + os.mkdir("{}/{}".format(in_dir,name))
56 52 except:
57 53 pass
58   -db = shelve.open("{}/SAE_{}/ae_model.shelve".format(in_dir,name),writeback=True)
  54 +db = shelve.open("{}/{}/ae_model.shelve".format(in_dir,name),writeback=True)
59 55 #
60   -json.dump(params,
61   - open("{}/SAE_{}/ae_model.json".format(in_dir,name),"w"),
62   - indent=4)
63   -
64 56 keys = ["ASR","TRS"]
  57 +mlp_conf = json_conf["mlp"]
  58 +mlp_h = mlp_conf["hidden_size"]
  59 +mlp_loss = mlp_conf["loss"]
  60 +mlp_dropouts = mlp_conf["do"]
  61 +mlp_epochs = mlp_conf["epochs"]
  62 +mlp_batch_size = mlp_conf["batch"]
  63 +mlp_input_activation=mlp_conf["input_activation"]
  64 +mlp_output_activation=mlp_conf["output_activation"]
65 65  
66   -mlp_h = [ 150 , 300 ]
67   -mlp_loss ="categorical_crossentropy"
68   -mlp_dropouts = [0,0,0,0]
69   -mlp_sgd = Adam(0.001)
70   -mlp_epochs = 2000
71   -mlp_batch_size = 8
  66 +try:
  67 + k = mlp_conf["sgd"]
  68 + if mlp_conf["sgd"]["name"] == "adam":
  69 + mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"])
  70 + elif mlp_conf["sgd"]["name"] == "sgd" :
  71 + mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"])
  72 +except :
  73 + mlp_sgd = mlp_conf["sgd"]
72 74  
  75 +
73 76 db["SAE"] = {}
74 77  
75 78 db["SAEFT"] = {}
76 79 for mod in keys :
77   - print "MODE ", mod
78 80 res_tuple=train_sae(infer_model["LDA"][mod]["TRAIN"],infer_model["LDA"][mod]["DEV"],
79 81 infer_model["LDA"][mod]["TEST"],
80 82 hidden_size,dropouts=do_do,
81   - patience = params["patience"],sgd=sgd,input_activation="tanh",
  83 + patience = "patience",sgd=sgd,input_activation="tanh",
82 84 output_activation="tanh",loss=loss,epochs=epochs,
83 85 batch_size=batch,verbose=0)
84 86 #print len(res), [len(x) for x in res[0]], [ len(x) for x in res[1]]
85 87 for name , levels in zip(["SAE","SAEFT"],res_tuple):
86   - print "NAME", name
87 88 mlp_res_by_level = []
88 89 for res in levels:
89 90 mlp_res_list=[]
90 91 for nb,layer in enumerate(res) :
91   - print "layer NB",nb
92 92 mlp_res_list.append(train_mlp(layer[0],infer_model["LABEL"][mod]["TRAIN"],
93 93 layer[1],infer_model["LABEL"][mod]["DEV"],
94 94 layer[2],infer_model["LABEL"][mod]["TEST"],
95 95  
... ... @@ -100,11 +100,10 @@
100 100  
101 101 mod = "ASR"
102 102 mod2= "TRS"
103   -print "mode SPE "
104 103 res_tuple = train_sae(infer_model["LDA"][mod]["TRAIN"],
105 104 infer_model["LDA"][mod]["DEV"],
106 105 infer_model["LDA"][mod]["TEST"],
107   - hidden_size,dropouts=[0],patience=params["patience"],
  106 + hidden_size,dropouts=[0],patience=patience,
108 107 sgd=sgd,input_activation=input_activation,output_activation=input_activation,
109 108 loss=loss,epochs=epochs,batch_size=batch,
110 109 y_train=infer_model["LDA"][mod2]["TRAIN"],
... ... @@ -125,5 +124,6 @@
125 124 mlp_res_by_level.append(mlp_res_list)
126 125 db[name]["SPE"] = mlp_res_by_level
127 126  
  127 +db.sync()
128 128 db.close()
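The optimizer-parsing try/except above is duplicated for every "sgd" entry (ae, sae, dsae, mlp, vae). A hypothetical helper, not part of this commit, that would factor it out; it accepts either a Keras optimizer name string or a {"name", "lr"} dict, exactly like the inline versions:

from keras.optimizers import SGD, Adam

def parse_sgd(conf):
    # conf is either an optimizer name such as "rmsprop",
    # or a dict such as {"name": "adam", "lr": 0.0001}
    if isinstance(conf, dict):
        if conf["name"] == "adam":
            return Adam(lr=conf["lr"])
        elif conf["name"] == "sgd":
            return SGD(lr=conf["lr"])
    return conf

sgd = parse_sgd(sae_conf["sgd"])
mlp_sgd = parse_sgd(mlp_conf["sgd"])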
LDA/04d-mmf_dsae.py
... ... @@ -15,6 +15,7 @@
15 15 import sklearn.metrics
16 16 import shelve
17 17 import pickle
  18 +
18 19 from utils import *
19 20 import sys
20 21 import os
21 22  
22 23  
23 24  
24 25  
25 26  
26 27  
27 28  
28 29  
29 30  
30 31  
... ... @@ -26,94 +27,71 @@
26 27 #['ASR', 'TRS', 'LABEL']
27 28 # In[6]:
28 29  
29   -# AE params
30   -hidden_size=[ 100, 100 ]
31   -input_activation="relu"
32   -output_activation="relu"
33   -loss="mse"
34   -epochs= 1000
35   -batch_size=1
36   -patience=20
37   -do_do=[ 0.25 ] * len(hidden_size)
38   -sgd = Adam(lr=0.00001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
39   -try :
40   - sgd_repr=sgd.get_config()["name"]
41   -except AttributeError :
42   - sgd_repr=sgd
  30 +json_conf =json.load(open(sys.argv[3]))
43 31  
44   -# Transforme :
45   -trans_hidden_size=[ 300 , 300 ]
46   -trans_input_activation="relu"
47   -trans_output_activation="relu"
48   -trans_loss="mse"
49   -trans_epochs=1000
50   -trans_batch_size=8
51   -trans_patience=20
52   -trans_do=[ 0.25 ] * len(trans_hidden_size)
53   -trans_sgd = Adam(lr=0.0001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
54   -try :
55   - trans_sgd_repr=trans_sgd.get_config()["name"]
56   -except AttributeError :
57   - trans_sgd_repr=trans_sgd
  32 +dsae_conf = json_conf["dsae"]
58 33  
  34 +hidden_size= dsae_conf["hidden_size"]
  35 +input_activation=dsae_conf["input_activation"]
  36 +output_activation=dsae_conf["output_activation"]
  37 +loss=dsae_conf["loss"]
  38 +epochs=dsae_conf["epochs"]
  39 +batch_size=dsae_conf["batch"]
  40 +patience=dsae_conf["patience"]
  41 +do_do=dsae_conf["do"]
  42 +try:
  43 + k = dsae_conf["sgd"]
  44 + if dsae_conf["sgd"]["name"] == "adam":
  45 + sgd = Adam(lr=dsae_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
  46 + elif dsae_conf["sgd"]["name"] == "sgd":
  47 + sgd = SGD(lr=dsae_conf["sgd"]["lr"])
  48 +except:
  49 + sgd = dsae_conf["sgd"]
59 50  
  51 +trans_conf = json_conf["dsae"]["transform"]
  52 +trans_hidden_size=trans_conf["hidden_size"]
  53 +trans_input_activation=trans_conf["input_activation"]
  54 +trans_output_activation=trans_conf["output_activation"]
  55 +trans_loss=trans_conf["loss"]
  56 +trans_epochs=trans_conf["epochs"]
  57 +trans_batch_size=trans_conf["batch"]
  58 +trans_patience=trans_conf["patience"]
  59 +trans_do=trans_conf["do"]
  60 +try:
  61 + k = trans_conf["sgd"]
  62 + if trans_conf["sgd"]["name"] == "adam":
  63 + trans_sgd = Adam(lr=trans_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
  64 + elif trans_conf["sgd"]["name"] == "sgd":
  65 + trans_sgd = SGD(lr=trans_conf["sgd"]["lr"])
  66 +except :
  67 + trans_sgd = trans_conf["sgd"]
60 68  
61   -ae={ "h1" : "_".join([str(x) for x in hidden_size]),
62   - "inside_activation" : input_activation,
63   - "out_activation" : output_activation,
64   - "do_dropout": "_".join([str(x) for x in do_do]),
65   - "loss" : loss,
66   - "epochs" : epochs ,
67   - "batch_size" : batch_size,
68   - "patience" : patience,
69   - "sgd" : sgd_repr}
70   -name = "_".join([ str(x) for x in ae.values()])
71 69  
72   -trans={ "h1" : "_".join([str(x) for x in trans_hidden_size]),
73   - "inside_activation" : trans_input_activation,
74   - "out_activation" : trans_output_activation,
75   - "do_dropout": "_".join([str(x) for x in trans_do]),
76   - "loss" : trans_loss,
77   - "epochs" : trans_epochs ,
78   - "batch_size" : trans_batch_size,
79   - "patience" : trans_patience,
80   - "sgd" : trans_sgd_repr}
  70 +mlp_conf = json_conf["mlp"]
  71 +mlp_h = mlp_conf["hidden_size"]
  72 +mlp_loss = mlp_conf["loss"]
  73 +mlp_dropouts = mlp_conf["do"]
  74 +mlp_epochs = mlp_conf["epochs"]
  75 +mlp_batch_size = mlp_conf["batch"]
  76 +mlp_input_activation=mlp_conf["input_activation"]
  77 +mlp_output_activation=mlp_conf["output_activation"]
  78 +try:
  79 + k = mlp_conf["sgd"]
  80 + if mlp_conf["sgd"]["name"] == "adam":
  81 + mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
  82 + elif mlp_conf["sgd"]["name"] == "sgd":
  83 + mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"])
  84 +except:
  85 + mlp_sgd = mlp_conf["sgd"]
81 86  
82   -mlp_h = [ 300 , 300 ]
83   -mlp_loss ="categorical_crossentropy"
84   -mlp_dropouts = [0,0,0,0]
85   -mlp_sgd = Adam(0.0001)
86   -mlp_epochs = 1000
87   -mlp_batch_size = 8
88   -mlp_input_activation = "relu"
89   -mlp_output_activation = "softmax"
90 87  
91   -try :
92   - mlp_sgd_repr=mlp_sgd.get_config()["name"]
93   -except AttributeError :
94   - mlp_sgd_repr=mlp_sgd
95   -
96   -
97   -
98   -mlp={ "h1" : "_".join([str(x) for x in mlp_h ]),
99   - "inside_activation" : mlp_input_activation,
100   - "out_activation" : mlp_output_activation,
101   - "do_dropout": "_".join([str(x) for x in mlp_dropouts]),
102   - "loss" : mlp_loss,
103   - "epochs" : mlp_epochs ,
104   - "batch_size" : mlp_batch_size,
105   - "sgd" : mlp_sgd_repr}
106   -
107   -params = { "ae":ae, "trans":trans, "mlp":mlp}
  88 +name = json_conf["name"]
108 89 try:
109   - os.mkdir("{}/DSAE_{}".format(in_dir,name))
  90 + os.mkdir("{}/{}".format(in_dir,name))
110 91 except:
111 92 pass
112   -db = shelve.open("{}/DSAE_{}/ae_model.shelve".format(in_dir,name),writeback=True)
  93 +db = shelve.open("{}/{}/ae_model.shelve".format(in_dir,name),writeback=True)
113 94 #
114   -json.dump(params,
115   - open("{}/DSAE_{}/ae_model.json".format(in_dir,name),"w"),
116   - indent=4)
117 95  
118 96 keys = ["ASR","TRS"]
119 97  
... ... @@ -144,7 +122,6 @@
144 122  
145 123 db["DSAE"][mod] = mlp_res_list
146 124 mod = "TRS"
147   -print hidden_size
148 125 res_tuple_TRS = train_ae(infer_model["LDA"][mod]["TRAIN"],
149 126 infer_model["LDA"][mod]["DEV"],
150 127 infer_model["LDA"][mod]["TEST"],
151 128  
152 129  
153 130  
... ... @@ -173,16 +150,16 @@
173 150  
174 151 print " get weight trans"
175 152  
176   -for asr_pred, trs_pred in zip(res_tuple_ASR[0], res_tuple_TRS[0]):
177   - print "ASR", [ x.shape for x in asr_pred]
  153 +#for asr_pred, trs_pred in zip(res_tuple_ASR[0], res_tuple_TRS[0]):
  154 + # print "ASR", [ x.shape for x in asr_pred]
178 155  
179   - print "TRS", [ x.shape for x in trs_pred]
180   - print
  156 + # print "TRS", [ x.shape for x in trs_pred]
181 157  
182 158 for asr_pred, trs_pred in zip(res_tuple_ASR[0], res_tuple_TRS[0]):
183   - print "ASR", [ x.shape for x in asr_pred]
  159 + # print "ASR", [ x.shape for x in asr_pred]
184 160  
185   - print "TRS", [ x.shape for x in trs_pred]
  161 + # print "TRS", [ x.shape for x in trs_pred]
  162 + # print " TRANS SGD", trans_sgd
186 163 transfert.append( train_ae(asr_pred[0],
187 164 asr_pred[1],
188 165 asr_pred[2],
189 166  
190 167  
... ... @@ -217,13 +194,13 @@
217 194  
218 195 print " FT "
219 196 WA = res_tuple_ASR[1]
220   -print "WA", len(WA), [ len(x) for x in WA]
  197 +#print "WA", len(WA), [ len(x) for x in WA]
221 198 WT = res_tuple_TRS[1]
222 199  
223   -print "WT", len(WT), [ len(x) for x in WT]
  200 +#print "WT", len(WT), [ len(x) for x in WT]
224 201 Wtr = [ x[1] for x in transfert]
225 202  
226   -print "Wtr", len(Wtr), [ len(x) for x in Wtr],[ len(x[1]) for x in Wtr]
  203 +#print "Wtr", len(Wtr), [ len(x) for x in Wtr],[ len(x[1]) for x in Wtr]
227 204  
228 205 ft_res = ft_dsae(infer_model["LDA"]["ASR"]["TRAIN"],
229 206 infer_model["LDA"]["ASR"]["DEV"],
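04d-mmf_dsae.py additionally reads a nested transform block from the dsae section (json_conf["dsae"]["transform"]). A hypothetical sketch of that part of the config, with the removed hard-coded defaults as placeholder values:

dsae_section = {
    "hidden_size": [100, 100],
    "input_activation": "relu", "output_activation": "relu",
    "loss": "mse", "epochs": 1000, "batch": 1, "patience": 20,
    "do": [0.25, 0.25],
    "sgd": {"name": "adam", "lr": 0.00001},
    # read as json_conf["dsae"]["transform"]
    "transform": {
        "hidden_size": [300, 300],
        "input_activation": "relu", "output_activation": "relu",
        "loss": "mse", "epochs": 1000, "batch": 8, "patience": 20,
        "do": [0.25, 0.25],
        "sgd": {"name": "adam", "lr": 0.0001},
    },
}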
LDA/04e-mm_vae.py
1 1
2 2 # coding: utf-8
3   -
4   -# In[2]:
5   -
6   -# Import
7 3 import gensim
8 4 from scipy import sparse
9 5 import itertools
10 6  
11 7  
12 8  
13 9  
14 10  
15 11  
16 12  
17 13  
18 14  
19 15  
... ... @@ -26,71 +22,64 @@
26 22 #['ASR', 'TRS', 'LABEL']
27 23 # In[6]:
28 24  
  25 +json_conf =json.load(open(sys.argv[3]))
  26 +vae_conf = json_conf["vae"]
29 27  
30   -hidden_size= [60]
31   -input_activation="tanh"
32   -output_activation="sigmoid"
33   -epochs=300
34   -batch=1
35   -patience=60
36   -sgd = Adam(lr=0.0001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
37   -latent_dim = 30
  28 +hidden_size= vae_conf["hidden_size"]
  29 +input_activation=vae_conf["input_activation"]
  30 +output_activation=vae_conf["output_activation"]
  31 +epochs=vae_conf["epochs"]
  32 +batch=vae_conf["batch"]
  33 +patience=vae_conf["patience"]
  34 +latent_dim = vae_conf["latent"]
  35 +try:
  36 + k = vae_conf["sgd"]
  37 + if vae_conf["sgd"]["name"] == "adam":
  38 + sgd = Adam(lr=vae_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
  39 + elif vae_conf["sgd"]["name"] == "sgd":
  40 + sgd = SGD(lr=vae_conf["sgd"]["lr"])
  41 +except:
  42 + sgd = vae_conf["sgd"]
38 43  
  44 +mlp_conf = json_conf["mlp"]
  45 +mlp_h = mlp_conf["hidden_size"]
  46 +mlp_loss = mlp_conf["loss"]
  47 +mlp_dropouts = mlp_conf["do"]
  48 +mlp_epochs = mlp_conf["epochs"]
  49 +mlp_batch_size = mlp_conf["batch"]
  50 +mlp_input_activation=mlp_conf["input_activation"]
  51 +mlp_output_activation=mlp_conf["output_activation"]
39 52  
40 53  
41   -mlp_h = [ 256 ]
42   -mlp_loss = "categorical_crossentropy"
43   -mlp_dropouts = []
44   -mlp_sgd = Adam(lr=0.001)
45   -mlp_epochs = 1000
46   -mlp_batch_size = 16
47   -mlp_output_activation="softmax"
  54 +try:
  55 + k = mlp_conf["sgd"]
  56 + if mlp_conf["sgd"]["name"] == "adam":
  57 + mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True)
  58 + elif mlp_conf["sgd"]["name"] == "sgd":
  59 + mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"])
  60 +except:
  61 + mlp_sgd = mlp_conf["sgd"]
48 62  
49   -try :
50   - sgd_repr=sgd.get_config()["name"]
51   -except AttributeError :
52   - sgd_repr=sgd
53 63  
54   -try :
55   - mlp_sgd_repr=mlp_sgd.get_config()["name"]
56   -except AttributeError :
57   - mlp_sgd_repr=mlp_sgd
  64 +name = json_conf["name"]
58 65  
59 66  
60   -params={ "h1" : "_".join([ str(x) for x in hidden_size ]),
61   - "inside_activation" : input_activation,
62   - "output_activation" : output_activation,
63   - "epochs" : epochs ,
64   - "batch_size" : batch,
65   - "patience" : patience,
66   - "sgd" : sgd_repr,
67   - "mlp_h ": "_".join([str(x) for x in mlp_h]),
68   - "mlp_loss ": mlp_loss,
69   - "mlp_dropouts ": "_".join([str(x) for x in mlp_dropouts]),
70   - "mlp_sgd ": mlp_sgd_repr,
71   - "mlp_epochs ": mlp_epochs,
72   - "mlp_batch_size ": mlp_batch_size,
73   - "mlp_output" : mlp_output_activation
74   - }
75   -name = "_".join([ str(x) for x in params.values()])
76 67 try:
77   - os.mkdir("{}/VAE_{}".format(in_dir,name))
  68 + os.mkdir("{}/{}".format(in_dir,name))
78 69 except:
79 70 pass
80   -db = shelve.open("{}/VAE_{}/ae_model.shelve".format(in_dir,name),writeback=True)
81   -db["params"] = params
  71 +
  72 +
  73 +db = shelve.open("{}/{}/ae_model.shelve".format(in_dir,name),writeback=True)
82 74 db["LABEL"]=infer_model["LABEL"]
83 75 #
84   -json.dump(params,
85   - open("{}/VAE_{}/ae_model.json".format(in_dir,name),"w"),
86   - indent=4)
87 76  
88 77 keys = ["ASR","TRS"]
89 78  
90 79 db["VAE"] = {}
91 80 db["LDA"] = {}
92 81 for mod in keys :
93   - print mod
  82 + #print mod
94 83 db["LDA"][mod] = train_mlp(infer_model["LDA"][mod]["TRAIN"],infer_model["LABEL"][mod]["TRAIN"],
95 84 infer_model["LDA"][mod]["DEV"],infer_model["LABEL"][mod]["DEV"],
96 85 infer_model["LDA"][mod]["TEST"],infer_model["LABEL"][mod]["TEST"],
LDA/mlp.py
1   -../VARIATIONAL/Variational-Autoencoder/mlp.py
  1 +# -*- coding: utf-8 -*-
  2 +import keras
  3 +import numpy as np
  4 +#from keras.layers.core import Dense, Dropout, Activation
  5 +from keras.optimizers import SGD,Adam
  6 +from keras.models import Sequential
  7 +from keras.layers import Input, Dense, Dropout
  8 +from keras.models import Model
  9 +from keras.utils.layer_utils import layer_from_config
  10 +from itertools import izip_longest
  11 +
  12 +import pandas
  13 +from collections import namedtuple
  14 +from sklearn.metrics import accuracy_score as perf
  15 +save_tuple= namedtuple("save_tuple",["pred_train","pred_dev","pred_test"])
  16 +
  17 +
  18 +def ft_dsae(train,dev,test,
  19 + y_train=None,y_dev=None,y_test=None,
  20 + ae_hidden=[20],transfer_hidden=[20],
  21 + start_weights=None,transfer_weights=None,end_weights=None,
  22 + input_activation="tanh", output_activation="tanh",
  23 + init="glorot_uniform",
  24 + ae_dropouts=[None], transfer_do=[None],
  25 + sgd="sgd", loss="mse", patience=5, verbose=0, epochs=5, batch_size=8):
  26 +
  27 + if not start_weights :
  28 + start_weights = [ None ] * len(ae_hidden)
  29 + if not transfer_weights :
  30 + transfer_weights = [None ] * len(transfer_hidden)
  31 + if not end_weights :
  32 + end_weights = [ None ] * len(ae_hidden)
  33 + if not transfer_do :
  34 + transfer_do = [0] * len(transfer_hidden)
  35 + predict_y = True
  36 + if y_train is None or y_dev is None or y_test is None :
  37 + y_train = train
  38 + y_dev = dev
  39 + y_test = test
  40 + predict_y = False
  41 + param_predict = [ train, dev, test ]
  42 + if predict_y :
  43 + param_predict += [ y_train, y_dev ,y_test ]
  44 +
  45 + pred_by_level = [] # Holds the predictions for each transfer level
  46 + layers = [Input(shape=(train.shape[1],))]
  47 + #for w in transfer_weights:
  48 + #print "TW",[ [ y.shape for y in x ] for x in w]
  49 + #print "SW",[ [ y.shape for y in x] for x in start_weights]
  50 + #print "EW",[ [ y.shape for y in x ] for x in end_weights]
  51 + for cpt in range(1,len(ae_hidden)):
  52 + #print ae_hidden,cpt
  53 + #print cpt, "before"
  54 + #print "before2", [ [ x.shape for x in y] for y in start_weights[:cpt] ]
  55 + #print "before3", [ [ x.shape for x in y] for y in transfer_weights[cpt]]
  56 + #print "before4", [ [ x.shape for x in y] for y in end_weights[cpt:]]
  57 + sizes = ae_hidden[:cpt] + transfer_hidden + ae_hidden[cpt:]
  58 + weights = start_weights[:cpt] + transfer_weights[(cpt-1)] + end_weights[cpt:]
  59 + #print "SIZES", sizes
  60 + #print "AW",[ [ y.shape for y in x ] for x in weights]
  61 + #print "WEI", len(weights) , [ len(x) for x in weights ]
  62 + if len(ae_dropouts) == len(ae_hidden):
  63 + do = ae_dropouts[:cpt] + transfer_do + ae_dropouts[cpt:]
  64 + else :
  65 + do = [ 0 ] * (len(ae_hidden) + len(transfer_hidden))
  66 + for w in weights[:-1]:
  67 + #print "STEP", size
  68 + layers.append(Dense(w[1].shape[0],activation=input_activation,init=init,weights=w)(layers[-1]))
  69 + if do :
  70 + d = do.pop(0)
  71 + if d > 0 :
  72 + layers.append(Dropout(d)(layers[-1]))
  73 +
  74 + layers.append(Dense(y_train.shape[1],activation=output_activation)(layers[-1]))
  75 + models = [Model(input=layers[0] , output=x) for x in layers[1:]]
  76 + models[-1].compile(optimizer=sgd,loss=loss)
  77 + models[-1].fit(train,y_train,nb_epoch=epochs,batch_size=batch_size,callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, verbose=0)],validation_data=(dev,y_dev),verbose=verbose)
  78 + predictions = [ [x.predict(y) for y in param_predict ] for x in models ]
  79 + pred_by_level.append(predictions)
  80 +
  81 + return pred_by_level
  82 +
  83 +def train_mlp(x_train,y_train,x_dev,y_dev,x_test,y_test,hidden_size,input_activation="relu",hidden_activation="relu",output_activation="softmax",loss="mse",init="glorot_uniform",dropouts=None,sgd=None,epochs=1200,batch_size=16,fit_verbose=1,test_verbose=0,save_pred=False,keep_histo=False):
  84 +
  85 +
  86 + layers = [Input(shape=(x_train.shape[1],))]
  87 +
  88 + for h in hidden_size:
  89 + if dropouts:
  90 + d = dropouts.pop(0)
  91 + if d > 0 :
  92 + layers.append(Dropout(d)(layers[-1]))
  93 +
  94 + layers.append(Dense(h,init=init,activation=input_activation)(layers[-1]))
  95 + #if dropouts:
  96 + # drop_prob=dropouts.pop(0)
  97 + # if drop_prob > 0:
  98 + # model.add(Dropout(drop_prob))
  99 +
  100 + #if dropouts:
  101 + # drop_prob=dropouts.pop(0)
  102 + # if drop_prob > 0:
  103 + # model.add(Dropout(drop_prob))
  104 +
  105 + #if dropouts:
  106 + # model.add(Dropout(dropouts.pop(0)))
  107 + if dropouts:
  108 + d = dropouts.pop(0)
  109 + if d > 0 :
  110 + layers.append(Dropout(d)(layers[-1]))
  111 +
  112 + layers.append(Dense( y_train.shape[1],activation=output_activation,init=init)(layers[-1]))
  113 +
  114 + model = Model(layers[0] , layers[-1])
  115 + if not sgd:
  116 + sgd = SGD(lr=0.01, decay=0, momentum=0.9)
  117 +
  118 + model.compile(loss=loss, optimizer=sgd,metrics=['accuracy'])
  119 +
  120 + scores_dev=[]
  121 + scores_test=[]
  122 + scores_train=[]
  123 + save=None
  124 + for i in range(epochs):
  125 + hist=model.fit(x_train, y_train, nb_epoch=1, batch_size=batch_size,verbose=fit_verbose,validation_data=(x_dev,y_dev))
  126 + pred_train=model.predict(x_train)
  127 + pred_dev=model.predict(x_dev)
  128 + pred_test=model.predict(x_test)
  129 +
  130 + scores_train.append(perf(np.argmax(y_train,axis=1),np.argmax(pred_train,axis=1)))
  131 + scores_dev.append(perf(np.argmax(y_dev,axis=1),np.argmax(pred_dev,axis=1)))
  132 + scores_test.append(perf(np.argmax(y_test,axis=1),np.argmax(pred_test,axis=1)))
  133 + if fit_verbose :
  134 + print "{} {} {} {}".format(i,scores_train[-1],scores_dev[-1],scores_test[-1])
  135 + if save is None or scores_dev[-1] > max(scores_dev[:-1]):
  136 + save=save_tuple(pred_train,pred_dev,pred_test)
  137 + arg_dev = np.argmax(scores_dev)
  138 + best_dev=scores_dev[arg_dev]
  139 + best_test=scores_test[arg_dev]
  140 + max_test=np.max(scores_test)
  141 + if fit_verbose:
  142 + print " res : {} {} {}".format(best_dev,best_test,max_test)
  143 +
  144 + res=[scores_train,scores_dev,scores_test]
  145 + if save_pred:
  146 + res.append(save)
  147 + if keep_histo:
  148 + res.append(hist)
  149 + return res
  150 +
  151 +def train_ae(train,dev,test,hidden_sizes,y_train=None,y_dev=None,y_test=None,dropouts=None,input_activation="tanh",output_activation="tanh",loss="mse",sgd=None,epochs=500,batch_size=8,verbose=1,patience=20,get_weights=False,set_weights=[]):
  152 +
  153 + input_vect = Input(shape=(train.shape[1],))
  154 +
  155 + previous = [input_vect]
  156 +
  157 + if dropouts is None:
  158 + dropouts = [ 0 ] * (len(hidden_sizes) +1)
  159 + if sgd is None :
  160 + sgd = SGD(lr=0.01, decay=0, momentum=0.9)
  161 + did_do = False
  162 + if dropouts :
  163 + d = dropouts.pop(0)
  164 + if d :
  165 + previous.append(Dropout(d)(previous[-1]))
  166 + did_do = True
  167 +
  168 + for h_layer,weight_layer in izip_longest(hidden_sizes,set_weights,fillvalue=None) :
  169 + # ,weights=w
  170 + if weight_layer :
  171 + w = weight_layer[0]
  172 + else :
  173 + w = None
  174 + #print "ADD SIZE" , h_layer
  175 + if did_do :
  176 + p = previous.pop()
  177 + did_do = False
  178 + else :
  179 + p = previous[-1]
  180 + previous.append(Dense(h_layer,activation=input_activation,weights=w)(p)) # p: the dropout output popped above
  181 + if dropouts:
  182 + d = dropouts.pop(0)
  183 + if d :
  184 + previous.append(Dropout(d)(previous[-1]))
  185 + did_do = True
  186 +
  187 + predict_y = True
  188 + if y_train is None or y_dev is None or y_test is None :
  189 + y_train = train
  190 + y_dev = dev
  191 + y_test = test
  192 + predict_y = False
  193 + previous.append(Dense(y_train.shape[1],activation=output_activation)(previous[-1]))
  194 + models = [Model(input=previous[0] , output=x) for x in previous[1:]]
  195 + print "MLP", sgd, loss
  196 + models[-1].compile(optimizer=sgd,loss=loss)
  197 + models[-1].fit(train,y_train,nb_epoch=epochs,batch_size=batch_size,callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, verbose=0)],validation_data=(dev,y_dev),verbose=verbose)
  198 + param_predict = [ train, dev, test ]
  199 + if predict_y :
  200 + param_predict += [ y_train, y_dev ,y_test ]
  201 + predictions = [ [x.predict(y) for y in param_predict ] for x in models ]
  202 + if get_weights :
  203 + weights = [ x.get_weights() for x in models[-1].layers if x.get_weights() ]
  204 + return ( predictions , weights )
  205 + else :
  206 + return predictions
  207 +
  208 +def train_sae(train,dev,test,hidden_sizes,y_train=None,y_dev=None,y_test=None,dropouts=None,input_activation="tanh",output_activation="tanh",loss="mse",sgd=None,epochs=500,batch_size=8,verbose=1,patience=20):
  209 +
  210 + weights = []
  211 + predictions = [[(train,dev,test),()]]
  212 + ft_pred = []
  213 + past_sizes = []
  214 +
  215 +
  216 + for size in hidden_sizes :
  217 + #print "DO size " , size , "FROM" , hidden_sizes
  218 + res_pred, res_wght = train_ae(predictions[-1][-2][0], predictions[-1][-2][1],predictions[-1][-2][2],[size],
  219 + dropouts=dropouts, input_activation=input_activation,
  220 + output_activation=output_activation, loss=loss, sgd=sgd,
  221 + epochs=epochs, batch_size=batch_size, verbose=verbose,
  222 + patience=patience,get_weights=True)
  223 + past_sizes.append(size)
  224 + weights.append(res_wght)
  225 + predictions.append(res_pred)
  226 + #print "FINE TUNE "
  227 + res_ftpred = train_ae(train,dev,test,past_sizes,y_train=y_train,y_dev=y_dev,y_test=y_test,
  228 + dropouts=dropouts,
  229 + input_activation=input_activation,
  230 + output_activation=output_activation,
  231 + loss=loss,sgd=sgd,epochs=epochs,
  232 + batch_size=batch_size,verbose=verbose,patience=patience,
  233 + set_weights=weights)
  234 + ft_pred.append(res_ftpred)
  235 +
  236 + return ( predictions[1:] , ft_pred)
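A minimal, hypothetical smoke test for the rewritten module, assuming it is importable as mlp and using tiny random arrays in place of the LDA features:

import numpy as np
from mlp import train_mlp

x = np.random.rand(30, 20)                  # 30 documents, 20 topic features
y = np.eye(3)[np.random.randint(0, 3, 30)]  # one-hot labels for 3 classes
scores_train, scores_dev, scores_test = train_mlp(
    x, y, x, y, x, y,
    hidden_size=[16], dropouts=[0, 0],
    epochs=5, batch_size=8, fit_verbose=0)
print scores_dev[-1]  # dev accuracy after the last epoch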
1   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/193/ MM_features/data_w20/mmf_193.shelve >> output_v7/recap.txt
2   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/194/ MM_features/data_w20/mmf_194.shelve >> output_v7/recap.txt
3   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/195/ MM_features/data_w20/mmf_195.shelve >> output_v7/recap.txt
4   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/196/ MM_features/data_w20/mmf_196.shelve >> output_v7/recap.txt
5   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/197/ MM_features/data_w20/mmf_197.shelve >> output_v7/recap.txt
6   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/198/ MM_features/data_w20/mmf_198.shelve >> output_v7/recap.txt
7   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/393/ MM_features/data_w20/mmf_393.shelve >> output_v7/recap.txt
  1 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/193/ MM_features/data_w99/mmf_193.shelve >> output_v8/recap.txt
  2 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/194/ MM_features/data_w99/mmf_194.shelve >> output_v8/recap.txt
  3 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/195/ MM_features/data_w99/mmf_195.shelve >> output_v8/recap.txt
  4 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/196/ MM_features/data_w99/mmf_196.shelve >> output_v8/recap.txt
  5 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/197/ MM_features/data_w99/mmf_197.shelve >> output_v8/recap.txt
  6 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/198/ MM_features/data_w99/mmf_198.shelve >> output_v8/recap.txt
  7 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/393/ MM_features/data_w99/mmf_393.shelve >> output_v8/recap.txt
1   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/394/ MM_features/data_w20/mmf_394.shelve >> output_v7/recap.txt
2   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/395/ MM_features/data_w20/mmf_395.shelve >> output_v7/recap.txt
3   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/396/ MM_features/data_w20/mmf_396.shelve >> output_v7/recap.txt
4   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/397/ MM_features/data_w20/mmf_397.shelve >> output_v7/recap.txt
5   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/43/ MM_features/data_w20/mmf_43.shelve >> output_v7/recap.txt
6   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/44/ MM_features/data_w20/mmf_44.shelve >> output_v7/recap.txt
7   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/45/ MM_features/data_w20/mmf_45.shelve >> output_v7/recap.txt
  1 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/394/ MM_features/data_w99/mmf_394.shelve >> output_v8/recap.txt
  2 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/395/ MM_features/data_w99/mmf_395.shelve >> output_v8/recap.txt
  3 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/396/ MM_features/data_w99/mmf_396.shelve >> output_v8/recap.txt
  4 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/397/ MM_features/data_w99/mmf_397.shelve >> output_v8/recap.txt
  5 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/43/ MM_features/data_w99/mmf_43.shelve >> output_v8/recap.txt
  6 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/44/ MM_features/data_w99/mmf_44.shelve >> output_v8/recap.txt
  7 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/45/ MM_features/data_w99/mmf_45.shelve >> output_v8/recap.txt
1   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/46/ MM_features/data_w20/mmf_46.shelve >> output_v7/recap.txt
2   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/47/ MM_features/data_w20/mmf_47.shelve >> output_v7/recap.txt
3   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/48/ MM_features/data_w20/mmf_48.shelve >> output_v7/recap.txt
4   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/493/ MM_features/data_w20/mmf_493.shelve >> output_v7/recap.txt
5   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/494/ MM_features/data_w20/mmf_494.shelve >> output_v7/recap.txt
6   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/495/ MM_features/data_w20/mmf_495.shelve >> output_v7/recap.txt
7   -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/496/ MM_features/data_w20/mmf_496.shelve >> output_v7/recap.txt
  1 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/46/ MM_features/data_w99/mmf_46.shelve >> output_v8/recap.txt
  2 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/47/ MM_features/data_w99/mmf_47.shelve >> output_v8/recap.txt
  3 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/48/ MM_features/data_w99/mmf_48.shelve >> output_v8/recap.txt
  4 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/493/ MM_features/data_w99/mmf_493.shelve >> output_v8/recap.txt
  5 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/494/ MM_features/data_w99/mmf_494.shelve >> output_v8/recap.txt
  6 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/495/ MM_features/data_w99/mmf_495.shelve >> output_v8/recap.txt
  7 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/496/ MM_features/data_w99/mmf_496.shelve >> output_v8/recap.txt
1   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/497/ MM_features/data_w20/mmf_497.shelve >> output_v7/recap.txt
2   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/50/ MM_features/data_w20/mmf_50.shelve >> output_v7/recap.txt
3   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/93/ MM_features/data_w20/mmf_93.shelve >> output_v7/recap.txt
4   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/94/ MM_features/data_w20/mmf_94.shelve >> output_v7/recap.txt
5   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/95/ MM_features/data_w20/mmf_95.shelve >> output_v7/recap.txt
6   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/96/ MM_features/data_w20/mmf_96.shelve >> output_v7/recap.txt
7   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/97/ MM_features/data_w20/mmf_97.shelve >> output_v7/recap.txt
8   -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/98/ MM_features/data_w20/mmf_98.shelve >> output_v7/recap.txt
  1 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/497/ MM_features/data_w99/mmf_497.shelve >> output_v8/recap.txt
  2 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/50/ MM_features/data_w99/mmf_50.shelve >> output_v8/recap.txt
  3 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/93/ MM_features/data_w99/mmf_93.shelve >> output_v8/recap.txt
  4 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/94/ MM_features/data_w99/mmf_94.shelve >> output_v8/recap.txt
  5 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/95/ MM_features/data_w99/mmf_95.shelve >> output_v8/recap.txt
  6 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/96/ MM_features/data_w99/mmf_96.shelve >> output_v8/recap.txt
  7 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/97/ MM_features/data_w99/mmf_97.shelve >> output_v8/recap.txt
  8 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/98/ MM_features/data_w99/mmf_98.shelve >> output_v8/recap.txt
  1 +output_dir=$1
  2 +features=$2
  3 +json_conf=$3
  4 +time (
  5 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04b-mmf_mini_ae.py $output_dir $features $json_conf >> $output_dir/miniae.log &
  6 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04c-mmf_sae.py $output_dir $features $json_conf >> $output_dir/sae.log &
  7 +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04d-mmf_dsae.py $output_dir $features $json_conf >> $output_dir/dsae.log &
  8 +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04e-mm_vae.py $output_dir $features $json_conf >> $output_dir/vae.log &
  9 +wait
  10 +)