From e5108393c82cba7f29adcc24d27a17368a398e7e Mon Sep 17 00:00:00 2001 From: Killian Date: Thu, 7 Jul 2016 01:29:57 +0200 Subject: [PATCH] replace du mlp.py et ajout de la mmf_experience --- LDA/00-mmf_make_features.py | 18 ++-- LDA/04a-mmdf.py | 2 +- LDA/04b-mmf_mini_ae.py | 102 ++++++++----------- LDA/04c-mmf_sae.py | 82 +++++++-------- LDA/04d-mmf_dsae.py | 165 +++++++++++++----------------- LDA/04e-mm_vae.py | 103 +++++++++---------- LDA/mlp.py | 239 +++++++++++++++++++++++++++++++++++++++++++- LDA/mlp_mmf1.sh | 14 +-- LDA/mlp_mmf2.sh | 14 +-- LDA/mlp_mmf3.sh | 14 +-- LDA/mlp_mmf4.sh | 16 +-- LDA/run_mmf.sh | 10 ++ 12 files changed, 490 insertions(+), 289 deletions(-) mode change 120000 => 100755 LDA/mlp.py create mode 100644 LDA/run_mmf.sh diff --git a/LDA/00-mmf_make_features.py b/LDA/00-mmf_make_features.py index cc80b49..f78e185 100644 --- a/LDA/00-mmf_make_features.py +++ b/LDA/00-mmf_make_features.py @@ -11,14 +11,16 @@ from utils import select_mmf as select input_dir = sys.argv[1] # Dossier de premire niveau contient ASR et TRS level = sys.argv[2] # taille de LDA ( -5) voulu +output_dir = sys.argv[3] lb=LabelBinarizer() #y_train=lb.fit_transform([utils.select(ligneid) for ligneid in origin_corps["LABEL"]["TRAIN"]]) -data = shelve.open("{}/mmf_{}.shelve".format(input_dir,level)) -data["LABEL"]= {"LDA":{}} -for mod in ["ASR", "TRS" ] +data = shelve.open("{}/mmf_{}.shelve".format(output_dir,level),writeback=True) +data["LABEL"]= {} +data["LDA"] = {"ASR":{},"TRS":{}} +for mod in ["ASR", "TRS" ]: train = pandas.read_table("{}/{}/train_{}.ssv".format(input_dir, mod, level), sep=" ", header=None ) dev = pandas.read_table("{}/{}/dev_{}.ssv".format(input_dir, mod, level), sep=" ", header=None ) test = pandas.read_table("{}/{}/test_{}.ssv".format(input_dir, mod, level), sep=" ", header=None ) @@ -29,10 +31,12 @@ for mod in ["ASR", "TRS" ] lb.fit(y_train) data["LABEL"][mod]={"TRAIN":lb.transform(y_train),"DEV":lb.transform(y_dev), "TEST": lb.transform(y_test)} - data["LDA"][mod]={} - data["LDA"][mod]["TRAIN"]=train.iloc[:,1:].values - data["LDA"][mod]["DEV"]=dev.iloc[:,1:].values - data["LDA"][mod]["TEST"]=test.iloc[:,1:].values + # data["LDA"][mod]={'ASR':[]} + print data["LDA"][mod] + print train.values + data["LDA"][mod]["TRAIN"]=train.iloc[:,1:-1].values + data["LDA"][mod]["DEV"]=dev.iloc[:,1:-1].values + data["LDA"][mod]["TEST"]=test.iloc[:,1:-1].values data.sync() data.close() diff --git a/LDA/04a-mmdf.py b/LDA/04a-mmdf.py index bb281e4..8c49391 100644 --- a/LDA/04a-mmdf.py +++ b/LDA/04a-mmdf.py @@ -101,7 +101,7 @@ for key in ["TRS", "ASR"] : test_max = numpy.max(res[2]) out_db[key]=(res,(dev_best,test_best,test_max)) ress.append((key,dev_best,test_best,test_max)) - +print sys.argv[2] for el in ress : print el out_db.close() diff --git a/LDA/04b-mmf_mini_ae.py b/LDA/04b-mmf_mini_ae.py index d2000f7..dc52788 100644 --- a/LDA/04b-mmf_mini_ae.py +++ b/LDA/04b-mmf_mini_ae.py @@ -25,87 +25,71 @@ in_dir = sys.argv[1] #['ASR', 'TRS', 'LABEL'] # In[6]: +json_conf =json.load(open(sys.argv[3])) +ae_conf = json_conf["ae"] + +hidden_size= ae_conf["hidden_size"] +input_activation=ae_conf["input_activation"] +output_activation=ae_conf["output_activation"] +loss=ae_conf["loss"] +epochs=ae_conf["epochs"] +batch=ae_conf["batch"] +patience=ae_conf["patience"] +do_do=ae_conf["do"] +try: + k = ae_conf["sgd"] + if ae_conf["sgd"]["name"] == "adam": + sgd = Adam(lr=ae_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) + elif 
ae_conf["sgd"]["name"] == "sgd": + sgd = SGD(lr=ae_conf["sgd"]["lr"]) +except: + sgd = ae_conf["sgd"] + +mlp_conf = json_conf["mlp"] +mlp_h = mlp_conf["hidden_size"] +mlp_loss = mlp_conf["loss"] +mlp_dropouts = mlp_conf["do"] +mlp_epochs = mlp_conf["epochs"] +mlp_batch_size = mlp_conf["batch"] +mlp_input_activation=mlp_conf["input_activation"] +mlp_output_activation=mlp_conf["output_activation"] -hidden_size=[ 100 , 50, 100 ] -input_activation="tanh" -output_activation="tanh" -loss="mse" -epochs=1000 -batch=1 -patience=60 -do_do=[False] -sgd = Adam(lr=0.000001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) - - - -mlp_h = [ 150 ,150 ,150 ] -mlp_loss = "categorical_crossentropy" -mlp_dropouts = [] -mlp_sgd = Adam(lr=0.0001) -mlp_epochs = 2000 -mlp_batch_size = 8 -mlp_output_activation="softmax" - -try : - sgd_repr=sgd.get_config()["name"] -except AttributeError : - sgd_repr=sgd - -try : - mlp_sgd_repr=mlp_sgd.get_config()["name"] -except AttributeError : - mlp_sgd_repr=mlp_sgd - - -params={ "h1" : "_".join([ str(x) for x in hidden_size ]), - "inside_activation" : input_activation, - "output_activation" : output_activation, - "do_dropout": "_".join([str(x) for x in do_do]), - "loss" : loss, - "epochs" : epochs , - "batch_size" : batch, - "patience" : patience, - "sgd" : sgd_repr, - "mlp_h ": "_".join([str(x) for x in mlp_h]), - "mlp_loss ": mlp_loss, - "mlp_dropouts ": "_".join([str(x) for x in mlp_dropouts]), - "mlp_sgd ": mlp_sgd_repr, - "mlp_epochs ": mlp_epochs, - "mlp_batch_size ": mlp_batch_size, - "mlp_output" : mlp_output_activation - } -name = "_".join([ str(x) for x in params.values()]) +try: + k = mlp_conf["sgd"] + if mlp_conf["sgd"]["name"] == "adam": + mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) + elif mlp_conf["sgd"]["name"] == "sgd": + mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"]) +except: + mlp_sgd = mlp_conf["sgd"] + + +name = json_conf["name"] try: os.mkdir("{}/{}".format(in_dir,name)) except: pass db = shelve.open("{}/{}/ae_model.shelve".format(in_dir,name),writeback=True) -db["params"] = params db["LABEL"]=infer_model["LABEL"] # -json.dump(params, - open("{}/{}/ae_model.json".format(in_dir,name),"w"), - indent=4) - keys = ["ASR","TRS"] db["AE"] = {} db["LDA"] = {} for mod in keys : - print mod db["LDA"][mod] = train_mlp(infer_model["LDA"][mod]["TRAIN"],infer_model["LABEL"][mod]["TRAIN"], infer_model["LDA"][mod]["DEV"],infer_model["LABEL"][mod]["DEV"], infer_model["LDA"][mod]["TEST"],infer_model["LABEL"][mod]["TEST"], mlp_h ,sgd=mlp_sgd, epochs=mlp_epochs, batch_size=mlp_batch_size, - input_activation=input_activation, + input_activation=mlp_input_activation, output_activation=mlp_output_activation, dropouts=mlp_dropouts, fit_verbose=0) res=train_ae(infer_model["LDA"][mod]["TRAIN"],infer_model["LDA"][mod]["DEV"],infer_model["LDA"][mod]["TEST"], - hidden_size,patience = params["patience"],sgd=sgd, + hidden_size,patience = patience,sgd=sgd, dropouts=do_do,input_activation=input_activation,output_activation=output_activation, loss=loss,epochs=epochs,batch_size=batch,verbose=0) mlp_res_list=[] @@ -115,7 +99,7 @@ for mod in keys : layer[2],infer_model["LABEL"][mod]["TEST"], mlp_h,loss=mlp_loss,dropouts=mlp_dropouts,sgd=mlp_sgd,epochs=mlp_epochs, output_activation=mlp_output_activation, - input_activation=input_activation, + input_activation=mlp_input_activation, batch_size=mlp_batch_size,fit_verbose=0)) db["AE"][mod]=mlp_res_list 
@@ -126,7 +110,7 @@ mlp_res_list=[] res = train_ae(infer_model["LDA"][mod]["TRAIN"], infer_model["LDA"][mod]["DEV"], infer_model["LDA"][mod]["TEST"], - hidden_size,dropouts=do_do,patience = params["patience"], + hidden_size,dropouts=do_do,patience = patience, sgd=sgd,input_activation=input_activation,output_activation=output_activation,loss=loss,epochs=epochs, batch_size=batch, y_train=infer_model["LDA"][mod]["TRAIN"], diff --git a/LDA/04c-mmf_sae.py b/LDA/04c-mmf_sae.py index 6811118..1130cac 100644 --- a/LDA/04c-mmf_sae.py +++ b/LDA/04c-mmf_sae.py @@ -25,70 +25,70 @@ infer_model=shelve.open("{}".format(sys.argv[2])) in_dir = sys.argv[1] #['ASR', 'TRS', 'LABEL'] # In[6]: +json_conf =json.load(open(sys.argv[3])) +sae_conf = json_conf["sae"] +hidden_size= sae_conf["hidden_size"] +input_activation=sae_conf["input_activation"] +output_activation=sae_conf["output_activation"] +loss=sae_conf["loss"] +epochs=sae_conf["epochs"] +batch=sae_conf["batch"] +patience=sae_conf["patience"] +do_do=sae_conf["do"] -hidden_size=[ 100, 80, 50 , 20 ] -input_activation="relu" -output_activation="relu" -loss="mse" -epochs=3000 -batch=1 -patience=20 -do_do=[ 0 ] * len(hidden_size) -sgd = Adam(lr=0.0001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) -try : - sgd_repr=sgd.get_config()["name"] -except AttributeError : - sgd_repr=sgd +try: + k = sae_conf["sgd"] + if sae_conf["sgd"]["name"] == "adam": + sgd = Adam(lr=sae_conf["sgd"]["lr"]) + elif sae_conf["sgd"]["name"] == "sgd": + sgd = SGD(lr=sae_conf["sgd"]["lr"]) +except : + sgd = sae_conf["sgd"] -params={ "h1" : "_".join([str(x) for x in hidden_size]), - "inside_activation" : input_activation, - "out_activation" : output_activation, - "do_dropout": "_".join([str(x) for x in do_do]), - "loss" : loss, - "epochs" : epochs , - "batch_size" : batch, - "patience" : patience, - "sgd" : sgd_repr} -name = "_".join([ str(x) for x in params.values()]) +name = json_conf["name"] try: - os.mkdir("{}/SAE_{}".format(in_dir,name)) + os.mkdir("{}/{}".format(in_dir,name)) except: pass -db = shelve.open("{}/SAE_{}/ae_model.shelve".format(in_dir,name),writeback=True) +db = shelve.open("{}/{}/ae_model.shelve".format(in_dir,name),writeback=True) # -json.dump(params, - open("{}/SAE_{}/ae_model.json".format(in_dir,name),"w"), - indent=4) - keys = ["ASR","TRS"] +mlp_conf = json_conf["mlp"] +mlp_h = mlp_conf["hidden_size"] +mlp_loss = mlp_conf["loss"] +mlp_dropouts = mlp_conf["do"] +mlp_epochs = mlp_conf["epochs"] +mlp_batch_size = mlp_conf["batch"] +mlp_input_activation=mlp_conf["input_activation"] +mlp_output_activation=mlp_conf["output_activation"] + +try: + k = mlp_conf["sgd"] + if mlp_conf["sgd"]["name"] == "adam": + mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"]) + elif mlp_conf["sgd"]["name"] == "sgd" : + mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"]) +except : + mlp_sgd = mlp_conf["sgd"] -mlp_h = [ 150 , 300 ] -mlp_loss ="categorical_crossentropy" -mlp_dropouts = [0,0,0,0] -mlp_sgd = Adam(0.001) -mlp_epochs = 2000 -mlp_batch_size = 8 db["SAE"] = {} db["SAEFT"] = {} for mod in keys : - print "MODE ", mod res_tuple=train_sae(infer_model["LDA"][mod]["TRAIN"],infer_model["LDA"][mod]["DEV"], infer_model["LDA"][mod]["TEST"], hidden_size,dropouts=do_do, - patience = params["patience"],sgd=sgd,input_activation="tanh", + patience = "patience",sgd=sgd,input_activation="tanh", output_activation="tanh",loss=loss,epochs=epochs, batch_size=batch,verbose=0) #print len(res), [len(x) for x in res[0]], [ len(x) for x in res[1]] for name , levels in 
zip(["SAE","SAEFT"],res_tuple): - print "NAME", name mlp_res_by_level = [] for res in levels: mlp_res_list=[] for nb,layer in enumerate(res) : - print "layer NB",nb mlp_res_list.append(train_mlp(layer[0],infer_model["LABEL"][mod]["TRAIN"], layer[1],infer_model["LABEL"][mod]["DEV"], layer[2],infer_model["LABEL"][mod]["TEST"], @@ -100,11 +100,10 @@ for mod in keys : mod = "ASR" mod2= "TRS" -print "mode SPE " res_tuple = train_sae(infer_model["LDA"][mod]["TRAIN"], infer_model["LDA"][mod]["DEV"], infer_model["LDA"][mod]["TEST"], - hidden_size,dropouts=[0],patience=params["patience"], + hidden_size,dropouts=[0],patience="patience", sgd=sgd,input_activation=input_activation,output_activation=input_activation, loss=loss,epochs=epochs,batch_size=batch, y_train=infer_model["LDA"][mod2]["TRAIN"], @@ -125,4 +124,5 @@ for name , levels in zip(["SAE","SAEFT"],res_tuple): mlp_res_by_level.append(mlp_res_list) db[name]["SPE"] = mlp_res_by_level +db.sync() db.close() diff --git a/LDA/04d-mmf_dsae.py b/LDA/04d-mmf_dsae.py index f2a6e6f..d768f9b 100644 --- a/LDA/04d-mmf_dsae.py +++ b/LDA/04d-mmf_dsae.py @@ -15,6 +15,7 @@ import mlp import sklearn.metrics import shelve import pickle + from utils import * import sys import os @@ -26,94 +27,71 @@ in_dir = sys.argv[1] #['ASR', 'TRS', 'LABEL'] # In[6]: -# AE params -hidden_size=[ 100, 100 ] -input_activation="relu" -output_activation="relu" -loss="mse" -epochs= 1000 -batch_size=1 -patience=20 -do_do=[ 0.25 ] * len(hidden_size) -sgd = Adam(lr=0.00001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) -try : - sgd_repr=sgd.get_config()["name"] -except AttributeError : - sgd_repr=sgd - -# Transforme : -trans_hidden_size=[ 300 , 300 ] -trans_input_activation="relu" -trans_output_activation="relu" -trans_loss="mse" -trans_epochs=1000 -trans_batch_size=8 -trans_patience=20 -trans_do=[ 0.25 ] * len(trans_hidden_size) -trans_sgd = Adam(lr=0.0001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) -try : - trans_sgd_repr=trans_sgd.get_config()["name"] -except AttributeError : - trans_sgd_repr=trans_sgd - - - -ae={ "h1" : "_".join([str(x) for x in hidden_size]), - "inside_activation" : input_activation, - "out_activation" : output_activation, - "do_dropout": "_".join([str(x) for x in do_do]), - "loss" : loss, - "epochs" : epochs , - "batch_size" : batch_size, - "patience" : patience, - "sgd" : sgd_repr} -name = "_".join([ str(x) for x in ae.values()]) - -trans={ "h1" : "_".join([str(x) for x in trans_hidden_size]), - "inside_activation" : trans_input_activation, - "out_activation" : trans_output_activation, - "do_dropout": "_".join([str(x) for x in trans_do]), - "loss" : trans_loss, - "epochs" : trans_epochs , - "batch_size" : trans_batch_size, - "patience" : trans_patience, - "sgd" : trans_sgd_repr} - -mlp_h = [ 300 , 300 ] -mlp_loss ="categorical_crossentropy" -mlp_dropouts = [0,0,0,0] -mlp_sgd = Adam(0.0001) -mlp_epochs = 1000 -mlp_batch_size = 8 -mlp_input_activation = "relu" -mlp_output_activation = "softmax" - -try : - mlp_sgd_repr=mlp_sgd.get_config()["name"] -except AttributeError : - mlp_sgd_repr=mlp_sgd - - - -mlp={ "h1" : "_".join([str(x) for x in mlp_h ]), - "inside_activation" : mlp_input_activation, - "out_activation" : mlp_output_activation, - "do_dropout": "_".join([str(x) for x in mlp_dropouts]), - "loss" : mlp_loss, - "epochs" : mlp_epochs , - "batch_size" : mlp_batch_size, - "sgd" : mlp_sgd_repr} - -params = { "ae":ae, "trans":trans, "mlp":mlp} 
+json_conf =json.load(open(sys.argv[3])) + +dsae_conf = json_conf["dsae"] + +hidden_size= dsae_conf["hidden_size"] +input_activation=dsae_conf["input_activation"] +output_activation=dsae_conf["output_activation"] +loss=dsae_conf["loss"] +epochs=dsae_conf["epochs"] +batch_size=dsae_conf["batch"] +patience=dsae_conf["patience"] +do_do=dsae_conf["do"] +try: + k = dsae_conf["sgd"] + if dsae_conf["sgd"]["name"] == "adam": + sgd = Adam(lr=dsae_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) + elif dsae_conf["sgd"]["name"] == "sgd": + sgd = SGD(lr=dsae_conf["sgd"]["lr"]) +except: + sgd = dsae_conf["sgd"] + +trans_conf = json_conf["dsae"]["transform"] +trans_hidden_size=trans_conf["hidden_size"] +trans_input_activation=trans_conf["input_activation"] +trans_output_activation=trans_conf["output_activation"] +trans_loss=trans_conf["loss"] +trans_epochs=trans_conf["epochs"] +trans_batch_size=trans_conf["batch"] +trans_patience=trans_conf["patience"] +trans_do=trans_conf["do"] +try: + k = trans_conf["sgd"] + if trans_conf["sgd"]["name"] == "adam": + trans_sgd = Adam(lr=trans_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) + elif trans_conf["sgd"]["name"] == "sgd": + trans_sgd = SGD(lr=trans_conf["sgd"]["lr"]) +except e : + trans_sgd = trans_conf["sgd"] + + +mlp_conf = json_conf["mlp"] +mlp_h = mlp_conf["hidden_size"] +mlp_loss = mlp_conf["loss"] +mlp_dropouts = mlp_conf["do"] +mlp_epochs = mlp_conf["epochs"] +mlp_batch_size = mlp_conf["batch"] +mlp_input_activation=mlp_conf["input_activation"] +mlp_output_activation=mlp_conf["output_activation"] try: - os.mkdir("{}/DSAE_{}".format(in_dir,name)) + k = mlp_conf["sgd"] + if mlp_conf["sgd"]["name"] == "adam": + mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) + elif mlp_conf["sgd"]["name"] == "sgd": + mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"]) +except: + mlp_sgd = mlp_conf["sgd"] + + +name = json_conf["name"] +try: + os.mkdir("{}/{}".format(in_dir,name)) except: pass -db = shelve.open("{}/DSAE_{}/ae_model.shelve".format(in_dir,name),writeback=True) +db = shelve.open("{}/{}/ae_model.shelve".format(in_dir,name),writeback=True) # -json.dump(params, - open("{}/DSAE_{}/ae_model.json".format(in_dir,name),"w"), - indent=4) keys = ["ASR","TRS"] @@ -144,7 +122,6 @@ for layer in res_tuple_ASR[0]: db["DSAE"][mod] = mlp_res_list mod = "TRS" -print hidden_size res_tuple_TRS = train_ae(infer_model["LDA"][mod]["TRAIN"], infer_model["LDA"][mod]["DEV"], infer_model["LDA"][mod]["TEST"], @@ -173,16 +150,16 @@ transfert = [] print " get weight trans" -for asr_pred, trs_pred in zip(res_tuple_ASR[0], res_tuple_TRS[0]): - print "ASR", [ x.shape for x in asr_pred] +#for asr_pred, trs_pred in zip(res_tuple_ASR[0], res_tuple_TRS[0]): + # print "ASR", [ x.shape for x in asr_pred] - print "TRS", [ x.shape for x in trs_pred] - print + # print "TRS", [ x.shape for x in trs_pred] for asr_pred, trs_pred in zip(res_tuple_ASR[0], res_tuple_TRS[0]): - print "ASR", [ x.shape for x in asr_pred] + # print "ASR", [ x.shape for x in asr_pred] - print "TRS", [ x.shape for x in trs_pred] + # print "TRS", [ x.shape for x in trs_pred] + # print " TRANS SGD", trans_sgd transfert.append( train_ae(asr_pred[0], asr_pred[1], asr_pred[2], @@ -217,13 +194,13 @@ db["DSAE"]["transfert"] = mlp_res_bylvl print " FT " WA = res_tuple_ASR[1] -print "WA", len(WA), [ len(x) for x 
in WA] +#print "WA", len(WA), [ len(x) for x in WA] WT = res_tuple_TRS[1] -print "WT", len(WT), [ len(x) for x in WT] +#print "WT", len(WT), [ len(x) for x in WT] Wtr = [ x[1] for x in transfert] -print "Wtr", len(Wtr), [ len(x) for x in Wtr],[ len(x[1]) for x in Wtr] +#print "Wtr", len(Wtr), [ len(x) for x in Wtr],[ len(x[1]) for x in Wtr] ft_res = ft_dsae(infer_model["LDA"]["ASR"]["TRAIN"], infer_model["LDA"]["ASR"]["DEV"], diff --git a/LDA/04e-mm_vae.py b/LDA/04e-mm_vae.py index 4cbb650..7818868 100644 --- a/LDA/04e-mm_vae.py +++ b/LDA/04e-mm_vae.py @@ -1,9 +1,5 @@ # coding: utf-8 - -# In[2]: - -# Import import gensim from scipy import sparse import itertools @@ -26,71 +22,64 @@ in_dir = sys.argv[1] #['ASR', 'TRS', 'LABEL'] # In[6]: +json_conf =json.load(open(sys.argv[3])) +vae_conf = json_conf["vae"] + +hidden_size= vae_conf["hidden_size"] +input_activation=vae_conf["input_activation"] +output_activation=vae_conf["output_activation"] +epochs=vae_conf["epochs"] +batch=vae_conf["batch"] +patience=vae_conf["patience"] +latent_dim = vae_conf["latent"] +try: + k = vae_conf["sgd"] + if vae_conf["sgd"]["name"] == "adam": + sgd = Adam(lr=vae_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) + elif vae_conf["sgd"]["name"] == "sgd": + sgd = SGD(lr=vae_conf["sgd"]["lr"]) +except: + sgd = vae_conf["sgd"] + +mlp_conf = json_conf["mlp"] +mlp_h = mlp_conf["hidden_size"] +mlp_loss = mlp_conf["loss"] +mlp_dropouts = mlp_conf["do"] +mlp_epochs = mlp_conf["epochs"] +mlp_batch_size = mlp_conf["batch"] +mlp_input_activation=mlp_conf["input_activation"] +mlp_output_activation=mlp_conf["output_activation"] + + +try: + k = mlp_conf["sgd"] + if mlp_conf["sgd"]["name"] == "adam": + mlp_sgd = Adam(lr=mlp_conf["sgd"]["lr"])#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) + elif mlp_conf["sgd"]["name"] == "sgd": + mlp_sgd = SGD(lr=mlp_conf["sgd"]["lr"]) +except: + mlp_sgd = mlp_conf["sgd"] + + +name = json_conf["name"] + -hidden_size= [60] -input_activation="tanh" -output_activation="sigmoid" -epochs=300 -batch=1 -patience=60 -sgd = Adam(lr=0.0001)#SGD(lr=0.00001,nesterov=False) #'rmsprop'# Adam(lr=0.00001)#SGD(lr=0.001, momentum=0.9, nesterov=True) -latent_dim = 30 - - - -mlp_h = [ 256 ] -mlp_loss = "categorical_crossentropy" -mlp_dropouts = [] -mlp_sgd = Adam(lr=0.001) -mlp_epochs = 1000 -mlp_batch_size = 16 -mlp_output_activation="softmax" - -try : - sgd_repr=sgd.get_config()["name"] -except AttributeError : - sgd_repr=sgd - -try : - mlp_sgd_repr=mlp_sgd.get_config()["name"] -except AttributeError : - mlp_sgd_repr=mlp_sgd - - -params={ "h1" : "_".join([ str(x) for x in hidden_size ]), - "inside_activation" : input_activation, - "output_activation" : output_activation, - "epochs" : epochs , - "batch_size" : batch, - "patience" : patience, - "sgd" : sgd_repr, - "mlp_h ": "_".join([str(x) for x in mlp_h]), - "mlp_loss ": mlp_loss, - "mlp_dropouts ": "_".join([str(x) for x in mlp_dropouts]), - "mlp_sgd ": mlp_sgd_repr, - "mlp_epochs ": mlp_epochs, - "mlp_batch_size ": mlp_batch_size, - "mlp_output" : mlp_output_activation - } -name = "_".join([ str(x) for x in params.values()]) try: - os.mkdir("{}/VAE_{}".format(in_dir,name)) + os.mkdir("{}/{}".format(in_dir,name)) except: pass -db = shelve.open("{}/VAE_{}/ae_model.shelve".format(in_dir,name),writeback=True) -db["params"] = params + + +db = shelve.open("{}/{}/ae_model.shelve".format(in_dir,name),writeback=True) 
db["LABEL"]=infer_model["LABEL"] # -json.dump(params, - open("{}/VAE_{}/ae_model.json".format(in_dir,name),"w"), - indent=4) keys = ["ASR","TRS"] db["VAE"] = {} db["LDA"] = {} for mod in keys : - print mod + #print mod db["LDA"][mod] = train_mlp(infer_model["LDA"][mod]["TRAIN"],infer_model["LABEL"][mod]["TRAIN"], infer_model["LDA"][mod]["DEV"],infer_model["LABEL"][mod]["DEV"], infer_model["LDA"][mod]["TEST"],infer_model["LABEL"][mod]["TEST"], diff --git a/LDA/mlp.py b/LDA/mlp.py deleted file mode 120000 index bfa3a4b..0000000 --- a/LDA/mlp.py +++ /dev/null @@ -1 +0,0 @@ -../VARIATIONAL/Variational-Autoencoder/mlp.py \ No newline at end of file diff --git a/LDA/mlp.py b/LDA/mlp.py new file mode 100755 index 0000000..7e8e2cb --- /dev/null +++ b/LDA/mlp.py @@ -0,0 +1,238 @@ +# -*- coding: utf-8 -*- +import keras +import numpy as np +#from keras.layers.core import Dense, Dropout, Activation +from keras.optimizers import SGD,Adam +from keras.models import Sequential +from keras.layers import Input, Dense, Dropout +from keras.models import Model +from keras.utils.layer_utils import layer_from_config +from itertools import izip_longest + +import pandas +from collections import namedtuple +from sklearn.metrics import accuracy_score as perf +save_tuple= namedtuple("save_tuple",["pred_train","pred_dev","pred_test"]) + + +def ft_dsae(train,dev,test, + y_train=None,y_dev=None,y_test=None, + ae_hidden=[20],transfer_hidden=[20], + start_weights=None,transfer_weights=None,end_weights=None, + input_activation="tanh", output_activation="tanh", + init="glorot_uniform", + ae_dropouts=[None], transfer_do=[None], + sgd="sgd", loss="mse", patience=5, verbose=0, epochs=5, batch_size=8): + + if not start_weights : + start_weights = [ None ] * len(ae_hidden) + if not transfer_weights : + transfer_weights = [None ] * len(transfer_hidden) + if not end_weights : + end_weights = [ None ] * len(end_weights) + if not transfer_do : + transfer_do = [0] * len(transfer_hidden) + predict_y = True + if y_train is None or y_dev is None or y_test is None : + y_train = train + y_dev = dev + y_test = test + predict_y = False + param_predict = [ train, dev, test ] + if predict_y : + param_predict += [ y_train, y_dev ,y_test ] + + pred_by_level = [] # Contient les prediction par niveaux de transfert + layers = [Input(shape=(train.shape[1],))] + #for w in transfer_weights: + #print "TW",[ [ y.shape for y in x ] for x in w] + #print "SW",[ [ y.shape for y in x] for x in start_weights] + #print "EW",[ [ y.shape for y in x ] for x in end_weights] + for cpt in range(1,len(ae_hidden)): + #print ae_hidden,cpt + #print cpt, "before" + #print "before2", [ [ x.shape for x in y] for y in start_weights[:cpt] ] + #print "before3", [ [ x.shape for x in y] for y in transfer_weights[cpt]] + #print "before4", [ [ x.shape for x in y] for y in end_weights[cpt:]] + sizes = ae_hidden[:cpt] + transfer_hidden + ae_hidden[cpt:] + weights = start_weights[:cpt] + transfer_weights[(cpt-1)] + end_weights[cpt:] + #print "SIZES", sizes + #print "AW",[ [ y.shape for y in x ] for x in weights] + #print "WEI", len(weights) , [ len(x) for x in weights ] + if len(ae_dropouts) == len(ae_hidden): + do = ae_dropouts[:cpt] + transfer_do + ae_dropouts[cpt:] + else : + do = [ 0 ] * (len(ae_hidden) + len(transfer_hidden)) + for w in weights[:-1]: + #print "STEP", size + layers.append(Dense(w[1].shape[0],activation=input_activation,init=init,weights=w)(layers[-1])) + if do : + d = do.pop(0) + if d > 0 : + layers.append(Dropout(d)(layers[-1])) + + 
layers.append(Dense(y_train.shape[1],activation=output_activation)(layers[-1])) + models = [Model(input=layers[0] , output=x) for x in layers[1:]] + models[-1].compile(optimizer=sgd,loss=loss) + models[-1].fit(train,y_train,nb_epoch=epochs,batch_size=batch_size,callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, verbose=0)],validation_data=(dev,dev),verbose=verbose) + predictions = [ [x.predict(y) for y in param_predict ] for x in models ] + pred_by_level.append(predictions) + + return pred_by_level + +def train_mlp(x_train,y_train,x_dev,y_dev,x_test,y_test,hidden_size,input_activation="relu",hidden_activation="relu",output_activation="softmax",loss="mse",init="glorot_uniform",dropouts=None,sgd=None,epochs=1200,batch_size=16,fit_verbose=1,test_verbose=0,save_pred=False,keep_histo=False): + + + layers = [Input(shape=(x_train.shape[1],))] + + for h in hidden_size: + if dropouts: + d = dropouts.pop(0) + if d > 0 : + layers.append(Dropout(d)(layers[-1])) + + layers.append(Dense(h,init=init,activation=input_activation)(layers[-1])) + #if dropouts: + # drop_prob=dropouts.pop(0) + # if drop_prob > 0: + # model.add(Dropout(drop_prob)) + + #if dropouts: + # drop_prob=dropouts.pop(0) + # if drop_prob > 0: + # model.add(Dropout(drop_prob)) + + #if dropouts: + # model.add(Dropout(dropouts.pop(0))) + if dropouts: + d = dropouts.pop(0) + if d > 0 : + layers.append(Dropout(d)(layers[-1])) + + layers.append(Dense( y_train.shape[1],activation=output_activation,init=init)(layers[-1])) + + model = Model(layers[0] , layers[-1]) + if not sgd: + sgd = SGD(lr=0.01, decay=0, momentum=0.9) + + model.compile(loss=loss, optimizer=sgd,metrics=['accuracy']) + + scores_dev=[] + scores_test=[] + scores_train=[] + save=None + for i in range(epochs): + hist=model.fit(x_train, y_train, nb_epoch=1, batch_size=batch_size,verbose=fit_verbose,validation_data=(x_dev,y_dev)) + pred_train=model.predict(x_train) + pred_dev=model.predict(x_dev) + pred_test=model.predict(x_test) + + scores_train.append(perf(np.argmax(y_train,axis=1),np.argmax(pred_train,axis=1))) + scores_dev.append(perf(np.argmax(y_dev,axis=1),np.argmax(pred_dev,axis=1))) + scores_test.append(perf(np.argmax(y_test,axis=1),np.argmax(pred_test,axis=1))) + if fit_verbose : + print "{} {} {} {}".format(i,scores_train[-1],scores_dev[-1],scores_test[-1]) + if save is None or (len(scores_dev)>2 and scores_dev[-1] > scores_dev[-2]): + save=save_tuple(pred_train,pred_dev,pred_test) + arg_dev = np.argmax(scores_dev) + best_dev=scores_dev[arg_dev] + best_test=scores_test[arg_dev] + max_test=np.max(scores_test) + if fit_verbose: + print " res : {} {} {}".format(best_dev,best_test,max_test) + + res=[scores_train,scores_dev,scores_test] + if save_pred: + res.append(save) + if keep_histo: + res.append(hist) + return res + +def train_ae(train,dev,test,hidden_sizes,y_train=None,y_dev=None,y_test=None,dropouts=None,input_activation="tanh",output_activation="tanh",loss="mse",sgd=None,epochs=500,batch_size=8,verbose=1,patience=20,get_weights=False,set_weights=[]): + + input_vect = Input(shape=(train.shape[1],)) + + previous = [input_vect] + + if dropouts is None: + dropouts = [ 0 ] * (len(hidden_sizes) +1) + if sgd is None : + sgd = SGD(lr=0.01, decay=0, momentum=0.9) + did_do = False + if dropouts : + d = dropouts.pop(0) + if d : + previous.append(Dropout(d)(previous[-1])) + did_do = True + + for h_layer,weight_layer in izip_longest(hidden_sizes,set_weights,fillvalue=None) : + # ,weights=w + if weight_layer : + w = weight_layer[0] + else : + w = None + 
#print "ADD SIZE" , h_layer + if did_do : + p = previous.pop() + did_do = False + else : + p = previous[-1] + previous.append(Dense(h_layer,activation=input_activation,weights=w)(previous[-1])) + if dropouts: + d = dropouts.pop(0) + if d : + previous.append(Dropout(d)(previous[-1])) + did_do = True + + predict_y = True + if y_train is None or y_dev is None or y_test is None : + y_train = train + y_dev = dev + y_test = test + predict_y = False + previous.append(Dense(y_train.shape[1],activation=output_activation)(previous[-1])) + models = [Model(input=previous[0] , output=x) for x in previous[1:]] + print "MLP", sgd, loss + models[-1].compile(optimizer=sgd,loss=loss) + models[-1].fit(train,y_train,nb_epoch=epochs,batch_size=batch_size,callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, verbose=0)],validation_data=(dev,dev),verbose=verbose) + param_predict = [ train, dev, test ] + if predict_y : + param_predict += [ y_train, y_dev ,y_test ] + predictions = [ [x.predict(y) for y in param_predict ] for x in models ] + if get_weights : + weights = [ x.get_weights() for x in models[-1].layers if x.get_weights() ] + return ( predictions , weights ) + else : + return predictions + +def train_sae(train,dev,test,hidden_sizes,y_train=None,y_dev=None,y_test=None,dropouts=None,input_activation="tanh",output_activation="tanh",loss="mse",sgd=None,epochs=500,batch_size=8,verbose=1,patience=20): + + weights = [] + predictions = [[(train,dev,test),()]] + ft_pred = [] + past_sizes = [] + + + for size in hidden_sizes : + #print "DO size " , size , "FROM" , hidden_sizes + res_pred, res_wght = train_ae(predictions[-1][-2][0], predictions[-1][-2][1],predictions[-1][-2][2],[size], + dropouts=dropouts, input_activation=input_activation, + output_activation=output_activation, loss=loss, sgd=sgd, + epochs=epochs, batch_size=batch_size, verbose=verbose, + patience=patience,get_weights=True) + past_sizes.append(size) + weights.append(res_wght) + predictions.append(res_pred) + #print "FINE TUNE " + res_ftpred = train_ae(train,dev,test,past_sizes,y_train=y_train,y_dev=y_dev,y_test=y_test, + dropouts=dropouts, + input_activation=input_activation, + output_activation=output_activation, + loss=loss,sgd=sgd,epochs=epochs, + batch_size=batch_size,verbose=verbose,patience=patience, + set_weights=weights) + ft_pred.append(res_ftpred) + + return ( predictions[1:] , ft_pred) + + diff --git a/LDA/mlp_mmf1.sh b/LDA/mlp_mmf1.sh index 37480be..3fa0df9 100644 --- a/LDA/mlp_mmf1.sh +++ b/LDA/mlp_mmf1.sh @@ -1,7 +1,7 @@ -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/193/ MM_features/data_w20/mmf_193.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/194/ MM_features/data_w20/mmf_194.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/195/ MM_features/data_w20/mmf_195.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/196/ MM_features/data_w20/mmf_196.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/197/ MM_features/data_w20/mmf_197.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/198/ MM_features/data_w20/mmf_198.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/393/ 
MM_features/data_w20/mmf_393.shelve >> output_v7/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/193/ MM_features/data_w99/mmf_193.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/194/ MM_features/data_w99/mmf_194.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/195/ MM_features/data_w99/mmf_195.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/196/ MM_features/data_w99/mmf_196.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/197/ MM_features/data_w99/mmf_197.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/198/ MM_features/data_w99/mmf_198.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/393/ MM_features/data_w99/mmf_393.shelve >> output_v8/recap.txt diff --git a/LDA/mlp_mmf2.sh b/LDA/mlp_mmf2.sh index c7d8a42..d350f50 100644 --- a/LDA/mlp_mmf2.sh +++ b/LDA/mlp_mmf2.sh @@ -1,7 +1,7 @@ -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/394/ MM_features/data_w20/mmf_394.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/395/ MM_features/data_w20/mmf_395.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/396/ MM_features/data_w20/mmf_396.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/397/ MM_features/data_w20/mmf_397.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/43/ MM_features/data_w20/mmf_43.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/44/ MM_features/data_w20/mmf_44.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/45/ MM_features/data_w20/mmf_45.shelve >> output_v7/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/394/ MM_features/data_w99/mmf_394.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/395/ MM_features/data_w99/mmf_395.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/396/ MM_features/data_w99/mmf_396.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/397/ MM_features/data_w99/mmf_397.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/43/ MM_features/data_w99/mmf_43.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/44/ MM_features/data_w99/mmf_44.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/45/ MM_features/data_w99/mmf_45.shelve >> output_v8/recap.txt diff --git a/LDA/mlp_mmf3.sh b/LDA/mlp_mmf3.sh index 4b2c1e1..a7a85a1 100644 --- a/LDA/mlp_mmf3.sh +++ b/LDA/mlp_mmf3.sh @@ -1,7 +1,7 @@ -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/46/ MM_features/data_w20/mmf_46.shelve >> 
output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/47/ MM_features/data_w20/mmf_47.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/48/ MM_features/data_w20/mmf_48.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/493/ MM_features/data_w20/mmf_493.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/494/ MM_features/data_w20/mmf_494.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/495/ MM_features/data_w20/mmf_495.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v7/496/ MM_features/data_w20/mmf_496.shelve >> output_v7/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/46/ MM_features/data_w99/mmf_46.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/47/ MM_features/data_w99/mmf_47.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/48/ MM_features/data_w99/mmf_48.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/493/ MM_features/data_w99/mmf_493.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/494/ MM_features/data_w99/mmf_494.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/495/ MM_features/data_w99/mmf_495.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04a-mmdf.py output_v8/496/ MM_features/data_w99/mmf_496.shelve >> output_v8/recap.txt diff --git a/LDA/mlp_mmf4.sh b/LDA/mlp_mmf4.sh index 4c8600a..16a493d 100644 --- a/LDA/mlp_mmf4.sh +++ b/LDA/mlp_mmf4.sh @@ -1,8 +1,8 @@ -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/497/ MM_features/data_w20/mmf_497.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/50/ MM_features/data_w20/mmf_50.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/93/ MM_features/data_w20/mmf_93.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/94/ MM_features/data_w20/mmf_94.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/95/ MM_features/data_w20/mmf_95.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/96/ MM_features/data_w20/mmf_96.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/97/ MM_features/data_w20/mmf_97.shelve >> output_v7/recap.txt -THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v7/98/ MM_features/data_w20/mmf_98.shelve >> output_v7/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/497/ MM_features/data_w99/mmf_497.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/50/ MM_features/data_w99/mmf_50.shelve >> output_v8/recap.txt 
+THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/93/ MM_features/data_w99/mmf_93.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/94/ MM_features/data_w99/mmf_94.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/95/ MM_features/data_w99/mmf_95.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/96/ MM_features/data_w99/mmf_96.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/97/ MM_features/data_w99/mmf_97.shelve >> output_v8/recap.txt +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04a-mmdf.py output_v8/98/ MM_features/data_w99/mmf_98.shelve >> output_v8/recap.txt diff --git a/LDA/run_mmf.sh b/LDA/run_mmf.sh new file mode 100644 index 0000000..b4d3924 --- /dev/null +++ b/LDA/run_mmf.sh @@ -0,0 +1,10 @@ +output_dir=$1 +features=$2 +json_conf=$3 +time ( +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04b-mmf_mini_ae.py $output_dir $features $json_conf >> $output_dir/miniae.log & +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04c-mmf_sae.py $output_dir $features $json_conf >> $output_dir/sae.log & +THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python 04d-mmf_dsae.py $output_dir $features $json_conf >> $output_dir/dsae.log & +THEANO_FLAGS=mode=FAST_RUN,device=gpu1,floatX=float32 python 04e-mm_vae.py $output_dir $features $json_conf >> $output_dir/vae.log & +wait +) -- 1.8.2.3
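A note on the new JSON-driven configuration introduced by this patch: 04b-mmf_mini_ae.py, 04c-mmf_sae.py, 04d-mmf_dsae.py and 04e-mm_vae.py now read their hyper-parameters from a JSON file passed as the third argument (json_conf = json.load(open(sys.argv[3]))), and run_mmf.sh forwards that file to all four scripts. The schema itself is not shipped with the patch; the sketch below is only inferred from the keys those scripts access ("name", an "ae"-style block per model, a shared "mlp" block, and an "sgd" entry that may be either a plain optimizer string or a {"name", "lr"} dict, matching the try/except blocks). The numeric values simply mirror the hard-coded defaults that 04b-mmf_mini_ae.py removes; they are illustrative assumptions, not a tested configuration. The other scripts expect analogous "sae", "dsae" (with a nested "transform" block) and "vae" (with a "latent" entry) sections built the same way.

# write_example_conf.py -- hypothetical helper emitting a config whose keys match
# what 04b-mmf_mini_ae.py reads; values copied from the removed hard-coded defaults.
import json

conf = {
    "name": "exp_example",                      # sub-directory created under output_dir
    "ae": {
        "hidden_size": [100, 50, 100],          # auto-encoder layer sizes
        "input_activation": "tanh",
        "output_activation": "tanh",
        "loss": "mse",
        "epochs": 1000,
        "batch": 1,
        "patience": 60,
        "do": [0.0],                            # dropout per layer
        "sgd": {"name": "adam", "lr": 0.000001} # or a plain string such as "rmsprop"
    },
    "mlp": {
        "hidden_size": [150, 150, 150],
        "loss": "categorical_crossentropy",
        "do": [],
        "epochs": 2000,
        "batch": 8,
        "input_activation": "tanh",
        "output_activation": "softmax",
        "sgd": {"name": "adam", "lr": 0.0001}
    }
}

with open("exp_example.json", "w") as f:
    json.dump(conf, f, indent=4)

# Typical invocation afterwards (paths are placeholders):
#   bash run_mmf.sh output_v8/193/ MM_features/data_w99/mmf_193.shelve exp_example.json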