# DECODA_binary_BOW_AE_REALSPE_TANH_MODELS.py
# coding: utf-8

# In[2]:

# Imports
# stdlib
import codecs
import itertools
import pickle
import shelve
import sys

# third-party
import gensim
import nltk
import pandas
from scipy import sparse
import scipy.sparse
import scipy.io
import sklearn.metrics
from sklearn import preprocessing
from sklearn.feature_extraction.text import CountVectorizer
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation,AutoEncoder
from keras.optimizers import SGD,Adam
from keras.layers import containers
from keras.callbacks import EarlyStopping

# local (Alignement helpers)
import mlp
from mlp import *
from utils import *
# In[4]:

db=shelve.open("{}.shelve".format(sys.argv[2]),writeback=True)
#['vocab', 'ASR_SPARSE', 'TRS_SPARSE', 'LABEL']
# In[6]:
# In[10]:
print "making sparse data"
sparse_corp=shelve.open("{}.shelve".format(sys.argv[1]))
ASR_sparse=sparse_corp["ASR"]
TRS_sparse=sparse_corp["TRS"]
db["LABEL"] = sparse_corp["LABEL"]
db["ASR"] = ASR_sparse
db["TRS"] = TRS_sparse
# In[11]:
#z.apply(select)
# --- Auto-encoder hyper-parameters ---
# NOTE(review): 3096 is an unusual width — possibly a typo for 4096; confirm.
hidden_size=3096
hidden_size2=2048
input_activation="relu"
out_activation="relu"
loss="mse"
# Epoch cap; early stopping (patience below) usually ends training sooner.
epochs=1000
batch=64
# Epochs without val_loss improvement tolerated before stopping.
patience=40

print "gogo autoencoder ASR"
sgd = 'adam'#SGD(lr=0.0001)#( momentum=0.9, nesterov=True)
autoencode=Sequential()
autoencode.add(Dense(hidden_size,input_dim=ASR_sparse["TRAIN"].shape[1],init='glorot_uniform',activation=input_activation))
autoencode.add(Dense(hidden_size2,input_dim=hidden_size,init='glorot_uniform',activation=input_activation))
autoencode.add(Dense(hidden_size,input_dim=hidden_size2,init="glorot_uniform",activation=out_activation))
autoencode.add(Dense(ASR_sparse["DEV"].todense().shape[1],input_dim=hidden_size,init="glorot_uniform",activation=out_activation))

#autoencode.compile(optimizer=sgd,loss=loss)

autoencode.compile(optimizer=sgd,loss=loss)


# In[ ]:

# Train: map ASR bag-of-words onto the matching TRS (manual transcript)
# bag-of-words; early-stop on DEV reconstruction loss.
# Bug fix: the original called keras.callbacks.EarlyStopping, but the bare
# name `keras` was never imported (only from-imports exist), which raises
# NameError — use the directly imported EarlyStopping instead.
autoencode.fit(ASR_sparse["TRAIN"].todense(), TRS_sparse["TRAIN"].todense(),
               nb_epoch=epochs, batch_size=batch,
               callbacks=[EarlyStopping(monitor='val_loss',
                                        patience=patience, verbose=0)],
               validation_data=(ASR_sparse["DEV"].todense(), TRS_sparse["DEV"].todense()),
               verbose=1)


# In[ ]:

# Truncated network reproducing the first three trained layers of the
# auto-encoder (down to the bottleneck and back up to hidden_size): its
# output is the representation stored as "ASR_AE_H2" below.
# get_weights() is hoisted (the original called it three times); weights are
# copied pairwise (kernel, bias), which makes the `init` argument inert —
# the original's mixed 'uniform'/'glorot_uniform' inits were normalized.
auto_decoder = Sequential()
trained = autoencode.get_weights()
auto_decoder.add(Dense(hidden_size, input_dim=ASR_sparse["DEV"].shape[1],
                       init='glorot_uniform', activation=input_activation, weights=trained[:2]))
auto_decoder.add(Dense(hidden_size2, input_dim=hidden_size,
                       init='glorot_uniform', activation=input_activation, weights=trained[2:4]))
auto_decoder.add(Dense(hidden_size, input_dim=hidden_size2,
                       init='glorot_uniform', activation=input_activation, weights=trained[4:6]))
auto_decoder.compile(optimizer=sgd, loss=loss)


# In[77]:

#autoencode.predict(ASR_sparse["DEV"].todense())


# In[ ]:

print "auto encoder et auto decoder asr okay"

ASR_sparse_AE={}
for i in ASR_sparse.keys():
    ASR_sparse_AE[i]=auto_decoder.predict(ASR_sparse[i].todense())
    #TRS_sparse[i]=dico.transform(TRS[i][2])

db["ASR_AE_H2"]=ASR_sparse_AE


# Shallower variant: only the first two trained layers (input -> hidden_size
# -> hidden_size2 bottleneck); its output is stored as "ASR_AE_H1".
# As above: .shape read directly from the sparse matrix (no .todense()),
# get_weights() hoisted, and the inert `init` normalized since the copied
# weights override initialization.
auto_decoder = Sequential()
trained = autoencode.get_weights()
auto_decoder.add(Dense(hidden_size, input_dim=ASR_sparse["DEV"].shape[1],
                       init='glorot_uniform', activation=input_activation, weights=trained[:2]))
auto_decoder.add(Dense(hidden_size2, input_dim=hidden_size,
                       init='glorot_uniform', activation=input_activation, weights=trained[2:4]))
auto_decoder.compile(optimizer=sgd, loss=loss)

# Bottleneck representation for every ASR split.
ASR_sparse_AE_H1 = {}
for split in ASR_sparse.keys():
    ASR_sparse_AE_H1[split] = auto_decoder.predict(ASR_sparse[split].todense())

db["ASR_AE_H1"] = ASR_sparse_AE_H1

# Flush intermediate representations to disk before the final pass.
db.sync()




# In[261]:

#pred_dev= model_TRS_AE.predict(TRS_sparse_AE["DEV"],batch_size=1)

# Final pass: run both corpora through the FULL auto-encoder and keep its
# output-layer reconstructions for every split.
# Assumes ASR_sparse has the same split keys as TRS_sparse.
splits = TRS_sparse.keys()
TRS_AE = dict((s, autoencode.predict(TRS_sparse[s].todense())) for s in splits)
ASR_AE = dict((s, autoencode.predict(ASR_sparse[s].todense())) for s in splits)

db["TRS_AE_OUT"] = TRS_AE
db["ASR_AE_OUT"] = ASR_AE

# Layer-transfer experiments were planned here ("Transfert de couche").
db.sync()
db.close()