Blame view
BOTTLENECK/04-accuracyscore.py
2.1 KB
d414b83e1 add Botttleneck M... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 |
# coding: utf-8
"""Print accuracy scores for every stored level/modality as CSV on stdout.

Usage: python 04-accuracyscore.py <in_dir> <config.json>

Reads predictions from ``<in_dir>/<name>/labels.shelve`` (``name`` taken from
the JSON config) and scores them against the one-hot LABEL references.
"""

# NOTE(review): most of these imports (gensim, keras, mlp, utils, ...) are
# unused in this script; kept verbatim in case of load-time side effects.
import gensim
from scipy import sparse
import numpy as np
import itertools
from sklearn import preprocessing
from keras.models import Sequential
from keras.optimizers import SGD, Adam
from keras.layers.advanced_activations import ELU, PReLU
from keras.callbacks import ModelCheckpoint
from mlp import *
from sklearn import metrics
from sklearn.preprocessing import LabelBinarizer
import shelve
import pickle
from utils import *
import sys
import os
import json


def _accuracy(ref_one_hot, hyp):
    """Accuracy of predicted class indices ``hyp`` vs one-hot ``ref_one_hot``."""
    return metrics.accuracy_score(np.argmax(ref_one_hot, axis=1), hyp)


in_dir = sys.argv[1]  # experiment root containing <name>/labels.shelve
# Close the config file deliberately instead of leaking the handle.
with open(sys.argv[2]) as conf_file:
    json_conf = json.load(conf_file)
name = json_conf["name"]

db = shelve.open("{}/{}/labels.shelve".format(in_dir, name))
try:
    # Every shelve key except the bookkeeping entries is a scorable level.
    keys = sorted(db.keys())
    keys.remove("IDS")
    keys.remove("transfert")
    keys.remove("LABEL")
    mods = ["ASR", "TRS"]

    # One-hot references; the ASR labels are used for both modalities.
    ref_train = db["LABEL"]["ASR"]["TRAIN"]
    ref_dev = db["LABEL"]["ASR"]["DEV"]
    ref_test = db["LABEL"]["ASR"]["TEST"]

    # Parenthesized single-argument print: identical output on Py2, valid Py3.
    print("name,MOD,level,train,dev,test")
    for mod in mods:
        for lvl in keys:
            preds = db[lvl][mod]
            if "TEST" in preds:
                # presumably TRAIN/DEV are always present when TEST is —
                # TODO(review): confirm against the writer of this shelve
                train_score = _accuracy(ref_train, preds["TRAIN"])
                dev_score = _accuracy(ref_dev, preds["DEV"])
                test_score = _accuracy(ref_test, preds["TEST"])
            else:
                train_score = "ERROR"
                dev_score = "ERROR"
                test_score = "ERROR"
            print(",".join([name, mod, lvl,
                            str(train_score), str(dev_score), str(test_score)]))

    # NOTE(review): unlike the loop above, transfert entries lacking "TRAIN"
    # are silently skipped (no ERROR row). Preserved as-is.
    for level in db["transfert"].keys():
        for layer in db["transfert"][level].keys():
            entry = db["transfert"][level][layer]
            if "TRAIN" in entry.keys():
                train_score = _accuracy(ref_train, entry["TRAIN"])
                dev_score = _accuracy(ref_dev, entry["DEV"])
                test_score = _accuracy(ref_test, entry["TEST"])
                print(",".join([name, "transfert", level + "/" + layer,
                                str(train_score), str(dev_score), str(test_score)]))
finally:
    # Always release the shelve, even if a scoring step raises.
    db.close()