bin/clustering_pvector.py
'''
The goal of this script is to apply a clustering to pvectors in order to find
new classes assigned to each utterance or frame.

These new classes can be used to train new systems, for example replacing the
character classes with the classes computed by the clustering. We hope this
will generate interesting classes that help the system understand the
structure of the voices.

TODO: Change it in such a way as to take a number (1, 2, 3, 4) and calculate
everything needed, like the clustering. Train on the train set, then project
the test set onto this clustering in order to determine which cluster each
item belongs to.
'''

import os
import pickle

import numpy as np
from sklearn.cluster import KMeans


def read_vecfile(filepath, toy_version=False):
    '''Read a vector file and return (metas, vectors) as numpy arrays.'''
    vectors = None
    metas = None
    with open(filepath, "r") as f:
        for i, line in enumerate(f):
            if toy_version and i > 100:
                break
            spl_line = line.split(" ")
            # Allocate the arrays once the dimensions are known.
            if vectors is None:
                vectors = np.empty((0, len(spl_line[1:])), np.float32)
                metas = np.empty((0, len(spl_line[0].split(","))))
            # Then we add the current line to the data.
            metas = np.append(
                metas,
                np.asarray([spl_line[0].split(",")]),
                axis=0)
            vectors = np.append(
                vectors,
                np.asarray([spl_line[1:]], dtype=np.float32),
                axis=0)
    return (metas, vectors)


def read_lstfile(filepath, toy_version=False):
    '''Return the list of metas contained in a list file.'''
    metas = np.empty((0, 4))
    with open(filepath, "r") as f:
        for i, line in enumerate(f):
            if toy_version and i > 100:
                break
            metas = np.append(
                metas,
                np.asarray([line.rstrip().split(",")]),
                axis=0)
    return metas


def save_file(filepath, metas, values=None):
    '''Save a vector file built from metas and (optional) vector values.'''
    with open(filepath, "w") as f:
        for i in range(len(metas)):
            metas_str = ",".join(str(v) for v in metas[i])
            if values is not None:
                try:
                    infos_str = " ".join(str(v) for v in values[i])
                except TypeError:
                    # values[i] is a scalar (e.g. a single cluster id).
                    infos_str = str(values[i])
                f.write(metas_str + " " + infos_str + "\n")
            else:
                f.write(metas_str + "\n")


def index_data(metas, vectors):
    '''Index the data by language (meta[0]) and utterance key (meta[3]).'''
    data = {"en-us": {}, "fr-fr": {}}
    for i, vector in enumerate(vectors):
        meta = metas[i]
        data[meta[0]][meta[3]] = {
            "metas": meta,
            "vector": vector,
        }
    return data


def get_subdata(data, lst):
    '''Retrieve a subset of the base data from a list of metas.'''
    metas = None
    vectors = None
    for meta in lst:
        vector = data[meta[0]][meta[3]]["vector"]
        if metas is None:
            metas = np.empty((0, len(meta)))
            vectors = np.empty((0, len(vector)), np.float64)
        metas = np.append(
            metas,
            np.asarray([data[meta[0]][meta[3]]["metas"]]),
            axis=0)
        vectors = np.append(
            vectors,
            np.asarray([vector]),
            axis=0)
    return metas, vectors
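# ---------------------------------------------------------------------------
# Expected file formats, inferred from the parsing code above (the meaning of
# the individual meta fields is an assumption, except the language in field 0,
# the character class in field 1 and the utterance key in field 3):
#
#   vector file (.txt): one record per line,
#       <lang>,<char>,<...>,<utt-key> <v1> <v2> ... <vN>
#     e.g. "en-us,A,x,utt_0001 0.12 -0.53 0.07"
#
#   list file (.lst): one comma-separated 4-field meta per line,
#       <lang>,<char>,<...>,<utt-key>
# ---------------------------------------------------------------------------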
def apply_clustering(filename, dir_lst, dir_data, suffix_outfile):
    '''
    Apply clustering to the data of `filename`.

    List files are used to split the data into train, val and test subsets.
    The output files are saved with the given suffix. Their existence is
    checked before computing: if both files already exist, the clustering is
    not computed again; if one of them is missing, it is recomputed.

    TODO: Add a variable to force the computation of all the files even if
    they already exist.
    '''
    # Load the full (non-toy) version of the pvector file.
    metas, vectors = read_vecfile(
        os.path.join(dir_data, filename), toy_version=False)
    data = index_data(metas, vectors)

    # Get the train subset (NUMBER is the jackknifing fold, set globally below).
    train_lst = read_lstfile(
        os.path.join(dir_lst, "train_" + str(NUMBER) + ".lst"))
    train_metas, train_vectors = get_subdata(data, train_lst)

    # Get the val subset.
    val_lst = read_lstfile(
        os.path.join(dir_lst, "val_" + str(NUMBER) + ".lst"))
    val_metas, val_vectors = get_subdata(data, val_lst)

    # Get the test subset.
    test_lst = read_lstfile(
        os.path.join(dir_lst, "test_" + str(NUMBER) + ".lst"))
    test_metas, test_vectors = get_subdata(data, test_lst)

    # Sanity-check the shapes.
    print("verif shapes")
    print(train_metas.shape)
    print(val_metas.shape)
    print(test_metas.shape)

    # Fit the k-means on the train set, then project val + test onto it.
    # Ks = [12, 24, 48]
    print("k=[", end="")
    Ks = [6, 12, 24, 48, 64]
    for k in Ks:
        # Build the output file names.
        suffix = "_" + suffix_outfile if suffix_outfile else ""
        k_str = "{:03d}".format(k)  # k as a zero-padded string
        filename_pickle = os.path.join(
            dir_data,
            "clusters_trained_on_train_" + k_str + "_pickle_" + suffix + ".txt")
        filename_clusters = os.path.join(
            dir_data,
            "clusters_trained_on_train_" + k_str + suffix + ".txt")

        # Recompute only if one of the two output files is missing.
        condition = not (
            os.path.exists(filename_pickle)
            and os.path.exists(filename_clusters))

        if condition:
            print(str(k) + ",", end=" ")
            kmeans = KMeans(n_clusters=k, n_init=10, random_state=0).fit(
                train_vectors)
            test_pred = kmeans.predict(
                np.concatenate((val_vectors, test_vectors), axis=0))
            metas_tosave = np.concatenate(
                [train_metas, val_metas, test_metas], axis=0)
            values_tosave = np.concatenate([kmeans.labels_, test_pred], axis=0)
            # Replace the character class by the cluster id.
            metas_tosave[:, 1] = values_tosave
            save_file(filename_clusters, metas_tosave)
            with open(filename_pickle, "wb") as f:
                pickle.dump(kmeans, f)
    print("]")


for NUMBER in range(1, 5):
    print("JACKKNIFING NUMBER: " + str(NUMBER))
    DIR_MAIN = "exp/pvector-1"
    DIR_DATA = os.path.join(DIR_MAIN, str(NUMBER))
    DIR_LST = os.path.join(DIR_MAIN, "lst")

    print("Calculating mass_effect_pvectors")
    apply_clustering(
        "masseffect_pvectors.txt",
        dir_lst=DIR_LST,
        dir_data=DIR_DATA,
        suffix_outfile="")

    print("Calculating mass_effect_pvectors_final")
    apply_clustering(
        "masseffect_pvectors_final.txt",
        dir_lst=DIR_LST,
        dir_data=DIR_DATA,
        suffix_outfile="final")
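# ---------------------------------------------------------------------------
# Usage sketch (not part of the pipeline above): how one of the pickled
# k-means models could be reloaded later to assign clusters to new pvectors.
# The path follows the naming scheme used in apply_clustering(); the vector
# dimension (64) is an assumption and must match the training vectors.
#
#     import pickle
#     import numpy as np
#
#     with open("exp/pvector-1/1/clusters_trained_on_train_024_pickle_.txt",
#               "rb") as f:
#         kmeans = pickle.load(f)
#
#     new_vectors = np.random.rand(5, 64).astype(np.float32)  # hypothetical
#     print(kmeans.predict(new_vectors))  # one cluster id per vector
# ---------------------------------------------------------------------------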