#!/usr/bin/env python
# -*- coding: utf-8 -*-

#
# Authors: Chiheb Trabelsi, Olexa Bilaniuk
#
# Note: The implementation of complex Batchnorm is based on
# the Keras implementation of batch normalization
# available here:
# https://github.com/fchollet/keras/blob/master/keras/layers/normalization.py

import numpy as np
from keras.layers import Layer, InputSpec
from keras import initializers, regularizers, constraints
import keras.backend as K


def sqrt_init(shape, dtype=None):
    value = (1 / K.sqrt(K.constant(2))) * K.ones(shape)
    return value


def sanitizedInitGet(init):
    if init in ["sqrt_init"]:
        return sqrt_init
    else:
        return initializers.get(init)


def sanitizedInitSer(init):
    if init in [sqrt_init]:
        return "sqrt_init"
    else:
        return initializers.serialize(init)
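
# Illustrative note (not used by the layer itself): the two helpers above let
# the custom `sqrt_init` initializer round-trip through Keras config
# (de)serialization:
#
#     init = sanitizedInitGet('sqrt_init')   # -> the sqrt_init function
#     name = sanitizedInitSer(init)          # -> 'sqrt_init'
#
# Standard Keras initializer identifiers ('zeros', 'ones', ...) pass straight
# through initializers.get / initializers.serialize.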
+g H," return dict(list(base_config.items()) + list(config.items())) base_config = super(ComplexBatchNormalization, self).get_config() } 'gamma_off_constraint': constraints .serialize(self.gamma_off_constraint), 'gamma_diag_constraint': constraints .serialize(self.gamma_diag_constraint), 'beta_constraint': constraints .serialize(self.beta_constraint), 'gamma_off_regularizer': regularizers.serialize(self.gamma_off_regularizer), 'gamma_diag_regularizer': regularizers.serialize(self.gamma_diag_regularizer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'moving_covariance_initializer': sanitizedInitSer(self.moving_covariance_initializer), 'moving_variance_initializer': sanitizedInitSer(self.moving_variance_initializer), 'moving_mean_initializer': sanitizedInitSer(self.moving_mean_initializer), 'gamma_off_initializer': sanitizedInitSer(self.gamma_off_initializer), 'gamma_diag_initializer': sanitizedInitSer(self.gamma_diag_initializer), 'beta_initializer': sanitizedInitSer(self.beta_initializer), 'scale': self.scale, 'center': self.center, 'epsilon': self.epsilon, 'momentum': self.momentum, 'axis': self.axis, config = { def get_config(self): training=training) normalize_inference, return K.in_train_phase(input_bn, # Pick the normalized form corresponding to the training phase. ) self.gamma_ii, self.scale, self.center, axis=self.axis self.moving_Vri, self.beta, self.gamma_rr, self.gamma_ri, inference_centred, self.moving_Vrr, self.moving_Vii, return ComplexBN( inference_centred = inputs else: inference_centred = inputs - K.reshape(self.moving_mean, broadcast_mu_shape) if self.center: def normalize_inference(): self.add_update(update_list, inputs) update_list.append(K.moving_average_update(self.moving_Vri, Vri, self.momentum)) update_list.append(K.moving_average_update(self.moving_Vii, Vii, self.momentum)) update_list.append(K.moving_average_update(self.moving_Vrr, Vrr, self.momentum)) if self.scale: update_list.append(K.moving_average_update(self.moving_mean, mu, self.momentum)) if self.center: update_list = [] else: return input_bn if training in {0, False}: ) axis=self.axis self.gamma_ii, self.scale, self.center,ad~Si,e  L  Q   ] N $ j T J I a 5 $ xo'8i+j&t-N@~} if center: else: return cat_gamma_4_real * standardized_output + cat_gamma_4_imag * rolled_standardized_output else: return cat_gamma_4_real * standardized_output + cat_gamma_4_imag * rolled_standardized_output + broadcast_beta broadcast_beta = K.reshape(beta, broadcast_beta_shape) if center: rolled_standardized_output = K.concatenate([centred_imag, centred_real], axis=axis) ) 'axis: ' + str(self.axis) + '; ndim: ' + str(ndim) + '.' 'Incorrect Batchnorm combination of axis and dimensions. axis should be either 1 or -1. 


def ComplexBN(input_centred, Vrr, Vii, Vri, beta,
              gamma_rr, gamma_ri, gamma_ii, scale=True,
              center=True, layernorm=False, axis=-1):

    ndim = K.ndim(input_centred)
    input_dim = K.shape(input_centred)[axis] // 2
    if scale:
        gamma_broadcast_shape = [1] * ndim
        gamma_broadcast_shape[axis] = input_dim
    if center:
        broadcast_beta_shape = [1] * ndim
        broadcast_beta_shape[axis] = input_dim * 2

    if scale:
        standardized_output = complex_standardization(
            input_centred, Vrr, Vii, Vri,
            layernorm,
            axis=axis
        )

        # Now we perform the scaling and shifting of the normalized x using
        # the scaling parameter
        #           [ gamma_rr gamma_ri ]
        #   Gamma = [ gamma_ri gamma_ii ]
        # and the shifting parameter
        #    Beta = [beta_real beta_imag].T
        # where:
        # x_real_BN = gamma_rr * x_real_normed + gamma_ri * x_imag_normed + beta_real
        # x_imag_BN = gamma_ri * x_real_normed + gamma_ii * x_imag_normed + beta_imag

        broadcast_gamma_rr = K.reshape(gamma_rr, gamma_broadcast_shape)
        broadcast_gamma_ri = K.reshape(gamma_ri, gamma_broadcast_shape)
        broadcast_gamma_ii = K.reshape(gamma_ii, gamma_broadcast_shape)

        cat_gamma_4_real = K.concatenate([broadcast_gamma_rr, broadcast_gamma_ii], axis=axis)
        cat_gamma_4_imag = K.concatenate([broadcast_gamma_ri, broadcast_gamma_ri], axis=axis)
        if (axis == 1 and ndim != 3) or ndim == 2:
            centred_real = standardized_output[:, :input_dim]
            centred_imag = standardized_output[:, input_dim:]
        elif ndim == 3:
            centred_real = standardized_output[:, :, :input_dim]
            centred_imag = standardized_output[:, :, input_dim:]
        elif axis == -1 and ndim == 4:
            centred_real = standardized_output[:, :, :, :input_dim]
            centred_imag = standardized_output[:, :, :, input_dim:]
        elif axis == -1 and ndim == 5:
            centred_real = standardized_output[:, :, :, :, :input_dim]
            centred_imag = standardized_output[:, :, :, :, input_dim:]
        else:
            raise ValueError(
                'Incorrect Batchnorm combination of axis and dimensions. '
                'axis should be either 1 or -1. '
                'axis: ' + str(axis) + '; ndim: ' + str(ndim) + '.'
            )
        rolled_standardized_output = K.concatenate([centred_imag, centred_real], axis=axis)
        if center:
            broadcast_beta = K.reshape(beta, broadcast_beta_shape)
            return cat_gamma_4_real * standardized_output + cat_gamma_4_imag * rolled_standardized_output + broadcast_beta
        else:
            return cat_gamma_4_real * standardized_output + cat_gamma_4_imag * rolled_standardized_output
    else:
        if center:
            broadcast_beta = K.reshape(beta, broadcast_beta_shape)
            return input_centred + broadcast_beta
        else:
            return input_centred
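
# Illustrative shape walk-through (channels-last 4D input assumed): for a
# tensor of shape (batch, H, W, 2*F), the first F channels hold the real parts
# and the last F the imaginary parts. `ComplexBN` therefore broadcasts each
# gamma component to shape (1, 1, 1, F) and beta to (1, 1, 1, 2*F), and builds
# the rolled tensor [imag, real] so that a single elementwise multiply-add
# realizes the 2x2 matrix product per feature map.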


class ComplexBatchNormalization(Layer):
    """Complex version of the real domain
    batch normalization layer (Ioffe and Szegedy, 2015).

    Normalize the activations of the previous complex layer at each batch,
    i.e. applies a transformation that maintains the mean of a complex unit
    close to the null vector, the 2 by 2 covariance matrix of a complex unit
    close to identity, and the 2 by 2 relation matrix, also called
    pseudo-covariance, close to the null matrix.

    # Arguments
        axis: Integer, the axis that should be normalized
            (typically the features axis).
            For instance, after a `Conv2D` layer with
            `data_format="channels_first"`,
            set `axis=2` in `ComplexBatchNormalization`.
        momentum: Momentum for the moving statistics related to the real and
            imaginary parts.
        epsilon: Small float added to each of the variances related to the
            real and imaginary parts in order to avoid dividing by zero.
        center: If True, add offset of `beta` to the complex normalized tensor.
            If False, `beta` is ignored.
            (beta is formed by real_beta and imag_beta)
        scale: If True, multiply by the `gamma` matrix.
            If False, `gamma` is not used.
        beta_initializer: Initializer for the real_beta and imag_beta weights.
        gamma_diag_initializer: Initializer for the diagonal elements of the gamma matrix,
            which are the variances of the real part and the imaginary part.
        gamma_off_initializer: Initializer for the off-diagonal elements of the gamma matrix.
        moving_mean_initializer: Initializer for the moving means.
        moving_variance_initializer: Initializer for the moving variances.
        moving_covariance_initializer: Initializer for the moving covariance of
            the real and imaginary parts.
        beta_regularizer: Optional regularizer for the beta weights.
        gamma_diag_regularizer: Optional regularizer for the diagonal gamma weights.
        gamma_off_regularizer: Optional regularizer for the off-diagonal gamma weights.
        beta_constraint: Optional constraint for the beta weights.
        gamma_diag_constraint: Optional constraint for the diagonal gamma weights.
        gamma_off_constraint: Optional constraint for the off-diagonal gamma weights.

    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    # Output shape
        Same shape as input.

    # References
        - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
    """

    def __init__(self,
                 axis=-1,
                 momentum=0.9,
                 epsilon=1e-4,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_diag_initializer='sqrt_init',
                 gamma_off_initializer='zeros',
                 moving_mean_initializer='zeros',
                 moving_variance_initializer='sqrt_init',
                 moving_covariance_initializer='zeros',
                 beta_regularizer=None,
                 gamma_diag_regularizer=None,
                 gamma_off_regularizer=None,
                 beta_constraint=None,
                 gamma_diag_constraint=None,
                 gamma_off_constraint=None,
                 **kwargs):
        super(ComplexBatchNormalization, self).__init__(**kwargs)
        self.supports_masking = True
        self.axis = axis
        self.momentum = momentum
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = sanitizedInitGet(beta_initializer)
        self.gamma_diag_initializer = sanitizedInitGet(gamma_diag_initializer)
        self.gamma_off_initializer = sanitizedInitGet(gamma_off_initializer)
        self.moving_mean_initializer = sanitizedInitGet(moving_mean_initializer)
        self.moving_variance_initializer = sanitizedInitGet(moving_variance_initializer)
        self.moving_covariance_initializer = sanitizedInitGet(moving_covariance_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
        self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
        self.gamma_off_constraint = constraints.get(gamma_off_constraint)
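
    # Note on the defaults above (per the deep complex networks formulation):
    # 'sqrt_init' starts gamma_rr and gamma_ii at 1/sqrt(2) with gamma_ri = 0,
    # so the initial affine transform keeps the variance of the modulus of a
    # standardized complex unit at 1 instead of doubling it.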
"""Complex version of the real domain class ComplexBatchNormalization(Layer): return input_centred else: return input_centred + broadcast_beta broadcast_beta = K.reshape(beta, broadcast_beta_shape)ad#7>ZS P W   B s 8 p 3 6e~AGi*L n-}Z76 self.moving_Vii = None self.moving_Vrr = None self.gamma_ri = None self.gamma_ii = None self.gamma_rr = None else: trainable=False) name='moving_Vri', initializer=self.moving_covariance_initializer, self.moving_Vri = self.add_weight(shape=param_shape, trainable=False) name='moving_Vii', initializer=self.moving_variance_initializer, self.moving_Vii = self.add_weight(shape=param_shape, trainable=False) name='moving_Vrr', initializer=self.moving_variance_initializer, self.moving_Vrr = self.add_weight(shape=param_shape, constraint=self.gamma_off_constraint) regularizer=self.gamma_off_regularizer, initializer=self.gamma_off_initializer, name='gamma_ri', self.gamma_ri = self.add_weight(shape=param_shape, constraint=self.gamma_diag_constraint) regularizer=self.gamma_diag_regularizer, initializer=self.gamma_diag_initializer, name='gamma_ii', self.gamma_ii = self.add_weight(shape=param_shape, constraint=self.gamma_diag_constraint) regularizer=self.gamma_diag_regularizer, initializer=self.gamma_diag_initializer, name='gamma_rr', self.gamma_rr = self.add_weight(shape=param_shape, if self.scale: param_shape = (input_shape[self.axis] // 2,) axes={self.axis: dim}) self.input_spec = InputSpec(ndim=len(input_shape), str(input_shape) + '.') 'but the layer received an input with shape ' + 'input tensor should have a defined dimension ' raise ValueError('Axis ' + str(self.axis) + ' of ' if dim is None: dim = input_shape[self.axis] ndim = len(input_shape) def build(self, input_shape): self.gamma_off_constraint = constraints .get(gamma_off_constraint) self.gamma_diag_constraint = constraints .get(gamma_diag_constraint) self.beta_constraint = constraints .get(beta_constraint) self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer) self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer) self.beta_regularizer = regularizers.get(beta_regularizer) self.moving_covariance_initializer = sanitizedInitGet(moving_covariance_initializer) self.moving_variance_initializer = sanitizedInitGet(moving_variance_initializer) self.moving_mean_initializer = sanitizedInitGet(moving_mean_initializer) self.gamma_off_initializer = sanitizedInitGet(gamma_off_initializer) self.gamma_diag_initializer = sanitizedInitGet(gamma_diag_initializer) self.beta_initializer = sanitizedInitGet(beta_initializer)ad{R{Fg { ; -  { [ 0 u 6 U  a )  LUbiwQ-<"zcU{z self.beta, self.gamma_rr, self.gamma_ri, input_centred, Vrr, Vii, Vri, input_bn = ComplexBN( raise ValueError('Error. Both scale and center in batchnorm are set to False.') else: Vri = None Vii = None Vrr = None elif self.center: ) axis=reduction_axes, centred_real * centred_imag, Vri = K.mean( # Vri contains the real and imaginary covariance for each feature map. ) + self.epsilon axis=reduction_axes centred_squared_imag, Vii = K.mean( ) + self.epsilon axis=reduction_axes centred_squared_real, Vrr = K.mean( if self.scale: ) 'axis: ' + str(self.axis) + '; ndim: ' + str(ndim) + '.' 'Incorrect Batchnorm combination of axis and dimensions. axis should be either 1 or -1. 

    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        ndim = len(input_shape)
        reduction_axes = list(range(ndim))
        del reduction_axes[self.axis]
        input_dim = input_shape[self.axis] // 2
        mu = K.mean(inputs, axis=reduction_axes)
        broadcast_mu_shape = [1] * len(input_shape)
        broadcast_mu_shape[self.axis] = input_shape[self.axis]
        broadcast_mu = K.reshape(mu, broadcast_mu_shape)
        if self.center:
            input_centred = inputs - broadcast_mu
        else:
            input_centred = inputs
        centred_squared = input_centred ** 2
        if (self.axis == 1 and ndim != 3) or ndim == 2:
            centred_squared_real = centred_squared[:, :input_dim]
            centred_squared_imag = centred_squared[:, input_dim:]
            centred_real = input_centred[:, :input_dim]
            centred_imag = input_centred[:, input_dim:]
        elif ndim == 3:
            centred_squared_real = centred_squared[:, :, :input_dim]
            centred_squared_imag = centred_squared[:, :, input_dim:]
            centred_real = input_centred[:, :, :input_dim]
            centred_imag = input_centred[:, :, input_dim:]
        elif self.axis == -1 and ndim == 4:
            centred_squared_real = centred_squared[:, :, :, :input_dim]
            centred_squared_imag = centred_squared[:, :, :, input_dim:]
            centred_real = input_centred[:, :, :, :input_dim]
            centred_imag = input_centred[:, :, :, input_dim:]
        elif self.axis == -1 and ndim == 5:
            centred_squared_real = centred_squared[:, :, :, :, :input_dim]
            centred_squared_imag = centred_squared[:, :, :, :, input_dim:]
            centred_real = input_centred[:, :, :, :, :input_dim]
            centred_imag = input_centred[:, :, :, :, input_dim:]
        else:
            raise ValueError(
                'Incorrect Batchnorm combination of axis and dimensions. '
                'axis should be either 1 or -1. '
                'axis: ' + str(self.axis) + '; ndim: ' + str(ndim) + '.'
            )
        if self.scale:
            Vrr = K.mean(
                centred_squared_real,
                axis=reduction_axes
            ) + self.epsilon
            Vii = K.mean(
                centred_squared_imag,
                axis=reduction_axes
            ) + self.epsilon
            # Vri contains the real and imaginary covariance for each feature map.
            Vri = K.mean(
                centred_real * centred_imag,
                axis=reduction_axes,
            )
        elif self.center:
            Vrr = None
            Vii = None
            Vri = None
        else:
            raise ValueError('Error. Both scale and center in batchnorm are set to False.')

        input_bn = ComplexBN(
            input_centred, Vrr, Vii, Vri,
            self.beta, self.gamma_rr, self.gamma_ri,
            self.gamma_ii, self.scale, self.center,
            axis=self.axis
        )
        if training in {0, False}:
            return input_bn
        else:
            update_list = []
            if self.center:
                update_list.append(K.moving_average_update(self.moving_mean, mu, self.momentum))
            if self.scale:
                update_list.append(K.moving_average_update(self.moving_Vrr, Vrr, self.momentum))
                update_list.append(K.moving_average_update(self.moving_Vii, Vii, self.momentum))
                update_list.append(K.moving_average_update(self.moving_Vri, Vri, self.momentum))
            self.add_update(update_list, inputs)

            def normalize_inference():
                if self.center:
                    inference_centred = inputs - K.reshape(self.moving_mean,
                                                           broadcast_mu_shape)
                else:
                    inference_centred = inputs
                return ComplexBN(
                    inference_centred, self.moving_Vrr, self.moving_Vii,
                    self.moving_Vri, self.beta, self.gamma_rr, self.gamma_ri,
                    self.gamma_ii, self.scale, self.center, axis=self.axis
                )

        # Pick the normalized form corresponding to the training phase.
        return K.in_train_phase(input_bn,
                                normalize_inference,
                                training=training)

    def get_config(self):
        config = {
            'axis': self.axis,
            'momentum': self.momentum,
            'epsilon': self.epsilon,
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': sanitizedInitSer(self.beta_initializer),
            'gamma_diag_initializer': sanitizedInitSer(self.gamma_diag_initializer),
            'gamma_off_initializer': sanitizedInitSer(self.gamma_off_initializer),
            'moving_mean_initializer': sanitizedInitSer(self.moving_mean_initializer),
            'moving_variance_initializer': sanitizedInitSer(self.moving_variance_initializer),
            'moving_covariance_initializer': sanitizedInitSer(self.moving_covariance_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_diag_regularizer': regularizers.serialize(self.gamma_diag_regularizer),
            'gamma_off_regularizer': regularizers.serialize(self.gamma_off_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_diag_constraint': constraints.serialize(self.gamma_diag_constraint),
            'gamma_off_constraint': constraints.serialize(self.gamma_off_constraint),
        }
        base_config = super(ComplexBatchNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
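

# Minimal usage sketch (illustrative; the plain real-valued Conv2D below is an
# assumption standing in for a complex convolution layer: its 16 output
# channels are read as 8 real + 8 imaginary feature maps).
if __name__ == '__main__':
    from keras.models import Sequential
    from keras.layers import Conv2D

    model = Sequential([
        Conv2D(16, (3, 3), input_shape=(32, 32, 16)),  # 8 real + 8 imag maps
        ComplexBatchNormalization(axis=-1),
    ])
    model.summary()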