Layer层自定义
实现方法
class DecayingDropout(Layer):
    """Dropout layer whose keep rate decays geometrically during training.

    Starting from ``initial_keep_rate``, the keep rate is multiplied by
    ``decay_rate`` once every ``decay_interval`` training iterations.  A
    non-trainable ``iterations`` weight (created in ``build``) persists the
    training-step count across batches.
    """

    def __init__(self, initial_keep_rate=1., decay_interval=10000,
                 decay_rate=0.977, noise_shape=None, seed=None, **kwargs):
        super(DecayingDropout, self).__init__(**kwargs)
        # Bug fix: the original accepted these hyperparameters but never
        # stored them, so build()/call() could not compute the decayed
        # keep rate.  Keep them on the instance.
        self.initial_keep_rate = initial_keep_rate
        self.decay_interval = decay_interval
        self.decay_rate = decay_rate
        self.noise_shape = noise_shape
        self.seed = seed
        # Let Keras propagate masks through this layer unchanged.
        self.supports_masking = True

    def build(self, input_shape):
        """Create the persistent, non-trainable iteration counter.

        NOTE(review): the original text dropped the ``def build`` header;
        it is restored here so the weight is created through the standard
        Keras build hook.
        """
        self.iterations = self.add_weight(name='iterations',
                                          shape=(1,),
                                          dtype=K.floatx(),
                                          initializer='zeros',
                                          trainable=False)
        super(DecayingDropout, self).build(input_shape)
# NOTE(review): the original line fused ``call`` methods from several
# unrelated custom layers into one run of text.  They are reconstructed
# below as separate, clearly-labelled examples.  The class names are
# illustrative placeholders — confirm against the original article.


class MaskedMeanPooling(Layer):
    """Example: mean-pool along ``self.axis``, ignoring masked timesteps."""

    def call(self, x, mask=None):
        if mask is not None:
            # Broadcast the (batch, time) mask across the feature dimension.
            mask = K.repeat(mask, x.shape[-1])       # -> (batch, features, time)
            mask = tf.transpose(mask, [0, 2, 1])     # -> (batch, time, features)
            mask = K.cast(mask, K.floatx())
            x = x * mask
            # Divide by the mask sum so padded steps do not dilute the mean.
            return K.sum(x, axis=self.axis) / K.sum(mask, axis=self.axis)
        return K.mean(x, axis=self.axis)


class SimpleDropout(Layer):
    """Example: Keras-style dropout applied only in the training phase."""

    def call(self, inputs, training=None):
        if 0. < self.rate < 1.:
            noise_shape = self._get_noise_shape(inputs)

            def dropped_inputs():
                return K.dropout(inputs, self.rate, noise_shape,
                                 seed=self.seed)

            # Identity at inference time; dropout only while training.
            return K.in_train_phase(dropped_inputs, inputs, training=training)
        # Bug fix: the original fragment fell off the end and implicitly
        # returned None when rate was 0 or 1; a layer must return a tensor.
        return inputs


class MovingAverageUpdateExample(Layer):
    """Example: register moving-average updates for mean/variance.

    NOTE(review): ``mean`` and ``variance`` must be computed from ``inputs``
    earlier in ``call`` — the original fragment omitted that computation,
    so this snippet is not runnable as-is.
    """

    def call(self, inputs, training=None):
        # Bug fix: original signature was ``def call(sekf, x)`` — typo.
        self.add_update([K.moving_average_update(self.moving_mean,
                                                 mean, self.momentum),
                         K.moving_average_update(self.moving_variance,
                                                 variance, self.momentum)],
                        inputs)
        return inputs


class IterationCounterExample(Layer):
    """Example: bump a persistent ``iterations`` weight once per call."""

    def call(self, inputs, training=None):
        # Queue a graph-side increment of the counter created in build().
        self.add_update([K.update_add(self.iterations, [1])], inputs)
        # Bug fix: the original implicitly returned None; pass inputs through.
        return inputs
参考资料
最后更新于