From e4f2dfdffdfd65fd4ac1141d628ee7752a98c11d Mon Sep 17 00:00:00 2001 From: Yang Song Date: Sun, 18 Mar 2018 00:36:18 -0700 Subject: [PATCH] Fix bug in computing gradient penalty This is a shameful bug... Obviously the part I edited was copied directly from the GitHub repo of improved training for Wasserstein GANs. However, in the author's code, the image was first flattened to a matrix, which is why the reduction axis was only [1]. In this implementation, we should use reduction axis [1, 2, 3] because we don't flatten images. --- WGAN_GP.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/WGAN_GP.py b/WGAN_GP.py index 347004c5..f9d4e324 100644 --- a/WGAN_GP.py +++ b/WGAN_GP.py @@ -115,7 +115,7 @@ def build_model(self): interpolates = self.inputs + (alpha * differences) _,D_inter,_=self.discriminator(interpolates, is_training=True, reuse=True) gradients = tf.gradients(D_inter, [interpolates])[0] - slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1])) + slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3])) gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2) self.d_loss += self.lambd * gradient_penalty @@ -264,4 +264,4 @@ def load(self, checkpoint_dir): return True, counter else: print(" [*] Failed to find a checkpoint") - return False, 0 \ No newline at end of file + return False, 0