TensorBoard usage

Monitoring the W (weight) parameters:

w = tf.get_default_graph().get_tensor_by_name(self.core.name)
tf.summary.histogram(os.path.split(self.core.name)[0], w)
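
As a side note, here is a minimal self-contained sketch (TF 1.x) that registers a histogram for every trainable variable rather than one hand-picked tensor:

import tensorflow as tf

# One histogram summary per trainable variable; var.name looks like
# 'scope/kernel:0', so strip the ':0' suffix to get a clean tag.
for var in tf.trainable_variables():
    tf.summary.histogram(var.name.split(':')[0], var)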
Monitoring the losses:


    def get_adversarial_loss(self, scope_to_reuse=None):
        """Return the adversarial losses for the generator and the
        discriminator."""
        if self.config['gan']['type'] == 'gan':
            # Discriminator: real samples labeled 1, generated samples labeled 0.
            adv_loss_d = (
                tf.losses.sigmoid_cross_entropy(
                    tf.ones_like(self.D_real.tensor_out),
                    self.D_real.tensor_out)
                + tf.losses.sigmoid_cross_entropy(
                    tf.zeros_like(self.D_fake.tensor_out),
                    self.D_fake.tensor_out))
            # Generator (non-saturating): push D's output on fakes toward 1.
            adv_loss_g = tf.losses.sigmoid_cross_entropy(
                tf.ones_like(self.D_fake.tensor_out),
                self.D_fake.tensor_out)

        if (self.config['gan']['type'] == 'wgan'
                or self.config['gan']['type'] == 'wgan-gp'):
            adv_loss_d = (tf.reduce_mean(self.D_fake.tensor_out)
                          - tf.reduce_mean(self.D_real.tensor_out))
            with tf.name_scope("loss_g"):
                adv_loss_g = -tf.reduce_mean(self.D_fake.tensor_out)
                tf.summary.scalar('adv_loss_g', adv_loss_g)  # scoped tag: loss_g/adv_loss_g
            if self.config['gan']['type'] == 'wgan-gp':
                eps = tf.random_uniform(
                    [tf.shape(self.x_)[0], 1, 1, 1, 1], 0.0, 1.0)
                inter = eps * self.x_ + (1. - eps) * self.G.tensor_out
                if scope_to_reuse is None:
                    D_inter = Discriminator(inter, self.config, name='D',
                                            reuse=True)
                else:
                    with tf.variable_scope(scope_to_reuse, reuse=True):
                        D_inter = Discriminator(inter, self.config, name='D',
                                                reuse=True)
                gradient = tf.gradients(D_inter.tensor_out, inter)[0]
                slopes = tf.sqrt(1e-8 + tf.reduce_sum(
                    tf.square(gradient),
                    tf.range(1, len(gradient.get_shape()))))
                gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0))
                with tf.name_scope("loss_d"):
                    adv_loss_d += (self.config['gan']['gp_coefficient']
                                   * gradient_penalty)
                    tf.summary.scalar('adv_loss_d', adv_loss_d)  # scoped tag: loss_d/adv_loss_d

        return adv_loss_g, adv_loss_d
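
For reference, the wgan-gp branch is the gradient penalty from the WGAN-GP paper: with $\epsilon \sim U[0,1]$ and the interpolate $\hat{x} = \epsilon x + (1-\epsilon)\,G(z)$ (inter in the code), the discriminator loss gains the extra term

$$L_{gp} = \lambda\,\mathbb{E}_{\hat{x}}\Big[\big(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1\big)^2\Big],$$

where $\lambda$ is gp_coefficient in the config. The 1e-8 added inside tf.sqrt keeps the square root differentiable when the gradient norm is zero.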

Saving the summaries:

if TRAIN_CONFIG['training_phase'] == 'pretrain':
    self.merged = tf.summary.merge_all()
    from datetime import datetime
    # One timestamped log directory per run, e.g. tensorboard/<timestamp>_pretrain,
    # plus the graph itself so it shows up in TensorBoard's Graphs tab.
    self.writer = tf.summary.FileWriter(
        "tensorboard/" + datetime.now().strftime("%Y%m%d-%H%M%S") + '_pretrain',
        tf.get_default_graph())
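
Once summaries have been written, start TensorBoard with tensorboard --logdir tensorboard and open http://localhost:6006; each timestamped subdirectory shows up as a separate run.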

During training, write the summaries at every step so progress can be monitored:

result = self.sess.run(self.merged, feed_dict=feed_dict_batch)  # evaluate the merged summary op
# The original int(str(epoch) + str(batch)) does not grow monotonically across
# epochs; epoch * num_batches + batch (num_batches = batches per epoch, named
# here for illustration) keeps the step axis ordered.
self.writer.add_summary(result, epoch * num_batches + batch)
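
Putting the pieces together, here is a minimal self-contained sketch of the whole pattern (TF 1.x; the toy model, num_batches and the log directory are illustrative, not from the original code):

import tensorflow as tf

# Toy graph (illustrative only): a single weight vector and a scalar loss.
w = tf.Variable(tf.random_normal([10]), name='w')
loss = tf.reduce_sum(tf.square(w))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

tf.summary.histogram('w', w)     # weight distribution over time
tf.summary.scalar('loss', loss)  # loss curve
merged = tf.summary.merge_all()  # one op that evaluates every summary

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('tensorboard/demo', sess.graph)
    num_batches = 100  # batches per epoch (illustrative)
    for epoch in range(5):
        for batch in range(num_batches):
            _, result = sess.run([train_op, merged])
            # A monotonically increasing global step keeps the charts ordered.
            writer.add_summary(result, epoch * num_batches + batch)
    writer.close()  # flush remaining events to disk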

Reposted from blog.csdn.net/qq_33266320/article/details/83443436