BiGAN reproducibility

The dataset has 10,000 samples: the first 8,000 are used for training and the rest for testing. Each sample is a 144 × 1 vector (reshaped into a 12 × 12 weight matrix). Relative to the original BiGAN, the fully connected layers inside the encoder, discriminator, and generator are all replaced with convolutional layers.
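
Note that the generator below ends in a tanh activation, so the values in GANData.csv are assumed to be scaled to [-1, 1] before training. A minimal preprocessing sketch (the file name RawData.csv is a placeholder, not part of the original script):

import pandas as pd

# Hypothetical preprocessing: scale each feature to [-1, 1] to match the
# generator's tanh output, then write GANData.csv for train() below.
raw = pd.read_csv('RawData.csv').values.astype('float32')  # expected shape: (10000, 144)
lo, hi = raw.min(axis=0), raw.max(axis=0)
scaled = 2.0 * (raw - lo) / (hi - lo + 1e-8) - 1.0         # per-feature min-max scaling
pd.DataFrame(scaled).to_csv('GANData.csv', index=False)    # header row, as read_csv expects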


from __future__ import print_function, division

from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from pandas import read_csv
import pandas as pd
import numpy as np

class BIGAN():
    def __init__(self):
        self.img_rows = 12
        self.img_cols = 12
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        optimizer = Adam(0.0002, 0.5)  # lr = 2e-4, beta_1 = 0.5

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss=['binary_crossentropy'],
            optimizer=optimizer,
            metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # Build the encoder
        self.encoder = self.build_encoder()

        # In the combined model only the generator and encoder are trained,
        # so freeze the discriminator here
        self.discriminator.trainable = False

        # Generate image from sampled noise
        z = Input(shape=(self.latent_dim, ))
        img_ = self.generator(z)

        # Encode image
        img = Input(shape=self.img_shape)
        z_ = self.encoder(img)

        # Latent -> img is fake, and img -> latent is valid
        fake = self.discriminator([z, img_])
        valid = self.discriminator([z_, img])

        # Set up and compile the combined model
        # Trains generator to fool the discriminator
        self.bigan_generator = Model([z, img], [fake, valid])
        self.bigan_generator.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
            optimizer=optimizer)


    def build_encoder(self):
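        # Maps a 12x12x1 image back to a latent vector z of size latent_dim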
        model = Sequential()

        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(self.latent_dim))

        model.summary()

        img = Input(shape=self.img_shape)
        z = model(img)

        return Model(img, z)

    def build_generator(self):
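        # Maps a latent vector z to a 12x12x1 image (3x3 -> 6x6 -> 12x12 via
        # two UpSampling2D blocks); tanh keeps outputs in [-1, 1]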
        model = Sequential()

        model.add(Dense(64 * 3 * 3, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((3, 3, 64)))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(32, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        z = Input(shape=(self.latent_dim,))
        gen_img = model(z)

        return Model(z, gen_img)

    def build_discriminator(self):
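        # Scores (z, img) pairs jointly: the pair is concatenated, projected
        # to a 14x14 map, and passed through a small convolutional net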

        z = Input(shape=(self.latent_dim, ))
        img = Input(shape=self.img_shape)
        d_in = concatenate([z, Flatten()(img)])

        model = Dense(14*14, activation="relu")(d_in)
        model = Reshape((14, 14, 1))(model)
        model = Conv2D(16, kernel_size=3, strides=2, padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Conv2D(32, kernel_size=3, strides=2, padding="same")(model)
        model = ZeroPadding2D(padding=((0,1),(0,1)))(model)
        model = BatchNormalization(momentum=0.8)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Conv2D(64, kernel_size=3, strides=2, padding="same")(model)
        model = BatchNormalization(momentum=0.8)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Conv2D(128, kernel_size=3, strides=1, padding="same")(model)
        model = BatchNormalization(momentum=0.8)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Flatten()(model)
        validity = Dense(1, activation="sigmoid")(model)
        
        return Model([z, img], validity)

    def train(self, epochs, batch_size=128, sample_interval=50):

        # Load the dataset: the first 8000 rows are for training, the rest for testing
        dataset = read_csv('GANData.csv')
        XY = dataset.values
        n_train = 8000
        x_train = XY[:n_train, :]
        x_test = XY[n_train:, :]
        X_train = x_train.reshape(-1, 12, 12, 1)
        X_test = x_test.reshape(-1, 12, 12, 1)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        FHZ = np.zeros((epochs, 3))  # per-epoch history: [D loss, D accuracy, G loss]
        for epoch in range(epochs):
            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Sample noise and generate img
            z = np.random.normal(size=(batch_size, self.latent_dim))
            imgs_ = self.generator.predict(z)

            # Select a random batch of images and encode
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]
            z_ = self.encoder.predict(imgs)

            # Train the discriminator (img -> z is valid, z -> img is fake)
            d_loss_real = self.discriminator.train_on_batch([z_, imgs], valid)
            d_loss_fake = self.discriminator.train_on_batch([z, imgs_], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Train the generator (z -> img is valid and img -> z is invalid)
            g_loss = self.bigan_generator.train_on_batch([z, imgs], [valid, fake])

            # Plot the progress
            print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0]))
            FHZ[epoch,0]=d_loss[0]
            FHZ[epoch,1]=d_loss[1]
            FHZ[epoch,2]=g_loss[0]
            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_interval(epoch)
        return FHZ

    def sample_interval(self, epoch):
        z = np.random.normal(size=(25, self.latent_dim))  # 25 samples per dump
        gen_imgs = self.generator.predict(z)
        gen_imgs = 0.5 * gen_imgs + 0.5  # rescale from tanh range [-1, 1] to [0, 1]

        decoded_imgs = gen_imgs.reshape((gen_imgs.shape[0], -1))  
        print('decoded_imgs.shape:',decoded_imgs.shape)
        data=decoded_imgs
        data_df = pd.DataFrame(data)
 
        # Write the generated samples to Excel (the file is overwritten at each interval)
        writer = pd.ExcelWriter('Result.xlsx')
        data_df.to_excel(writer, 'page_1', float_format='%.5f')  # float_format controls precision
        writer.save()


if __name__ == '__main__':
    bigan = BIGAN()
    history = bigan.train(epochs=10, batch_size=32, sample_interval=9)
    np.savetxt("d_lossnum.csv", history, delimiter=',')
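
To check convergence after a run, the saved loss history can be plotted. A minimal sketch, assuming the d_lossnum.csv written above (columns: D loss, D accuracy, G loss):

import numpy as np
import matplotlib.pyplot as plt

# Plot the per-epoch training history saved by train()
hist = np.loadtxt('d_lossnum.csv', delimiter=',')
plt.plot(hist[:, 0], label='D loss')
plt.plot(hist[:, 2], label='G loss')
plt.xlabel('epoch')
plt.legend()
plt.savefig('loss_curves.png')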

Source: www.cnblogs.com/nanhaijindiao/p/11592299.html