keras学习笔记(4)

1.gpu配置

# 1. GPU configuration
import tensorflow as tf

# Physical devices visible to the current process.
gpu=tf.config.experimental.list_physical_devices(device_type='GPU')
cpu=tf.config.experimental.list_physical_devices(device_type='CPU')

gpu

cpu

# Restrict this process to the first two GPUs.
tf.config.experimental.set_visible_devices(devices=gpu[0:2],device_type='GPU')

# Alternative way to expose only some GPUs: set the environment variable
# (must happen before TensorFlow initialises the devices).
import os
os.environ['CUDA_VISIBLE_DEVICES']='2,3'

# Enable dynamic (grow-as-needed) GPU memory allocation.
gpus=tf.config.experimental.list_physical_devices(device_type='GPU')
for g in gpus:
    # BUG FIX: the original passed a bare, misspelled token `Tue`; the API is
    # set_memory_growth(device, enable). Loop variable renamed from `gpu` to
    # `g` so it no longer shadows the device list created above.
    tf.config.experimental.set_memory_growth(device=g,enable=True)

# Cap GPU 0 to a fixed 1024 MB slice of memory via a virtual device.
gpus=tf.config.experimental.list_physical_devices(device_type='GPU')
tf.config.experimental.set_virtual_device_configuration(gpus[0],[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])

# 2. 图像语义分割 FCN (image semantic segmentation with a fully convolutional network)

# 1. Fully convolutional network (FCN) — data exploration and preparation
# (Oxford-IIIT-Pet-style layout assumed: ./images holds JPEGs,
#  ./annotations/trimaps holds PNG label masks — TODO confirm dataset.)
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os
import glob

# Peek at the last few annotation file names (notebook-style bare expression).
os.listdir('./annotations/trimaps')[-5:]

img=tf.io.read_file('./annotations/trimaps/Abyssinian_1.png')

img=tf.image.decode_png(img)

img.shape

img=tf.squeeze(img)# squeeze out the size-1 channel dimension so imshow shows it as 2-D

img.shape

plt.imshow(img)

# Check how many distinct pixel values (label classes) the mask contains.
np.unique(img.numpy())

img=tf.io.read_file('./images/Abyssinian_1.jpg')

img=tf.image.decode_jpeg(img)

plt.imshow(img)

# Start organising the data.
images=glob.glob('./images/*.jpg')

images_count=len(images)

images_count

anno=glob.glob('./annotations/trimaps/*.png')

len(anno)

# Shuffle with a fixed seed so the split is reproducible.
np.random.seed(2019)
index=np.random.permutation(images_count)

images=np.array(images)[index]
anno=np.array(anno)[index]# same permutation for both arrays, so image/mask pairs stay aligned

dataset=tf.data.Dataset.from_tensor_slices((images,anno))# elements are (image path, mask path) tuples

# 80/20 train/test split by element count.
test_count=int(images_count*0.2)
train_count=images_count-test_count

test_count,train_count

data_train=dataset.skip(test_count)
data_test=dataset.take(test_count)

#图像预处理 (image preprocessing helpers)
def read_jpg(path):
    """Read the JPEG at *path* and decode it to a 3-channel uint8 tensor."""
    raw = tf.io.read_file(path)
    return tf.image.decode_jpeg(raw, channels=3)
def read_png(path):
    """Read the PNG mask at *path* and decode it to a single-channel uint8 tensor."""
    raw = tf.io.read_file(path)
    return tf.image.decode_png(raw, channels=1)

def normal_img(input_image, input_anno):
    """Scale the image into [-1, 1] and shift mask labels {1,2,3} down to {0,1,2}."""
    scaled_image = tf.cast(input_image, tf.float32) / 127.5 - 1
    shifted_anno = input_anno - 1
    return scaled_image, shifted_anno

def load_images(input_image_path,input_anno_path):
    """Load one (image, mask) pair, resize both to 224x224 and normalise them.

    BUG FIX: the mask is now resized with nearest-neighbour interpolation.
    The default (bilinear) method blends the integer class labels {1,2,3}
    into fractional values along class boundaries, which corrupts the
    sparse-categorical training targets.
    """
    input_image=read_jpg(input_image_path)
    input_anno=read_png(input_anno_path)
    input_image=tf.image.resize(input_image,(224,224))
    # 'nearest' keeps mask values integral; bilinear would invent in-between labels.
    input_anno=tf.image.resize(input_anno,(224,224),method='nearest')
    return normal_img(input_image,input_anno)

# Map the loading/preprocessing function over both splits in parallel.
data_train=data_train.map(load_images,num_parallel_calls=tf.data.experimental.AUTOTUNE)
data_test=data_test.map(load_images,num_parallel_calls=tf.data.experimental.AUTOTUNE)

BATCH_SIZE=8

# Training pipeline: repeat indefinitely (fit uses steps_per_epoch),
# shuffle with a small buffer, then batch. Test data is only batched.
data_train=data_train.repeat().shuffle(100).batch(BATCH_SIZE)
data_test=data_test.batch(BATCH_SIZE)

data_train

# Sanity check: display the first image and its mask from one training batch.
for img,anno in data_train.take(1):
    plt.subplot(1,2,1)
    plt.imshow(tf.keras.preprocessing.image.array_to_img(img[0]))
    plt.subplot(1,2,2)
    plt.imshow(tf.keras.preprocessing.image.array_to_img(anno[0]))

# Use a pretrained network (VGG16 with ImageNet weights, classifier head
# removed) as the FCN encoder.
conv_base=tf.keras.applications.VGG16(weights='imagenet',input_shape=(224,224,3),include_top=False)

conv_base.summary()

conv_base.layers

# A layer can be fetched by name.
conv_base.get_layer('block5_conv3')

conv_base.get_layer('block5_conv3').output

submodel=tf.keras.models.Model(inputs=conv_base.input,outputs=conv_base.get_layer('block5_conv3').output)# sub-model up to block5_conv3, i.e. VGG16 without its final pooling layer

submodel.summary()

# Intermediate layers whose outputs feed the decoder's skip connections.
layer_names=['block5_conv3','block4_conv3','block3_conv3','block5_pool']

# Collect several layer outputs into one list (multi-output model).
layers_output=[conv_base.get_layer(layer_name).output for layer_name in layer_names]

multi_out_model=tf.keras.models.Model(inputs=conv_base.input,outputs=layers_output)

# Freeze the pretrained encoder weights.
multi_out_model.trainable=False

multi_out_model.summary()

inputs=tf.keras.layers.Input(shape=(224,224,3))
out_block5_conv3,out_block4_conv3,out_block3_conv3,out=multi_out_model(inputs)


out.shape,out_block5_conv3.shape,out_block4_conv3.shape,out_block3_conv3.shape

# Decoder: upsample with transposed convolutions back towards the input
# resolution, fusing skip connections from earlier VGG blocks (FCN-style).
x1=tf.keras.layers.Conv2DTranspose(512,3,strides=2,padding='same',activation='relu')(out)


x1.shape

x1=tf.keras.layers.Conv2D(512,3,padding='same',activation='relu')(x1)

x1.shape

# Skip connection: element-wise add with block5_conv3 features.
x2=tf.add(x1,out_block5_conv3)

x2.shape

x2=tf.keras.layers.Conv2DTranspose(512,3,strides=2,padding='same',activation='relu')(x2)
x2=tf.keras.layers.Conv2D(512,3,padding='same',activation='relu')(x2)
# Skip connection with block4_conv3 features.
x3=tf.add(x2,out_block4_conv3)

x3.shape

x3=tf.keras.layers.Conv2DTranspose(256,3,strides=2,padding='same',activation='relu')(x3)
x3=tf.keras.layers.Conv2D(256,3,padding='same',activation='relu')(x3)
# Skip connection with block3_conv3 features.
x4=tf.add(x3,out_block3_conv3)

x4.shape

# Final upsampling stages; 3 output channels = 3 classes, softmax gives
# per-pixel class probabilities.
x5=tf.keras.layers.Conv2DTranspose(128,3,strides=2,padding='same',activation='relu')(x4)
x5=tf.keras.layers.Conv2D(128,3,padding='same',activation='relu')(x5)
prediction=tf.keras.layers.Conv2DTranspose(3,3,strides=2,padding='same',activation='softmax')(x5)

prediction.shape

model=tf.keras.models.Model(inputs=inputs,outputs=prediction)

model.summary()

# Model configuration: integer per-pixel class ids, so use
# sparse_categorical_crossentropy; pixel accuracy as the metric.
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['acc'])

# Model training.
history=model.fit(data_train,epochs=3,steps_per_epoch=train_count//BATCH_SIZE,validation_data=data_test,validation_steps=test_count//BATCH_SIZE)

# Visualise predictions: for `num` samples of one test batch show the input
# image, the ground-truth mask, and the predicted mask side by side.
num=3
for image,mask in data_test.take(1):# take one batch
    pred_mask=model.predict(image)
    pred_mask=tf.argmax(pred_mask,axis=-1)# argmax over class axis -> class id per pixel
    pred_mask=pred_mask[...,tf.newaxis]# restore a channel dim so array_to_img accepts it
    plt.figure(figsize=(10,10))
    for i in range(num):
        plt.subplot(num,3,i*num+1)
        plt.imshow(tf.keras.preprocessing.image.array_to_img(image[i]))
        plt.subplot(num,3,i*num+2)
        plt.imshow(tf.keras.preprocessing.image.array_to_img(mask[i]))
        plt.subplot(num,3,i*num+3)
        plt.imshow(tf.keras.preprocessing.image.array_to_img(pred_mask[i]))
  

猜你喜欢

转载自www.cnblogs.com/Turing-dz/p/13193932.html