class ResNet_50():
    """ResNet-50 image classifier built with Keras.

    Expects a ``datasets/`` directory laid out as::

        datasets/
            train/<Classification_N>/<img>.jpg
            val/<Classification_N>/<img>.jpg
            test/<Classification_N>/<img>.jpg

    Each class folder's position in the directory listing is used as its
    integer label.

    NOTE(review): constructing an instance immediately loads the data,
    trains the network, prints test accuracy and saves the weights (see
    ``__init__``) -- unusual, but kept so existing callers that rely on
    ``ResNet_50()`` doing everything still work.
    """

    def __init__(self):
        super(ResNet_50, self).__init__()
        # Training is triggered on construction (original design; see class
        # docstring).
        self.main()

    def _load_split(self, split_dir, data, labels):
        """Append every image under ``split_dir/<class>/`` to data/labels.

        split_dir: directory containing one sub-folder per class.
        data:      list to which resized 224x224 BGR images are appended.
        labels:    list to which the integer class index is appended.

        Unreadable files are skipped (cv2.imread returns None for
        non-images) instead of crashing in cv2.resize.
        """
        for label, class_name in enumerate(os.listdir(split_dir)):
            class_dir = os.path.join(split_dir, class_name)
            for file_name in os.listdir(class_dir):
                image = cv2.imread(os.path.join(class_dir, file_name))
                if image is None:
                    # Not a readable image (e.g. stray file) -- skip it.
                    continue
                image = cv2.resize(image, (224, 224),
                                   interpolation=cv2.INTER_AREA)
                data.append(image)
                labels.append(label)

    def load_data(self, file_path):
        """Load the train/val/test splits as lists of 224x224 images.

        file_path: dataset root, e.g. './datasets/' (see class docstring
                   for the expected layout).

        Returns: train_data, train_label, val_data, val_label,
                 test_data, test_label (six lists; labels are ints).
        """
        train_data, val_data, test_data = [], [], []
        train_label, val_label, test_label = [], [], []
        self._load_split(os.path.join(file_path, 'train'),
                         train_data, train_label)
        self._load_split(os.path.join(file_path, 'test'),
                         test_data, test_label)
        self._load_split(os.path.join(file_path, 'val'),
                         val_data, val_label)
        return train_data, train_label, \
               val_data, val_label, \
               test_data, test_label

    def reshape_image(self):
        """Convert the raw lists to float32 arrays and one-hot labels.

        Returns the six arrays in the same order as load_data.
        """
        train_data, train_label, val_data, val_label, test_data, test_label = self.load_data('datasets/')
        # The original code cast only val_data to float32; cast every split
        # consistently so the network always sees the same input dtype.
        train_data = np.array(train_data, dtype='float32')
        test_data = np.array(test_data, dtype='float32')
        val_data = np.array(val_data, dtype='float32')
        train_label = np_utils.to_categorical(train_label)
        test_label = np_utils.to_categorical(test_label)
        val_label = np_utils.to_categorical(val_label)
        return train_data, train_label, \
               val_data, val_label, \
               test_data, test_label

    def Conv2d_BN(self, x, nb_filter, kernel_size, strides=(1, 1), padding='same', name=None):
        """Conv2D -> BatchNorm (channel axis) -> ReLU.

        name, when given, is used as '<name>_conv' / '<name>_bn' for the
        two trainable layers.
        """
        if name is not None:
            bn_name = name + '_bn'
            conv_name = name + '_conv'
        else:
            bn_name = None
            conv_name = None
        x = Conv2D(nb_filter, kernel_size, padding=padding, strides=strides, name=conv_name)(x)
        x = BatchNormalization(axis=3, name=bn_name)(x)
        x = Activation('relu')(x)
        return x

    def Conv_Block(self, inpt, nb_filter, kernel_size, strides=(1, 1), with_conv_shortcut=False):
        """Bottleneck residual block: 1x1 -> 3x3 -> 1x1 with skip add.

        nb_filter: three filter counts [f1, f2, f3].
        with_conv_shortcut: project the shortcut with a strided conv when
        the block changes spatial size or channel count; otherwise use the
        identity shortcut.
        """
        x = self.Conv2d_BN(inpt, nb_filter=nb_filter[0], kernel_size=(1, 1), strides=strides, padding='same')
        x = self.Conv2d_BN(x, nb_filter=nb_filter[1], kernel_size=(3, 3), padding='same')
        x = self.Conv2d_BN(x, nb_filter=nb_filter[2], kernel_size=(1, 1), padding='same')
        if with_conv_shortcut:
            shortcut = self.Conv2d_BN(inpt, nb_filter=nb_filter[2], strides=strides, kernel_size=kernel_size)
            return add([x, shortcut])
        return add([x, inpt])

    def creatcnn(self):
        """Build the ResNet-50 graph (3+4+6+3 bottleneck blocks, 2 classes).

        Returns an uncompiled Keras Model taking (224, 224, 3) input.
        """
        inpt = Input(shape=(224, 224, 3))
        x = ZeroPadding2D((3, 3))(inpt)
        x = self.Conv2d_BN(x, nb_filter=64, kernel_size=(7, 7), strides=(2, 2), padding='valid')
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
        # Stage 2: 3 blocks, 256 output channels.
        x = self.Conv_Block(x, nb_filter=[64, 64, 256], kernel_size=(3, 3), strides=(1, 1), with_conv_shortcut=True)
        x = self.Conv_Block(x, nb_filter=[64, 64, 256], kernel_size=(3, 3))
        x = self.Conv_Block(x, nb_filter=[64, 64, 256], kernel_size=(3, 3))
        # Stage 3: 4 blocks, 512 output channels, downsample.
        x = self.Conv_Block(x, nb_filter=[128, 128, 512], kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
        x = self.Conv_Block(x, nb_filter=[128, 128, 512], kernel_size=(3, 3))
        x = self.Conv_Block(x, nb_filter=[128, 128, 512], kernel_size=(3, 3))
        x = self.Conv_Block(x, nb_filter=[128, 128, 512], kernel_size=(3, 3))
        # Stage 4: 6 blocks, 1024 output channels, downsample.
        x = self.Conv_Block(x, nb_filter=[256, 256, 1024], kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
        x = self.Conv_Block(x, nb_filter=[256, 256, 1024], kernel_size=(3, 3))
        x = self.Conv_Block(x, nb_filter=[256, 256, 1024], kernel_size=(3, 3))
        x = self.Conv_Block(x, nb_filter=[256, 256, 1024], kernel_size=(3, 3))
        x = self.Conv_Block(x, nb_filter=[256, 256, 1024], kernel_size=(3, 3))
        x = self.Conv_Block(x, nb_filter=[256, 256, 1024], kernel_size=(3, 3))
        # Stage 5: 3 blocks, 2048 output channels, downsample.
        x = self.Conv_Block(x, nb_filter=[512, 512, 2048], kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
        x = self.Conv_Block(x, nb_filter=[512, 512, 2048], kernel_size=(3, 3))
        x = self.Conv_Block(x, nb_filter=[512, 512, 2048], kernel_size=(3, 3))
        x = AveragePooling2D(pool_size=(7, 7))(x)
        x = Flatten()(x)
        # softmax (not sigmoid): the model is trained with
        # categorical_crossentropy on one-hot labels, which expects a
        # probability distribution over the classes.
        x = Dense(2, activation='softmax')(x)
        model = Model(inputs=inpt, outputs=x)
        return model

    def accuracy(self, list_1, list_2):
        """Fraction of positions where list_1 and list_2 agree.

        Pairs are matched with zip (extra elements in the longer list are
        ignored, as before). Returns 0.0 for empty input instead of
        dividing by zero.
        """
        pairs = list(zip(list_1, list_2))
        if not pairs:
            return 0.0
        correct = sum(1 for a, b in pairs if a == b)
        return correct / len(pairs)

    def main(self):
        """Train the network, print test accuracy and save the weights."""
        train_data, train_label, val_data, val_label, test_data, test_label = self.reshape_image()
        np.random.seed(7)  # fixed seed for reproducibility
        # sgd = SGD(decay=0.0001, momentum=0.9)
        model = self.creatcnn()
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        model.fit(
            train_data,
            train_label,
            batch_size=5,
            epochs=8,
            validation_data=(val_data, val_label),
            verbose=2)
        # model.predict returns per-class probabilities and test_label is
        # one-hot; compare argmax class indices.  (The original compared
        # raw array rows, which raises "truth value of an array is
        # ambiguous" inside accuracy().)
        pred = np.argmax(model.predict(test_data), axis=1)
        true = np.argmax(test_label, axis=1)
        acc = self.accuracy(list(pred), list(true))
        print(acc)
        model.save('ResNet_weight.h5')
# Building a ResNet-50 model.
# Adapted from: blog.csdn.net/Stybill_LV_/article/details/110861874