Preface
We have finally reached the end. While going through this last script, though, I realized that I never generated many of the .pkl files it needs, so I will have to fill those in later...
I. main_aggregation.py
The author's GitHub description of this script: run main_aggregation.py to add the different local patches for BAA.
II. Code walkthrough
1. Loading the .pkl files
import pickle
import numpy as np

# Load data
print('...loading training data')
f = open('dataR2.pkl', 'rb')        # .pkl file for the R2 patches
dataR2 = pickle.load(f)
f.close()
f = open('dataR1.pkl', 'rb')        # .pkl file for the R1 patches
dataR1 = pickle.load(f)
f.close()
f = open('dataHand.pkl', 'rb')      # .pkl file for the whole hand
dataHand = pickle.load(f)
f.close()
f = open('data_age.pkl', 'rb')      # the corresponding ages
age = pickle.load(f)
f.close()
f = open('data_gender.pkl', 'rb')   # the corresponding genders
gender = pickle.load(f)
f.close()
data = np.asarray(dataHand, dtype=np.float32)
dataR1 = np.asarray(dataR1, dtype=np.float32)
dataR2 = np.asarray(dataR2, dtype=np.float32)
# Pack the three inputs into one 3-channel image:
# channel 0 keeps the whole hand, channel 1 takes R1, channel 2 takes R2
#data[:,:,:,0] = dataR1[:,:,:,0]
data[:,:,:,1] = dataR1[:,:,:,1]
data[:,:,:,2] = dataR2[:,:,:,2]
print (data.shape)
age = np.asarray(age)
gender = np.asarray(gender)
data /= 255.                 # scale pixels to [0, 1]
gender = 2*(gender-0.5)      # map gender from {0, 1} to {-1, 1}
x_final = []
y_final = []
gender_final = []
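Since the preface mentions that these .pkl files still have to be generated, here is a minimal sketch of how one of them could be produced. This is my own assumption, not code from the repo: it presumes a folder containing only the already-cropped hand images, resized to the 560x560 input size used below, and the folder name is hypothetical.
import os
import pickle
import cv2
import numpy as np

img_dir = 'Hand/'   # hypothetical folder of cropped whole-hand images
imgs = []
for name in sorted(os.listdir(img_dir)):
    img = cv2.imread(os.path.join(img_dir, name))   # BGR, 3 channels
    img = cv2.resize(img, (560, 560))               # match the model input size
    imgs.append(img)
with open('dataHand.pkl', 'wb') as f:
    pickle.dump(np.asarray(imgs, dtype=np.uint8), f)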
2. Randomly splitting the data into test, validation, and training sets
This code is the same as in main_classification.py, covered earlier.
# Shuffle images and split into train, validation and test sets
#random_no = np.random.choice(data.shape[0], size=data.shape[0], replace=False)
random_no = np.arange(data.shape[0])   # was x.shape[0], but x is never defined
np.random.seed(0)                      # fixed seed -> reproducible split
np.random.shuffle(random_no)
for i in random_no:
    x_final.append(data[i,:,:,:])
    y_final.append(age[i])
    gender_final.append(gender[i])
x_final = np.asarray(x_final)
y_final = np.asarray(y_final)
gender_final = np.asarray(gender_final)
print (y_final[:50])
print (gender_final[:50])
k = 500 # Decides split count
x_test = x_final[:k,:,:,:]
y_test = y_final[:k]
gender_test = gender_final[:k]
x_valid = x_final[k:2*k,:,:,:]
y_valid = y_final[k:2*k]
gender_valid = gender_final[k:2*k]
x_train = x_final[2*k:,:,:,:]
y_train = y_final[2*k:]
gender_train = gender_final[2*k:]
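One detail worth calling out: because the shuffle is seeded with 0, the permutation (and hence the train/valid/test membership) is identical on every run, which matters if you want to compare models trained in separate sessions. A tiny sketch of that behaviour:
import numpy as np

idx = np.arange(10)
np.random.seed(0)
np.random.shuffle(idx)
print(idx)   # the same permutation every run, because the seed is fixed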
3. Building the model on pretrained weights
This part is also the same as in main_classification.py; see that article for details.
import keras
import keras.backend as K
from keras.applications.xception import Xception
from keras.layers import Input, Dense, Flatten
from keras.models import Model

# Using Xception with pretrained weights from Imagenet
# (the original comment said VGG19, but the code actually uses Xception)
base_model = Xception(weights='imagenet', include_top=False)
for i,layer in enumerate(base_model.layers):
    print (i,layer.name)
input = Input(shape=(560,560,3),name='input1')
input_gender = Input(shape=(1,),dtype='float32',name='input2')
output = base_model(input)
gender_embedding = Dense(32)(input_gender)   # embed gender into a 32-dim vector
#gender_embedding=Dense(12)(gender_embedding)
#x = keras.layers.MaxPooling2D(pool_size=(5,5))(output)
#x = keras.layers.Conv2D(512,kernel_size=(3,3))(x)
x = keras.layers.Conv2D(256,kernel_size=(3,3))(output)
print (K.int_shape(output))
x = keras.layers.MaxPooling2D(pool_size=(3,3))(x)
print (K.int_shape(x))
x = Flatten()(x)
# Concatenate the image features with the gender embedding
f = keras.layers.Concatenate(axis=1)([x,gender_embedding])
print (K.int_shape(f))
#x = Dense(256, activation='relu')(x)
predictions = Dense(1)(f)   # single output: the predicted bone age
model = Model(inputs=[input,input_gender], outputs=predictions)
for i,layer in enumerate(model.layers):
    print (i,layer.name)
Adam = keras.optimizers.Adam(lr=0.0003,beta_1=0.9,beta_2=0.999)
model.compile(optimizer=Adam, loss='mean_absolute_error', metrics=['MAE'])
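The core idea here, feeding gender in as a second input and concatenating its embedding with the image features before the final regression layer, can be seen in isolation in the toy model below. The layer sizes are shrunk so it builds instantly on CPU and are otherwise my own choices, not the paper's.
from keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, Concatenate
from keras.models import Model

img_in = Input(shape=(32,32,3))      # toy image size, not the real 560x560
gender_in = Input(shape=(1,))
x = Conv2D(8, (3,3))(img_in)
x = MaxPooling2D((3,3))(x)
x = Flatten()(x)
g = Dense(4)(gender_in)              # tiny gender embedding
f = Concatenate(axis=1)([x, g])      # fuse image features with gender
out = Dense(1)(f)                    # regression head for bone age
toy = Model(inputs=[img_in, gender_in], outputs=out)
toy.summary()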
4. Training and saving the weights
The ImageDataGenerator used here has also come up more or less in earlier posts.
from keras.preprocessing.image import ImageDataGenerator

batch_size = 16   # batch_size is never defined in this excerpt; 16 is an assumed value

# Save weights every 30 epochs (period=30)
DataGen = ImageDataGenerator(rotation_range=20,width_shift_range=0.15,height_shift_range=0.15,zoom_range=0.2,horizontal_flip=True)
def Generator(x_train,gender_train,y_train,batch_size):
    # number of batches per epoch (the last one may be smaller)
    loopcount = int(np.ceil(len(y_train)/batch_size))
    i = 0
    while (True):
        if i >= loopcount:   # was `i > loopcount`, which could yield an empty batch
            i = 0
        # i=np.random.randint(0,loopcount)
        x_train_batch = x_train[i*batch_size:(i+1)*batch_size,:,:,:]
        x_train_batch = DataAugment(x_train_batch)   # DataAugment is defined elsewhere in the repo
        gender_train_batch = gender_train[i*batch_size:(i+1)*batch_size]
        y_train_batch = y_train[i*batch_size:(i+1)*batch_size]
        inputs = [x_train_batch,gender_train_batch]
        target = y_train_batch
        yield (inputs, target)
        i = i+1
checkpoint = keras.callbacks.ModelCheckpoint(filepath='weights/weights.{epoch:02d}-{val_loss:.2f}.hdf5',save_weights_only=True,period=30)
# flow() augments the images; the second element of the input list (gender) is passed through unmodified
history = model.fit_generator(DataGen.flow([x_train,gender_train],y_train,batch_size=batch_size),steps_per_epoch=np.ceil(len(y_train)/batch_size),epochs=350,verbose=1,validation_data=([x_valid,gender_valid],y_valid))
#history = model.fit_generator(Generator(x_train,gender_train,y_train,batch_size),steps_per_epoch=np.ceil(len(y_train)/batch_size),epochs=10,verbose=1,validation_data=([x_valid,gender_valid],y_valid))
history = model.fit([x_train,gender_train],y_train,batch_size=batch_size,epochs=80,verbose=1,validation_data=([x_valid,gender_valid],y_valid), callbacks = [checkpoint])
score = model.evaluate([x_test,gender_test], y_test, batch_size=batch_size)
print('Test loss:', score[0])
print('Test MAE:', score[1])
## Visualization
weights = model.layers[-1].get_weights()[0]   # weights of the final Dense layer
print (weights.shape)
#ShowAttentionV1(base_model,'/raid/chenchao/code/BoneAge/BoneAge/data/train/')
#for layer in base_model.layers[:16]:
#    layer.trainable=False
#for layer in base_model.layers:
#    print (layer.name,layer.trainable)
# Fine-tune with a smaller learning rate
Adam = keras.optimizers.Adam(lr=0.0001,beta_1=0.9,beta_2=0.999)
model.compile(optimizer=Adam, loss='mean_absolute_error', metrics=['MAE'])
#history = model.fit_generator(Generator(x_train,gender_train,y_train,batch_size),steps_per_epoch=np.ceil(len(y_train)/batch_size),epochs=30,verbose=1,validation_data=([x_valid,gender_valid],y_valid))
history = model.fit([x_train,gender_train], y_train,batch_size=batch_size,epochs=30,verbose=1,validation_data=([x_valid,gender_valid],y_valid), callbacks = [checkpoint])
score = model.evaluate([x_test,gender_test], y_test, batch_size=batch_size)
print('Test loss:', score[0])
print('Test MAE:', score[1])
#ShowAttentionV1(base_model,'/raid/chenchao/code/BoneAge/BoneAge/data/train/')
# ...and once more with an even smaller learning rate
Adam = keras.optimizers.Adam(lr=0.00001,beta_1=0.9,beta_2=0.999)
model.compile(optimizer=Adam, loss='mean_absolute_error', metrics=['MAE'])
#history = model.fit_generator(Generator(x_train,gender_train,y_train,batch_size),steps_per_epoch=np.ceil(len(y_train)/batch_size),epochs=20,verbose=1,validation_data=([x_valid,gender_valid],y_valid))
history = model.fit([x_train,gender_train],y_train,batch_size=batch_size,epochs=20,verbose=1,validation_data=([x_valid,gender_valid],y_valid), callbacks = [checkpoint])
score = model.evaluate([x_test,gender_test], y_test, batch_size=batch_size)
print('Test loss:', score[0])
print('Test MAE:', score[1])
#ShowAttentionV1(base_model,'/raid/chenchao/code/BoneAge/BoneAge/data/train/')
model.save_weights("model.h5")
with open('history.pkl', 'wb') as f:
    pickle.dump(history.history, f)
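Once training finishes, the saved artifacts can be reloaded later, for example to plot the loss curve or to run inference without retraining. A minimal sketch, assuming `model` has been rebuilt with exactly the architecture above:
import pickle
import matplotlib.pyplot as plt

# Reload the training history saved above
with open('history.pkl', 'rb') as f:
    hist = pickle.load(f)
plt.plot(hist['loss'], label='train MAE')     # the loss is mean absolute error
plt.plot(hist['val_loss'], label='valid MAE')
plt.legend()
plt.show()

# Reload the trained weights into the freshly rebuilt model
model.load_weights("model.h5")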
Summary
This code is largely the same as the main_classification.py discussed earlier; the difference is that it brings in the three parts R1, R2, and Hand for training / feature extraction, which makes BAA perform better.
Addendum
How are the R1, R2, and Hand images mentioned in this part generated? For R1 and Hand, look at crop_patches.py: there is one line of code where changing a single value is all it takes.
ret,mask = cv2.threshold(heatmap,40,255,cv2.THRESH_BINARY)
The value 40 generates R1; change it to 10 to generate the Hand image.
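In other words, the binarization threshold decides how much of the attention heatmap survives into the mask: a high threshold keeps only the hottest region (R1), a low one keeps almost the whole hand. A small sketch of that, assuming heatmap is the single-channel uint8 attention map used in crop_patches.py (the file name here is hypothetical):
import cv2

heatmap = cv2.imread('heatmap.png', cv2.IMREAD_GRAYSCALE)   # hypothetical file
# High threshold -> only the strongest activations survive -> tight R1 mask
ret, mask_r1 = cv2.threshold(heatmap, 40, 255, cv2.THRESH_BINARY)
# Low threshold -> nearly everything survives -> whole-hand mask
ret, mask_hand = cv2.threshold(heatmap, 10, 255, cv2.THRESH_BINARY)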
Generating R2 is more involved. First, change the code at the end of crop_patches.py:
print(path)
# instead of cropping to the patch, keep the full frame and mask out
# everything outside the region of interest
#croped_img = crop(img, mask)
#cv2.imwrite('R2/' + path, croped_img)
MaskImg = maskout(img,mask)
cv2.imwrite('R2/'+path,MaskImg)
The generated images look something like the following...
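I have not pasted the maskout implementation here, but judging by how it is used, a plausible version would zero out every pixel outside the binary mask. This sketch is my own guess at its behaviour, not the repo's code:
import cv2

def maskout(img, mask):
    # Keep pixels where mask is nonzero, black out everything else
    # (my assumed behaviour for the repo's maskout helper)
    return cv2.bitwise_and(img, img, mask=mask)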
Next, feed these images into main_classification.py as input to generate heatmaps, and then use crop_patches.py on those heatmaps to cut out the final R2 patches.
As for why it is done this way, read the paper carefully and you will see; it explains this very clearly.