版权声明:如使用此博客内容,请经过作者同意,谢谢 https://blog.csdn.net/qq_40994943/article/details/87946083
网上找了挺久没有能直接拿来用的,索性自己搭一个吧,精简结构图:
下面上代码:
# AlexNet forward pass (TensorFlow 1.x style, tf.variable_scope / tf.get_variable)
def inference(input_tensor, train, regularizer, num_classes=5):
    """Build the AlexNet graph and return unscaled class logits.

    Args:
        input_tensor: 4-D image batch, NHWC with 3 channels. The flatten size
            below (6*6*256) assumes a ~224x224 input with the strides/paddings
            used here — TODO confirm against the training pipeline.
        train: Python bool; when True, dropout (keep_prob=0.5) is applied to
            both fully-connected layers.
        regularizer: callable applied to each FC weight matrix; its results are
            added to the 'losses' collection. Pass None to disable.
        num_classes: number of output classes (default 5, matching the
            original hard-coded head — backward compatible).

    Returns:
        Logits tensor of shape [batch, num_classes] (no softmax applied).
    """
    # conv1: 11x11, 3 -> 96 channels, stride 4 (NOT 1 — the original inline
    # comment claimed stride 1, which contradicted the code).
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [11, 11, 3, 96],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            "bias", [96], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights,
                             strides=[1, 4, 4, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
        # Local response normalization as in the original AlexNet paper.
        relu1 = tf.nn.local_response_normalization(
            relu1, depth_radius=2, alpha=1e-4, beta=0.75, bias=1, name='norm1')

    with tf.name_scope("layer2-pool1"):
        # 3x3 overlapping max pool, stride 2.
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1], padding="VALID")

    # conv2: 5x5, 96 -> 256 channels.
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable(
            "weight", [5, 5, 96, 256],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            "bias", [256], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
        # Fixed: the original reused name='norm1' here; use a distinct op name.
        relu2 = tf.nn.local_response_normalization(
            relu2, depth_radius=2, alpha=1e-4, beta=0.75, bias=1, name='norm2')

    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1], padding='VALID')

    # conv3: 3x3, 256 -> 384 channels (no pooling/LRN between conv3..conv5).
    with tf.variable_scope("layer5-conv3"):
        conv3_weights = tf.get_variable(
            "weight", [3, 3, 256, 384],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable(
            "bias", [384], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))

    # conv4: 3x3, 384 -> 384 channels.
    # NOTE(review): scope numbering is inconsistent in the original
    # ("layer6" is skipped, "layer8" appears twice below). Scope strings are
    # kept verbatim so existing checkpoints / variable names still resolve.
    with tf.variable_scope("layer7-conv4"):
        conv4_weights = tf.get_variable(
            "weight", [3, 3, 384, 384],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv4_biases = tf.get_variable(
            "bias", [384], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(relu3, conv4_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))

    # conv5: 3x3, 384 -> 256 channels.
    with tf.variable_scope("layer8-conv5"):
        conv5_weights = tf.get_variable(
            "weight", [3, 3, 384, 256],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv5_biases = tf.get_variable(
            "bias", [256], initializer=tf.constant_initializer(0.0))
        conv5 = tf.nn.conv2d(relu4, conv5_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu5 = tf.nn.relu(tf.nn.bias_add(conv5, conv5_biases))

    with tf.name_scope("layer8-pool5"):
        pool5 = tf.nn.max_pool(relu5, ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1], padding='VALID')

    # Flatten the final 6x6x256 feature map for the fully-connected head.
    nodes = 6 * 6 * 256
    reshaped = tf.reshape(pool5, [-1, nodes])

    # fc1: 9216 -> 4096, with optional L2 regularization and dropout.
    with tf.variable_scope('layer9-fc1'):
        fc1_weights = tf.get_variable(
            "weight", [nodes, 4096],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable(
            "bias", [4096], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # fc2: 4096 -> 4096.
    with tf.variable_scope('layer10-fc2'):
        fc2_weights = tf.get_variable(
            "weight", [4096, 4096],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable(
            "bias", [4096], initializer=tf.constant_initializer(0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.5)

    # fc3: 4096 -> num_classes logits (no activation; pair with
    # softmax_cross_entropy_with_logits at the loss).
    with tf.variable_scope('layer11-fc3'):
        fc3_weights = tf.get_variable(
            "weight", [4096, num_classes],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc3_weights))
        fc3_biases = tf.get_variable(
            "bias", [num_classes], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases

    return logit
有问题可评论交流