参考链接:https://www.cnblogs.com/zjutzz/p/6185452.html
stage1:读取caffemodel文件内容
可以使用__str__把模型内容导出为文本,但是这种方法产生的文件中包含blobs(权重数据)等内容,占用很大的空间
import caffe.proto.caffe_pb2 as caffe_pb2# 载入caffe.proto编译生成的caffe_pb2文件
#stage 1:完全模仿protobuf官网例子
# 载入模型
caffemodel_filename = '/home/xuy/py-faster-rcnn/data/imagenet_models/ZF.v2.caffemodel'
ZFmodel = caffe_pb2.NetParameter()
f = open(caffemodel_filename, 'rb')
ZFmodel.ParseFromString(f.read())
f.close()
print help(ZFmodel)
print ZFmodel.name
print ZFmodel.input
# stage 2: dump every field of the caffemodel via its text representation.
# The resulting file is much larger than the caffemodel itself because the
# text form includes all the blobs (learned weights).
caffemodel_filename = '/home/xuy/桌面/code/python/caffe/examples/mnist/lenet_iter_10000.caffemodel'
model = caffe_pb2.NetParameter()
# Context manager guarantees the handle is closed even on a parse error.
with open(caffemodel_filename, 'rb') as f:
    model.ParseFromString(f.read())
save_filename = 'lenet_from_caffemodel_withoutfilter.prototxt'
# save_filename = 'lenet_from_caffemodel_withfilter.prototxt'
with open(save_filename, 'w') as fd:
    fd.write(str(model))  # str(model) is the idiomatic spelling of model.__str__()
# Re-open the file just written and rewrite it in place, dropping the numeric
# contents of every "blobs { ... }" section and every per-layer "phase: TRAIN"
# line.  Uncomment / run this part to post-process the generated prototxt.
with open(save_filename, 'r') as f:
    lines = f.readlines()
# Both handles are managed by `with` so they are closed even if a write fails.
with open(save_filename, 'w') as wr:
    now_have_blobs = False  # True while we are inside a blobs block to delete
    for line in lines:
        content = line.strip('\n')
        if (content == ' blobs {'):
            now_have_blobs = True
        elif (content == ' }' and now_have_blobs == True):
            # Closing brace of the blobs block: stop skipping, and also drop
            # the brace line itself.
            now_have_blobs = False
            continue
        if (content == ' phase: TRAIN'):
            continue
        if (now_have_blobs):
            # Still inside the blobs block: discard the line.
            continue
        else:
            # Anything else is kept verbatim.
            wr.write(content + '\n')
stage3:手动解析caffemodel
优点:节省存储空间,不需要通过model.__str__()产生比caffemodel文件更大的中间文件
# stage 3: walk the caffe.proto fields by hand, skipping default-valued
# parameters as well as the blobs, to reconstruct a compact prototxt.
# Parse ZFnet, the backbone model used by Faster R-CNN.
# Load the model; the network structure plus the caffemodel yields the prototxt.
caffemodel_filename = '/home/xuy/py-faster-rcnn/data/imagenet_models/ZF.v2.caffemodel'
model = caffe_pb2.NetParameter()
# Context manager closes the handle even if parsing raises.
with open(caffemodel_filename, 'rb') as f:
    model.ParseFromString(f.read())
layers = model.layer
# print 'name: ' + model.name#name: ImageNet_Zeiler_spm
layer_id = -1
result_file = 'ZFmodel.prototxt'
# Walk every layer of the parsed NetParameter and append a compact prototxt
# description of it to result_file.  For each parameter group only values that
# differ from the caffe.proto defaults are emitted, and the blobs (learned
# weights) are never written.
for layer in layers:
    layer_id = layer_id + 1
    res = list()
    # name / type
    res.append('layer {')
    res.append(' name: "%s"' % layer.name)
    res.append(' type: "%s"' % layer.type)
    # bottom / top are repeated string fields
    for bottom in layer.bottom:
        res.append(' bottom: "%s"' % bottom)
    for top in layer.top:
        res.append(' top: "%s"' % top)
    # loss_weight values are floats: use %-formatting — the original code
    # concatenated a float to a str, which raises TypeError.
    for loss_weight in layer.loss_weight:
        res.append(' loss_weight: %s' % loss_weight)
    # param: per-blob learning-rate / weight-decay multipliers
    for param in layer.param:
        param_res = list()
        # NOTE(review): proto2 scalar fields are never None, so this test is
        # always true and lr_mult is always emitted (matches the dump below).
        if param.lr_mult is not None:
            param_res.append(' lr_mult: %s' % param.lr_mult)
        if param.decay_mult != 1:  # 1 is the caffe.proto default
            param_res.append(' decay_mult: %s' % param.decay_mult)
        if len(param_res) > 0:
            res.append(' param{')
            res.extend(param_res)
            res.append(' }')
    # lrn_param: emit only values differing from the caffe.proto defaults
    # (local_size=5, alpha=1, beta=0.75, norm_region=0, engine=0).
    if layer.lrn_param is not None:
        lrn_res = list()
        if layer.lrn_param.local_size != 5:
            lrn_res.append(' local_size: %d' % layer.lrn_param.local_size)
        if layer.lrn_param.alpha != 1:
            lrn_res.append(' alpha: %f' % layer.lrn_param.alpha)
        if layer.lrn_param.beta != 0.75:
            lrn_res.append(' beta: %f' % layer.lrn_param.beta)
        NormRegionMapper = {'0': 'ACROSS_CHANNELS', '1': 'WITHIN_CHANNEL'}
        if layer.lrn_param.norm_region != 0:
            lrn_res.append(' norm_region: %s' % NormRegionMapper[str(layer.lrn_param.norm_region)])
        EngineMapper = {'0': 'DEFAULT', '1': 'CAFFE', '2': 'CUDNN'}
        if layer.lrn_param.engine != 0:
            lrn_res.append(' engine: %s' % EngineMapper[str(layer.lrn_param.engine)])
        if len(lrn_res) > 0:
            res.append(' lrn_param{')
            res.extend(lrn_res)
            res.append(' }')
    # include: phase restriction (TRAIN / TEST)
    if len(layer.include) > 0:
        include_res = list()
        includes = layer.include
        phase_mapper = {
            '0': 'TRAIN',
            '1': 'TEST'
        }
        for include in includes:
            if include.phase is not None:
                # Bug fix: list.append takes exactly one argument — the
                # original passed two, raising TypeError.  Build the string.
                include_res.append(' phase: ' + phase_mapper[str(include.phase)])
        if len(include_res) > 0:
            res.append(' include {')
            res.extend(include_res)
            res.append(' }')
    # transform_param
    if layer.transform_param is not None:
        transform_param_res = list()
        if layer.transform_param.scale != 1:
            transform_param_res.append(' scale: %s' % layer.transform_param.scale)
        if layer.transform_param.mirror != False:
            # Bug fix: mirror is a bool, which cannot be concatenated to a
            # str; prototxt spells booleans in lower case.
            transform_param_res.append(' mirror: %s' % str(layer.transform_param.mirror).lower())
        if len(transform_param_res) > 0:
            res.append(' transform_param {')
            res.extend(transform_param_res)
            res.append(' }')
    # data_param
    if layer.data_param is not None and (
            layer.data_param.source != "" or layer.data_param.batch_size != 0 or layer.data_param.backend != 0):
        data_param_res = list()
        if layer.data_param.source is not None:
            data_param_res.append(' source: "%s"' % layer.data_param.source)
        if layer.data_param.batch_size is not None:
            data_param_res.append(' batch_size: %d' % layer.data_param.batch_size)
        if layer.data_param.backend is not None:
            # NOTE(review): backend is an enum int; this prints its number,
            # not the enum name — acceptable since the guard above means it
            # only fires for non-default data layers.
            data_param_res.append(' backend: %s' % layer.data_param.backend)
        if len(data_param_res) > 0:
            # Bug fix: prototxt group syntax has no ':' before '{' — the
            # original emitted invalid 'data_param: {'.
            res.append(' data_param {')
            res.extend(data_param_res)
            res.append(' }')
    # convolution_param
    if layer.convolution_param is not None:
        convolution_param_res = list()
        conv_param = layer.convolution_param
        if conv_param.num_output != 0:
            convolution_param_res.append(' num_output: %d' % conv_param.num_output)
        # kernel_size / pad / stride are repeated fields
        for kernel_size in conv_param.kernel_size:
            convolution_param_res.append(' kernel_size: %d' % kernel_size)
        for pad in conv_param.pad:
            convolution_param_res.append(' pad: %d' % pad)
        for stride in conv_param.stride:
            convolution_param_res.append(' stride: %d' % stride)
        # 'constant' is the filler default, so only non-constant fillers are emitted
        if conv_param.weight_filler is not None and conv_param.weight_filler.type != 'constant':
            convolution_param_res.append(' weight_filler {')
            convolution_param_res.append(' type: "%s"' % conv_param.weight_filler.type)
            convolution_param_res.append(' }')
        if conv_param.bias_filler is not None and conv_param.bias_filler.type != 'constant':
            convolution_param_res.append(' bias_filler {')
            convolution_param_res.append(' type: "%s"' % conv_param.bias_filler.type)
            convolution_param_res.append(' }')
        if len(convolution_param_res) > 0:
            res.append(' convolution_param {')
            res.extend(convolution_param_res)
            res.append(' }')
    # pooling_param: a positive kernel_size marks a real pooling layer
    if layer.pooling_param is not None:
        pooling_param_res = list()
        if layer.pooling_param.kernel_size > 0:
            pooling_param_res.append(' kernel_size: %d' % layer.pooling_param.kernel_size)
            pooling_param_res.append(' stride: %d' % layer.pooling_param.stride)
            pooling_param_res.append(' pad: %d' % layer.pooling_param.pad)
            PoolMethodMapper = {'0': 'MAX', '1': 'AVE', '2': 'STOCHASTIC'}
            pooling_param_res.append(' pool: %s' % PoolMethodMapper[str(layer.pooling_param.pool)])
        if len(pooling_param_res) > 0:
            res.append(' pooling_param {')
            res.extend(pooling_param_res)
            res.append(' }')
    # inner_product_param
    if layer.inner_product_param is not None:
        inner_product_param_res = list()
        if layer.inner_product_param.num_output != 0:
            inner_product_param_res.append(' num_output: %d' % layer.inner_product_param.num_output)
        if len(inner_product_param_res) > 0:
            res.append(' inner_product_param {')
            res.extend(inner_product_param_res)
            res.append(' }')
    # dropout_param: 0.5 is the caffe.proto default dropout_ratio
    if layer.dropout_param is not None:
        dropout_param_res = list()
        # if layer.dropout_param.dropout_ratio != 0.5 or layer.dropout_param.scale_train != True:
        if layer.dropout_param.dropout_ratio != 0.5:
            dropout_param_res.append(' dropout_ratio: %f' % layer.dropout_param.dropout_ratio)
        # dropout_param_res.append(' scale_train: ' + str(layer.dropout_param.scale_train))
        if len(dropout_param_res) > 0:
            res.append(' dropout_param {')
            res.extend(dropout_param_res)
            res.append(' }')
    res.append('}')
    # Append this layer's text to the result file ('a+' keeps earlier layers).
    with open(result_file, 'a+') as fd:
        for line in res:
            fd.write('\n' + line)
解析结果:
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param{
lr_mult: 1.0
}
param{
lr_mult: 2.0
}
convolution_param {
num_output: 96
kernel_size: 7
pad: 1
stride: 2
weight_filler {
type: "gaussian"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "norm1"
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param{
local_size: 3
alpha: 0.000050
norm_region: WITHIN_CHANNEL
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
kernel_size: 3
stride: 2
pad: 0
pool: MAX
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param{
lr_mult: 1.0
}
param{
lr_mult: 2.0
}
convolution_param {
num_output: 256
kernel_size: 5
pad: 0
stride: 2
weight_filler {
type: "gaussian"
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "norm2"
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param{
local_size: 3
alpha: 0.000050
norm_region: WITHIN_CHANNEL
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
kernel_size: 3
stride: 2
pad: 0
pool: MAX
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param{
lr_mult: 1.0
}
param{
lr_mult: 2.0
}
convolution_param {
num_output: 384
kernel_size: 3
pad: 1
stride: 1
weight_filler {
type: "gaussian"
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param{
lr_mult: 1.0
}
param{
lr_mult: 2.0
}
convolution_param {
num_output: 384
kernel_size: 3
pad: 1
stride: 1
weight_filler {
type: "gaussian"
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param{
lr_mult: 1.0
}
param{
lr_mult: 2.0
}
convolution_param {
num_output: 256
kernel_size: 3
pad: 1
stride: 1
weight_filler {
type: "gaussian"
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5_spm6"
type: "Pooling"
bottom: "conv5"
top: "pool5_spm6"
pooling_param {
kernel_size: 3
stride: 2
pad: 0
pool: MAX
}
}
layer {
name: "pool5_spm6_flatten"
type: "Flatten"
bottom: "pool5_spm6"
top: "pool5_spm6_flatten"
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5_spm6_flatten"
top: "fc6"
param{
lr_mult: 1.0
}
param{
lr_mult: 2.0
}
inner_product_param {
num_output: 4096
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param{
lr_mult: 1.0
}
param{
lr_mult: 2.0
}
inner_product_param {
num_output: 4096
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param{
lr_mult: 1.0
}
param{
lr_mult: 2.0
}
inner_product_param {
num_output: 1000
}
}
layer {
name: "prob"
type: "Softmax"
bottom: "fc8"
top: "prob"
}