(输出各层)神经网络权重与偏置

权重和偏置的简单理解

y = ax + b    # a就是权重,b就是偏置

模型参数:权重和偏置    Weights 和 biases

# Walk every submodule (recursively) and print the learnable parameters
# of each fully-connected (nn.Linear) layer: its weight matrix and bias vector.
for module in model.modules():
    if not isinstance(module, nn.Linear):
        continue
    print(module.weight)
    print(module.bias)

Elements in state_dict: 
"model.conv1.weight", 
"model.bn1.weight", "model.bn1.bias",
"model.bn1.running_mean", 
"model.bn1.running_var", 

"model.layer1.0.conv1.weight",
"model.layer1.0.bn1.weight", "model.layer1.0.bn1.bias", 
"model.layer1.0.bn1.running_mean", 
"model.layer1.0.bn1.running_var", 
"model.layer1.0.conv2.weight", 
"model.layer1.0.bn2.weight", "model.layer1.0.bn2.bias", 
"model.layer1.0.bn2.running_mean",
"model.layer1.0.bn2.running_var", 

"model.layer1.1.conv1.weight", 
"model.layer1.1.bn1.weight", "model.layer1.1.bn1.bias", 
"model.layer1.1.bn1.running_mean", 
"model.layer1.1.bn1.running_var", 
"model.layer1.1.conv2.weight", 
"model.layer1.1.bn2.weight", "model.layer1.1.bn2.bias", 
"model.layer1.1.bn2.running_mean", 
"model.layer1.1.bn2.running_var", 

"model.layer2.0.conv1.weight", 
"model.layer2.0.bn1.weight", "model.layer2.0.bn1.bias", 
"model.layer2.0.bn1.running_mean", 
"model.layer2.0.bn1.running_var", 
"model.layer2.0.conv2.weight",
 "model.layer2.0.bn2.weight", "model.layer2.0.bn2.bias", 
......

# Stem layers of a ResNet-style network (matching the state_dict keys listed above):
# 7x7 stride-2 conv (no bias — the following BatchNorm supplies the affine shift),
# batch norm, in-place ReLU, then a 3x3 stride-2 max pool.
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
bn1 = nn.BatchNorm2d(64)
relu = nn.ReLU(inplace=True)
# Fix: nn.MaxPool2 does not exist — the class is nn.MaxPool2d, and it requires
# a kernel_size. 3/2/1 is the standard ResNet stem pooling configuration.
maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# Fix: nn.softmax does not exist — the module class is nn.Softmax, and it should
# be given an explicit dim (dim=1 normalizes over the class/channel axis).
softmax = nn.Softmax(dim=1)
发布了1636 篇原创文章 · 获赞 341 · 访问量 221万+

猜你喜欢

转载自blog.csdn.net/tony2278/article/details/103903778