A Deep Dive into the BodyPoseModel Pose Detection Model



This post walks through how the models used in pose recognition are built and what each part of the model means. To run the code, first set up the environment by installing the packages listed in requirements.txt (torchsummary is only needed for the summary demo at the end of the file):

torch
numpy
matplotlib
opencv-python
scipy
scikit-image
tqdm
torchsummary
The code consists of one function, make_layers(), and two model classes, BodyPoseModel and HandPoseModel:
from collections import OrderedDict
import torch
import torch.nn as nn


def make_layers(block, no_relu_layers):
    layers = []
    for layer_name, v in block.items():
        if 'pool' in layer_name:
            layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
                                 padding=v[2])
            layers.append((layer_name, layer))
        else:
            conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
                               kernel_size=v[2], stride=v[3],
                               padding=v[4])
            layers.append((layer_name, conv2d))
            if layer_name not in no_relu_layers:
                layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))

    return nn.Sequential(OrderedDict(layers))


class BodyPoseModel(nn.Module):
    def __init__(self):
        super(BodyPoseModel, self).__init__()

        # these layers have no relu layer
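        # NOTE: 'Mconv7_stage6_L1' appears twice below while 'Mconv7_stage6_L2' is
        # missing, so the final L2 output is followed by a ReLU (visible as
        # ReLU-176 in the summary in section 1.4)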
        no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',
                          'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',
                          'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',
                          'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1']
        blocks = {}
        block0 = OrderedDict([
            ('conv1_1', [3, 64, 3, 1, 1]),
            ('conv1_2', [64, 64, 3, 1, 1]),
            ('pool1_stage1', [2, 2, 0]),
            ('conv2_1', [64, 128, 3, 1, 1]),
            ('conv2_2', [128, 128, 3, 1, 1]),
            ('pool2_stage1', [2, 2, 0]),
            ('conv3_1', [128, 256, 3, 1, 1]),
            ('conv3_2', [256, 256, 3, 1, 1]),
            ('conv3_3', [256, 256, 3, 1, 1]),
            ('conv3_4', [256, 256, 3, 1, 1]),
            ('pool3_stage1', [2, 2, 0]),
            ('conv4_1', [256, 512, 3, 1, 1]),
            ('conv4_2', [512, 512, 3, 1, 1]),
            ('conv4_3_CPM', [512, 256, 3, 1, 1]),
            ('conv4_4_CPM', [256, 128, 3, 1, 1])
        ])

        # Stage 1
        block1_1 = OrderedDict([
            ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
            ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
            ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
            ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
            ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])  # no relu layer
        ])

        block1_2 = OrderedDict([
            ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
            ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
            ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
            ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
            ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])  # no relu layer
        ])
        blocks['block1_1'] = block1_1
        blocks['block1_2'] = block1_2

        self.model0 = make_layers(block0, no_relu_layers)

        # Stages 2 - 6
        for i in range(2, 7):
            blocks['block%d_1' % i] = OrderedDict([
                ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
                ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
                ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
            ])

            blocks['block%d_2' % i] = OrderedDict([
                ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
                ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
                ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
            ])

        for k in blocks.keys():
            blocks[k] = make_layers(blocks[k], no_relu_layers)

        self.model1_1 = blocks['block1_1']
        self.model2_1 = blocks['block2_1']
        self.model3_1 = blocks['block3_1']
        self.model4_1 = blocks['block4_1']
        self.model5_1 = blocks['block5_1']
        self.model6_1 = blocks['block6_1']

        self.model1_2 = blocks['block1_2']
        self.model2_2 = blocks['block2_2']
        self.model3_2 = blocks['block3_2']
        self.model4_2 = blocks['block4_2']
        self.model5_2 = blocks['block5_2']
        self.model6_2 = blocks['block6_2']

    def forward(self, x):

        out1 = self.model0(x)

        out1_1 = self.model1_1(out1)
        out1_2 = self.model1_2(out1)
        out2 = torch.cat([out1_1, out1_2, out1], 1)

        out2_1 = self.model2_1(out2)
        out2_2 = self.model2_2(out2)
        out3 = torch.cat([out2_1, out2_2, out1], 1)

        out3_1 = self.model3_1(out3)
        out3_2 = self.model3_2(out3)
        out4 = torch.cat([out3_1, out3_2, out1], 1)

        out4_1 = self.model4_1(out4)
        out4_2 = self.model4_2(out4)
        out5 = torch.cat([out4_1, out4_2, out1], 1)

        out5_1 = self.model5_1(out5)
        out5_2 = self.model5_2(out5)
        out6 = torch.cat([out5_1, out5_2, out1], 1)

        out6_1 = self.model6_1(out6)
        out6_2 = self.model6_2(out6)

        return out6_1, out6_2


class HandPoseModel(nn.Module):
    def __init__(self):
        super(HandPoseModel, self).__init__()

        # these layers have no relu layer
        no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',
                          'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
        # stage 1
        block1_0 = OrderedDict([
            ('conv1_1', [3, 64, 3, 1, 1]),
            ('conv1_2', [64, 64, 3, 1, 1]),
            ('pool1_stage1', [2, 2, 0]),
            ('conv2_1', [64, 128, 3, 1, 1]),
            ('conv2_2', [128, 128, 3, 1, 1]),
            ('pool2_stage1', [2, 2, 0]),
            ('conv3_1', [128, 256, 3, 1, 1]),
            ('conv3_2', [256, 256, 3, 1, 1]),
            ('conv3_3', [256, 256, 3, 1, 1]),
            ('conv3_4', [256, 256, 3, 1, 1]),
            ('pool3_stage1', [2, 2, 0]),
            ('conv4_1', [256, 512, 3, 1, 1]),
            ('conv4_2', [512, 512, 3, 1, 1]),
            ('conv4_3', [512, 512, 3, 1, 1]),
            ('conv4_4', [512, 512, 3, 1, 1]),
            ('conv5_1', [512, 512, 3, 1, 1]),
            ('conv5_2', [512, 512, 3, 1, 1]),
            ('conv5_3_CPM', [512, 128, 3, 1, 1])
        ])

        block1_1 = OrderedDict([
            ('conv6_1_CPM', [128, 512, 1, 1, 0]),
            ('conv6_2_CPM', [512, 22, 1, 1, 0])
        ])

        blocks = {}
        blocks['block1_0'] = block1_0
        blocks['block1_1'] = block1_1

        # stage 2-6
        for i in range(2, 7):
            blocks['block%d' % i] = OrderedDict([
                ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
                ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
                ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
            ])

        for k in blocks.keys():
            blocks[k] = make_layers(blocks[k], no_relu_layers)

        self.model1_0 = blocks['block1_0']
        self.model1_1 = blocks['block1_1']
        self.model2 = blocks['block2']
        self.model3 = blocks['block3']
        self.model4 = blocks['block4']
        self.model5 = blocks['block5']
        self.model6 = blocks['block6']

    def forward(self, x):
        out1_0 = self.model1_0(x)
        out1_1 = self.model1_1(out1_0)
        concat_stage2 = torch.cat([out1_1, out1_0], 1)
        out_stage2 = self.model2(concat_stage2)
        concat_stage3 = torch.cat([out_stage2, out1_0], 1)
        out_stage3 = self.model3(concat_stage3)
        concat_stage4 = torch.cat([out_stage3, out1_0], 1)
        out_stage4 = self.model4(concat_stage4)
        concat_stage5 = torch.cat([out_stage4, out1_0], 1)
        out_stage5 = self.model5(concat_stage5)
        concat_stage6 = torch.cat([out_stage5, out1_0], 1)
        out_stage6 = self.model6(concat_stage6)
        return out_stage6


if __name__ == "__main__":
    from torchsummary import summary

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # model = HandPoseModel()
    model = BodyPoseModel()

    # use the detected device rather than hard-coding .cuda(), so this also runs on CPU
    summary(model.to(device), (3, 512, 512), device=device.type)
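
Before dissecting the code, a quick shape check (my own sketch, not part of the original post): both models are fully convolutional, so the spatial size of the output is simply the input size divided by 8, and any input whose sides are divisible by 8 works.

model_body = BodyPoseModel()
model_hand = HandPoseModel()
x = torch.randn(1, 3, 368, 368)     # 368x368 is a resolution commonly used with OpenPose
pafs, heatmaps = model_body(x)
print(pafs.shape, heatmaps.shape)   # torch.Size([1, 38, 46, 46]) torch.Size([1, 19, 46, 46])
print(model_hand(x).shape)          # torch.Size([1, 22, 46, 46])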

1. BodyPoseModel

The pose estimator introduced here is a fully convolutional architecture. Only the first block contains pooling, so it is the only place where the output spatial size changes; every other block and convolution keeps the feature-map size fixed and only changes the number of feature maps, i.e. the channel count.
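
Concretely, the first block contains three stride-2 max-pooling layers, so the overall downsampling factor is 8; a 512x512 input therefore yields 64x64 feature maps, matching the summary in section 1.4. A quick check of my own:

for size in (512, 368):
    print(size, '->', size // 2 ** 3)   # 512 -> 64, 368 -> 46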

1.1 The key function make_layers

A few notes on the make_layers function (a minimal usage sketch follows the list below).

  1. make_layers takes two arguments, (block, no_relu_layers):
    block is an ordered dictionary (OrderedDict) whose insertion order defines the order in which layers are created.
    no_relu_layers is a list holding the names of the layers that should not be followed by a ReLU activation.
  2. Inside the function, the local variable layers = [] collects the convolution, pooling and ReLU layers generated, in order, from the keys and values of block. layer_name, the key of block, says which kind of layer to create; v, the value, holds the parameters for building it. For example, layer_name = 'conv1_1' with v = [3, 64, 3, 1, 1] creates a convolution with 3 input channels, 64 output channels, a 3x3 kernel, stride 1, and padding=1 (one pixel added on each side).
  3. make_layers returns nn.Sequential(OrderedDict(layers)), the conv, maxpool and ReLU layers connected in sequence, so the result can be treated as one complete network block.
  4. Each call to this function therefore produces one block that can be viewed as a unit, whose input and output channel counts are easy to read off; see the usage sketch below.
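
As an illustration, here is a minimal usage sketch of make_layers; the block and layer names below are made up for the example, and make_layers is the function from the listing above:

from collections import OrderedDict
import torch

block = OrderedDict([
    ('conv_a', [3, 64, 3, 1, 1]),    # Conv2d(3, 64, kernel_size=3, stride=1, padding=1) + ReLU
    ('pool_a', [2, 2, 0]),           # MaxPool2d(kernel_size=2, stride=2, padding=0)
    ('conv_out', [64, 8, 1, 1, 0]),  # listed in no_relu_layers, so no trailing ReLU
])
net = make_layers(block, ['conv_out'])
y = net(torch.randn(1, 3, 32, 32))
print(y.shape)  # torch.Size([1, 8, 16, 16]): channels 3 -> 64 -> 8, spatial size halved by the pool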

1.2 Building the model

The model architecture: the class first inherits from the base class nn.Module.


  1. The names of layers that should not be followed by a ReLU (or any other activation) are collected in the no_relu_layers list.
  2. blocks = {} will collect the network blocks to be built.
  3. block0 is the first network block. It is special in that it contains pooling layers, so the feature-map size changes at its output; it serves as the input block.
  4. self.model0 = make_layers(block0, no_relu_layers) therefore builds the first block, whose input and output channel counts are [3, 128].
  5. Take care to distinguish the parameter list v that defines a layer from that layer's output shape.
    For example, the output shape [-1, 64, 512, 512] of layer Conv2d-1 means: the batch_size is -1, i.e. any batch size; 64 is the number of output channels, i.e. how many feature maps are produced; [512, 512] is the height and width of each output feature map.
  6. block1_1 and block1_2 will each be turned into a block by make_layers. They contain no pooling layers and use padding=1, so every layer inside them preserves the spatial size of the incoming feature maps; only the number of feature maps (the channel count) changes. Note that at this point only self.model0 has actually been built by calling make_layers; block1_1 and block1_2 merely describe the required parts and sit in the blocks dict.
  7. The input and output channel counts of block1_1 and block1_2 are [128, 38] and [128, 19].
  8. Note that these blocks' output layer names appear in the no_relu_layers list, so they carry no ReLU.
  9. In the same way as block1_1 and block1_2, stages 2-6 create ten more blocks in two groups of five: block2_1, block3_1, block4_1, block5_1, block6_1 and block2_2, block3_2, block4_2, block5_2, block6_2. Again, check whether each block's output layer name appears in no_relu_layers. (In the listing above, 'Mconv7_stage6_L1' appears twice and 'Mconv7_stage6_L2' is missing, so the final L2 output does get a ReLU; it shows up as ReLU-176 in the summary in section 1.4.)
  10. These ten blocks fall into two groups, with input and output channel counts [185, 38] and [185, 19].
  11. Note that so far only one block has actually been instantiated; the remaining 12 blocks exist only as descriptions stored in blocks. Only when the loop iterates over blocks does make_layers turn each description into a real network block.
  12. Assigning these 12 blocks to the self.model attributes makes them instance variables of the model (and registers them as submodules).
  13. Finally, to complete the model, forward must be overridden.

14. forward finally expresses how the blocks are wired together inside the model.
15. The 3-channel input enters self.model0, which outputs out1 with 128 channels; out1 then enters self.model1_1, which outputs out1_1 with 38 channels.
16. Note that the third block, self.model1_2, also takes out1 as its input and outputs out1_2 with 19 channels.
17. torch.cat() concatenates the outputs of the three blocks into one new tensor that feeds the next stage. out2 = torch.cat([out1_1, out1_2, out1], 1) stacks the three feature sets along the channel dimension: 38 + 19 + 128 feature maps merge into 185 channels. Because none of these blocks change the spatial size (width and height) of the maps, they can be concatenated directly; see the sketch after this list.
18. out2, out3, out4 and out5 are formed by the same steps. Feeding out1 back in at every stage is a skip connection, similar in spirit to the shortcut paths made famous by ResNet (though realized here by concatenation rather than addition).
19. Finally the model returns its outputs out6_1 and out6_2 (38 and 19 feature maps: the part affinity fields and the keypoint heatmaps).
20. Mapping these feature maps onto Gaussian heatmaps and extracting numeric coordinate positions will be covered in the next post.
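
The channel bookkeeping in forward() can be verified in isolation; a minimal sketch of my own, with random tensors standing in for the real branch outputs:

import torch

out1 = torch.randn(1, 128, 46, 46)   # backbone features from self.model0
out1_1 = torch.randn(1, 38, 46, 46)  # stage-1 L1 branch (part affinity fields)
out1_2 = torch.randn(1, 19, 46, 46)  # stage-1 L2 branch (keypoint heatmaps)
out2 = torch.cat([out1_1, out1_2, out1], 1)
print(out2.shape)  # torch.Size([1, 185, 46, 46]); 38 + 19 + 128 = 185 matches in_channels of Mconv1_stage2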

1.3 Overall structure

(figure: overall structure of the model)

1.4 Model summary output

----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 64, 512, 512]           1,792
              ReLU-2         [-1, 64, 512, 512]               0
            Conv2d-3         [-1, 64, 512, 512]          36,928
              ReLU-4         [-1, 64, 512, 512]               0
         MaxPool2d-5         [-1, 64, 256, 256]               0
            Conv2d-6        [-1, 128, 256, 256]          73,856
              ReLU-7        [-1, 128, 256, 256]               0
            Conv2d-8        [-1, 128, 256, 256]         147,584
              ReLU-9        [-1, 128, 256, 256]               0
        MaxPool2d-10        [-1, 128, 128, 128]               0
           Conv2d-11        [-1, 256, 128, 128]         295,168
             ReLU-12        [-1, 256, 128, 128]               0
           Conv2d-13        [-1, 256, 128, 128]         590,080
             ReLU-14        [-1, 256, 128, 128]               0
           Conv2d-15        [-1, 256, 128, 128]         590,080
             ReLU-16        [-1, 256, 128, 128]               0
           Conv2d-17        [-1, 256, 128, 128]         590,080
             ReLU-18        [-1, 256, 128, 128]               0
        MaxPool2d-19          [-1, 256, 64, 64]               0
           Conv2d-20          [-1, 512, 64, 64]       1,180,160
             ReLU-21          [-1, 512, 64, 64]               0
           Conv2d-22          [-1, 512, 64, 64]       2,359,808
             ReLU-23          [-1, 512, 64, 64]               0
           Conv2d-24          [-1, 256, 64, 64]       1,179,904
             ReLU-25          [-1, 256, 64, 64]               0
           Conv2d-26          [-1, 128, 64, 64]         295,040
             ReLU-27          [-1, 128, 64, 64]               0
           Conv2d-28          [-1, 128, 64, 64]         147,584
             ReLU-29          [-1, 128, 64, 64]               0
           Conv2d-30          [-1, 128, 64, 64]         147,584
             ReLU-31          [-1, 128, 64, 64]               0
           Conv2d-32          [-1, 128, 64, 64]         147,584
             ReLU-33          [-1, 128, 64, 64]               0
           Conv2d-34          [-1, 512, 64, 64]          66,048
             ReLU-35          [-1, 512, 64, 64]               0
           Conv2d-36           [-1, 38, 64, 64]          19,494
           Conv2d-37          [-1, 128, 64, 64]         147,584
             ReLU-38          [-1, 128, 64, 64]               0
           Conv2d-39          [-1, 128, 64, 64]         147,584
             ReLU-40          [-1, 128, 64, 64]               0
           Conv2d-41          [-1, 128, 64, 64]         147,584
             ReLU-42          [-1, 128, 64, 64]               0
           Conv2d-43          [-1, 512, 64, 64]          66,048
             ReLU-44          [-1, 512, 64, 64]               0
           Conv2d-45           [-1, 19, 64, 64]           9,747
           Conv2d-46          [-1, 128, 64, 64]       1,160,448
             ReLU-47          [-1, 128, 64, 64]               0
           Conv2d-48          [-1, 128, 64, 64]         802,944
             ReLU-49          [-1, 128, 64, 64]               0
           Conv2d-50          [-1, 128, 64, 64]         802,944
             ReLU-51          [-1, 128, 64, 64]               0
           Conv2d-52          [-1, 128, 64, 64]         802,944
             ReLU-53          [-1, 128, 64, 64]               0
           Conv2d-54          [-1, 128, 64, 64]         802,944
             ReLU-55          [-1, 128, 64, 64]               0
           Conv2d-56          [-1, 128, 64, 64]          16,512
             ReLU-57          [-1, 128, 64, 64]               0
           Conv2d-58           [-1, 38, 64, 64]           4,902
           Conv2d-59          [-1, 128, 64, 64]       1,160,448
             ReLU-60          [-1, 128, 64, 64]               0
           Conv2d-61          [-1, 128, 64, 64]         802,944
             ReLU-62          [-1, 128, 64, 64]               0
           Conv2d-63          [-1, 128, 64, 64]         802,944
             ReLU-64          [-1, 128, 64, 64]               0
           Conv2d-65          [-1, 128, 64, 64]         802,944
             ReLU-66          [-1, 128, 64, 64]               0
           Conv2d-67          [-1, 128, 64, 64]         802,944
             ReLU-68          [-1, 128, 64, 64]               0
           Conv2d-69          [-1, 128, 64, 64]          16,512
             ReLU-70          [-1, 128, 64, 64]               0
           Conv2d-71           [-1, 19, 64, 64]           2,451
           Conv2d-72          [-1, 128, 64, 64]       1,160,448
             ReLU-73          [-1, 128, 64, 64]               0
           Conv2d-74          [-1, 128, 64, 64]         802,944
             ReLU-75          [-1, 128, 64, 64]               0
           Conv2d-76          [-1, 128, 64, 64]         802,944
             ReLU-77          [-1, 128, 64, 64]               0
           Conv2d-78          [-1, 128, 64, 64]         802,944
             ReLU-79          [-1, 128, 64, 64]               0
           Conv2d-80          [-1, 128, 64, 64]         802,944
             ReLU-81          [-1, 128, 64, 64]               0
           Conv2d-82          [-1, 128, 64, 64]          16,512
             ReLU-83          [-1, 128, 64, 64]               0
           Conv2d-84           [-1, 38, 64, 64]           4,902
           Conv2d-85          [-1, 128, 64, 64]       1,160,448
             ReLU-86          [-1, 128, 64, 64]               0
           Conv2d-87          [-1, 128, 64, 64]         802,944
             ReLU-88          [-1, 128, 64, 64]               0
           Conv2d-89          [-1, 128, 64, 64]         802,944
             ReLU-90          [-1, 128, 64, 64]               0
           Conv2d-91          [-1, 128, 64, 64]         802,944
             ReLU-92          [-1, 128, 64, 64]               0
           Conv2d-93          [-1, 128, 64, 64]         802,944
             ReLU-94          [-1, 128, 64, 64]               0
           Conv2d-95          [-1, 128, 64, 64]          16,512
             ReLU-96          [-1, 128, 64, 64]               0
           Conv2d-97           [-1, 19, 64, 64]           2,451
           Conv2d-98          [-1, 128, 64, 64]       1,160,448
             ReLU-99          [-1, 128, 64, 64]               0
          Conv2d-100          [-1, 128, 64, 64]         802,944
            ReLU-101          [-1, 128, 64, 64]               0
          Conv2d-102          [-1, 128, 64, 64]         802,944
            ReLU-103          [-1, 128, 64, 64]               0
          Conv2d-104          [-1, 128, 64, 64]         802,944
            ReLU-105          [-1, 128, 64, 64]               0
          Conv2d-106          [-1, 128, 64, 64]         802,944
            ReLU-107          [-1, 128, 64, 64]               0
          Conv2d-108          [-1, 128, 64, 64]          16,512
            ReLU-109          [-1, 128, 64, 64]               0
          Conv2d-110           [-1, 38, 64, 64]           4,902
          Conv2d-111          [-1, 128, 64, 64]       1,160,448
            ReLU-112          [-1, 128, 64, 64]               0
          Conv2d-113          [-1, 128, 64, 64]         802,944
            ReLU-114          [-1, 128, 64, 64]               0
          Conv2d-115          [-1, 128, 64, 64]         802,944
            ReLU-116          [-1, 128, 64, 64]               0
          Conv2d-117          [-1, 128, 64, 64]         802,944
            ReLU-118          [-1, 128, 64, 64]               0
          Conv2d-119          [-1, 128, 64, 64]         802,944
            ReLU-120          [-1, 128, 64, 64]               0
          Conv2d-121          [-1, 128, 64, 64]          16,512
            ReLU-122          [-1, 128, 64, 64]               0
          Conv2d-123           [-1, 19, 64, 64]           2,451
          Conv2d-124          [-1, 128, 64, 64]       1,160,448
            ReLU-125          [-1, 128, 64, 64]               0
          Conv2d-126          [-1, 128, 64, 64]         802,944
            ReLU-127          [-1, 128, 64, 64]               0
          Conv2d-128          [-1, 128, 64, 64]         802,944
            ReLU-129          [-1, 128, 64, 64]               0
          Conv2d-130          [-1, 128, 64, 64]         802,944
            ReLU-131          [-1, 128, 64, 64]               0
          Conv2d-132          [-1, 128, 64, 64]         802,944
            ReLU-133          [-1, 128, 64, 64]               0
          Conv2d-134          [-1, 128, 64, 64]          16,512
            ReLU-135          [-1, 128, 64, 64]               0
          Conv2d-136           [-1, 38, 64, 64]           4,902
          Conv2d-137          [-1, 128, 64, 64]       1,160,448
            ReLU-138          [-1, 128, 64, 64]               0
          Conv2d-139          [-1, 128, 64, 64]         802,944
            ReLU-140          [-1, 128, 64, 64]               0
          Conv2d-141          [-1, 128, 64, 64]         802,944
            ReLU-142          [-1, 128, 64, 64]               0
          Conv2d-143          [-1, 128, 64, 64]         802,944
            ReLU-144          [-1, 128, 64, 64]               0
          Conv2d-145          [-1, 128, 64, 64]         802,944
            ReLU-146          [-1, 128, 64, 64]               0
          Conv2d-147          [-1, 128, 64, 64]          16,512
            ReLU-148          [-1, 128, 64, 64]               0
          Conv2d-149           [-1, 19, 64, 64]           2,451
          Conv2d-150          [-1, 128, 64, 64]       1,160,448
            ReLU-151          [-1, 128, 64, 64]               0
          Conv2d-152          [-1, 128, 64, 64]         802,944
            ReLU-153          [-1, 128, 64, 64]               0
          Conv2d-154          [-1, 128, 64, 64]         802,944
            ReLU-155          [-1, 128, 64, 64]               0
          Conv2d-156          [-1, 128, 64, 64]         802,944
            ReLU-157          [-1, 128, 64, 64]               0
          Conv2d-158          [-1, 128, 64, 64]         802,944
            ReLU-159          [-1, 128, 64, 64]               0
          Conv2d-160          [-1, 128, 64, 64]          16,512
            ReLU-161          [-1, 128, 64, 64]               0
          Conv2d-162           [-1, 38, 64, 64]           4,902
          Conv2d-163          [-1, 128, 64, 64]       1,160,448
            ReLU-164          [-1, 128, 64, 64]               0
          Conv2d-165          [-1, 128, 64, 64]         802,944
            ReLU-166          [-1, 128, 64, 64]               0
          Conv2d-167          [-1, 128, 64, 64]         802,944
            ReLU-168          [-1, 128, 64, 64]               0
          Conv2d-169          [-1, 128, 64, 64]         802,944
            ReLU-170          [-1, 128, 64, 64]               0
          Conv2d-171          [-1, 128, 64, 64]         802,944
            ReLU-172          [-1, 128, 64, 64]               0
          Conv2d-173          [-1, 128, 64, 64]          16,512
            ReLU-174          [-1, 128, 64, 64]               0
          Conv2d-175           [-1, 19, 64, 64]           2,451
            ReLU-176           [-1, 19, 64, 64]               0
================================================================
Total params: 52,311,446
Trainable params: 52,311,446
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 3.00
Forward/backward pass size (MB): 1771.28
Params size (MB): 199.55
Estimated Total Size (MB): 1973.83
----------------------------------------------------------------
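
As a sanity check on the parameter counts above (my own arithmetic, not from the original post): a Conv2d layer holds in_channels x out_channels x k x k weights plus out_channels biases.

print(3 * 64 * 3 * 3 + 64)      # 1,792     -> Conv2d-1  (conv1_1)
print(512 * 38 * 1 * 1 + 38)    # 19,494    -> Conv2d-36 (conv5_5_CPM_L1)
print(185 * 128 * 7 * 7 + 128)  # 1,160,448 -> Conv2d-46 (Mconv1_stage2_L1)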



GitHub: https://github.com/beauthy/pytorch-openpose

References

@inproceedings{simon2017hand,
author = {Tomas Simon and Hanbyul Joo and Iain Matthews and Yaser Sheikh},
booktitle = {CVPR},
title = {Hand Keypoint Detection in Single Images using Multiview Bootstrapping},
year = {2017}
}


Reposted from blog.csdn.net/beauthy/article/details/108338105