Accelerating model inference with TensorRT

Install dependencies

pip install nvidia-pyindex
pip install nvidia-tensorrt
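
A quick sanity check that the Python bindings installed correctly (a minimal sketch; the printed version depends on your environment):

import tensorrt as trt
print(trt.__version__)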

Wrapping process

  • Build phase
import tensorrt as trt
logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
  • Create the network
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
  • Deserialize and load the engine (a sketch for producing and saving serialized_engine follows the full wrapper below)
runtime = trt.Runtime(logger)
engine = runtime.deserialize_cuda_engine(serialized_engine)
  • Run inference
context = engine.create_execution_context()
context.execute_async_v2(buffers, stream_ptr)
  • Full wrapper
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
trt.init_libnvinfer_plugins(TRT_LOGGER, '')


def GiB(val):
    return val * (1 << 30)


# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()


# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    for binding in engine:
        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
    return inputs, outputs, bindings, stream


# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]


# This function is generalized for multiple inputs/outputs for full dimension networks.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference_v2(context, bindings, inputs, outputs, stream):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]


# The Onnx path is used for Onnx models.
def build_engine_onnx(model_file):
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network(EXPLICIT_BATCH)
    config = builder.create_builder_config()
    parser = trt.OnnxParser(network, TRT_LOGGER)

    config.max_workspace_size = GiB(1)
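    # Note (assumption about newer TensorRT releases): max_workspace_size is
    # deprecated there in favor of
    # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, GiB(1))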
    # Load the Onnx model and parse it in order to populate the TensorRT network.
    with open(model_file, 'rb') as model:
        if not parser.parse(model.read()):
            print('ERROR: Failed to parse the ONNX file.')
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            return None
    return builder.build_engine(network, config)


# The Caffe path is used for Caffe models.
def build_engine_caffe(model_file, deploy_file, dtype, mark_output: list):
    # You can set the logger severity higher to suppress messages (or lower to display more messages).
    with trt.Builder(TRT_LOGGER) as builder, \
            builder.create_network() as network, \
            builder.create_builder_config() as config, \
            trt.CaffeParser() as parser:
        # Workspace size is the maximum amount of memory available to the builder while building an engine.
        # It should generally be set as high as possible.
        config.max_workspace_size = GiB(1)
        # Load the Caffe model and parse it in order to populate the TensorRT network.
        # This function returns an object that we can query to find tensors by name.
        model_tensors = parser.parse(deploy=deploy_file, model=model_file, network=network, dtype=dtype)
        # For Caffe, we need to manually mark the output of the network.
        # Since we know the name of the output tensor, we can find it in model_tensors.
        for layer in mark_output:
            network.mark_output(model_tensors.find(layer))
        return builder.build_engine(network, config)


class Model:

    def __init__(self, engine_file=None):
        """

        :param engine_file: path to a serialized engine file
        """
        self.engine = None
        if engine_file:
            self.load_engine_file(engine_file=engine_file)
        self.inputs, self.outputs, self.bindings, self.stream, self.context = None, None, None, None, None

    def load_engine_file(self, engine_file):
        """
        Load a serialized TensorRT engine file
        :param engine_file:
        :return:
        """
        runtime = trt.Runtime(TRT_LOGGER)
        with open(engine_file, "rb") as f:
            engine = runtime.deserialize_cuda_engine(f.read())
        self.engine = engine

    def build_engine_by_onnx(self, model_file="ResNet50.onnx"):
        """
        Build an engine from an ONNX model
        :param model_file:
        :return:
        """
        self.engine = build_engine_onnx(model_file)

    def build_engine_by_caffe(self, model_file="ResNet50_fp32.caffemodel", deploy_file="ResNet50_N2.prototxt",
                              dtype=trt.float32, mark_output: list = None):
        """
        Build an engine from Caffe model/deploy files
        :param model_file:
        :param deploy_file:
        :param dtype:
        :param mark_output: names of the output tensors to mark on the network
        :return:
        """
        # Avoid a mutable default argument; fall back to an empty list
        self.engine = build_engine_caffe(model_file, deploy_file, dtype, mark_output=mark_output or [])

    def allocate_buffers(self):
        """
        Allocate host/device buffers
        :return:
        """
        self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(self.engine)

    def create_execution_context(self):
        """
        Create the execution context
        :return:
        """
        self.context = self.engine.create_execution_context()

    def do_inference(self, inputs):
        """
        Run inference (implicit-batch API)
        :param inputs:
        :return:
        """
        self.inputs[0].host[:] = inputs[:]
        return do_inference(context=self.context, bindings=self.bindings, inputs=self.inputs, outputs=self.outputs,
                            stream=self.stream)

    def do_inference_v2(self, inputs):
        """
        Run inference (explicit-batch API, execute_async_v2)
        :param inputs:
        :return:
        """
        self.inputs[0].host[:] = inputs[:]
        return do_inference_v2(context=self.context, bindings=self.bindings, inputs=self.inputs, outputs=self.outputs,
                               stream=self.stream)
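
Note that load_engine_file expects a serialized engine on disk. A minimal sketch for producing one with the build_engine_onnx helper above (the output path model.engine is arbitrary):

engine = build_engine_onnx("ResNet50.onnx")
with open("model.engine", "wb") as f:
    # engine.serialize() returns a host-memory buffer that can be written directly
    f.write(engine.serialize())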

  • Test
    Prepare your own model and engine files
import numpy as np
import cv2

from infer import Model


# Find all output layers of the network
def getOutputLayers(prototxt_path):
    # Open the prototxt
    f = open(prototxt_path)
    # Read it line by line
    line = f.readline()

    # Names that appear as bottom
    bottom = []
    # Names that appear as top
    top = []
    # Track the line number of each bottom/top so that output layers hidden by
    # in-place layers can still be detected correctly
    cnt = 0

    while line:
        line = line.replace(" ", "")
        line = line.replace('"', "")

        pos = 0
        flag = ""
        for j in range(len(line)):
            if line[j] == ':':
                pos = j + 1
                break
            else:
                flag += line[j]

        name = ""
        if flag != "":
            for j in range(pos, len(line) - 1):
                name += line[j]

        if flag == "bottom":
            bottom.append([name, cnt])
        if flag == "top":
            top.append([name, cnt])

        line = f.readline()
        cnt = cnt + 1

    f.close()
    ans = []

    for i in range(len(top)):
        now = top[i][0]
        now_cnt = top[i][1]
        flag = True
        # A top is an output layer only if its name never appears as a bottom,
        # or only appears on an immediately adjacent line (the in-place case)
        for j in range(len(bottom)):
            if bottom[j][0] == now:
                if abs(bottom[j][1] - now_cnt) > 1:
                    flag = False
        if flag:
            ans.append(now)
    # Return the list of output layer names; for SSD this is ['detection_out', 'keep_count']
    return ans


mark_output = getOutputLayers("head_detection.prototxt")

# Note: building from Caffe below replaces the engine loaded from head_detection.engine
model = Model(engine_file="head_detection.engine")
model.build_engine_by_caffe(model_file="./head_detection.caffemodel", deploy_file="head_detection.prototxt",
                            mark_output=mark_output)
model.allocate_buffers()
model.create_execution_context()
print(model.engine)
print(model.bindings)
img = cv2.imread("./1.jpg")
print(img.shape)
# cvtColor and resize return new arrays; reassign them or the originals are unchanged
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (512, 512))
print("resize = ", img.shape)
data = np.array(img).flatten()
print(data)
print(data.shape)

res = model.do_inference(data)[0].reshape((1, 1, 50, 7))
print(res)
print(res.shape)

  • Result
root@d697bba88e88:/workspace/build/models# python3 test_trt.py 
[TensorRT] WARNING: Current optimization profile is: 0. Please ensure there are no enqueued operations pending in this context prior to switching profiles
<tensorrt.tensorrt.ICudaEngine object at 0x7f9cddc21930>
[140308738211840, 140309111512576, 140309111514112]
(598, 900, 3)
resize =  (512, 512, 3)
[ 98  87  81 ... 221 221 221]
(786432,)
[[[[0.00000000e+00 1.00000000e+00 7.01323450e-01 2.53085375e-01
    0.00000000e+00 2.63810396e-01 1.81251913e-02]
   [0.00000000e+00 1.00000000e+00 6.86035991e-01 2.68468022e-01
    0.00000000e+00 2.79185116e-01 1.83350369e-02]
   [0.00000000e+00 1.00000000e+00 6.22933388e-01 7.85305619e-01
    0.00000000e+00 7.94009924e-01 1.48523413e-02]
   [0.00000000e+00 1.00000000e+00 6.11043394e-01 9.83310580e-01
    0.00000000e+00 9.96181369e-01 1.94536522e-02]
   [0.00000000e+00 1.00000000e+00 6.08991027e-01 7.70242155e-01
    0.00000000e+00 7.79535949e-01 1.44858146e-02]
   [0.00000000e+00 1.00000000e+00 6.05013907e-01 2.83096880e-01
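
Each row of the (1, 1, 50, 7) result follows the usual Caffe SSD DetectionOutput layout, [image_id, label, confidence, xmin, ymin, xmax, ymax] with normalized coordinates. A minimal post-processing sketch, assuming that layout and an arbitrary 0.5 confidence threshold:

# Scale surviving boxes back to the original image size (598 x 900 in the log above)
h, w = 598, 900
for det in res.reshape(-1, 7):
    image_id, label, conf, xmin, ymin, xmax, ymax = det
    if conf > 0.5:
        print(int(label), conf, int(xmin * w), int(ymin * h), int(xmax * w), int(ymax * h))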


Reposted from blog.csdn.net/xzpdxz/article/details/124726563