NDK Example Summary Supplement: Analyzing USB Image Capture with V4L2

NDK example summary series

NDK Example Summary: JNI Examples
NDK Example Summary: OpenCV Image Processing
NDK Example Summary: Raw Image Processing with Android Camera and usbCamera
NDK Example Summary Supplement: Analyzing USB Image Capture with V4L2
NDK Example Summary: Playing RTSP Streams with FFmpeg

Preface

This post supplements the usbCamera part of "NDK Example Summary: Raw Image Processing with Android Camera and usbCamera", mainly analyzing the flow of capturing USB images with V4L2.

Introduction to V4L2

v4l2 is the Linux API for capturing picture, video, and audio data. Combined with a suitable capture device and the corresponding driver, it can capture pictures, video, audio, and more, and it is widely used in remote conferencing, video telephony, video surveillance systems, and embedded multimedia terminals.

Under Linux, all peripherals are treated as a special kind of file called a "device file", which can be accessed in the same way as an ordinary file.

V4L2 supports two ways of capturing images: memory mapping (mmap) and direct read (read). The former is generally used for continuous video capture, while the latter is commonly used for still image capture.
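
As a point of comparison with the mmap flow analyzed below, here is a minimal sketch of the read() path, assuming the driver advertises read/write support (V4L2_CAP_READWRITE); the device path and frame size are illustrative:

#include <fcntl.h>
#include <unistd.h>
#include <vector>

int main() {
    //blocking open for simplicity; format negotiation is omitted here
    int fd = open("/dev/video0", O_RDWR);
    if (fd < 0) return -1;
    std::vector<unsigned char> frame(640 * 480 * 2);   //one YUYV frame
    //read() copies one captured frame from the driver into user memory
    ssize_t n = read(fd, frame.data(), frame.size());
    close(fd);
    return n > 0 ? 0 : -1;
}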

V4L2 defines a number of important data structures in include/linux/videodev2.h; capturing images comes down to operating on these structures to obtain the final image data.
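
For orientation, these are the structures the code below revolves around, sketched with one-line summaries (the variable names here are illustrative):

#include <linux/videodev2.h>

struct v4l2_capability stCap;        //device/driver info, filled by VIDIOC_QUERYCAP
struct v4l2_format stFmt;            //frame size and pixel format, set via VIDIOC_S_FMT
struct v4l2_requestbuffers stReq;    //kernel buffer request, via VIDIOC_REQBUFS
struct v4l2_buffer stBuf;            //one frame buffer, used with VIDIOC_QUERYBUF/QBUF/DQBUF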

V4L2 video capture flow chart

The flow chart clearly shows the steps an application takes to capture video data through the V4L2 interface; reading it together with the code analysis below gives a better understanding.

[Figure: V4L2 capture flow, open the device → query capabilities → set the capture format → request and mmap frame buffers → queue the buffers → stream on → loop of dequeue / process / requeue → stream off → close the device]

Code analysis

Here we analyze the implementation inside the v4l2 wrapper class.

V4LAchieve::V4LAchieve(const int &iCameraIndex, const int &iWidth, const int &iHeigth)
        : miCameraIndex(-1), miOpenedCameraFd(-1),
          miBufferCount(-1), mpstV4LBuffers(NULL) {
    this->miCameraIndex = iCameraIndex;
    m_pYUYV422 = new unsigned char[iWidth * iHeigth * 2];
    m_iYUYV422Len = 0;
    m_iWidth = iWidth;
    m_iHeight = iHeigth;
}

The wrapper class's constructor: iCameraIndex is the video device index (for /dev/video0, pass 0), and iWidth and iHeigth are the image width and height. m_pYUYV422 holds one frame of image data; YUYV (packed YUV 4:2:2) takes 2 bytes per pixel, so m_pYUYV422 must be allocated width × height × 2 bytes.
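
To make the size calculation concrete: packed YUYV stores two adjacent pixels in four bytes [Y0 U Y1 V], 2 bytes per pixel on average. A small sketch of addressing samples in such a buffer (these helpers are illustrative, not part of the demo class):

//Y sample of pixel (x, y): every pixel owns one Y byte at an even offset
unsigned char LumaAt(const unsigned char *pYUYV, int iWidth, int x, int y) {
    return pYUYV[(y * iWidth + x) * 2];
}
//U sample shared by the 2-pixel group containing (x, y); V sits at pair + 3
unsigned char ChromaUAt(const unsigned char *pYUYV, int iWidth, int x, int y) {
    int pair = (y * iWidth + (x / 2) * 2) * 2;   //byte offset of the group start
    return pYUYV[pair + 1];
}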

bool V4LAchieve::OpenCamera() {
    const int iMAXPATH = 50;
    //build the device path on the stack (avoids leaking on the error path)
    char chPCameraDevicename[iMAXPATH];
    std::snprintf(chPCameraDevicename, iMAXPATH, "/dev/video%d", miCameraIndex);
    V4L_LOGI("Open Camera Device : %s", chPCameraDevicename);
    //open the video device file in non-blocking mode
    miOpenedCameraFd = open(chPCameraDevicename, O_RDWR /* required */ | O_NONBLOCK, 0);
    if (miOpenedCameraFd < 0) {
        V4L_LOGE(" Open Camera Device : %s Failed", chPCameraDevicename);
        return false;
    }

    //query the device capabilities
    if (!GetCameraParameters()) {
        V4L_LOGE("GetCameraParameters Fail");
        return false;
    }
    //configure the capture format and buffers
    if (!SetCameraVideoFormat()) {
        V4L_LOGE("SetCameraVideoFormat Fail");
        return false;
    }
    //start video capture
    if (!StartCameraCapture()) {
        V4L_LOGE("StartCameraCapture Fail");
        return false;
    }
    return true;
}

open() opens the /dev/video* device file and the resulting file descriptor is kept in miOpenedCameraFd; O_NONBLOCK puts the descriptor into non-blocking mode.

After the device opens successfully, the device capabilities are queried.

bool V4LAchieve::GetCameraParameters() {
    if (miOpenedCameraFd < 0) {
        V4L_LOGE("Invalid Camera File Descriptor");
        return false;
    }
    struct v4l2_capability stV4l2Capability;
    std::memset(&stV4l2Capability, 0, sizeof(struct v4l2_capability));
    //query the device capabilities via VIDIOC_QUERYCAP
    if (ioctl(miOpenedCameraFd, VIDIOC_QUERYCAP, &stV4l2Capability) < 0) {
        V4L_LOGE("Get Camera Parameters Failed!");
        return false;
    }
    V4L_LOGI("Camera Capability as :");
    V4L_LOGI("Camera Bus info: %s", stV4l2Capability.bus_info);
    V4L_LOGI("Camera Name: %s", stV4l2Capability.card);
    V4L_LOGI("Camera Kernel Version: %d", stV4l2Capability.version);
    V4L_LOGI("Camera Driver Info: %s", stV4l2Capability.driver);
    return true;
}

The camera's identifying information can be read out here.
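
Beyond logging, the capabilities field of the same struct can confirm that the device really supports video capture and mmap streaming before going any further; a sketch with a hypothetical helper:

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <cstring>

//CheckCaptureSupport is a hypothetical helper, not part of the demo class
static bool CheckCaptureSupport(int fd) {
    struct v4l2_capability stCap;
    std::memset(&stCap, 0, sizeof(stCap));
    if (ioctl(fd, VIDIOC_QUERYCAP, &stCap) < 0)
        return false;
    if (!(stCap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
        return false;   //device cannot capture video
    if (!(stCap.capabilities & V4L2_CAP_STREAMING))
        return false;   //device cannot do mmap streaming I/O
    return true;
}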

After the query succeeds, the capture parameters are configured.

bool V4LAchieve::SetCameraVideoFormat() {
    if (miOpenedCameraFd < 0) {
        V4L_LOGE("Invalid Camera File Descriptor");
        return false;
    }
    struct v4l2_format stV4l2Format;
    std::memset(&stV4l2Format, 0, sizeof(struct v4l2_format));
    stV4l2Format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    stV4l2Format.fmt.pix.width = m_iWidth;
    stV4l2Format.fmt.pix.height = m_iHeight;
    stV4l2Format.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
//    stV4l2Format.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
    stV4l2Format.fmt.pix.field = V4L2_FIELD_INTERLACED;
    //set the frame format: width, height, and pixel format (YUYV/MJPEG)
    if (ioctl(miOpenedCameraFd, VIDIOC_S_FMT, &stV4l2Format) < 0) {
        V4L_LOGE("set camera Capture format error! ");
        return false;
    }
    V4L_LOGI("set camera capture format is ok !");

    struct v4l2_requestbuffers stV4l2RequestBuffers;
    std::memset(&stV4l2RequestBuffers, 0, sizeof(struct v4l2_requestbuffers));
    stV4l2RequestBuffers.count = 4;
    stV4l2RequestBuffers.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    stV4l2RequestBuffers.memory = V4L2_MEMORY_MMAP;

    //request frame buffers; count is the number requested, normally no fewer than 3
    if (ioctl(miOpenedCameraFd, VIDIOC_REQBUFS, &stV4l2RequestBuffers) < 0) {
        V4L_LOGE("Request buffers failed!");
        return false;
    }
    //the driver may grant fewer buffers than requested
    if (stV4l2RequestBuffers.count < 2) {
        V4L_LOGE("Insufficient buffer memory");
        return false;
    }
    V4L_LOGI("The Camera Apply Cache Success");
    V4L_LOGI("Buffer Count = %d", stV4l2RequestBuffers.count);
    V4L_LOGI("Buffer Memory Type = %d", stV4l2RequestBuffers.memory);
    V4L_LOGI("Buffer Type = %d", stV4l2RequestBuffers.type);
    //save the number of buffers granted
    miBufferCount = stV4l2RequestBuffers.count;
    //allocate the matching bookkeeping array in user memory
    mpstV4LBuffers = (struct st_V4LBuffer *) calloc(stV4l2RequestBuffers.count,
                                                    sizeof(struct st_V4LBuffer));
    unsigned int iReqBuffersNum = 0;
    for (iReqBuffersNum = 0; iReqBuffersNum < stV4l2RequestBuffers.count; ++iReqBuffersNum) {
        //one frame buffer inside the driver
        struct v4l2_buffer stV4l2Buffer;
        std::memset(&stV4l2Buffer, 0, sizeof(struct v4l2_buffer));

        stV4l2Buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        stV4l2Buffer.memory = V4L2_MEMORY_MMAP;
        stV4l2Buffer.index = iReqBuffersNum;
        //query the buffer's length and offset in kernel space
        if (-1 == ioctl(miOpenedCameraFd, VIDIOC_QUERYBUF, &stV4l2Buffer)) {
            V4L_LOGE("VIDIOC_QUERYBUF error");
            break;
        }
        //map the frame buffer's address into user memory
        mpstV4LBuffers[iReqBuffersNum].iLength = stV4l2Buffer.length;
        mpstV4LBuffers[iReqBuffersNum].pStart =
                //mmap establishes the mapping
                mmap(NULL /* start anywhere */,
                     stV4l2Buffer.length,
                     PROT_READ | PROT_WRITE /* required */,
                     MAP_SHARED /* recommended */,
                     miOpenedCameraFd, stV4l2Buffer.m.offset);
        if (MAP_FAILED == mpstV4LBuffers[iReqBuffersNum].pStart) {
            V4L_LOGE("mmap failed\n");
            break;
        }
        ::memset(mpstV4LBuffers[iReqBuffersNum].pStart, 0, mpstV4LBuffers[iReqBuffersNum].iLength);
    }
    if (iReqBuffersNum < stV4l2RequestBuffers.count) {
        V4L_LOGE(" error in request v4l2 buffer ");
        return false;
    }
    V4L_LOGI(" request v4l2 buffer finished");
    unsigned int index = 0;
    for (index = 0; index < iReqBuffersNum; ++index) {
        struct v4l2_buffer buf;
        std::memset(&buf, 0, sizeof(struct v4l2_buffer));

        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = index;

        //enqueue all requested frame buffers onto the capture queue
        if (-1 == ioctl(miOpenedCameraFd, VIDIOC_QBUF, &buf)) {
            V4L_LOGE("VIDIOC_QBUF failed with index= %d", index);
            break;
        }
    }
    if (index < iReqBuffersNum) {
        V4L_LOGE(" error in  v4l2 buffer queue ");
        return false;
    }
    return true;
}

First the frame format is set by filling in the v4l2_format struct: the pixel format (yuyv/mjpeg), the width and height, and so on.
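
One caveat: VIDIOC_S_FMT may adjust the requested values to the nearest mode the hardware supports and write the result back into the struct, so it is worth comparing what was granted against what was asked for; a sketch with a hypothetical helper:

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <cstring>

//SetAndVerifyFormat is a hypothetical helper, not part of the demo class
static bool SetAndVerifyFormat(int fd, unsigned int w, unsigned int h) {
    struct v4l2_format stFmt;
    std::memset(&stFmt, 0, sizeof(stFmt));
    stFmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    stFmt.fmt.pix.width = w;
    stFmt.fmt.pix.height = h;
    stFmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    stFmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
    if (ioctl(fd, VIDIOC_S_FMT, &stFmt) < 0)
        return false;
    //the driver reports the format it actually applied in the same struct
    return stFmt.fmt.pix.width == w && stFmt.fmt.pix.height == h &&
           stFmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV;
}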

Then frame buffers are requested, four in this case. Once the request succeeds, a matching bookkeeping array is allocated in user memory; each buffer's length and offset in kernel space is then queried and the buffer is mapped into the user address space with mmap.

Finally, all of the requested frame buffers are enqueued onto the capture queue, and the capture setup is complete.
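
A related optional step the class skips is requesting a capture frame rate with VIDIOC_S_PARM; a sketch (not every UVC driver honors the request):

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <cstring>

//SetFrameRate is a hypothetical helper, not part of the demo class
static bool SetFrameRate(int fd, unsigned int fps) {
    struct v4l2_streamparm stParm;
    std::memset(&stParm, 0, sizeof(stParm));
    stParm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    stParm.parm.capture.timeperframe.numerator = 1;      //1 / fps seconds per frame
    stParm.parm.capture.timeperframe.denominator = fps;
    return ioctl(fd, VIDIOC_S_PARM, &stParm) == 0;
}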

Next, video capture is started.

bool V4LAchieve::StartCameraCapture() {
    if (miOpenedCameraFd < 0) return false;
    enum v4l2_buf_type type;
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    //start streaming: the driver begins filling the queued buffers
    if (-1 == ioctl(miOpenedCameraFd, VIDIOC_STREAMON, &type)) {
        V4L_LOGE(" Start VIDIOC_STREAMON failed \n");
        return false;
    }
    V4L_LOGI("VIDIOC_STREAMON Start collecting capture graph ok");
    return true;
}

Once capture is started, the driver continuously fills the queued buffers with frames and places them on the outgoing queue.

The application can then fetch images from the buffers.

bool V4LAchieve::CameraVideoGetLoop() {
    fd_set fds;
    struct timeval tv;
    //clear the file descriptor set
    FD_ZERO (&fds);
    //add the camera descriptor to the set
    FD_SET (miOpenedCameraFd, &fds);
    /* Timeout. */
    tv.tv_sec = 2;
    tv.tv_usec = 0;
    //wait for the descriptor to become readable (a frame is ready); tv is the timeout
    int r = ::select(miOpenedCameraFd + 1, &fds, NULL, NULL, &tv);
    if (-1 == r) {
        if (EINTR == errno)
            return false;
        V4L_LOGE("select err");
        return false;
    }
    if (0 == r) {
        //select timed out: no frame became ready within 2 seconds
        V4L_LOGE("select timeout");
        return false;
    }
    // start to dequeue image systemCameraFrame
    struct v4l2_buffer buf;
    std::memset(&buf, 0, sizeof(struct v4l2_buffer));
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    //dequeue a buffer that already contains captured data
    if (ioctl(miOpenedCameraFd, VIDIOC_DQBUF, &buf) < 0) {
        V4L_LOGE("Dequeue buffer failed!");
        return false;
    }
    assert (buf.index < (unsigned long) miBufferCount);

    m_iYUYV422Len = mpstV4LBuffers[buf.index].iLength;
    //copy one frame out of the mapped buffer
    ::memcpy(m_pYUYV422, mpstV4LBuffers[buf.index].pStart, mpstV4LBuffers[buf.index].iLength);
    //requeue the buffer so the driver can reuse it
    ioctl(miOpenedCameraFd, VIDIOC_QBUF, &buf);
    return true;
}

The main flow is: dequeue a filled frame buffer from the capture output queue, memcpy one frame of data from it into m_pYUYV422, and finally requeue the buffer.

This way, every time the application calls CameraVideoGetLoop, it can read the latest frame from m_pYUYV422.
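
Putting it together, a hedged usage sketch of the wrapper (assuming the frame members are reachable from the caller, as in the demo):

//CaptureDemo is a hypothetical driver function illustrating the call order
void CaptureDemo() {
    V4LAchieve camera(0, 640, 480);          // /dev/video0 at 640x480
    if (!camera.OpenCamera())                //QUERYCAP -> S_FMT -> REQBUFS/mmap -> STREAMON
        return;
    for (int i = 0; i < 100; ++i) {
        if (!camera.CameraVideoGetLoop())    //select + DQBUF + memcpy + QBUF
            continue;                        //timeout or transient error, try again
        //m_pYUYV422 now holds the latest YUYV frame of m_iYUYV422Len bytes,
        //ready for conversion (e.g. YUYV -> RGB) or display
    }
    camera.CloseCamera();                    //STREAMOFF -> munmap -> free -> close
}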

bool V4LAchieve::StopCameraCapture() {
    if (miOpenedCameraFd < 0) return false;
    enum v4l2_buf_type type;
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    //stop streaming: the driver stops filling the buffers
    if (-1 == ioctl(miOpenedCameraFd, VIDIOC_STREAMOFF, &type)) {
        V4L_LOGE(" Stop VIDIOC_STREAMOFF failed \n");
        return false;
    }
    V4L_LOGI("VIDIOC_STREAMOFF Stop collecting capture graph ok");
    return true;
}

bool V4LAchieve::CloseCamera() {
    //stop video capture
    StopCameraCapture();
    //unmap the frame buffers from user space
    for (int index = 0; index < (mpstV4LBuffers ? miBufferCount : 0); index++)
        ::munmap(mpstV4LBuffers[index].pStart, mpstV4LBuffers[index].iLength);
    //free the user-space bookkeeping array
    if (mpstV4LBuffers) {
        free(mpstV4LBuffers);
        mpstV4LBuffers = NULL;
    }
    //close the device file
    if (miOpenedCameraFd != -1) {
        ::close(miOpenedCameraFd);
        miOpenedCameraFd = -1;
    }
    return true;
}

When frames are no longer needed, first stop capture with an ioctl VIDIOC_STREAMOFF, then unmap the kernel frame buffers from user space, free the user-space buffer bookkeeping memory, and finally close the device file.

V4LAchieve::~V4LAchieve() {
    delete[] m_pYUYV422;
}

Finally, the destructor frees the memory held by m_pYUYV422.

NDK basics series:

JNI and NDK Programming (1): The JNI Development Workflow
JNI and NDK Programming (2): The NDK Development Workflow
JNI and NDK Programming (3): JNI Data Types and Type Signatures
JNI and NDK Programming (4): How JNI Calls Java Methods

Complete demo:

https://github.com/GavinAndre/JNIDemo

Reference

https://blog.csdn.net/simonforfuture/article/details/78743800
