FFmpeg 4 Tutorial 11: avfilter + OpenCV multi-picture compositing (MCU), picture-in-picture, and adding a logo

Tested with VS2017 (VC++) and FFmpeg 4.0.2.

For setting up the FFmpeg build environment, search the web for "vs2017 ffmpeg".

The basic flow is to first collect the decoded AVFrames from each input into a queue or linked list, then push one frame per input into the filter graph with av_buffersrc_add_frame and pull the composited frame out of the sink. The filter description string used here is:

[in0]setpts=PTS-STARTPTS,scale=640x480[base]; [in1]setpts=PTS-STARTPTS,scale=320x480[left]; [in2]setpts=PTS-STARTPTS,scale=320x480[right]; [base][left]overlay=shortest=1:x=0:y=0[tmp1];[tmp1][right]overlay=shortest=1:x=320:y=0[out]
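A rough sketch of the per-frame loop (assuming the field names bgbuffersrc_ctx, vbuffersrc_ctx and vfilter_ctx_sink from init_filters() below, and that bg_frame, left_frame and right_frame have already been popped from the queues):

AVFrame *filt_frame = av_frame_alloc();

/* push one decoded frame per input: background (in0) plus the two sub-pictures (in1, in2) */
if (av_buffersrc_add_frame(pVideo[0].bgbuffersrc_ctx, bg_frame) < 0 ||
    av_buffersrc_add_frame(pVideo[0].vbuffersrc_ctx, left_frame) < 0 ||
    av_buffersrc_add_frame(pVideo[1].vbuffersrc_ctx, right_frame) < 0) {
    av_log(NULL, AV_LOG_ERROR, "Error feeding the filter graph\n");
}

/* pull every composited frame that is ready; the sink delivers BGR24 (see pix_fmts below) */
while (av_buffersink_get_frame(pVideo[0].vfilter_ctx_sink, filt_frame) >= 0) {
    /* display or encode filt_frame here */
    av_frame_unref(filt_frame);
}
av_frame_free(&filt_frame);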

For other details, such as writing the output or showing a default image once an input finishes capturing, join the discussion group (QQ group 261074724) or see the earlier tutorials.

#include <libavutil/opt.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>

static int init_filters(const char *filters_descr, VideoState2 *pVideo)
{

    char args[512], pad_name[20];
    int ret = 0, vnum = 2 + 1;   /* two overlaid inputs plus the background layer */
    const AVFilter **buffersrc = (const AVFilter **)av_malloc(vnum * sizeof(*buffersrc));
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");

    AVFilterInOut **outputs = (AVFilterInOut **)av_malloc(vnum * sizeof(*outputs));
    AVFilterInOut *inputs = avfilter_inout_alloc();
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE };  /* BGR24 so the result maps directly to a cv::Mat */

    AVFilterGraph *_filter_graph = avfilter_graph_alloc();
    for (int i = 0; i < vnum; i++)
    {
        buffersrc[i] = avfilter_get_by_name("buffer");
        outputs[i] = avfilter_inout_alloc();
        pVideo[i].vbuffersrc_ctx = NULL;
        pVideo[i].bgbuffersrc_ctx = NULL;
    }
    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    if (!buffersrc || !buffersink) {
        av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
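    /* "in0": a 640x480 BGR24 background canvas; time base taken from the first encoder context */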
    snprintf(args, sizeof(args),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        640, 480, AV_PIX_FMT_BGR24,
        pVideo[0].envideocodecCtx->time_base.num, pVideo[0].envideocodecCtx->time_base.den,
        pVideo[0].envideocodecCtx->sample_aspect_ratio.num, pVideo[0].envideocodecCtx->sample_aspect_ratio.den);
    snprintf(pad_name, sizeof(pad_name), "in%d", 0);
    ret = avfilter_graph_create_filter(&pVideo[0].bgbuffersrc_ctx, buffersrc[0], pad_name, args, NULL, _filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source0\n");
        goto end;
    }
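    /* one buffer source per real input (pads "in1" and "in2"); "in0" above is the
     * 640x480 background the sub-pictures are overlaid on */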
    for (int i = 0; i < vnum - 1; i++) {
        snprintf(args, sizeof(args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            pVideo[i].envideocodecCtx->width, pVideo[i].envideocodecCtx->height, pVideo[i].envideocodecCtx->pix_fmt,
            pVideo[i].envideocodecCtx->time_base.num, pVideo[i].envideocodecCtx->time_base.den,
            pVideo[i].envideocodecCtx->sample_aspect_ratio.num, pVideo[i].envideocodecCtx->sample_aspect_ratio.den);
        snprintf(pad_name, sizeof(pad_name), "in%d", i + 1);
        ret = avfilter_graph_create_filter(&pVideo[i].vbuffersrc_ctx, buffersrc[i], pad_name, args, NULL, _filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }
    }
    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&pVideo[0].vfilter_ctx_sink, buffersink, "out", NULL, NULL, _filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        goto end;
    }

    ret = av_opt_set_int_list(pVideo[0].vfilter_ctx_sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
        goto end;
    }

    /*
    * Set the endpoints for the filter graph. The filter_graph will
    * be linked to the graph described by filters_descr.
    */
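    /* outputs[0] is the background pad "in0" (bgbuffersrc_ctx); outputs[i] for
     * i > 0 binds pad "in<i>" to pVideo[i-1].vbuffersrc_ctx */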

    for (int i = 0; i < vnum; i++)
    {
        snprintf(pad_name, sizeof(pad_name), "in%d", i);
        outputs[i]->name = av_strdup(pad_name);
        if (i == 0) { outputs[i]->filter_ctx = pVideo[0].bgbuffersrc_ctx; }
        else { outputs[i]->filter_ctx = pVideo[i - 1].vbuffersrc_ctx; }
        outputs[i]->pad_idx = 0;
        if (i == vnum - 1)
            outputs[i]->next = NULL;
        else
            outputs[i]->next = outputs[i + 1];
    }

    /*
    * The buffer sink input must be connected to the output pad of
    * the last filter described by filters_descr; since the last
    * filter output label is not specified, it is set to "out" by
    * default.
    */
    inputs->name = av_strdup("out");
    inputs->filter_ctx = pVideo[0].vfilter_ctx_sink;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    ret = avfilter_graph_parse_ptr(_filter_graph, filters_descr, &inputs, outputs, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Failed to call avfilter_graph_parse_ptr\n");
        goto end;
    }

    if ((ret = avfilter_graph_config(_filter_graph, NULL)) < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "avfilter_graph_config failed: %d\n", ret);
        goto end;
    }

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(outputs);   /* frees whatever is left of the output chain */
    av_free(outputs);               /* the pointer array itself */
    av_free(buffersrc);
    /* Do NOT free the buffer source contexts here: they are owned by the filter
     * graph and are still needed later for av_buffersrc_add_frame(). */

    if (ret < 0) {
        avfilter_graph_free(&_filter_graph);
    } else {
        char *dump = avfilter_graph_dump(_filter_graph, NULL);
        if (dump)
            printf("%s\n", dump);
        av_free(dump);
        /* _filter_graph must be kept (e.g. stored alongside pVideo) so the graph
         * can be used for filtering and freed when the session ends. */
    }

    return ret;
}
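A usage sketch: calling init_filters() with the three-input layout string quoted above (pVideo is assumed to be an array of VideoState2 whose envideocodecCtx members are already opened):

const char *desc =
    "[in0]setpts=PTS-STARTPTS,scale=640x480[base];"
    "[in1]setpts=PTS-STARTPTS,scale=320x480[left];"
    "[in2]setpts=PTS-STARTPTS,scale=320x480[right];"
    "[base][left]overlay=shortest=1:x=0:y=0[tmp1];"
    "[tmp1][right]overlay=shortest=1:x=320:y=0[out]";
if (init_filters(desc, pVideo) < 0) {
    av_log(NULL, AV_LOG_ERROR, "init_filters failed\n");
}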

For combining the output with OpenCV, scaling, adding a logo, or showing a default image after decoding finishes, join the group.
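As a rough example of the OpenCV side (a sketch, not the exact code from this post): the BGR24 frame pulled from the sink can be wrapped in a cv::Mat without copying and displayed directly:

#include <opencv2/opencv.hpp>

/* filt_frame is the BGR24 AVFrame obtained from av_buffersink_get_frame() */
cv::Mat img(filt_frame->height, filt_frame->width, CV_8UC3,
            filt_frame->data[0], filt_frame->linesize[0]);
cv::imshow("mcu", img);
cv::waitKey(1);

For the logo case, one common option (not necessarily the author's exact approach) is to extend the filter string with an extra movie=logo.png source plus one more overlay stage; scaling is simply a matter of adjusting the scale=WxH arguments already shown above.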

Discussion group: 261074724

Reposted from blog.csdn.net/Java_lilin/article/details/85126308