FFmpeg 视频添加水印

1.简介

本示例在视频中叠加一张logo图片作为水印,并将添加好水印的图像帧保存到本地。

2.流程

2.1打开输入的文件

首先打开输入的视频文件,查找到视频流索引,找到对应的视频解码器,拷贝一些重要的参数到解码器,最后打开解码器。

	//av_register_all();
	avformat_network_init();

	///打开输入的流
	int ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
	if (ret != 0)
	{
		printf("Couldn't open input stream.\n");
		return -1;
	}

	//查找流信息
	if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}

	//找到视频流索引
	video_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);

	AVStream* st = fmt_ctx->streams[video_index];

	AVCodec* codec = nullptr;

	//找到解码器
	codec = avcodec_find_decoder(st->codecpar->codec_id);
	if (!codec)
	{
		fprintf(stderr, "Codec not found\n");
		return -1;
	}

	//申请AVCodecContext
	dec_ctx = avcodec_alloc_context3(codec);
	if (!dec_ctx)
	{
		return -1;
	}

	avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_index]->codecpar);

	//打开解码器
	if ((ret = avcodec_open2(dec_ctx, codec, NULL) < 0))
	{
		return -1;
	}

	return 0;

2.2初始化滤镜

2.2.1获取滤镜处理的源

获取滤镜处理的源(buffer)滤镜及输出(buffersink)滤镜,同时申请输入和输出的滤镜结构AVFilterInOut。

	const AVFilter* buffersrc = avfilter_get_by_name("buffer");
	const AVFilter* buffersink = avfilter_get_by_name("buffersink");
	AVFilterInOut* outputs = avfilter_inout_alloc();
	AVFilterInOut* inputs = avfilter_inout_alloc();

2.2.2处理AVFilterGraph

需要的AVFilter和AVFilterInOut申请完成之后,需要申请一个AVFilterGraph,用来存储Filter的in和out描述信息

    AVFilterGraph* filter_graph = NULL;
	
    filter_graph = avfilter_graph_alloc();
	if (!outputs || !inputs || !filter_graph)
	{
		ret = AVERROR(ENOMEM);
		return ret;
	}

2.2.3创建AVFilterContext

接下来创建一个AVFilterContext结构用来存储Filter的处理内容,包括input与output的Filter信息,在创建input信息时,需要加入原视频的相关信息,比如pix_fmt、time_base等。

首先输入参数:

	char args[512];
    snprintf(args, sizeof(args),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
		time_base.num, time_base.den,
		dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

然后创建AVFilterContext:

	AVFilterContext* buffersink_ctx = NULL;
    AVFilterContext* buffersrc_ctx = NULL;

    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
		args, NULL, filter_graph);
	if (ret < 0) 
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
		return ret;
	}

	/* buffer video sink: to terminate the filter chain. */
	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		NULL, NULL, filter_graph);
	if (ret < 0) 
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
		return ret;
	}

2.2.4设置其他参数

创建完输入与输出的AVFilterContext之后,如果还需要设置一些其他与Filter相关的参数,可以通过av_opt_set_int_list进行设置,例如设置输出的pix_fmt参数。

	ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
		AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (ret < 0) 
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
		return ret;
	}

2.2.5建立滤镜解析器

参数设置完毕后,可以针对前面设置的Filter相关的内容建立滤镜解析器。

    const char* filter_descr = "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]";

    outputs->name = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx = 0;
	outputs->next = NULL;

	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = NULL;

	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
		&inputs, &outputs, NULL)) < 0)
	{
		return ret;
	}

	if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
	{
		return ret;
	}

2.3读取数据解码并获取到添加好水印的数据。

	while (av_read_frame(fmt_ctx, pkt) >= 0)
	{
		if (pkt->stream_index == video_index)
		{
			int ret = avcodec_send_packet(dec_ctx, pkt);
			if (ret >= 0)
			{
				ret = avcodec_receive_frame(dec_ctx, frame);
				if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
				{
					continue;
				}
				else if (ret < 0)
				{
					continue;
				}

				frame->pts = frame->best_effort_timestamp;

				/* push the decoded frame into the filtergraph */
				if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) 
				{
					av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
					break;
				}

				/* pull filtered frames from the filtergraph */
				while (1) 
				{
					ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
					if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
						break;
					if (ret < 0)
						break;

					switch (dec_ctx->pix_fmt)
					{
					case AV_PIX_FMT_YUV420P:
					{

						int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, filt_frame->width, filt_frame->height, 1);

						char fileName[20] = { 0 };
						sprintf(fileName, "img2/%d.yuv", dec_ctx->frame_number);

						FILE* fp;
						fp = fopen(fileName, "wb");

						for (int i = 0; i < filt_frame->height; i++) 
						{
							fwrite(filt_frame->data[0] + filt_frame->linesize[0] * i, 1, filt_frame->width, fp);
						}
						for (int i = 0; i < filt_frame->height / 2; i++) 
						{
							fwrite(filt_frame->data[1] + filt_frame->linesize[1] * i, 1, filt_frame->width / 2, fp);
						}
						for (int i = 0; i < filt_frame->height / 2; i++)
						{
							fwrite(filt_frame->data[2] + filt_frame->linesize[2] * i, 1, filt_frame->width / 2, fp);
						}

						fclose(fp);
					}
					break;
					default:
						return -1;
					}

					av_frame_unref(filt_frame);
					
				}
				av_frame_unref(frame);
			}
		}
	}

3.效果

logo图如下

效果图如下,可以看见添加到了左上角。

4.源码

#include "pch.h"
#include <iostream>
#include <Windows.h>

extern "C"
{
#include "libavformat/avformat.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavutil/imgutils.h" 
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
};

/* Demuxer context for the input file (owned here, closed in main). */
static AVFormatContext* fmt_ctx = NULL;
/* Decoder context for the selected video stream. */
static AVCodecContext* dec_ctx = NULL;
/* Filter-graph endpoints: frames are pushed into buffersrc_ctx and
 * watermarked frames are pulled from buffersink_ctx. */
AVFilterContext* buffersink_ctx = NULL;
AVFilterContext* buffersrc_ctx = NULL;
AVFilterGraph* filter_graph = NULL;

/* Index of the video stream inside fmt_ctx->streams (-1 = not found yet). */
int video_index = -1;

/* Filter description: load logo.jpg via the movie filter and overlay it
 * at position (5,5) on the input video. */
const char* filter_descr = "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]";
static int64_t last_pts = AV_NOPTS_VALUE;  // NOTE(review): currently unused


/*
 * Open the input media file, locate its video stream and open a decoder
 * for it.  On success fmt_ctx, dec_ctx and video_index (file-scope
 * globals) are initialized.
 *
 * filename: path/URL of the input file.
 * Returns 0 on success, -1 on any failure.
 */
static int open_input_file(const char* filename)
{
	//av_register_all();
	avformat_network_init();

	/* Open the input stream and read the container header. */
	int ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
	if (ret != 0)
	{
		printf("Couldn't open input stream.\n");
		return -1;
	}

	/* Probe the streams so that codec parameters are populated. */
	if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}

	/* Locate the best video stream.  Fixed: the original never checked
	 * the return value and could index streams[] with a negative index. */
	video_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if (video_index < 0)
	{
		fprintf(stderr, "No video stream found in input\n");
		return -1;
	}

	AVStream* st = fmt_ctx->streams[video_index];

	/* Find a decoder matching the stream's codec id. */
	AVCodec* codec = avcodec_find_decoder(st->codecpar->codec_id);
	if (!codec)
	{
		fprintf(stderr, "Codec not found\n");
		return -1;
	}

	/* Allocate the decoding context. */
	dec_ctx = avcodec_alloc_context3(codec);
	if (!dec_ctx)
	{
		return -1;
	}

	/* Copy stream parameters (width/height/pix_fmt/extradata...) into the
	 * decoder context.  Fixed: return value was previously ignored. */
	if (avcodec_parameters_to_context(dec_ctx, st->codecpar) < 0)
	{
		return -1;
	}

	/* Open the decoder.  Fixed: the original wrote
	 *   if ((ret = avcodec_open2(...) < 0))
	 * which assigned the boolean comparison to ret, not the error code. */
	if ((ret = avcodec_open2(dec_ctx, codec, NULL)) < 0)
	{
		return -1;
	}

	return 0;
}

/*
 * Build the watermark filter graph described by filters_descr.
 * Creates the "buffer" source (fed with decoded frames) configured from
 * the input stream's geometry/time_base, the "buffersink" output
 * (constrained to YUV420P), then parses and configures the graph.
 * On success buffersrc_ctx / buffersink_ctx / filter_graph are set.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int init_filters(const char* filters_descr)
{
	char args[512];
	int ret = 0;
	const AVFilter* buffersrc = avfilter_get_by_name("buffer");
	const AVFilter* buffersink = avfilter_get_by_name("buffersink");
	AVFilterInOut* outputs = avfilter_inout_alloc();
	AVFilterInOut* inputs = avfilter_inout_alloc();
	AVRational time_base = fmt_ctx->streams[video_index]->time_base;
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };

	filter_graph = avfilter_graph_alloc();
	if (!outputs || !inputs || !filter_graph)
	{
		ret = AVERROR(ENOMEM);
		goto end;
	}

	/* buffer video source: the decoded frames from the decoder will be inserted here. */
	snprintf(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
		time_base.num, time_base.den,
		dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

	ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
		args, NULL, filter_graph);
	if (ret < 0) 
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
		goto end;
	}

	/* buffer video sink: to terminate the filter chain. */
	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		NULL, NULL, filter_graph);
	if (ret < 0) 
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
		goto end;
	}

	/* Constrain the sink's output to YUV420P so main() can dump planes. */
	ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
		AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (ret < 0) 
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
		goto end;
	}

	/* Endpoints for the graph parser: "in" is the graph's input (our
	 * buffer source), "out" its output (our buffer sink). */
	outputs->name = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx = 0;
	outputs->next = NULL;

	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = NULL;

	if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
		&inputs, &outputs, NULL)) < 0)
	{
		goto end;
	}

	ret = avfilter_graph_config(filter_graph, NULL);

end:
	/* Fixed leak: the original returned early on every error without
	 * freeing the AVFilterInOut lists; freeing NULL here is a no-op. */
	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);

	return ret;
}

int main()
{
///1.打开文件
	const char* inputUrl = "test.mp4";

	int ret = -1;
	if ((ret = open_input_file(inputUrl) < 0))
	{
		return -1;
	}

///2.初始化滤镜
	if ((ret = init_filters(filter_descr)) < 0)
	{
		return -1;
	}

	AVPacket* pkt = av_packet_alloc();
	//av_init_packet(pkt);

	AVFrame* frame = av_frame_alloc();
	AVFrame *filt_frame = av_frame_alloc();

	while (av_read_frame(fmt_ctx, pkt) >= 0)
	{
		if (pkt->stream_index == video_index)
		{
			int ret = avcodec_send_packet(dec_ctx, pkt);
			if (ret >= 0)
			{
				ret = avcodec_receive_frame(dec_ctx, frame);
				if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
				{
					continue;
				}
				else if (ret < 0)
				{
					continue;
				}

				frame->pts = frame->best_effort_timestamp;

				/* push the decoded frame into the filtergraph */
				if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) 
				{
					av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
					break;
				}

				/* pull filtered frames from the filtergraph */
				while (1) 
				{
					ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
					if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
						break;
					if (ret < 0)
						break;

					switch (dec_ctx->pix_fmt)
					{
					case AV_PIX_FMT_YUV420P:
					{

						int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, filt_frame->width, filt_frame->height, 1);

						char fileName[20] = { 0 };
						sprintf(fileName, "img2/%d.yuv", dec_ctx->frame_number);

						FILE* fp;
						fp = fopen(fileName, "wb");

						for (int i = 0; i < filt_frame->height; i++) 
						{
							fwrite(filt_frame->data[0] + filt_frame->linesize[0] * i, 1, filt_frame->width, fp);
						}
						for (int i = 0; i < filt_frame->height / 2; i++) 
						{
							fwrite(filt_frame->data[1] + filt_frame->linesize[1] * i, 1, filt_frame->width / 2, fp);
						}
						for (int i = 0; i < filt_frame->height / 2; i++)
						{
							fwrite(filt_frame->data[2] + filt_frame->linesize[2] * i, 1, filt_frame->width / 2, fp);
						}

						fclose(fp);
					}
					break;
					default:
						return -1;
					}

					av_frame_unref(filt_frame);
					
				}
				av_frame_unref(frame);
			}
		}
	}

	avfilter_graph_free(&filter_graph);
	avcodec_close(dec_ctx);
	avcodec_free_context(&dec_ctx);
	avformat_close_input(&fmt_ctx);
	av_frame_free(&frame);
	av_frame_free(&filt_frame);
	av_packet_free(&pkt);

	return 0;
}

猜你喜欢

转载自blog.csdn.net/wzz953200463/article/details/125983491