I. Command-line usage
List all supported filters:
ffmpeg -filters
View the parameter options of the doubleweave and weave filters:
ffmpeg -h filter=doubleweave
ffmpeg -h filter=weave
The official documentation describes the two filters as follows:
The weave takes a field-based video input and join each two sequential
fields into single frame, producing a new double height clip with half
the frame rate and half the frame count. The doubleweave works same as
weave but without halving frame rate and frame count.
In other words, after doubleweave interleaves the odd (top) and even (bottom) fields, the output frame rate is unchanged, whereas weave halves it.
https://ffmpeg.org/ffmpeg-all.html#Video-Filters
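To see the difference for yourself, here is a quick check (fields.mp4 is a stand-in for any field-based input, not a file from this post): run each filter over the same clip, then compare the resulting frame rate and frame count with ffprobe:

ffmpeg -i fields.mp4 -vf weave=first_field=top weave_out.mp4
ffmpeg -i fields.mp4 -vf doubleweave=first_field=top doubleweave_out.mp4
ffprobe -v error -select_streams v:0 -show_entries stream=r_frame_rate,nb_frames weave_out.mp4
ffprobe -v error -select_streams v:0 -show_entries stream=r_frame_rate,nb_frames doubleweave_out.mp4

The weave output should report half the r_frame_rate and half the nb_frames of the doubleweave output.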
Reference (thanks to the senior colleague in my department who shared the image; following that lead is what produced this post):
Both of the following commands will play the stream; in the second one, first_field=top is applied by default.
ffplay -vf "weave=first_field=top" -i westLife.mp4
ffplay -fflags nobuffer -vf weave udp://127.0.0.1:6017
II. Code implementation
1. Points to note in the implementation
(1) Note that this code was pulled out of a project and will not run as-is. Filter code is mostly procedural boilerplate: the only filter-specific part below is the first line, which sets the parameters of the weave filter; everything else is the same as for any other filter, since the usage flow is essentially fixed regardless of which filter you use. For that general flow, see Lei Xiaohua's (雷神) blog.
(2) Note the change at avfilter_get_by_name("buffersink"): on FFmpeg 4.3 the old name "ffbuffersink" no longer resolves (avfilter_get_by_name returns NULL), and the subsequent avfilter_graph_create_filter call fails:
the function returns: -12
the error message is: Cannot allocate memory
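A minimal defensive sketch (assuming the FFmpeg 4.3 API): check the lookup result before building the graph, since avfilter_get_by_name simply returns NULL for a name that no longer exists:

const AVFilter *buffersink = avfilter_get_by_name("buffersink");
if (!buffersink)
{
    printf("buffersink filter not found\n");
    return AVERROR_FILTER_NOT_FOUND;
}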
(3) Two consecutive video frames must be pushed into av_buffersrc_add_frame before av_buffersink_get_frame produces one interleaved frame; until then av_buffersink_get_frame fails as follows:
the function returns: -11
the error message is: Resource temporarily unavailable
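That -11 is AVERROR(EAGAIN) and simply means "feed more input", not a real failure. The calling pattern this implies, as a sketch (buffersrc_ctx, buffersink_ctx and the frames are the same members used in the code below):

int re = av_buffersink_get_frame(buffersink_ctx, uvyv_frame_weave);
if (re == AVERROR(EAGAIN))
{
    /* only one field buffered so far: push the next frame and retry */
}
else if (re < 0)
{
    /* a real error */
}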
(4) Also be careful with av_buffersrc_add_frame: it takes ownership of the frame's data and resets frame->data, but it does not free the AVFrame struct itself. That is why the sample code at the end calls av_frame_get_buffer after each call to this function, to reallocate frame->data. Conversely, av_buffersink_get_frame allocates frame->data for you, but not the AVFrame struct itself; that you must allocate yourself (e.g. with av_frame_alloc).
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
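An alternative worth knowing about (a sketch, not what the project code below does): av_buffersrc_add_frame_flags with AV_BUFFERSRC_FLAG_KEEP_REF makes the buffer source take its own reference and leaves the caller's frame untouched, so the av_frame_get_buffer reallocation becomes unnecessary:

if (av_buffersrc_add_frame_flags(buffersrc_ctx, uvyv_frame,
                                 AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
{
    printf("Error while adding frame.\n");
}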
(5) Also, FFmpeg 4.3 no longer ships the libavfilter/avfiltergraph.h header; it has been merged into libavfilter/avfilter.h.
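For reference, a typical include set on FFmpeg 4.3 (compiled as C++, hence the extern "C" wrapper):

extern "C" {
#include <libavfilter/avfilter.h>   /* avfiltergraph.h was merged into this header */
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
}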
(6) The function int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame) also needs care: the AVFrame's properties must match what was configured on the AVFilterContext, e.g. the video width and height (the AVFilterContext configuration is the args variable in the code at the end), exactly as Interlace() below re-fills them before each call; otherwise you get the following error:
Changing video frame properties on the fly is not supported by all filters.
2. The implementation code

const char *filter_descr = "weave=first_field=top";

int FeedStream::init_filters(const char *filters_descr)
{
    char args[512];
    int ret;
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    const AVFilter *buffersink = avfilter_get_by_name("buffersink"); /* note the change here: "ffbuffersink" no longer exists on 4.3 */
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_UYVY422, AV_PIX_FMT_NONE}; /* or e.g. AV_PIX_FMT_YUV420P */
    AVBufferSinkParams *buffersink_params;

    filter_graph = avfilter_graph_alloc();
    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             decodec_ctx_v->width, decodec_ctx_v->height, AV_PIX_FMT_UYVY422,
             decodec_ctx_v->time_base.num, decodec_ctx_v->time_base.den,
             decodec_ctx_v->sample_aspect_ratio.num, decodec_ctx_v->sample_aspect_ratio.den);
    printf("args: %s\n", args);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0)
    {
        printf("Cannot create buffer source, ret = %d\n", ret);
        return ret;
    }
    /* buffer video sink: to terminate the filter chain. */
    buffersink_params = av_buffersink_params_alloc();
    buffersink_params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, buffersink_params, filter_graph);
    av_free(buffersink_params);
    if (ret < 0)
    {
        printf("Cannot create buffer sink, ret = %d\n", ret);
        ErrorFunc(ret);
    }
    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                   &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(filter_graph, NULL);

    /* parse_ptr consumed what it linked; free whatever is left (also on error). */
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret < 0 ? ret : 0;
}
int FeedStream::Interlace()
{
    /* av_buffersrc_add_frame takes ownership of uvyv_frame's data and resets
       frame->data[] (see note (4) above). */
    if (av_buffersrc_add_frame(buffersrc_ctx, uvyv_frame) < 0)
    {
        printf("Error while adding frame.\n");
        ErrorFunc(1);
    }

    /* Re-fill the properties (they must match the buffer source's args, see
       note (6)) and reallocate frame->data for the next field. */
    uvyv_frame->format = AV_PIX_FMT_UYVY422;
    uvyv_frame->width = decodec_ctx_v->width;
    uvyv_frame->height = decodec_ctx_v->height;
    uvyv_frame->pts = 0;
    int temp_re = av_frame_get_buffer(uvyv_frame, 32);
    if (temp_re != 0)
    {
        printf("failed to get buffer for frame\n");
        ErrorFunc(temp_re);
    }

    /* Returns AVERROR(EAGAIN) (-11) until two fields have been fed in, see
       note (3); the caller just feeds the next frame and retries. */
    int re = av_buffersink_get_frame(buffersink_ctx, uvyv_frame_weave);
    if (re < 0)
        printf("buffersink not ready yet, re = %d\n", re);
    return re;
}
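Finally, a hypothetical calling sketch to tie the two functions together (read_one_field and consume are illustrative placeholders, not part of the original project): initialize the graph once, then keep pushing decoded UYVY fields through Interlace() until it yields a woven frame.

FeedStream feed;
if (feed.init_filters(filter_descr) < 0)
    return -1;
while (read_one_field(feed.uvyv_frame))        /* hypothetical: fills one UYVY field */
{
    int re = feed.Interlace();
    if (re >= 0)
    {
        consume(feed.uvyv_frame_weave);        /* hypothetical: use the double-height frame */
        av_frame_unref(feed.uvyv_frame_weave); /* release the sink's reference before reuse */
    }
    /* re == AVERROR(EAGAIN) just means: feed the next field */
}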