FFmpeg Advanced: Adding Background Music to a Video


In short-video apps such as Douyin and Kuaishou, videos are often enhanced with a fun background track. The FFmpeg libraries let us do the same thing, so this article walks through how to add background music to a video with FFmpeg. The overall workflow is shown in the figure below:

[Figure: demux the video and the music file, mix the two audio streams with a filter, then mux the mixed audio back with the original video stream]

First we demultiplex the original video to obtain its video stream and audio stream. We then demultiplex the music file to obtain its audio stream. A mixing filter blends the original audio stream with the background-music stream, and finally the mixed audio stream is muxed back together with the original video stream, yielding a video with background music.
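Concretely, the mixing step is expressed later in this article as an FFmpeg filtergraph description. With example values filled in (44100 Hz output sample rate and a background-music volume of 0.5; the real code builds this string at runtime), the graph looks like this:

[in1]aresample=44100[a1];[in2]aresample=44100,volume=volume=0.5[a2];[a1][a2]amix[out]

The in1 pad carries the original audio and in2 the background music; both are resampled to a common rate with aresample, the music is attenuated with volume, and amix blends the two streams into out.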

Demultiplexing the video

We demultiplex the original video to obtain its format context and the corresponding audio and video streams.

//Open the video file
//@1 file path  @2 format context  @3 audio decoder context  @4 video decoder context
//@5 audio stream  @6 video stream
int openVideoFile(const char *file, AVFormatContext *&formatContext, AVCodecContext *&audioContext,
	AVCodecContext *&videoContext, AVStream *&audioStream, AVStream *&videoStream) {
	int ret = 0;
	ret = avformat_open_input(&formatContext, file, nullptr, nullptr);
	if (ret < 0)
	{
		return -1;
	}
	ret = avformat_find_stream_info(formatContext, nullptr);
	if (ret < 0)
	{
		return -1;
	}

	//Find the first video stream and the first audio stream, and open a decoder for each
	for (unsigned int j = 0; j < formatContext->nb_streams; ++j) {
		if (formatContext->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoStream = formatContext->streams[j];
			AVCodec *codec = avcodec_find_decoder(videoStream->codecpar->codec_id);
			videoContext = avcodec_alloc_context3(codec);
			avcodec_parameters_to_context(videoContext, videoStream->codecpar);
			avcodec_open2(videoContext, codec, nullptr);
		}
		else if (formatContext->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
			audioStream = formatContext->streams[j];
			AVCodec *codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
			audioContext = avcodec_alloc_context3(codec);
			avcodec_parameters_to_context(audioContext, audioStream->codecpar);
			avcodec_open2(audioContext, codec, nullptr);
		}
		if (videoStream && audioStream) break;
	}

	if (!videoStream || !videoContext)
	{
		return -1;
	}
	if (!audioStream || !audioContext)
	{
		return -1;
	}

	return 0;
}
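A small compatibility note: the code above targets the pre-5.0 FFmpeg API. If you build against FFmpeg 5.0 or newer, avcodec_find_decoder() returns a pointer to const, so the local codec variables need a const qualifier, for example:

	const AVCodec *codec = avcodec_find_decoder(videoStream->codecpar->codec_id);

The same applies to avcodec_find_encoder() used further below.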

Demultiplexing the background music

Once the video file has been demultiplexed, we demultiplex the background-music file to obtain its audio stream, which will be mixed with the audio stream from the video file.

//Open the audio file
//@1 file path  @2 format context  @3 audio decoder context  @4 audio stream
int openAudioFile(const char *file, AVFormatContext *&formatContext, AVCodecContext *&audioContext,
	AVStream *&audioStream) {
	int ret = 0;
	ret = avformat_open_input(&formatContext, file, nullptr, nullptr);
	if (ret < 0)
	{
		return -1;
	}
	ret = avformat_find_stream_info(formatContext, nullptr);
	if (ret < 0)
	{
		return -1;
	}

	//Find the first audio stream and open a decoder for it
	for (unsigned int j = 0; j < formatContext->nb_streams; ++j) {
		if (formatContext->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
			audioStream = formatContext->streams[j];
			AVCodec *codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
			audioContext = avcodec_alloc_context3(codec);
			avcodec_parameters_to_context(audioContext, audioStream->codecpar);
			avcodec_open2(audioContext, codec, nullptr);
			break;
		}
	}
	if (!audioStream)
	{
		return -1;
	}
	return 0;
}

Wrapping the audio filter

To make the audio filter easier to use, we wrap the filter operations in a small class:

//audio_filter.h
#ifndef VIDEOBOX_AUDIO_FILTER_H
#define VIDEOBOX_AUDIO_FILTER_H

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
}

struct AudioConfig
{
	//Sample format
	AVSampleFormat format = AV_SAMPLE_FMT_NONE;
	//Sample rate
	int sample_rate = 0;
	//Channel layout
	uint64_t ch_layout = AV_CH_LAYOUT_STEREO;
	//Time base
	AVRational timebase = { 1, 1 };
	AudioConfig(AVSampleFormat format, int sample_rate, uint64_t ch_layout, AVRational timebase)
	{
		this->format = format;
		this->sample_rate = sample_rate;
		this->ch_layout = ch_layout;
		this->timebase = timebase;
	}
};

class AudioFilter
{
protected:
	//Output sink
	AVFilterContext *buffersink_ctx = nullptr;
	//Input buffer 1
	AVFilterContext *buffersrc1_ctx = nullptr;
	//Input buffer 2
	AVFilterContext *buffersrc2_ctx = nullptr;
	//Filter graph
	AVFilterGraph *filter_graph = nullptr;
	//Filter description
	const char *description = nullptr;
public:
	AudioFilter() = default;

	//Create an audio filter with two inputs
	//@param filter_descr filter description
	//@param inConfig1 configuration of input 1
	//@param inConfig2 configuration of input 2
	//@param outConfig configuration of the output
	//@return 0 on success, anything else on error
	int create(const char *filter_descr, AudioConfig* inConfig1, AudioConfig* inConfig2, AudioConfig* outConfig);

	//Create an audio filter with a single input
	//@param filter_descr filter description
	//@param inConfig configuration of the input
	//@param outConfig configuration of the output
	//@return 0 on success, anything else on error
	int create(const char *filter_descr, AudioConfig* inConfig, AudioConfig* outConfig);

	//Dump the filter graph for debugging
	void dumpGraph();

	//Run both inputs through the filter in a single call
	//@param input1 input frame 1
	//@param input2 input frame 2
	//@param result output frame
	//@return 0 on success, anything else on error
	int filter(AVFrame *input1, AVFrame* input2, AVFrame* result);

	//Feed a frame to input 1
	int addInput1(AVFrame * input);

	//Feed a frame to input 2
	int addInput2(AVFrame* input);

	//Fetch a filtered frame from the sink
	int getFrame(AVFrame* result);

	//Destroy the filter graph
	void destroy();
};

#endif

//audio_filter.cpp
#include "audio_filter.h"

int AudioFilter::create(const char *filter_descr, AudioConfig *inConfig1,
	AudioConfig *inConfig2, AudioConfig *outConfig) {
	this->description = filter_descr;
	char args[512];
	int ret = 0;

	//Buffer source and sink filters
	const AVFilter *buffersrc = avfilter_get_by_name("abuffer");
	const AVFilter *buffersink = avfilter_get_by_name("abuffersink");
	AVFilterInOut *output = avfilter_inout_alloc();
	AVFilterInOut *inputs[2];
	inputs[0] = avfilter_inout_alloc();
	inputs[1] = avfilter_inout_alloc();

	char ch_layout[128];
	int nb_channels = 0;
	int sample_fmts[] = { outConfig->format, AV_SAMPLE_FMT_NONE };

	//Allocate the filter graph
	filter_graph = avfilter_graph_alloc();
	if (!inputs[0] || !inputs[1] || !output || !filter_graph) {
		ret = AVERROR(ENOMEM);
		goto end;
	}

	//Channel layout of input 1
	nb_channels = av_get_channel_layout_nb_channels(inConfig1->ch_layout);
	av_get_channel_layout_string(ch_layout, sizeof(ch_layout), nb_channels, inConfig1->ch_layout);

	//Configure buffer source 1
	snprintf(args, sizeof(args),
		"sample_rate=%d:sample_fmt=%d:channel_layout=%s:channels=%d:time_base=%d/%d",
		inConfig1->sample_rate,
		inConfig1->format,
		ch_layout,
		nb_channels,
		inConfig1->timebase.num,
		inConfig1->timebase.den);
	ret = avfilter_graph_create_filter(&buffersrc1_ctx, buffersrc, "in1",
		args, nullptr, filter_graph);
	if (ret < 0)
	{
		goto end;
	}

	//Configure buffer source 2
	nb_channels = av_get_channel_layout_nb_channels(inConfig2->ch_layout);
	av_get_channel_layout_string(ch_layout, sizeof(ch_layout), nb_channels, inConfig2->ch_layout);
	snprintf(args, sizeof(args),
		"sample_rate=%d:sample_fmt=%d:channel_layout=%s:channels=%d:time_base=%d/%d",
		inConfig2->sample_rate,
		inConfig2->format,
		ch_layout,
		nb_channels,
		inConfig2->timebase.num,
		inConfig2->timebase.den);
	ret = avfilter_graph_create_filter(&buffersrc2_ctx, buffersrc, "in2",
		args, nullptr, filter_graph);
	if (ret < 0)
	{
		goto end;
	}

	//Create the sink
	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		nullptr, nullptr, filter_graph);
	if (ret < 0)
	{
		goto end;
	}

	ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", sample_fmts,
		AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (ret < 0) {
		goto end;
	}

	inputs[0]->name = av_strdup("in1");
	inputs[0]->filter_ctx = buffersrc1_ctx;
	inputs[0]->pad_idx = 0;
	inputs[0]->next = inputs[1];

	inputs[1]->name = av_strdup("in2");
	inputs[1]->filter_ctx = buffersrc2_ctx;
	inputs[1]->pad_idx = 0;
	inputs[1]->next = nullptr;

	output->name = av_strdup("out");
	output->filter_ctx = buffersink_ctx;
	output->pad_idx = 0;
	output->next = nullptr;

	//Note: the in/out lists passed to avfilter_graph_parse_ptr are named from the
	//graph description's point of view, i.e. the reverse of our sources and sink
	avfilter_graph_set_auto_convert(filter_graph, AVFILTER_AUTO_CONVERT_NONE);
	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
		&output, inputs, nullptr)) < 0) {
		goto end;
	}

	//Validate and configure the graph
	if ((ret = avfilter_graph_config(filter_graph, nullptr)) < 0) {
		goto end;
	}

end:
	avfilter_inout_free(inputs);
	avfilter_inout_free(&output);

	return ret;
}

int AudioFilter::create(const char *filter_descr, AudioConfig *inConfig, AudioConfig *outConfig)
{
	this->description = filter_descr;
	char args[512];
	int ret = 0;
	const AVFilter *buffersrc = avfilter_get_by_name("abuffer");
	const AVFilter *buffersink = avfilter_get_by_name("abuffersink");
	AVFilterInOut *output = avfilter_inout_alloc();
	AVFilterInOut *input = avfilter_inout_alloc();

	char ch_layout[128];
	int nb_channels = 0;
	int sample_fmts[] = { outConfig->format, AV_SAMPLE_FMT_NONE };

	filter_graph = avfilter_graph_alloc();
	if (!input || !output || !filter_graph) {
		ret = AVERROR(ENOMEM);
		goto end;
	}

	//Configure the buffer source and the sink
	nb_channels = av_get_channel_layout_nb_channels(inConfig->ch_layout);
	av_get_channel_layout_string(ch_layout, sizeof(ch_layout), nb_channels, inConfig->ch_layout);
	snprintf(args, sizeof(args),
		"sample_rate=%d:sample_fmt=%d:channel_layout=%s:channels=%d:time_base=%d/%d",
		inConfig->sample_rate,
		inConfig->format,
		ch_layout,
		nb_channels,
		inConfig->timebase.num,
		inConfig->timebase.den);
	ret = avfilter_graph_create_filter(&buffersrc1_ctx, buffersrc, "in1",
		args, nullptr, filter_graph);
	if (ret < 0) {
		goto end;
	}

	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		nullptr, nullptr, filter_graph);
	if (ret < 0) {
		goto end;
	}

	ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", sample_fmts,
		AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (ret < 0) {
		goto end;
	}

	input->name = av_strdup("in");
	input->filter_ctx = buffersrc1_ctx;
	input->pad_idx = 0;
	input->next = nullptr;

	output->name = av_strdup("out");
	output->filter_ctx = buffersink_ctx;
	output->pad_idx = 0;
	output->next = nullptr;

	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
		&output, &input, nullptr)) < 0) {
		goto end;
	}

	if ((ret = avfilter_graph_config(filter_graph, nullptr)) < 0)
	{
		goto end;
	}

end:
	avfilter_inout_free(&input);
	avfilter_inout_free(&output);

	return ret;
}

void AudioFilter::dumpGraph() {
	printf("%s\n%s", description, avfilter_graph_dump(filter_graph, nullptr));
}

void AudioFilter::destroy() {
	if (filter_graph)
		avfilter_graph_free(&filter_graph);

	filter_graph = nullptr;
}

int AudioFilter::filter(AVFrame *input1, AVFrame *input2, AVFrame *result)
{
	int ret = av_buffersrc_add_frame_flags(buffersrc1_ctx, input1, AV_BUFFERSRC_FLAG_KEEP_REF);
	if (ret < 0)
	{
		return ret;
	}

	ret = av_buffersrc_add_frame_flags(buffersrc2_ctx, input2, AV_BUFFERSRC_FLAG_KEEP_REF);
	if (ret < 0)
	{
		return ret;
	}

	return av_buffersink_get_samples(buffersink_ctx, result, result->nb_samples);
}

int AudioFilter::getFrame(AVFrame *result) {
	if (filter_graph != nullptr)
	{
		int ret = av_buffersink_get_samples(buffersink_ctx, result, result->nb_samples);
		return ret;
	}
	return -1;
}

int AudioFilter::addInput1(AVFrame *input) {
	if (filter_graph != nullptr)
	{
		return av_buffersrc_add_frame_flags(buffersrc1_ctx, input, AV_BUFFERSRC_FLAG_KEEP_REF);
	}
	return -1;
}

int AudioFilter::addInput2(AVFrame *input) {
	if (filter_graph != nullptr)
	{
		return av_buffersrc_add_frame_flags(buffersrc2_ctx, input, AV_BUFFERSRC_FLAG_KEEP_REF);
	}
	return -1;
}
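Before wiring the wrapper into the full pipeline, here is a minimal usage sketch of the two-input create() overload. Everything in it is illustrative: the FLTP/44100 Hz/stereo configuration is an assumed example, frameFromVideo and frameFromBgm are hypothetical names for already-decoded AVFrames, and setting nb_samples on the output frame before getFrame() tells the sink how many samples to return.

//Minimal usage sketch; all parameters are assumed example values
AudioConfig inConfig1{ AV_SAMPLE_FMT_FLTP, 44100, AV_CH_LAYOUT_STEREO, { 1, 44100 } };
AudioConfig inConfig2{ AV_SAMPLE_FMT_FLTP, 44100, AV_CH_LAYOUT_STEREO, { 1, 44100 } };
AudioConfig outConfig{ AV_SAMPLE_FMT_FLTP, 44100, AV_CH_LAYOUT_STEREO, { 1, 44100 } };

AudioFilter mixer;
if (mixer.create("[in1][in2]amix=inputs=2[out]", &inConfig1, &inConfig2, &outConfig) == 0) {
	mixer.addInput1(frameFromVideo);  //decoded frame of the original audio (hypothetical)
	mixer.addInput2(frameFromBgm);    //decoded frame of the background music (hypothetical)
	AVFrame *mixed = av_frame_alloc();
	mixed->nb_samples = frameFromVideo->nb_samples;  //number of samples to pull from the sink
	if (mixer.getFrame(mixed) == 0) {
		//use the mixed frame here
	}
	av_frame_free(&mixed);
	mixer.destroy();
}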

Adding the background music

With all of the above in place, we can add background music to a video. When specifying the background music, be sure to adjust its volume so that it does not drown out the original audio. The implementation looks like this:
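For reference, the volume option of the volume filter used below is a linear gain factor, so 0.5 plays the background music at half amplitude; the filter also accepts decibel values. Both of the following forms should halve the music volume:

[in2]volume=volume=0.5[a2]
[in2]volume=volume=-6.0206dB[a2]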

//main.cpp
#include <string>

extern "C"
{
#include <libavformat/avformat.h>
}

#include "audio_filter.h"

//Add background music to a video
//@1 output path  @2 input video path  @3 background music path  @4 background music volume
int add_bgm_to_video(const char *output_filename, const char *input_filename, const char *bgm_filename, float bgm_volume)
{
	int ret = 0;
	//Format and codec contexts
	AVFormatContext *outFmtContext = nullptr;
	AVFormatContext *inFmtContext = nullptr;
	AVFormatContext *bgmFmtContext = nullptr;
	AVCodecContext *inAudioContext = nullptr;
	AVCodecContext *inVideoContext = nullptr;
	AVCodecContext *outAudioContext = nullptr;
	AVCodecContext *bgmAudioContext = nullptr;
	AudioFilter filter;

	//Stream handles
	AVStream *inAudioStream = nullptr;
	AVStream *inVideoStream = nullptr;
	AVStream *outAudioStream = nullptr;
	AVStream *outVideoStream = nullptr;
	AVStream *bgmAudioStream = nullptr;

	AVCodec *audioCodec = nullptr;

	//Open the video file and obtain its contexts
	ret = openVideoFile(input_filename, inFmtContext, inAudioContext, inVideoContext, inAudioStream,
		inVideoStream);
	if (ret < 0) return ret;

	//Open the music file and obtain its contexts
	ret = openAudioFile(bgm_filename, bgmFmtContext, bgmAudioContext, bgmAudioStream);
	if (ret < 0) return ret;

	//Create the output context
	ret = avformat_alloc_output_context2(&outFmtContext, nullptr, nullptr, output_filename);
	if (ret < 0) return ret;

	audioCodec = avcodec_find_encoder(inAudioStream->codecpar->codec_id);

	//Create the output video stream; its packets are copied as-is, so no encoder is needed
	outVideoStream = avformat_new_stream(outFmtContext, nullptr);
	if (!outVideoStream) {
		return -1;
	}
	outVideoStream->id = outFmtContext->nb_streams - 1;
	ret = avcodec_parameters_copy(outVideoStream->codecpar, inVideoStream->codecpar);
	if (ret < 0) {
		return -1;
	}
	outVideoStream->codecpar->codec_tag = 0;

	//Create the output audio stream; it will be re-encoded
	outAudioStream = avformat_new_stream(outFmtContext, audioCodec);
	if (!outAudioStream)
	{
		return -1;
	}
	outAudioStream->id = outFmtContext->nb_streams - 1;

	//Configure the audio encoder from the input audio parameters
	outAudioContext = avcodec_alloc_context3(audioCodec);
	avcodec_parameters_to_context(outAudioContext, inAudioStream->codecpar);
	outAudioContext->codec_type = inAudioContext->codec_type;
	outAudioContext->codec_id = inAudioContext->codec_id;
	outAudioContext->sample_fmt = inAudioContext->sample_fmt;
	outAudioContext->sample_rate = inAudioContext->sample_rate;
	outAudioContext->bit_rate = inAudioContext->bit_rate;
	outAudioContext->channel_layout = inAudioContext->channel_layout;
	outAudioContext->channels = inAudioContext->channels;
	outAudioContext->time_base = AVRational{ 1, outAudioContext->sample_rate };
	outAudioContext->flags |= AV_CODEC_FLAG_LOW_DELAY;
	outAudioStream->time_base = outAudioContext->time_base;
	if (outFmtContext->oformat->flags & AVFMT_GLOBALHEADER)
	{
		outAudioContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	}

	//Open the encoder
	ret = avcodec_open2(outAudioContext, audioCodec, nullptr);
	if (ret < 0)
	{
		return -1;
	}
	ret = avcodec_parameters_from_context(outAudioStream->codecpar, outAudioContext);
	if (ret < 0)
	{
		return -1;
	}

	//Copy the metadata from the input
	av_dict_copy(&outFmtContext->metadata, inFmtContext->metadata, 0);
	av_dict_copy(&outVideoStream->metadata, inVideoStream->metadata, 0);
	av_dict_copy(&outAudioStream->metadata, inAudioStream->metadata, 0);


	//Input and output configurations for the filter
	AudioConfig inputConfig{ inAudioContext->sample_fmt,
		inAudioContext->sample_rate,
		inAudioContext->channel_layout,
		inAudioContext->time_base };
	AudioConfig bgmConfig{ bgmAudioContext->sample_fmt,
		bgmAudioContext->sample_rate,
		bgmAudioContext->channel_layout,
		bgmAudioContext->time_base };
	AudioConfig outputConfig{ outAudioContext->sample_fmt,
		outAudioContext->sample_rate,
		outAudioContext->channel_layout,
		outAudioContext->time_base };

	//Build a filter graph that resamples both inputs, scales the music volume, and mixes them
	char filter_description[256];
	char ch_layout[128];
	av_get_channel_layout_string(ch_layout, 128, av_get_channel_layout_nb_channels(outAudioContext->channel_layout),
		outAudioContext->channel_layout);
	snprintf(filter_description, sizeof(filter_description),
		"[in1]aresample=%d[a1];[in2]aresample=%d,volume=volume=%f[a2];[a1][a2]amix[out]",
		outAudioContext->sample_rate,
		outAudioContext->sample_rate,
		bgm_volume);
	filter.create(filter_description, &inputConfig, &bgmConfig, &outputConfig);
	filter.dumpGraph();

	if (!(outFmtContext->oformat->flags & AVFMT_NOFILE))
	{
		ret = avio_open(&outFmtContext->pb, output_filename, AVIO_FLAG_WRITE);
		if (ret < 0)
		{
			return -1;
		}
	}

	//Write the file header
	ret = avformat_write_header(outFmtContext, nullptr);
	if (ret < 0)
	{
		return -1;
	}

	AVFrame *inputFrame = av_frame_alloc();
	AVFrame *bgmFrame = av_frame_alloc();
	AVFrame *mixFrame = av_frame_alloc();

	do
	{
		AVPacket packet{ nullptr };
		av_init_packet(&packet);
		ret = av_read_frame(inFmtContext, &packet);
		if (ret == AVERROR_EOF)
		{
			break;
		}
		else if (ret < 0)
		{
			break;
		}

		if (packet.flags & AV_PKT_FLAG_DISCARD) continue;
		if (packet.stream_index == inVideoStream->index)
		{
			//Video packets are remuxed without re-encoding
			packet.stream_index = outVideoStream->index;
			av_packet_rescale_ts(&packet, inVideoStream->time_base, outVideoStream->time_base);
			packet.duration = av_rescale_q(packet.duration, inVideoStream->time_base, outVideoStream->time_base);
			packet.pos = -1;
			ret = av_interleaved_write_frame(outFmtContext, &packet);
		}
		else if (packet.stream_index == inAudioStream->index)
		{
			packet.stream_index = outAudioStream->index;
			av_packet_rescale_ts(&packet, inAudioStream->time_base, outAudioStream->time_base);

			//Decode one frame of the original audio
			ret = avcodec_send_packet(inAudioContext, &packet);

			ret = avcodec_receive_frame(inAudioContext, inputFrame);
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
				continue;
			}
			else if (ret < 0) {
				return -1;
			}

			filter.addInput1(inputFrame);
			//Decode one frame of the background music
		decode:
			int got_bgm = 0;
			while (true) {
				AVPacket bgmPacket{ nullptr };
				av_init_packet(&bgmPacket);
				ret = av_read_frame(bgmFmtContext, &bgmPacket);
				if (ret == AVERROR_EOF) {
					//The music may be shorter than the video: rewind and keep reading
					av_seek_frame(bgmFmtContext, bgmAudioStream->index, 0, 0);
					continue;
				}
				else if (ret != 0) {
					break;
				}
				if (bgmPacket.stream_index == bgmAudioStream->index) {
					avcodec_send_packet(bgmAudioContext, &bgmPacket);
					ret = avcodec_receive_frame(bgmAudioContext, bgmFrame);
					if (ret == 0)
					{
						got_bgm = 1;
						break;
					}
				}
			}

			//Feed the music frame and fetch the mixed frame
			filter.addInput2(bgmFrame);
			int got_mix = 0;
			if (got_bgm) {
				ret = filter.getFrame(mixFrame);
				got_mix = ret == 0;
			}
			if (!got_mix) {
				goto decode;
			}
			mixFrame->pts = inputFrame->pts;

			av_frame_unref(inputFrame);
			av_frame_unref(bgmFrame);
			avcodec_send_frame(outAudioContext, mixFrame);

			//Drain the encoder and write the mixed audio packets to the output
		encode:
			AVPacket mixPacket{ nullptr };
			ret = avcodec_receive_packet(outAudioContext, &mixPacket);
			if (ret == 0)
			{
				mixPacket.stream_index = outAudioStream->index;
				ret = av_interleaved_write_frame(outFmtContext, &mixPacket);
				goto encode;
			}
			else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
				//Nothing more to drain for now
			}
			else {
				return -1;
			}
		}
	} while (true);

	filter.destroy();

	av_write_trailer(outFmtContext);

	if (!(outFmtContext->oformat->flags & AVFMT_NOFILE)) {
		avio_closep(&outFmtContext->pb);
	}

	//Release the allocated resources
	av_frame_free(&inputFrame);
	av_frame_free(&bgmFrame);
	av_frame_free(&mixFrame);

	//The inputs were opened with avformat_open_input, so close them with avformat_close_input
	avformat_close_input(&inFmtContext);
	avformat_close_input(&bgmFmtContext);
	avformat_free_context(outFmtContext);

	avcodec_free_context(&inAudioContext);
	avcodec_free_context(&inVideoContext);
	avcodec_free_context(&bgmAudioContext);
	avcodec_free_context(&outAudioContext);

	return 0;
}

int main(int argc, char* argv[])
{
	if (argc != 4)
	{
		return -1;
	}
	std::string video_input = std::string(argv[1]);   //input video path
	std::string bgm_input = std::string(argv[2]);     //background music path
	std::string video_output = std::string(argv[3]);  //output video path

	return add_bgm_to_video(video_output.c_str(), video_input.c_str(), bgm_input.c_str(), 0.5);
}
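Assuming the sources are saved as main.cpp and audio_filter.cpp (file names assumed), a build and run might look like the following on a system with pkg-config and the FFmpeg development packages installed:

g++ -std=c++11 main.cpp audio_filter.cpp -o add_bgm $(pkg-config --cflags --libs libavformat libavcodec libavfilter libavutil)
./add_bgm input.mp4 bgm.mp3 output.mp4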


Reprinted from blog.csdn.net/yang1fei2/article/details/128169598