// FFmpeg-10: concatenate two MP4 files — decode both videos and output YUV and MP4

// ---- Per-conversion state for rescaling decoded frames to the fixed output size ----
AVFrame *m_pFrameEncodeYUV1 = NULL;                // holds one rescaled YUV420P frame (queued for the encoder)
uint8_t *m_VideoEncodebuffer1 = NULL;              // pixel buffer backing m_pFrameEncodeYUV1
struct SwsContext *m_pAddVideoConvert_ctx1 = NULL; // lazily created swscale context, reused across frames
int m_iOutWidth = 1280;                            // target output width
int m_iOutHeight = 720;                            // target output height
// Demux/decode loop: read packets until EOF, decode video frames, rescale them
// to m_iOutWidth x m_iOutHeight YUV420P, and queue them for the encoder thread.
for (;;)
{
	int re = av_read_frame(ic, pkt);
	if (re != 0)
	{
		// EOF or read error -> stop demuxing.
		break;
	}
	// Select the decoder context matching the packet's stream.
	AVCodecContext *cc = 0;
	if (pkt->stream_index == videoStream)
	{
		cc = vc;
	}
	if (pkt->stream_index == audioStream)
	{
		cc = ac;
	}
	// NOTE(review): if the packet belongs to neither stream, cc stays NULL and
	// avcodec_send_packet(NULL, ...) below is invalid — confirm the inputs only
	// ever contain these two streams, or skip unmatched packets.

	/// Decode video
	// Send the packet to the decoder thread; sending NULL later (flush) would be
	// followed by repeated receive calls to drain all buffered frames.
	re = avcodec_send_packet(cc, pkt);
	if (re != 0)
	{
		XError(re);
		// NOTE(review): pkt is not unreferenced on this path — the packet's
		// payload leaks every time send fails.
		continue;
	}
	av_packet_unref(pkt);
	long long startTime = av_gettime();
	for (;;)
	{
		// Pull decoded frames from the decoder; one send may yield multiple receives.
		re = avcodec_receive_frame(cc, frame2);
		if (re != 0) {
			// EAGAIN (needs more input) or EOF -> back to demuxing.
			break;
		}
		// Video frame
		if (cc == vc)
		{

			// Allocate a fresh output frame + buffer for every decoded frame;
			// ownership transfers to the queues below (the consumer must free both).
			// NOTE(review): avpicture_get_size/avpicture_fill are deprecated —
			// av_image_get_buffer_size/av_image_fill_arrays are the current API.
			m_pFrameEncodeYUV1 = av_frame_alloc();
			m_VideoEncodebuffer1 = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight));
			avpicture_fill((AVPicture *)m_pFrameEncodeYUV1, m_VideoEncodebuffer1, AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight);
			m_pFrameEncodeYUV1->width = m_iOutWidth;
			m_pFrameEncodeYUV1->height = m_iOutHeight;
			m_pFrameEncodeYUV1->format = AV_PIX_FMT_YUV420P;

			// Create the scaler once and reuse it for all subsequent frames.
			// NOTE(review): the source format is hard-coded to YUV420P instead of
			// using frame2->format — confirm the decoder always outputs YUV420P.
			if (!m_pAddVideoConvert_ctx1)
			{
				m_pAddVideoConvert_ctx1 = sws_getContext(frame2->width, frame2->height,
					AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
			}
			// Rescale the decoded frame into the fixed-size output frame.
			sws_scale(m_pAddVideoConvert_ctx1, frame2->data, frame2->linesize, 0, frame2->height, m_pFrameEncodeYUV1->data, m_pFrameEncodeYUV1->linesize);

			// Self-test: dump the raw YUV planes to a file for manual inspection.
			//int y_size = frame2->width*frame2->height;
			//fwrite(frame2->data[0], 1, y_size, yuv_file2);    //Y
			//fwrite(frame2->data[1], 1, y_size / 4, yuv_file2);  //U
			//fwrite(frame2->data[2], 1, y_size / 4, yuv_file2);  //V
			//Sleep(20);

			// Hand the frame (and its backing buffer) to the encoder thread's queues.
			m_muxVideo1.lock();
			m_qAVFrameVideo1.push(m_pFrameEncodeYUV1);
			m_qAVFrameBuffer1.push(m_VideoEncodebuffer1);
			m_bFlagStart = true;
			m_muxVideo1.unlock();
			// Throttle decoding so the queue is not flooded.
			// NOTE(review): a fixed 200 ms sleep is crude pacing; a bounded queue
			// or condition variable would be more robust.
			Sleep(200);
		}
		else if (cc == ac) // Audio frame — currently ignored.
		{
			//cout << "Audio" << endl;
		}
	}
}
cout << "m_qAVFrameVideo end" << endl;

}
// Opens the input file at `path`, locates the best video/audio streams, and
// sets up the video decoder context (vc). Function body continues below this point.
void EncodeVideoFrame::startEncode()
{
AVDictionary *opts = NULL;
// Open the input container.
int re = avformat_open_input(&ic, path.c_str(), 0, &opts);
if (re != 0)
{
XError(re);
return;
}
// Probe the container so codec parameters are populated.
// NOTE(review): the return value is unchecked — a probe failure goes unnoticed.
re = avformat_find_stream_info(ic, 0);
// Total duration in milliseconds (AV_TIME_BASE is microseconds).
int totalMs = ic->duration / (AV_TIME_BASE / 1000);
// Best-matching stream indices.
// NOTE(review): negative results (stream not found / decoder missing) are not
// checked before indexing ic->streams[videoStream] below; also note the stray
// double semicolons on the next two lines.
videoStream = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);;
audioStream = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);;

// Find the decoder for the video stream's codec id.
AVCodec *vcodec = avcodec_find_decoder(ic->streams[videoStream]->codecpar->codec_id);
if (!vcodec)
{
	cout << "can't find the codec id " << ic->streams[videoStream]->codecpar->codec_id;
	getchar();
	return;
}
cout << "find the AVCodec " << ic->streams[videoStream]->codecpar->codec_id << endl;
// Allocate the decoder context and copy the stream's codec parameters into it.
vc = avcodec_alloc_context3(vcodec);
/// Configure the decoder context parameters
avcodec_parameters_to_context(vc, ic->streams[videoStream]->codecpar);

// Source: blog.csdn.net/2501_90329823/article/details/145248596