A Rough Analysis of the ffplay Source Code

Background

I have been studying the FFmpeg source code for a while, and it still feels hazy. So let me take a step back and analyze the ffplay source. This analysis does not dive into low-level details; the focus is on the overall framework.

Usage

ffplay -i rtmp://172.16.11.97:1935/myapp/123

Source Code Analysis

main entry point

// Parse the command-line arguments to get the file name; the forced input format is NULL
VideoState *stream_open(const char *filename, AVInputFormat *iformat)
	is = av_mallocz(sizeof(VideoState)); // the structure that manages the player's global state
	is->filename = av_strdup(filename);  // the name the user passed in, e.g. rtmp://172.16.11.97:1935/myapp/123
	is->iformat = iformat;               // iformat is still NULL at this point
	// frame queue init
	frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1);
	frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0);
	frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1);
	// packet queue init
	packet_queue_init(&is->videoq);
	packet_queue_init(&is->audioq);
	packet_queue_init(&is->subtitleq);
	// clock init
	init_clock(&is->vidclk, &is->videoq.serial);
	init_clock(&is->audclk, &is->audioq.serial);
	init_clock(&is->extclk, &is->extclk.serial);
	// create the demux (read) thread
	is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
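
A hedged, simplified sketch of what a frame_queue_init-style setup does: pre-allocate the ring of reusable AVFrames, create the SDL mutex/condition pair that protects the queue, and remember which packet queue feeds it. The struct and names below are a trimmed-down illustration, not ffplay's exact FrameQueue layout.

#include <string.h>
#include <SDL2/SDL.h>
#include <libavutil/frame.h>
#include <libavutil/error.h>

#define MAX_FRAMES 16                      // illustrative bound; ffplay sizes each queue differently

typedef struct SimpleFrameQueue {
    AVFrame   *frames[MAX_FRAMES];         // pre-allocated, reused frames
    int        max_size, keep_last;
    SDL_mutex *mutex;                      // protects the reader/writer indexes
    SDL_cond  *cond;                       // producer/consumer wakeups
    void      *pktq;                       // the packet queue that feeds this frame queue
} SimpleFrameQueue;

static int simple_frame_queue_init(SimpleFrameQueue *f, void *pktq, int max_size, int keep_last)
{
    memset(f, 0, sizeof(*f));
    if (!(f->mutex = SDL_CreateMutex()) || !(f->cond = SDL_CreateCond()))
        return AVERROR(ENOMEM);
    f->pktq      = pktq;
    f->max_size  = max_size < MAX_FRAMES ? max_size : MAX_FRAMES;
    f->keep_last = !!keep_last;
    for (int i = 0; i < f->max_size; i++)  // allocate every AVFrame up front
        if (!(f->frames[i] = av_frame_alloc()))
            return AVERROR(ENOMEM);
    return 0;
}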

// GUI display loop (runs on the main thread): takes frames from the frame queue and displays them via SDL
static void event_loop(VideoState *cur_stream)
    refresh_loop_wait_event(cur_stream, &event);
		video_refresh(is, &remaining_time);
			video_display(is);
				video_image_display(is);
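
video_image_display ends up uploading the decoded frame to an SDL texture and presenting it. Below is a minimal sketch of that SDL2 call sequence; the renderer and texture are assumed to be created elsewhere with SDL_PIXELFORMAT_IYUV, and the frame is assumed to be AV_PIX_FMT_YUV420P (ffplay itself handles more formats plus scaling).

#include <SDL2/SDL.h>
#include <libavutil/frame.h>

// upload one decoded YUV420P frame and present it in the window
static void display_frame(SDL_Renderer *renderer, SDL_Texture *texture, const AVFrame *frame)
{
    // copy the Y, U and V planes into the IYUV texture
    SDL_UpdateYUVTexture(texture, NULL,
                         frame->data[0], frame->linesize[0],
                         frame->data[1], frame->linesize[1],
                         frame->data[2], frame->linesize[2]);
    SDL_RenderClear(renderer);
    SDL_RenderCopy(renderer, texture, NULL, NULL);   // stretch to the current window size
    SDL_RenderPresent(renderer);
}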

read_thread: reads the stream from a file or from the network

static int read_thread(void *arg)
	VideoState *is = arg;
	AVFormatContext *ic = NULL;
	AVPacket pkt1, *pkt = &pkt1;
	
	// allocate the input format context
	ic = avformat_alloc_context();
	// open the input and probe the container format from the URL/content; iformat is NULL here, so the format is auto-detected
	avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
	
	// ask the demuxer to inject stream global side data into the next packet of each stream
	av_format_inject_global_side_data(ic);
	
	// read stream information from the container
	avformat_find_stream_info(ic, opts);

	// find the most suitable stream of each type (video shown here)
	st_index[AVMEDIA_TYPE_VIDEO] = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);

	// set the default window size from the video stream's dimensions and aspect ratio
	AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
    AVCodecParameters *codecpar = st->codecpar;
    AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
    if (codecpar->width)
        set_default_window_size(codecpar->width, codecpar->height, sar);
	
	// open the stream component (sets up the decoder and its thread)
	stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);

	// demux loop: read packets and push them onto the corresponding packet queue
	for (;;) {
        ret = av_read_frame(ic, pkt);
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        stream_start_time = ic->streams[pkt->stream_index]->start_time;
        pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
                <= ((double)duration / 1000000);
        if (pkt->stream_index == is->video_stream && pkt_in_play_range)
            packet_queue_put(&is->videoq, pkt);
        av_packet_unref(pkt);
    }
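
The pkt_in_play_range test above is dense. The spelled-out form below is only a readability sketch using the same variables as the loop above; start_time and duration come from the -ss and -t options, expressed in AV_TIME_BASE (microsecond) units.

        // equivalent step-by-step form of the play-range check (readability sketch)
        int64_t ts        = (pkt->pts == AV_NOPTS_VALUE) ? pkt->dts : pkt->pts;   // prefer pts, fall back to dts
        int64_t st_start  = (stream_start_time != AV_NOPTS_VALUE) ? stream_start_time : 0;
        double  ts_sec    = (ts - st_start) * av_q2d(ic->streams[pkt->stream_index]->time_base); // seconds from stream start
        double  start_sec = (start_time != AV_NOPTS_VALUE) ? start_time / 1000000.0 : 0.0;       // -ss, in seconds
        int in_range      = (duration == AV_NOPTS_VALUE) || (ts_sec - start_sec <= duration / 1000000.0);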

stream_component_open: opens the given stream

static int stream_component_open(VideoState *is, int stream_index)
	AVFormatContext *ic = is->ic;
	AVCodecContext *avctx;
    AVCodec *codec;

	// allocate the codec context
	avctx = avcodec_alloc_context3(NULL);

	// initialize the codec context from the stream's codec parameters
	avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
	avctx->pkt_timebase = ic->streams[stream_index]->time_base;

	// find the decoder that matches the codec ID
	codec = avcodec_find_decoder(avctx->codec_id);
	avctx->codec_id = codec->id;

	// initialize the decoder and start the decoder thread (video_thread)
	decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
	decoder_start(&is->viddec, video_thread, "video_decoder", is);
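
Before decoding can start, stream_component_open also calls avcodec_open2 (omitted in the walkthrough above). A minimal, standalone version of the whole open sequence for one stream might look like the sketch below; the function name and error handling are illustrative, not ffplay's.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

// open a decoder for stream `stream_index` of an already-opened AVFormatContext
static AVCodecContext *open_decoder(AVFormatContext *ic, int stream_index)
{
    AVStream *st = ic->streams[stream_index];
    const AVCodec *codec = NULL;
    AVCodecContext *avctx = avcodec_alloc_context3(NULL);         // empty codec context
    if (!avctx)
        return NULL;
    if (avcodec_parameters_to_context(avctx, st->codecpar) < 0)   // copy the stream's codec parameters
        goto fail;
    avctx->pkt_timebase = st->time_base;                          // packet timestamps use the stream time base
    codec = avcodec_find_decoder(avctx->codec_id);                // look up the decoder by ID
    if (!codec || avcodec_open2(avctx, codec, NULL) < 0)          // actually open the decoder
        goto fail;
    return avctx;
fail:
    avcodec_free_context(&avctx);
    return NULL;
}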

video_thread: reads packets from the queue and decodes them

static int video_thread(void *arg)
	VideoState *is = arg;
	AVFrame *frame = av_frame_alloc();

	for (;;) {
        // decode one frame and push it onto the display (frame) queue
        ret = get_video_frame(is, frame);
        duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
        pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
        ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
        av_frame_unref(frame);
    }
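
Inside get_video_frame, decoding goes through decoder_decode_frame, which is built on FFmpeg's standard send/receive API. The loop below is a simplified sketch of that pattern only; ffplay layers packet-serial checks, flushing, and pts handling on top of it.

#include <libavcodec/avcodec.h>

// feed one packet and drain every frame it produces (simplified sketch)
static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);        // pkt == NULL would flush the decoder
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);    // one decoded frame per call
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                                 // needs more input / fully drained
        if (ret < 0)
            return ret;                               // real decoding error
        // ... this is where ffplay would hand the frame to queue_picture() ...
        av_frame_unref(frame);
    }
}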

Summary

Taking only video as an example, there are three threads in total (a toy producer/consumer sketch of the queue handoff follows the list):

  • Demux thread (the read_thread thread)

    Receives the stream from the network, demuxes it, extracts the video packets, and pushes them onto the packet queue.

  • Decode thread (the video_thread thread)

    Takes packets from the packet queue, decodes them with the matching decoder, and pushes the decoded frames onto the frame queue.

  • GUI thread (the thread running main)

    Takes frames from the frame queue and displays the video picture through the underlying SDL layer.
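
The handoff between these threads is a classic bounded producer/consumer pattern guarded by an SDL mutex and condition variable. The toy queue below only illustrates the idea; ffplay's real PacketQueue additionally tracks byte size, duration, and a serial number so that stale packets can be dropped after a seek.

#include <SDL2/SDL.h>
#include <libavcodec/avcodec.h>

#define TOY_QUEUE_SIZE 64

typedef struct ToyPacketQueue {
    AVPacket  *pkts[TOY_QUEUE_SIZE];   // ring buffer of queued packets
    int        rindex, windex, count;
    SDL_mutex *mutex;                  // created with SDL_CreateMutex() at init time
    SDL_cond  *cond;                   // created with SDL_CreateCond() at init time
} ToyPacketQueue;

// producer side (demux thread): block while full, then append one packet
static void toy_queue_put(ToyPacketQueue *q, AVPacket *pkt)
{
    SDL_LockMutex(q->mutex);
    while (q->count == TOY_QUEUE_SIZE)
        SDL_CondWait(q->cond, q->mutex);
    q->pkts[q->windex] = pkt;
    q->windex = (q->windex + 1) % TOY_QUEUE_SIZE;
    q->count++;
    SDL_CondSignal(q->cond);           // wake a waiting consumer
    SDL_UnlockMutex(q->mutex);
}

// consumer side (decoder thread): block while empty, then take one packet
static AVPacket *toy_queue_get(ToyPacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    while (q->count == 0)
        SDL_CondWait(q->cond, q->mutex);
    AVPacket *pkt = q->pkts[q->rindex];
    q->rindex = (q->rindex + 1) % TOY_QUEUE_SIZE;
    q->count--;
    SDL_CondSignal(q->cond);           // wake a waiting producer
    SDL_UnlockMutex(q->mutex);
    return pkt;
}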

Reposted from blog.csdn.net/donglicaiju76152/article/details/117225356