Part 1: Function inventory in scrcpy's main function
Source version: v1.19
bool scrcpy(const struct scrcpy_options *options) {
static struct scrcpy scrcpy;
struct scrcpy *s = &scrcpy;
server_init(&s->server); ///> 1. server_init()
struct server_params params = {
.serial = options->serial,
.port_range = options->port_range,
.bit_rate = options->bit_rate,
.max_fps = options->max_fps,
.display_id = options->display_id,
.codec_options = options->codec_options,
.encoder_name = options->encoder_name,
.force_adb_forward = options->force_adb_forward,
};
server_start(&s->server, &params); ///> 2. server_start();
server_started = true;
sdl_init_and_configure(options->display, options->render_driver,
options->disable_screensaver);
server_connect_to(&s->server, device_name, &frame_size); ///> 3. server_connect_to();
file_handler_init(&s->file_handler, s->server.serial,
options->push_target); ///> 4. file_handler_init(); socket init & adb-push of the server code to the device
decoder_init(&s->decoder); ///> 5. decoder_init();
av_log_set_callback(av_log_callback); ///> 6. av_log_set_callback();
static const struct stream_callbacks stream_cbs = {
///> 7. stream_init();
.on_eos = stream_on_eos,
};
stream_init(&s->stream, s->server.video_socket, &stream_cbs, NULL);
stream_add_sink(&s->stream, &dec->packet_sink); ///> 8. stream_add_sink(); the decoder's packet sink
stream_add_sink(&s->stream, &rec->packet_sink); ///> 9. stream_add_sink(); the recorder's packet sink
controller_init(&s->controller, s->server.control_socket); ///> 10. controller_init(); control_socket
controller_start(&s->controller); ///> 11. controller_start();
struct screen_params screen_params = {
.window_title = window_title,
.frame_size = frame_size,
.always_on_top = options->always_on_top,
.window_x = options->window_x,
.window_y = options->window_y,
.window_width = options->window_width,
.window_height = options->window_height,
.window_borderless = options->window_borderless,
.rotation = options->rotation,
.mipmaps = options->mipmaps,
.fullscreen = options->fullscreen,
.buffering_time = options->display_buffer,
};
screen_init(&s->screen, &screen_params); ///> 12. screen_init();
decoder_add_sink(&s->decoder, &s->screen.frame_sink); ///> 13. decoder_add_sink();
#ifdef HAVE_V4L2
sc_v4l2_sink_init(&s->v4l2_sink, options->v4l2_device, frame_size,
options->v4l2_buffer); ///> 14. sc_v4l2_sink_init();
decoder_add_sink(&s->decoder, &s->v4l2_sink.frame_sink);
#endif
stream_start(&s->stream); ///> 14+. start the stream (missed in the first release of this post, sorry; added here)
input_manager_init(&s->input_manager, &s->controller, &s->screen, options); ///> 15. input_manager_init();
ret = event_loop(s, options); ///> 16. event_loop();
///> Cleanup: release resources on program exit
screen_hide_window(&s->screen);
controller_stop(&s->controller);
file_handler_stop(&s->file_handler);
screen_interrupt(&s->screen);
server_stop(&s->server);
stream_join(&s->stream);
sc_v4l2_sink_destroy(&s->v4l2_sink);
screen_join(&s->screen);
screen_destroy(&s->screen);
controller_join(&s->controller);
controller_destroy(&s->controller);
recorder_destroy(&s->recorder);
file_handler_join(&s->file_handler);
file_handler_destroy(&s->file_handler);
server_destroy(&s->server); ///> destroy the server
return ret;
}
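/// Before diving in, here is a summary sketch of the data path wired up by the calls above (a reading aid, not scrcpy code):
/*
 *  video_socket ==> stream (thread) ==+==> decoder ==+==> screen (SDL)
 *                                     |              +==> v4l2 sink (optional)
 *                                     +==> recorder (optional)
 *
 *  input_manager ==> controller (thread) ==> control_socket
 */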
Part 2: How the socket byte stream becomes a video stream
///> The main function calls stream_start(&s->stream);
bool
stream_start(struct stream *stream) {
LOGD("Starting stream thread");
printf("%s, %d DEBUG\n", __FILE__, __LINE__); ///> debug print added for this walkthrough, not in upstream scrcpy
bool ok = sc_thread_create(&stream->thread, run_stream, "stream", stream);
if (!ok) {
LOGC("Could not start stream thread");
return false;
}
return true;
}
///> The stream thread: unpack and parse the raw video stream
static int
run_stream(void *data) {
struct stream *stream = data;
AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
stream->codec_ctx = avcodec_alloc_context3(codec);
stream_open_sinks(stream, codec);
stream->parser = av_parser_init(AV_CODEC_ID_H264);
// We must only pass complete frames to av_parser_parse2()
stream->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
AVPacket *packet = av_packet_alloc();
for (;;) {
bool ok = stream_recv_packet(stream, packet); ///> 1>. read raw video data from the socket into packet
if (!ok) {
// end of stream
break;
}
ok = stream_push_packet(stream, packet); ///> 2>. push the single-frame AVPacket down the FFmpeg pipeline
av_packet_unref(packet);
if (!ok) {
// cannot process packet (error already logged)
break;
}
}
if (stream->pending) {
av_packet_free(&stream->pending);
}
av_packet_free(&packet);
av_parser_close(stream->parser);
stream_close_sinks(stream);
avcodec_free_context(&stream->codec_ctx);
stream->cbs->on_eos(stream, stream->cbs_userdata);
return 0;
}
///> 1>. read raw video data from the socket into packet
static bool
stream_recv_packet(struct stream *stream, AVPacket *packet) {
// The video stream contains raw packets, without time information. When we
// record, we retrieve the timestamps separately, from a "meta" header
// added by the server before each raw packet.
// Important: this is scrcpy's video packet wire format
// The "meta" header length is 12 bytes:
// [. . . . . . . .|. . . .]. . . . . . . . . . . . . . . ...
//  <-------------> <-----> <-----------------------------...
//        PTS        packet        raw packet
//                    size
//
// It is followed by <packet_size> bytes containing the packet/frame.
uint8_t header[HEADER_SIZE];
ssize_t r = net_recv_all(stream->socket, header, HEADER_SIZE);
if (r < HEADER_SIZE) {
return false;
}
uint64_t pts = buffer_read64be(header);
uint32_t len = buffer_read32be(&header[8]);
assert(pts == NO_PTS || (pts & 0x8000000000000000) == 0);
assert(len);
if (av_new_packet(packet, len)) {
///> allocate the AVPacket payload buffer
LOGE("Could not allocate packet");
return false;
}
r = net_recv_all(stream->socket, packet->data, len); ///> copy the socket payload into the AVPacket data area
if (r < 0 || ((uint32_t) r) < len) {
av_packet_unref(packet);
return false;
}
packet->pts = pts != NO_PTS ? (int64_t) pts : AV_NOPTS_VALUE; ///> set packet->pts from the header
LOGI("%s, %d , LEN:%d",__FILE__,__LINE__,len); ///> debug log added for this walkthrough
return true;
}
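/// The buffer_read64be()/buffer_read32be() helpers just assemble big-endian integers from the header bytes. Below is a minimal sketch of their likely shape (an assumption; the real ones live in scrcpy's util code), plus a worked example of one header:
static inline uint32_t
buffer_read32be(const uint8_t *buf) {
    ///> assemble 4 big-endian bytes into a host-order uint32_t
    return ((uint32_t) buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
}
static inline uint64_t
buffer_read64be(const uint8_t *buf) {
    uint32_t msb = buffer_read32be(buf);
    uint32_t lsb = buffer_read32be(&buf[4]);
    return ((uint64_t) msb << 32) | lsb;
}
///> Worked example of one 12-byte header (hex):
///>   00 00 00 00 00 01 86 A0 | 00 00 0F A0
///>   pts = 0x186A0 = 100000 us; len = 0xFA0 = 4000 bytes of H.264 data follow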
///> 2>. push the single-frame AVPacket down the pipeline: config packets go straight to the sinks, data packets go through the parser first
static bool
stream_push_packet(struct stream *stream, AVPacket *packet) {
bool is_config = packet->pts == AV_NOPTS_VALUE;
// A config packet must not be decoded immediately (it contains no
// frame); instead, it must be concatenated with the future data packet.
if (stream->pending || is_config) {
size_t offset;
if (stream->pending) {
offset = stream->pending->size;
if (av_grow_packet(stream->pending, packet->size)) {
///> grow the pending packet to append this one
LOGE("Could not grow packet");
return false;
}
} else {
offset = 0;
stream->pending = av_packet_alloc();
if (!stream->pending) {
LOGE("Could not allocate packet");
return false;
}
if (av_new_packet(stream->pending, packet->size)) {
///> allocate pending with room for this (config) packet
LOGE("Could not create packet");
av_packet_free(&stream->pending);
return false;
}
}
memcpy(stream->pending->data + offset, packet->data, packet->size);
if (!is_config) {
// prepare the concat packet to send to the decoder
stream->pending->pts = packet->pts;
stream->pending->dts = packet->dts;
stream->pending->flags = packet->flags;
packet = stream->pending;
}
}
if (is_config) {
// config packet
bool ok = push_packet_to_sinks(stream, packet); ///> 3>. push the config packet to the stream's sinks; see push_packet_to_sinks() below
if (!ok) {
return false;
}
} else {
// data packet
bool ok = stream_parse(stream, packet); ///> 4>. 裸数据流中分离出一帧一帧的压缩H264编码数据
if (stream->pending) {
// the pending packet must be discarded (consumed or error)
av_packet_free(&stream->pending);
}
if (!ok) {
return false;
}
}
return true;
}
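/// To make the pending-packet logic concrete, here is an illustrative trace with assumed sizes (a config packet carries SPS/PPS and no PTS; it must be prepended to the next data packet before parsing):
///>   recv #1: pts=NO_PTS, size=32   -> is_config; pending = av_packet_alloc() + av_new_packet(pending, 32),
///>            copy at offset 0; the config packet itself is pushed to the sinks (e.g. the recorder)
///>   recv #2: pts=16666, size=4000  -> pending exists; av_grow_packet(pending, 4000), copy at offset 32,
///>            pending->pts = 16666; packet = pending (4032 bytes)
///>            -> stream_parse() -> push_packet_to_sinks() -> decoder; pending is then freed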
///> 3>. push the packet to all registered sinks
static bool
push_packet_to_sinks(struct stream *stream, const AVPacket *packet) {
for (unsigned i = 0; i < stream->sink_count; ++i) {
struct sc_packet_sink *sink = stream->sinks[i];
if (!sink->ops->push(sink, packet)) {
///> for the details, see the push() callback walkthrough in Part 3
LOGE("Could not send config packet to sink %d", i);
return false;
}
}
return true;
}
///> 4>. split complete H.264 frames out of the raw byte stream
static bool
stream_parse(struct stream *stream, AVPacket *packet) {
uint8_t *in_data = packet->data;
int in_len = packet->size;
uint8_t *out_data = NULL;
int out_len = 0;
int r = av_parser_parse2(stream->parser, stream->codec_ctx, ///> split complete compressed H.264 frames out of the raw input
&out_data, &out_len, in_data, in_len,
AV_NOPTS_VALUE, AV_NOPTS_VALUE, -1);
// PARSER_FLAG_COMPLETE_FRAMES is set
assert(r == in_len);
(void) r;
assert(out_len == in_len);
if (stream->parser->key_frame == 1) {
packet->flags |= AV_PKT_FLAG_KEY;
}
packet->dts = packet->pts;
bool ok = push_packet_to_sinks(stream, packet); ///> push the packet to the sinks; see the push() callback walkthrough in Part 3
if (!ok) {
LOGE("Could not process packet");
return false;
}
return true;
}
The content above covers the conversion from the socket byte stream into the stream pipeline; the payload is raw H.264-encoded video data.
The markers 1> ~ 4> in the code snippets trace the data-handling steps in order.
Once the raw video data is in hand, we enter the H.264 decoding stage; let's keep walking the source.
Part 3: Video decoding
///> Decoder initialization
void
decoder_init(struct decoder *decoder) {
decoder->sink_count = 0;
static const struct sc_packet_sink_ops ops = {
.open = decoder_packet_sink_open, ///> calls decoder_open(struct decoder *decoder, const AVCodec *codec)
.close = decoder_packet_sink_close,
.push = decoder_packet_sink_push, ///> calls decoder_push(struct decoder *decoder, const AVPacket *packet)
};
decoder->packet_sink.ops = &ops;
}
///> Open the decoder
static bool
decoder_open(struct decoder *decoder, const AVCodec *codec) {
decoder->codec_ctx = avcodec_alloc_context3(codec); ///> allocate the codec context
if (!decoder->codec_ctx) {
LOGC("Could not allocate decoder context");
return false;
}
if (avcodec_open2(decoder->codec_ctx, codec, NULL) < 0) {
///> open the codec with this context
LOGE("Could not open codec");
avcodec_free_context(&decoder->codec_ctx);
return false;
}
decoder->frame = av_frame_alloc(); ///> allocate the AVFrame that will hold decoded, displayable frames
if (!decoder->frame) {
LOGE("Could not create decoder frame");
avcodec_close(decoder->codec_ctx);
avcodec_free_context(&decoder->codec_ctx);
return false;
}
if (!decoder_open_sinks(decoder)) {
///> open the downstream frame sinks (screen, v4l2, ...)
LOGE("Could not open decoder sinks");
av_frame_free(&decoder->frame);
avcodec_close(decoder->codec_ctx);
avcodec_free_context(&decoder->codec_ctx);
return false;
}
return true;
}
///> Push a packet into the decoder
static bool
decoder_push(struct decoder *decoder, const AVPacket *packet) {
bool is_config = packet->pts == AV_NOPTS_VALUE;
if (is_config) {
// nothing to do
return true;
}
int ret = avcodec_send_packet(decoder->codec_ctx, packet); ///> 1>. send the packet's data to the FFmpeg decoder
if (ret < 0 && ret != AVERROR(EAGAIN)) {
LOGE("Could not send video packet: %d", ret);
return false;
}
ret = avcodec_receive_frame(decoder->codec_ctx, decoder->frame); ///> 2>. receive a decoded frame from FFmpeg
if (!ret) {
// a frame was received
bool ok = push_frame_to_sinks(decoder, decoder->frame); ///> 3>. push the frame to the display sinks
// A frame lost should not make the whole pipeline fail. The error, if
// any, is already logged.
(void) ok;
av_frame_unref(decoder->frame);
} else if (ret != AVERROR(EAGAIN)) {
LOGE("Could not receive video frame: %d", ret);
return false;
}
return true;
}
///> push_frame_to_sinks()
static bool
push_frame_to_sinks(struct decoder *decoder, const AVFrame *frame) {
for (unsigned i = 0; i < decoder->sink_count; ++i) {
struct sc_frame_sink *sink = decoder->sinks[i];
if (!sink->ops->push(sink, frame)) {
///> the callback invoked here is screen_frame_sink_push() (Part 4, step 5>): it feeds the frame into the screen's sink
LOGE("Could not send frame to sink %d", i);
return false;
}
}
return true;
}
Steps 1> ~ 3> are the standard FFmpeg video decoding flow; at this point the raw H.264 data has been decoded into displayable frames.
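/// For comparison, the canonical FFmpeg pattern drains every frame a packet may produce. A generic sketch (not scrcpy code; decoder_push() above receives at most one frame per packet, which works here because the stream delivers exactly one complete frame per packet):
static bool
decode_packet(AVCodecContext *ctx, const AVPacket *packet, AVFrame *frame) {
    int ret = avcodec_send_packet(ctx, packet);
    if (ret < 0 && ret != AVERROR(EAGAIN)) {
        return false;
    }
    for (;;) {
        ret = avcodec_receive_frame(ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return true; ///> decoder needs more input (or the stream ended)
        }
        if (ret < 0) {
            return false; ///> decoding error
        }
        ///> ... consume the decoded frame here ...
        av_frame_unref(frame);
    }
}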
Part 4: Video display
Now let's return to the main function and walk through the display entry points.
///> 4.1 The main function calls screen_init(&s->screen, &screen_params).
bool
screen_init(struct screen *screen, const struct screen_params *params) {
static const struct sc_video_buffer_callbacks cbs = {
///> 1>. the callbacks of sc_video_buffer
.on_new_frame = sc_video_buffer_on_new_frame,
};
bool ok = sc_video_buffer_init(&screen->vb, params->buffering_time, &cbs, screen); ///> 2>. the sc_video_buffer_init() initializer
ok = sc_video_buffer_start(&screen->vb); ///> 3>. start the sc_video_buffer
fps_counter_init(&screen->fps_counter);
screen->window = SDL_CreateWindow(params->window_title, x, y,
window_size.width, window_size.height,
window_flags);
screen->renderer = SDL_CreateRenderer(screen->window, -1,
SDL_RENDERER_ACCELERATED);
int r = SDL_GetRendererInfo(screen->renderer, &renderer_info);
struct sc_opengl *gl = &screen->gl;
sc_opengl_init(gl);
SDL_Surface *icon = read_xpm(icon_xpm); ///> the Android icon (XPM)
SDL_SetWindowIcon(screen->window, icon);
SDL_FreeSurface(icon);
screen->texture = create_texture(screen);
SDL_SetWindowSize(screen->window, window_size.width, window_size.height);
screen_update_content_rect(screen);
if (params->fullscreen) {
screen_switch_fullscreen(screen);
}
SDL_AddEventWatch(event_watcher, screen);
static const struct sc_frame_sink_ops ops = {
.open = screen_frame_sink_open,
.close = screen_frame_sink_close,
.push = screen_frame_sink_push, ///> 5>. screen_frame_sink_push()
};
screen->frame_sink.ops = &ops;
return true;
}
///> 4.2 The main function calls decoder_add_sink(&s->decoder, &s->screen.frame_sink)
void
decoder_add_sink(struct decoder *decoder, struct sc_frame_sink *sink) {
assert(decoder->sink_count < DECODER_MAX_SINKS);
assert(sink);
assert(sink->ops);
decoder->sinks[decoder->sink_count++] = sink;
}
From decoder_add_sink() we can roughly guess that once the decoder and the screen sink are hooked up,
the decoder's output is displayed directly by the screen, judging by the idiomatic pipeline patterns of FFmpeg and GStreamer.
Is that guess correct? Let's walk the source together; next we focus on the five related functions inside screen_init().
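/// A "sink" is a small vtable-style interface. A simplified sketch of the frame-sink trait (abridged; see scrcpy's trait headers for the real definitions). struct screen embeds a struct sc_frame_sink, and DOWNCAST() later recovers the screen pointer from the sink pointer, container_of style:
struct sc_frame_sink {
    const struct sc_frame_sink_ops *ops; ///> vtable of callbacks
};
struct sc_frame_sink_ops {
    bool (*open)(struct sc_frame_sink *sink);
    void (*close)(struct sc_frame_sink *sink);
    bool (*push)(struct sc_frame_sink *sink, const AVFrame *frame);
};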
///> 2>. the sc_video_buffer_init() initializer
bool
sc_video_buffer_init(struct sc_video_buffer *vb, sc_tick buffering_time,
const struct sc_video_buffer_callbacks *cbs,
void *cbs_userdata) {
bool ok = sc_frame_buffer_init(&vb->fb);
///> the body of sc_frame_buffer_init() is inlined here for readability
{
fb->pending_frame = av_frame_alloc(); ///> allocate the pending AVFrame
fb->tmp_frame = av_frame_alloc(); ///> allocate the tmp AVFrame
fb->pending_frame_consumed = true;
};
ok = sc_cond_init(&vb->b.queue_cond);
ok = sc_cond_init(&vb->b.wait_cond);
sc_clock_init(&vb->b.clock);
sc_queue_init(&vb->b.queue);
vb->buffering_time = buffering_time; ///> requested buffering delay
vb->cbs = cbs; ///> callbacks point to sc_video_buffer_on_new_frame()
vb->cbs_userdata = cbs_userdata; ///> userdata passed back to the callbacks (the screen)
return true;
}
///> Now the related data structures
struct sc_video_buffer_frame_queue SC_QUEUE(struct sc_video_buffer_frame);
struct sc_video_buffer {
struct sc_frame_buffer fb;
sc_tick buffering_time;
// only if buffering_time > 0
struct {
sc_thread thread;
sc_mutex mutex;
sc_cond queue_cond;
sc_cond wait_cond;
struct sc_clock clock;
struct sc_video_buffer_frame_queue queue; ///> the buffered frame queue
bool stopped;
} b; // buffering
const struct sc_video_buffer_callbacks *cbs;
void *cbs_userdata;
};
struct sc_video_buffer_callbacks {
void (*on_new_frame)(struct sc_video_buffer *vb, bool previous_skipped,
void *userdata);
};
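/// sc_queue_init/push/take come from scrcpy's queue macros: an intrusive singly-linked queue in which each element carries its own link, and the "next" argument names that link field. A minimal sketch of the idea (not the actual macros):
struct sc_video_buffer_frame {
    AVFrame *frame;
    struct sc_video_buffer_frame *next; ///> intrusive link used by the queue
    sc_tick push_date; ///> only present in buffering-debug builds
};
///> SC_QUEUE(T) conceptually expands to head/tail pointers:
///>   struct { struct sc_video_buffer_frame *first, *last; }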
///> 1>. the sc_video_buffer callback
static void
sc_video_buffer_on_new_frame(struct sc_video_buffer *vb, bool previous_skipped,
void *userdata) {
(void) vb;
struct screen *screen = userdata;
if (previous_skipped) {
fps_counter_add_skipped_frame(&screen->fps_counter);
// The EVENT_NEW_FRAME triggered for the previous frame will consume
// this new frame instead
} else {
static SDL_Event new_frame_event = {
.type = EVENT_NEW_FRAME, ///> 4>. the SDL event type EVENT_NEW_FRAME
};
// Post the event on the UI thread
SDL_PushEvent(&new_frame_event);
}
}
/// When a new frame arrives, SDL_PushEvent(&new_frame_event) publishes an event notification.
/// Two questions: 1. where do the new frames come from? 2. who receives the SDL_PushEvent() notification, and what does it do with it?
///> 3>. starting: sc_video_buffer_start(&screen->vb);
bool
sc_video_buffer_start(struct sc_video_buffer *vb) {
if (vb->buffering_time) {
bool ok =
sc_thread_create(&vb->b.thread, run_buffering, "buffering", vb);
if (!ok) {
LOGE("Could not start buffering thread");
return false;
}
}
return true;
}
///> the run_buffering thread body
static int
run_buffering(void *data) {
struct sc_video_buffer *vb = data;
assert(vb->buffering_time > 0);
for (;;) {
sc_mutex_lock(&vb->b.mutex);
while (!vb->b.stopped && sc_queue_is_empty(&vb->b.queue)) {
sc_cond_wait(&vb->b.queue_cond, &vb->b.mutex);
}
if (vb->b.stopped) {
sc_mutex_unlock(&vb->b.mutex);
goto stopped;
}
struct sc_video_buffer_frame *vb_frame;
sc_queue_take(&vb->b.queue, next, &vb_frame); ///> take a frame off the queue
sc_tick max_deadline = sc_tick_now() + vb->buffering_time;
// PTS (written by the server) are expressed in microseconds
sc_tick pts = SC_TICK_TO_US(vb_frame->frame->pts);
bool timed_out = false;
while (!vb->b.stopped && !timed_out) {
sc_tick deadline = sc_clock_to_system_time(&vb->b.clock, pts)
+ vb->buffering_time;
if (deadline > max_deadline) {
deadline = max_deadline;
}
timed_out =
!sc_cond_timedwait(&vb->b.wait_cond, &vb->b.mutex, deadline);
}
if (vb->b.stopped) {
sc_video_buffer_frame_delete(vb_frame);
sc_mutex_unlock(&vb->b.mutex);
goto stopped;
}
sc_mutex_unlock(&vb->b.mutex);
#ifndef SC_BUFFERING_NDEBUG
LOGD("Buffering: %" PRItick ";%" PRItick ";%" PRItick,
pts, vb_frame->push_date, sc_tick_now());
#endif
sc_video_buffer_offer(vb, vb_frame->frame); ///> offer the frame to the video buffer; this is the key function
sc_video_buffer_frame_delete(vb_frame);
}
stopped:
// Flush queue
while (!sc_queue_is_empty(&vb->b.queue)) {
struct sc_video_buffer_frame *vb_frame;
sc_queue_take(&vb->b.queue, next, &vb_frame);
sc_video_buffer_frame_delete(vb_frame);
}
LOGD("Buffering thread ended");
return 0;
}
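/// A worked example of the pacing above, with assumed numbers (buffering_time = 50 ms):
///>   sc_clock_to_system_time(&clock, pts) estimates the local system time T that
///>   corresponds to the frame's device-side pts (from recent (now, pts) samples)
///>   deadline     = T + 50 ms
///>   max_deadline = now + 50 ms      (cap, in case the clock estimate runs ahead)
///>   the loop waits on wait_cond until min(deadline, max_deadline),
///>   then sc_video_buffer_offer() hands the frame to the screen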
///> sc_video_buffer_offer(): hand a frame over
static bool
sc_video_buffer_offer(struct sc_video_buffer *vb, const AVFrame *frame) {
bool previous_skipped;
bool ok = sc_frame_buffer_push(&vb->fb, frame, &previous_skipped);
if (!ok) {
return false;
}
vb->cbs->on_new_frame(vb, previous_skipped, vb->cbs_userdata); ///> invoke the on_new_frame callback, i.e. sc_video_buffer_on_new_frame()
return true; ///> answers question 1: new frames are emitted by the run_buffering() thread once it finds data in the queue
}
///> the frame producer side
bool
sc_frame_buffer_push(struct sc_frame_buffer *fb, const AVFrame *frame,
bool *previous_frame_skipped) {
sc_mutex_lock(&fb->mutex);
// Use a temporary frame to preserve pending_frame in case of error.
// tmp_frame is an empty frame, no need to call av_frame_unref() beforehand.
int r = av_frame_ref(fb->tmp_frame, frame);
if (r) {
LOGE("Could not ref frame: %d", r);
return false;
}
// Now that av_frame_ref() succeeded, we can replace the previous
// pending_frame
swap_frames(&fb->pending_frame, &fb->tmp_frame);
av_frame_unref(fb->tmp_frame);
if (previous_frame_skipped) {
*previous_frame_skipped = !fb->pending_frame_consumed;
}
fb->pending_frame_consumed = false;
sc_mutex_unlock(&fb->mutex);
return true;
}
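/// swap_frames() is not shown in the excerpt; presumably it is a plain pointer swap, so pending_frame takes ownership of the new reference while tmp_frame is left holding the old one to be unref'ed:
///> Likely shape of the swap_frames() helper (assumed):
static void
swap_frames(AVFrame **lhs, AVFrame **rhs) {
    AVFrame *tmp = *lhs;
    *lhs = *rhs;
    *rhs = tmp;
}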
///> 4>. the SDL event type EVENT_NEW_FRAME
/// The listener for this event type is screen_handle_event()
bool
screen_handle_event(struct screen *screen, SDL_Event *event) {
switch (event->type) {
case EVENT_NEW_FRAME:
if (!screen->has_frame) {
screen->has_frame = true;
// this is the very first frame, show the window
screen_show_window(screen);
}
bool ok = screen_update_frame(screen); ///> a new-frame event arrived: refresh the display
if (!ok) {
LOGW("Frame update failed\n");
}
return true;
case SDL_WINDOWEVENT:
if (!screen->has_frame) {
// Do nothing
return true;
}
switch (event->window.event) {
case SDL_WINDOWEVENT_EXPOSED:
screen_render(screen, true);
break;
case SDL_WINDOWEVENT_SIZE_CHANGED:
screen_render(screen, true);
break;
case SDL_WINDOWEVENT_MAXIMIZED:
screen->maximized = true;
break;
case SDL_WINDOWEVENT_RESTORED:
if (screen->fullscreen) {
// On Windows, in maximized+fullscreen, disabling
// fullscreen mode unexpectedly triggers the "restored"
// then "maximized" events, leaving the window in a
// weird state (maximized according to the events, but
// not maximized visually).
break;
}
screen->maximized = false;
apply_pending_resize(screen);
screen_render(screen, true);
break;
}
return true;
}
return false;
}
/// This answers question 2: the SDL event is delivered to screen_handle_event(), which refreshes the screen.
/// At this point the whole path "socket data ==> decoder ==> screen refresh" is clear.
/// One question remains: where does the actual frame rendering happen in this flow? Let's analyze screen_update_frame().
static bool
screen_update_frame(struct screen *screen) {
av_frame_unref(screen->frame);
sc_video_buffer_consume(&screen->vb, screen->frame);
AVFrame *frame = screen->frame;
fps_counter_add_rendered_frame(&screen->fps_counter);
struct size new_frame_size = {
frame->width, frame->height};
if (!prepare_for_frame(screen, new_frame_size)) {
return false;
}
update_texture(screen, frame); ///> uploads the decoded frame into the SDL texture (SDL_UpdateYUVTexture)
screen_render(screen, false); ///> and screen_render() draws that texture, see below
return true;
}
///> Render the current texture to the window
void
screen_render(struct screen *screen, bool update_content_rect) {
if (update_content_rect) {
screen_update_content_rect(screen);
}
SDL_RenderClear(screen->renderer);
if (screen->rotation == 0) {
SDL_RenderCopy(screen->renderer, screen->texture, NULL, &screen->rect);
} else {
// rotation in RenderCopyEx() is clockwise, while screen->rotation is
// counterclockwise (to be consistent with --lock-video-orientation)
int cw_rotation = (4 - screen->rotation) % 4;
double angle = 90 * cw_rotation;
SDL_Rect *dstrect = NULL;
SDL_Rect rect;
if (screen->rotation & 1) {
rect.x = screen->rect.x + (screen->rect.w - screen->rect.h) / 2;
rect.y = screen->rect.y + (screen->rect.h - screen->rect.w) / 2;
rect.w = screen->rect.h;
rect.h = screen->rect.w;
dstrect = &rect;
} else {
assert(screen->rotation == 2);
dstrect = &screen->rect;
}
SDL_RenderCopyEx(screen->renderer, screen->texture, NULL, dstrect,
angle, NULL, 0);
}
SDL_RenderPresent(screen->renderer); ///> present via the SDL renderer
}
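/// A quick check of the rotation arithmetic above (example value assumed):
///>   screen->rotation = 1  (90° counterclockwise)
///>   cw_rotation = (4 - 1) % 4 = 3
///>   angle       = 90 * 3     = 270° clockwise == 90° counterclockwise
///>   rotation & 1 != 0, so width/height swap and dstrect is re-centered
///>   before SDL_RenderCopyEx() draws the rotated texture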
/// So the flow becomes "socket data ==> decoder ==> screen refresh ==> render". Still, something seems to be missing: how do decoded frames get into the screen's buffer in the first place?
///> 5>. screen_frame_sink_push()
{
static const struct sc_frame_sink_ops ops = {
.open = screen_frame_sink_open,
.close = screen_frame_sink_close,
.push = screen_frame_sink_push, ///> 5>. screen_frame_sink_push()
};
screen->frame_sink.ops = &ops;
}
///> Recall decoder_add_sink(&s->decoder, &s->screen.frame_sink) from 4.2; without a flow diagram this is a bit convoluted, I admit.
void
decoder_add_sink(struct decoder *decoder, struct sc_frame_sink *sink) {
assert(decoder->sink_count < DECODER_MAX_SINKS);
assert(sink);
assert(sink->ops);
decoder->sinks[decoder->sink_count++] = sink; ///> store the screen's frame sink
}
/*
This is where the screen and the decoder are wired together: screen->frame_sink is stored
into the decoder->sinks[] array, so once the decoder produces a frame, calling
decoder->sinks[i]->ops->push() executes screen_frame_sink_push().
*/
static bool
screen_frame_sink_push(struct sc_frame_sink *sink, const AVFrame *frame) {
struct screen *screen = DOWNCAST(sink);
return sc_video_buffer_push(&screen->vb, frame);
}
bool
sc_video_buffer_push(struct sc_video_buffer *vb, const AVFrame *frame) {
if (!vb->buffering_time) {
// No buffering
return sc_video_buffer_offer(vb, frame); //> fast path: refresh immediately
}
sc_mutex_lock(&vb->b.mutex);
sc_tick pts = SC_TICK_FROM_US(frame->pts);
sc_clock_update(&vb->b.clock, sc_tick_now(), pts);
sc_cond_signal(&vb->b.wait_cond);
if (vb->b.clock.count == 1) {
sc_mutex_unlock(&vb->b.mutex);
// First frame, offer it immediately, for two reasons:
// - not to delay the opening of the scrcpy window
// - the buffering estimation needs at least two clock points, so it
// could not handle the first frame
return sc_video_buffer_offer(vb, frame); //> fast path: refresh immediately
}
struct sc_video_buffer_frame *vb_frame = sc_video_buffer_frame_new(frame);
if (!vb_frame) {
sc_mutex_unlock(&vb->b.mutex);
LOGE("Could not allocate frame");
return false;
}
#ifndef SC_BUFFERING_NDEBUG
vb_frame->push_date = sc_tick_now();
#endif
sc_queue_push(&vb->b.queue, next, vb_frame); ///> queue vb_frame; the run_buffering thread will pace the refresh
sc_cond_signal(&vb->b.queue_cond); ///> wake the buffering thread via queue_cond
sc_mutex_unlock(&vb->b.mutex);
return true;
}
///> sc_video_buffer_offer() was already shown above: it pushes the frame into the frame buffer and fires the on_new_frame callback.
In other words, once decoding succeeds the decoder pushes straight into this screen interface: the very first frame, and the no-buffering case, refresh the screen immediately;
otherwise the frame is queued and the run_buffering thread paces the refresh, which is how the display buffering delay (options->display_buffer) is implemented.
Next I plan to rework an open-source Android video player to validate this walkthrough. Until that verification is done, please judge the logic for yourself, dear reader, and leave a comment if you find anything wrong. Thanks.