// Fast path for dispatch_barrier_sync(): try to take the queue's barrier
// lock inline on the calling thread; fall back to the slow path (blocking
// wait) or to target-queue recursion when that is not possible.
//
// dq       : queue to synchronize against (must be a lane-type queue)
// ctxt     : opaque context passed through to func
// func     : client function to invoke once the barrier is acquired
// dc_flags : continuation flags OR'ed with DC_FLAG_BARRIER downstream
static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	// Fetch the caller's thread ID (mach/pthread).
	dispatch_tid tid = _dispatch_tid_self();

	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
	}

	dispatch_lane_t dl = upcast(dq)._dl;
	// The more correct thing to do would be to merge the qos of the thread
	// that just acquired the barrier lock into the queue state.
	//
	// However this is too expensive for the fast path, so skip doing it.
	// The chosen tradeoff is that if an enqueue on a lower priority thread
	// contends with this fast path, this thread may receive a useless override.
	//
	// Global concurrent queues and queues bound to non-dispatch threads
	// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
	//
	// Deadlock / thread-synchronization point: the try-acquire reads the
	// queue's state word from the OS layer; on failure we must wait.
	if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
		return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
				DC_FLAG_BARRIER | dc_flags);
	}

	// A non-trivial target-queue chain means the barrier must be applied
	// recursively up the hierarchy.
	if (unlikely(dl->do_targetq->do_targetq)) {
		return _dispatch_sync_recurse(dl, ctxt, func,
				DC_FLAG_BARRIER | dc_flags);
	}
	_dispatch_introspection_sync_begin(dl);
	// DISPATCH_TRACE_ARG supplies its own leading comma when tracing is
	// compiled in, hence no comma after `func`.
	_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
			DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
					dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}
// _dispatch_sync_block_with_privdata — code block
// Synchronously submit a dispatch block object that carries private data
// (flags, priority, voucher). Applies the block's priority/voucher to the
// calling thread for the duration of the invocation, then restores them.
//
// dq       : destination queue
// work     : block created with dispatch_block_create() (has private data)
// dc_flags : base continuation flags; BARRIER is added when the block
//            was created with DISPATCH_BLOCK_BARRIER
static void
_dispatch_sync_block_with_privdata(dispatch_queue_t dq, dispatch_block_t work,
		uintptr_t dc_flags)
{
	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work);
	pthread_priority_t op = 0, p = 0;
	dispatch_block_flags_t flags = dbpd->dbpd_flags;

	if (flags & DISPATCH_BLOCK_BARRIER) {
		dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA | DC_FLAG_BARRIER;
	} else {
		dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA;
	}

	// Only adopt the block's priority if its flags say we should.
	op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority);
	if (op) {
		p = dbpd->dbpd_priority;
	}
	voucher_t ov, v = DISPATCH_NO_VOUCHER;
	if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
		v = dbpd->dbpd_voucher;
	}
	ov = _dispatch_set_priority_and_voucher(p, v, 0);

	// Record the queue in the block's private data (retain is
	// balanced in d_block_sync_invoke or d_block_wait).
	if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) {
		_dispatch_retain_2(dq);
	}
	if (dc_flags & DC_FLAG_BARRIER) {
		_dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke,
				dc_flags);
	} else {
		_dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, dc_flags);
	}
	// Undo the priority/voucher adoption performed above.
	_dispatch_reset_priority_and_voucher(op, ov);
}
// _dispatch_queue_try_acquire_barrier_sync — code block (the slow-path wait that follows when the try-acquire fails)
// Slow path of dispatch_sync()/dispatch_async_and_wait(): park the calling
// thread until it is granted ownership of the queue, crashing first if the
// queue's drain lock is already held by this very thread (deadlock).
//
// dsc : per-waiter sync context (waiter tid, saved frame, wait event, QoS)
// dq  : queue the caller is waiting on
static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
	uint64_t dq_state = _dispatch_wait_prepare(dq);
	// If the drain lock is already owned by the waiting thread, this is a
	// self-deadlock — crash with the queue state for diagnosis.
	if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
		DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
				"dispatch_sync called on queue "
				"already owned by current thread");
	}

	// Blocks submitted to the main thread MUST run on the main thread, and
	// dispatch_async_and_wait also executes on the remote context rather than
	// the current thread.
	//
	// For both these cases we need to save the frame linkage for the sake of
	// _dispatch_async_and_wait_invoke
	_dispatch_thread_frame_save_state(&dsc->dsc_dtf);

	// Decide which wait mechanism to use: an anonymous thread event, the
	// queue's own workloop handle, or a computed workloop.
	if (_dq_state_is_suspended(dq_state) || _dq_state_is_base_anon(dq_state)) {
		dsc->dc_data = DISPATCH_WLH_ANON;
	} else if (_dq_state_is_base_wlh(dq_state)) {
		dsc->dc_data = (dispatch_wlh_t)dq;
	} else {
		_dispatch_wait_compute_wlh(upcast(dq)._dl, dsc);
	}

	if (dsc->dc_data == DISPATCH_WLH_ANON) {
		dsc->dsc_override_qos_floor = dsc->dsc_override_qos =
				(uint8_t)_dispatch_get_basepri_override_qos_floor();
		_dispatch_thread_event_init(&dsc->dsc_event);
	}
	// Enqueue ourselves as a waiter, then block until woken.
	dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority));
	_dispatch_trace_runtime_event(sync_wait, dq, 0);
	if (dsc->dc_data == DISPATCH_WLH_ANON) {
		_dispatch_thread_event_wait(&dsc->dsc_event); // acquire
	} else {
		_dispatch_event_loop_wait_for_ownership(dsc);
	}
	if (dsc->dc_data == DISPATCH_WLH_ANON) {
		_dispatch_thread_event_destroy(&dsc->dsc_event);
		// If _dispatch_sync_waiter_wake() gave this thread an override,
		// ensure that the root queue sees it.
		if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) {
			_dispatch_set_basepri_override_qos(dsc->dsc_override_qos);
		}
	}
}
// Continuation flag bits carried in dc_flags.
// continuation is a dispatch_sync or dispatch_barrier_sync
#define DC_FLAG_SYNC_WAITER             0x001ul
// continuation acts as a barrier
#define DC_FLAG_BARRIER                 0x002ul
// continuation resources are freed on run
// this is set on async or for non event_handler source handlers
#define DC_FLAG_CONSUME                 0x004ul
// continuation has a group in dc_data
#define DC_FLAG_GROUP_ASYNC             0x008ul
// continuation function is a block (copied in dc_ctxt)
#define DC_FLAG_BLOCK                   0x010ul
// continuation function is a block with private data, implies BLOCK_BIT
#define DC_FLAG_BLOCK_WITH_PRIVATE_DATA 0x020ul
// source handler requires fetching context from source
#define DC_FLAG_FETCH_CONTEXT           0x040ul
// continuation is a dispatch_async_and_wait
#define DC_FLAG_ASYNC_AND_WAIT          0x080ul
// bit used to make sure dc_flags is never 0 for allocated continuations
#define DC_FLAG_ALLOCATED               0x100ul
// continuation is an internal implementation detail that should not be
// introspected
#define DC_FLAG_NO_INTROSPECTION        0x200ul
// never set on continuations, used by mach.c only
#define DC_FLAG_MACH_BARRIER            0x1000000ul