Exploring the GCD Source Code (Part 1)

Tags: GCD, GCD source code


This chapter explores the implementation logic behind the following parts of the GCD source code:

  1. GCD queues
  2. GCD async functions
  3. GCD sync functions
  4. GCD singletons (dispatch_once)

Preparation
Download the GCD (libdispatch) source code

1 GCD Queues: Underlying Principle

Search the source for dispatch_queue_create:

dispatch_queue_t
dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
{
	return _dispatch_lane_create_with_target(label, attr,
			DISPATCH_TARGET_QUEUE_DEFAULT, true);
}

1.1 _dispatch_lane_create_with_target

Step into _dispatch_lane_create_with_target:

DISPATCH_NOINLINE
static dispatch_queue_t
_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
        dispatch_queue_t tq, bool legacy)
{
    // create the dqai (dispatch_queue_attr_info_t)
    dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);
    
    //Step 1: normalize the arguments, e.g. qos, overcommit, tq
    ...
    
    //build the queue's class name (vtable)
    const void *vtable;
    dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
    if (dqai.dqai_concurrent) { //vtable is the class (type) of the queue
        // OS_dispatch_queue_concurrent
        vtable = DISPATCH_VTABLE(queue_concurrent);
    } else {
        vtable = DISPATCH_VTABLE(queue_serial);
    }
    
    ...
    
    //create the queue and initialize it
    dispatch_lane_t dq = _dispatch_object_alloc(vtable,
            sizeof(struct dispatch_lane_s)); // alloc
    //dqai.dqai_concurrent decides whether the queue is serial or concurrent
    _dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
            DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
            (dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0)); // init
    //set the queue's label identifier
    dq->dq_label = label;//assign the label
    dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos, dqai.dqai_relpri);//priority handling
    ...
    //similar to the binding between a class and its meta-class: not direct inheritance, more like a template from which the instance is produced
    dq->do_targetq = tq;
    _dispatch_object_debug(dq, "%s", __func__);
    return _dispatch_trace_queue_create(dq)._dq;//the dq we care about
}

1.1.1 _dispatch_queue_attr_to_info

The _dispatch_queue_attr_to_info function is called with dqa (the queue attribute: serial, concurrent, etc.) and builds a dispatch_queue_attr_info_t value, dqai, that stores the queue's attribute information.

dispatch_queue_attr_info_t
_dispatch_queue_attr_to_info(dispatch_queue_attr_t dqa)
{
	dispatch_queue_attr_info_t dqai = { };

	if (!dqa) return dqai;

#if DISPATCH_VARIANT_STATIC
	if (dqa == &_dispatch_queue_attr_concurrent) {
		dqai.dqai_concurrent = true;
		return dqai;
	}
#endif

	if (dqa < _dispatch_queue_attrs ||
			dqa >= &_dispatch_queue_attrs[DISPATCH_QUEUE_ATTR_COUNT]) {
		DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
	}

	size_t idx = (size_t)(dqa - _dispatch_queue_attrs);
//decode the attributes associated with the queue, e.g. the quality of service (qos)
	dqai.dqai_inactive = (idx % DISPATCH_QUEUE_ATTR_INACTIVE_COUNT);
	idx /= DISPATCH_QUEUE_ATTR_INACTIVE_COUNT;

	dqai.dqai_concurrent = !(idx % DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT);
	idx /= DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT;

	dqai.dqai_relpri = -(int)(idx % DISPATCH_QUEUE_ATTR_PRIO_COUNT);
	idx /= DISPATCH_QUEUE_ATTR_PRIO_COUNT;

	dqai.dqai_qos = idx % DISPATCH_QUEUE_ATTR_QOS_COUNT;
	idx /= DISPATCH_QUEUE_ATTR_QOS_COUNT;

	dqai.dqai_autorelease_frequency =
			idx % DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT;
	idx /= DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT;

	dqai.dqai_overcommit = idx % DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT;
	idx /= DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT;

	return dqai;
}

1.1.2 Decoding the queue's attributes (qos, etc.)

//decode the attributes associated with the queue, e.g. the quality of service (qos)
   dqai.dqai_inactive = (idx % DISPATCH_QUEUE_ATTR_INACTIVE_COUNT);
   idx /= DISPATCH_QUEUE_ATTR_INACTIVE_COUNT;

   dqai.dqai_concurrent = !(idx % DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT);
   idx /= DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT;

   dqai.dqai_relpri = -(int)(idx % DISPATCH_QUEUE_ATTR_PRIO_COUNT);
   idx /= DISPATCH_QUEUE_ATTR_PRIO_COUNT;

   dqai.dqai_qos = idx % DISPATCH_QUEUE_ATTR_QOS_COUNT;
   idx /= DISPATCH_QUEUE_ATTR_QOS_COUNT;

   dqai.dqai_autorelease_frequency =
   		idx % DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT;
   idx /= DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT;

   dqai.dqai_overcommit = idx % DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT;
   idx /= DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT;
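This is a mixed-radix decoding: each attribute is one "digit" of the index idx, read off with % and then stripped with /=. Below is a minimal standalone sketch of the same pattern, with made-up field counts (they are placeholders, not the real DISPATCH_QUEUE_ATTR_*_COUNT values):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define INACTIVE_COUNT    2   // placeholder, not the libdispatch constant
#define CONCURRENCY_COUNT 2   // placeholder

struct attr_info { bool inactive; bool concurrent; };

static struct attr_info decode(size_t idx) {
    struct attr_info info = { 0 };
    info.inactive = (idx % INACTIVE_COUNT);        // lowest "digit"
    idx /= INACTIVE_COUNT;
    info.concurrent = !(idx % CONCURRENCY_COUNT);  // next "digit" (note the inversion, as above)
    idx /= CONCURRENCY_COUNT;                      // further attributes would continue the same way
    return info;
}

int main(void) {
    struct attr_info i = decode(2);  // 2 -> inactive digit 0, concurrency digit 1 -> concurrent == false
    printf("inactive=%d concurrent=%d\n", i.inactive, i.concurrent);
    return 0;
}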

1.1.3 DISPATCH_VTABLE: building the queue's class name

The vtable, i.e. the queue's class, comes from the DISPATCH_VTABLE macro. As the definitions below show, the class name is OS_dispatch_ concatenated with the name passed in, so queue_concurrent yields the class OS_dispatch_queue_concurrent.

#define DISPATCH_VTABLE(name) DISPATCH_OBJC_CLASS(name)
#define DISPATCH_OBJC_CLASS(name)   (&DISPATCH_CLASS_SYMBOL(name))
#define DISPATCH_CLASS(name) OS_dispatch_##name
// serial queue
dispatch_queue_t serial = dispatch_queue_create("ypy", DISPATCH_QUEUE_SERIAL);
// concurrent queue
dispatch_queue_t concurrent = dispatch_queue_create("ypy", DISPATCH_QUEUE_CONCURRENT);
// main queue (serial)
dispatch_queue_t mainQueue = dispatch_get_main_queue();
// global queue (concurrent)
dispatch_queue_t globalQueue = dispatch_get_global_queue(0, 0);
NSLog(@"\n%@ \n%@ \n%@ \n%@", object_getClass(serial), object_getClass(concurrent), object_getClass(mainQueue), object_getClass(globalQueue));

2021-08-03 19:15:41.368990+0800 001---函数与队列[20480:1105392] 
OS_dispatch_queue_serial 
OS_dispatch_queue_concurrent 
OS_dispatch_queue_main 
OS_dispatch_queue_global

1.1.4 _dispatch_object_alloc + _dispatch_queue_init 创建队列对象

The queue, dq, is created with alloc + init. In the _dispatch_queue_init call, the boolean dqai.dqai_concurrent determines whether the queue is serial or concurrent, and vtable is the queue's class, which also shows that a queue is itself an object.

1.1.4.1 _dispatch_object_alloc

Step into _dispatch_object_alloc ->
_os_object_alloc_realized, where the isa pointer is set; this confirms that a queue is indeed an object.

void *
_dispatch_object_alloc(const void *vtable, size_t size)
{
#if OS_OBJECT_HAVE_OBJC1
	const struct dispatch_object_vtable_s *_vtable = vtable;
	dispatch_object_t dou;
	dou._os_obj = _os_object_alloc_realized(_vtable->_os_obj_objc_isa, size);
	dou._do->do_vtable = vtable;
	return dou._do;
#else
	return _os_object_alloc_realized(vtable, size);
#endif
}
inline _os_object_t
_os_object_alloc_realized(const void *cls, size_t size)
{
	_os_object_t obj;
	dispatch_assert(size >= sizeof(struct _os_object_s));
	while (unlikely(!(obj = calloc(1u, size)))) {
		_dispatch_temporary_resource_shortage();
	}
	obj->os_obj_isa = cls;
	return obj;
}


1.1.4.2 _dispatch_queue_init

  • Step into _dispatch_queue_init: the queue is of type dispatch_queue_t, and the queue's related properties are set here
// Note to later developers: ensure that any initialization changes are
// made for statically allocated queues (i.e. _dispatch_main_q).
static inline dispatch_queue_class_t
_dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf,
		uint16_t width, uint64_t initial_state_bits)
{
	uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
	dispatch_queue_t dq = dqu._dq;

	dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK |
			DISPATCH_QUEUE_INACTIVE)) == 0);

	if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) {
		dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume
		if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) {
			dq->do_ref_cnt++; // released when DSF_DELETED is set
		}
	}

	dq_state |= initial_state_bits;
	dq->do_next = DISPATCH_OBJECT_LISTLESS;
	dqf |= DQF_WIDTH(width);
	os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
	dq->dq_state = dq_state;
	dq->dq_serialnum =
			os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
	return dqu;
}

1.1.5 _dispatch_trace_queue_create

_dispatch_trace_queue_create post-processes the newly created queue: it calls _dispatch_introspection_queue_create and finally returns the processed _dq.

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_class_t
_dispatch_trace_queue_create(dispatch_queue_class_t dqu)
{
	_dispatch_only_if_ktrace_enabled({
		uint64_t dq_label[4] = {0}; // So that we get the right null termination
		dispatch_queue_t dq = dqu._dq;
		strncpy((char *)dq_label, (char *)dq->dq_label ?: "", sizeof(dq_label));

		_dispatch_ktrace2(DISPATCH_QOS_TRACE_queue_creation_start,
				dq->dq_serialnum,
				_dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));

		_dispatch_ktrace4(DISPATCH_QOS_TRACE_queue_creation_end,
						dq_label[0], dq_label[1], dq_label[2], dq_label[3]);
	});

	return _dispatch_introspection_queue_create(dqu);
}
dispatch_queue_class_t
_dispatch_introspection_queue_create(dispatch_queue_t dq)
{
	dispatch_queue_introspection_context_t dqic;
	size_t sz = sizeof(struct dispatch_queue_introspection_context_s);

	if (!_dispatch_introspection.debug_queue_inversions) {
		sz = offsetof(struct dispatch_queue_introspection_context_s,
				__dqic_no_queue_inversion);
	}
	dqic = _dispatch_calloc(1, sz);
	dqic->dqic_queue._dq = dq; //dispatch_queue_t
	if (_dispatch_introspection.debug_queue_inversions) {
		LIST_INIT(&dqic->dqic_order_top_head);
		LIST_INIT(&dqic->dqic_order_bottom_head);
	}
	dq->do_finalizer = dqic;

	_dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
	LIST_INSERT_HEAD(&_dispatch_introspection.queues, dqic, dqic_list);
	_dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);

	DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq);
	if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) {
		_dispatch_introspection_queue_create_hook(dq);
	}
	return upcast(dq)._dqu; //dispatch_queue_class_t
}
1.1.5.1 _dispatch_introspection_queue_create_hook

Stepping into _dispatch_introspection_queue_create_hook ->
dispatch_introspection_queue_get_info ->
_dispatch_introspection_lane_get_info shows that, unlike the classes we define ourselves, a queue is created in the underlying code from a template.

DISPATCH_NOINLINE
static void
_dispatch_introspection_queue_create_hook(dispatch_queue_t dq)
{
	dispatch_introspection_queue_s diq;
	diq = dispatch_introspection_queue_get_info(dq);
	dispatch_introspection_hook_callout_queue_create(&diq);
}
dispatch_introspection_queue_s
dispatch_introspection_queue_get_info(dispatch_queue_t dq)
{
	if (dx_metatype(dq) == _DISPATCH_WORKLOOP_TYPE) {
		return _dispatch_introspection_workloop_get_info(upcast(dq)._dwl);
	}
	return _dispatch_introspection_lane_get_info(upcast(dq)._dl);
}
static inline dispatch_introspection_queue_s
_dispatch_introspection_lane_get_info(dispatch_lane_class_t dqu)
{
	dispatch_lane_t dq = dqu._dl;
	bool global = _dispatch_object_is_global(dq);
	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
//the queue is built from this template
	dispatch_introspection_queue_s diq = {
		.queue = dq->_as_dq,
		.target_queue = dq->do_targetq,
		.label = dq->dq_label,
		.serialnum = dq->dq_serialnum,
		.width = dq->dq_width,
		.suspend_count = _dq_state_suspend_cnt(dq_state) + dq->dq_side_suspend_cnt,
		.enqueued = _dq_state_is_enqueued(dq_state) && !global,
		.barrier = _dq_state_is_in_barrier(dq_state) && !global,
		.draining = (dq->dq_items_head == (void*)~0ul) ||
				(!dq->dq_items_head && dq->dq_items_tail),
		.global = global,
		.main = dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE,
	};
	return diq;
}

1.2 Summary

  • The second argument of dispatch_queue_create (the queue attribute) determines the width passed to _dispatch_queue_init in the layer below: DISPATCH_QUEUE_WIDTH_MAX for a concurrent queue, 1 for a serial queue
  • A queue is also an object: it is created with alloc + init, the class used in alloc is assembled from a macro-concatenated name, and its isa is set at the same time
  • Under the hood, the created queue is handled through a template, the dispatch_introspection_queue_s struct
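As a quick behavioral check of the first point: tasks submitted to a serial queue (width 1) run one at a time in submission order, while a concurrent queue (DISPATCH_QUEUE_WIDTH_MAX) may run them in parallel. The queue labels below are arbitrary:

dispatch_queue_t serialQ = dispatch_queue_create("com.demo.serial", DISPATCH_QUEUE_SERIAL);
dispatch_queue_t concurrentQ = dispatch_queue_create("com.demo.concurrent", DISPATCH_QUEUE_CONCURRENT);
for (int i = 0; i < 3; i++) {
    dispatch_async(serialQ, ^{ NSLog(@"serial %d", i); });         // always logs 0, 1, 2 in order
    dispatch_async(concurrentQ, ^{ NSLog(@"concurrent %d", i); }); // order is not guaranteed
}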

The low-level flow of dispatch_queue_create is summarized in the figure below.
[Figure: dispatch_queue_create flow chart]

2 GCD Async Functions: Underlying Principle

void
dispatch_async(dispatch_queue_t dq, dispatch_block_t work)//work: the task block
{
    dispatch_continuation_t dc = _dispatch_continuation_alloc();
    uintptr_t dc_flags = DC_FLAG_CONSUME;
    dispatch_qos_t qos;

    // task wrapper (this is where work is actually used) - takes work - saves work - functional style
    // save the block
    qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
    //hand off for concurrent processing
    _dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}

Step into the dispatch_async implementation; two functions deserve analysis.

2.1 _dispatch_continuation_init: the task wrapper

Step into _dispatch_continuation_init: it mainly wraps the task and sets up the callback function, essentially an initialization step.

DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_continuation_init(dispatch_continuation_t dc,
        dispatch_queue_class_t dqu, dispatch_block_t work,
        dispatch_block_flags_t flags, uintptr_t dc_flags)
{
    void *ctxt = _dispatch_Block_copy(work);//copy the task

    dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
    if (unlikely(_dispatch_block_has_private_data(work))) {
        dc->dc_flags = dc_flags;
        dc->dc_ctxt = ctxt;//assign the context
        // will initialize all fields but requires dc_flags & dc_ctxt to be set
        return _dispatch_continuation_init_slow(dc, dqu, flags);
    }

    dispatch_function_t func = _dispatch_Block_invoke(work);//wrap work: take the block's invoke pointer
    if (dc_flags & DC_FLAG_CONSUME) {
        func = _dispatch_call_block_and_release;//reassign the callback: dispatch_async sets DC_FLAG_CONSUME, so this branch is taken
    }
    return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
}

The main steps are:

  • Copy the task with _dispatch_Block_copy
  • Wrap the task with _dispatch_Block_invoke, a macro that pulls the invoke function pointer out of the block (see the sketch after the code below)
#define _dispatch_Block_invoke(bb) \
        ((dispatch_function_t)((struct Block_layout *)bb)->invoke)
  • If DC_FLAG_CONSUME is set (as dispatch_async does), the callback is replaced with _dispatch_call_block_and_release, which invokes the block and then releases it
  • _dispatch_continuation_init_f then stores the callback, i.e. f is func, saving it in the continuation's fields
DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_continuation_init_f(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t f,
		dispatch_block_flags_t flags, uintptr_t dc_flags)
{
	pthread_priority_t pp = 0;
	dc->dc_flags = dc_flags | DC_FLAG_ALLOCATED;
	dc->dc_func = f;
	dc->dc_ctxt = ctxt;
	// in this context DISPATCH_BLOCK_HAS_PRIORITY means that the priority
	// should not be propagated, only taken from the handler if it has one
	if (!(flags & DISPATCH_BLOCK_HAS_PRIORITY)) {
		pp = _dispatch_priority_propagate();
	}
	_dispatch_continuation_voucher_set(dc, flags);
	return _dispatch_continuation_priority_set(dc, dqu, pp, flags);
}
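As background on what _dispatch_Block_invoke actually reads: an Objective-C block starts with a Block_layout header whose invoke member points at the block's implementation function. Here is a simplified sketch of that header (based on the public Blocks ABI, not on libdispatch code):

#include <stdint.h>

// Simplified view of the Blocks ABI header that _dispatch_Block_invoke casts to.
struct Block_layout {
    void *isa;
    volatile int32_t flags;
    int32_t reserved;
    void (*invoke)(void *block, ...);      // the function pointer the macro extracts
    struct Block_descriptor *descriptor;   // size/copy/dispose info (declaration omitted)
};

// Conceptually, the macro boils down to:
//   dispatch_function_t func = (dispatch_function_t)((struct Block_layout *)work)->invoke;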

2.2 _dispatch_continuation_async: the concurrent dispatch function

This function is responsible for getting the block callout executed.
Step into _dispatch_continuation_async:

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
        dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
    if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
        _dispatch_trace_item_push(dqu, dc);//trace logging
    }
#else
    (void)dc_flags;
#endif
    return dx_push(dqu._dq, dc, qos);//like dx_invoke, this is a macro
}

The key line is dx_push(dqu._dq, dc, qos); dx_push is a macro, defined as follows:

 #define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z)
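So dx_push is just a function-pointer dispatch through the object's vtable. A minimal sketch of the pattern (the my_* names are invented for illustration and are not libdispatch types):

#include <stddef.h>

struct my_vtable { void (*dq_push)(void *dq, void *item, int qos); };
struct my_queue  { const struct my_vtable *do_vtable; };

#define my_dx_vtable(x)     ((x)->do_vtable)
#define my_dx_push(x, y, z) my_dx_vtable(x)->dq_push(x, y, z)

static void serial_push(void *dq, void *item, int qos)     { (void)dq; (void)item; (void)qos; /* stands in for _dispatch_lane_push */ }
static void concurrent_push(void *dq, void *item, int qos) { (void)dq; (void)item; (void)qos; /* stands in for _dispatch_lane_concurrent_push */ }

static const struct my_vtable serial_vt     = { .dq_push = serial_push };
static const struct my_vtable concurrent_vt = { .dq_push = concurrent_push };

int main(void) {
    struct my_queue serialq     = { .do_vtable = &serial_vt };
    struct my_queue concurrentq = { .do_vtable = &concurrent_vt };
    my_dx_push(&serialq, NULL, 0);      // resolves to serial_push
    my_dx_push(&concurrentq, NULL, 0);  // resolves to concurrent_push
    return 0;
}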

The dq_push slot resolves to a different function depending on the queue's type:


DISPATCH_VTABLE_INSTANCE(workloop,
	.do_type        = DISPATCH_WORKLOOP_TYPE,
	.do_dispose     = _dispatch_workloop_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_workloop_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_workloop_wakeup,
	.dq_push        = _dispatch_workloop_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane,
	.do_type        = DISPATCH_QUEUE_SERIAL_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
	.do_type        = DISPATCH_QUEUE_CONCURRENT_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_concurrent_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
	.do_type        = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
	.do_dispose     = _dispatch_object_no_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_object_no_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_root_queue_wakeup,
	.dq_push        = _dispatch_root_queue_push,
);

#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_pthread_root, lane,
	.do_type        = DISPATCH_QUEUE_PTHREAD_ROOT_TYPE,
	.do_dispose     = _dispatch_pthread_root_queue_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_object_no_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_root_queue_wakeup,
	.dq_push        = _dispatch_root_queue_push,
);
#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, lane,
	.do_type        = DISPATCH_QUEUE_MGR_TYPE,
	.do_dispose     = _dispatch_object_no_dispose,
	.do_debug       = _dispatch_queue_debug,
#if DISPATCH_USE_MGR_THREAD
	.do_invoke      = _dispatch_mgr_thread,
#else
	.do_invoke      = _dispatch_object_no_invoke,
#endif

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_mgr_queue_wakeup,
	.dq_push        = _dispatch_mgr_queue_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, lane,
	.do_type        = DISPATCH_QUEUE_MAIN_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_main_queue_wakeup,
	.dq_push        = _dispatch_main_queue_push,
);

#if DISPATCH_COCOA_COMPAT
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, lane,
	.do_type        = DISPATCH_QUEUE_RUNLOOP_TYPE,
	.do_dispose     = _dispatch_runloop_queue_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_runloop_queue_wakeup,
	.dq_push        = _dispatch_lane_push,
);
#endif

DISPATCH_VTABLE_INSTANCE(source,
	.do_type        = DISPATCH_SOURCE_KEVENT_TYPE,
	.do_dispose     = _dispatch_source_dispose,
	.do_debug       = _dispatch_source_debug,
	.do_invoke      = _dispatch_source_invoke,

	.dq_activate    = _dispatch_source_activate,
	.dq_wakeup      = _dispatch_source_wakeup,
	.dq_push        = _dispatch_lane_push,
);

DISPATCH_VTABLE_INSTANCE(channel,
	.do_type        = DISPATCH_CHANNEL_TYPE,
	.do_dispose     = _dispatch_channel_dispose,
	.do_debug       = _dispatch_channel_debug,
	.do_invoke      = _dispatch_channel_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_channel_wakeup,
	.dq_push        = _dispatch_lane_push,
);

#if HAVE_MACH
DISPATCH_VTABLE_INSTANCE(mach,
	.do_type        = DISPATCH_MACH_CHANNEL_TYPE,
	.do_dispose     = _dispatch_mach_dispose,
	.do_debug       = _dispatch_mach_debug,
	.do_invoke      = _dispatch_mach_invoke,

	.dq_activate    = _dispatch_mach_activate,
	.dq_wakeup      = _dispatch_mach_wakeup,
	.dq_push        = _dispatch_lane_push,
);

Finding the executed function with symbolic breakpoints

Run the demo and use symbolic breakpoints to find out which function is executed. Since the queue is concurrent, add a symbolic breakpoint on _dispatch_lane_concurrent_push and see whether it is hit.

dispatch_queue_t conque = dispatch_queue_create("com.ypy.queue", DISPATCH_QUEUE_CONCURRENT);
dispatch_async(conque, ^{
    NSLog(@"async function");
});
  • Running it shows that _dispatch_lane_concurrent_push is indeed the one hit


  • Step into _dispatch_lane_concurrent_push: it has two possible paths. Adding symbolic breakpoints on _dispatch_continuation_redirect_push and _dispatch_lane_push shows that _dispatch_continuation_redirect_push is the one taken


  • Step into _dispatch_continuation_redirect_push: it calls dx_push again, i.e. it recurses. Combined with what we saw during queue creation, a queue is an object with a parent (target) queue and a root queue, so the recursion eventually lands in the root queue's method
DISPATCH_NOINLINE
static void
_dispatch_continuation_redirect_push(dispatch_lane_t dl,
		dispatch_object_t dou, dispatch_qos_t qos)
{
	if (likely(!_dispatch_object_is_redirection(dou))) {
		dou._dc = _dispatch_async_redirect_wrap(dl, dou);
	} else if (!dou._dc->dc_ctxt) {
		// find first queue in descending target queue order that has
		// an autorelease frequency set, and use that as the frequency for
		// this continuation.
		dou._dc->dc_ctxt = (void *)
		(uintptr_t)_dispatch_queue_autorelease_frequency(dl);
	}

	dispatch_queue_t dq = dl->do_targetq;
	if (!qos) qos = _dispatch_priority_qos(dq->dq_priority);
	dx_push(dq, dou, qos);//recursion: a queue is an object and has a target (parent) queue and a root queue
}
  • Next, verify the guess with a symbolic breakpoint on the root queue's _dispatch_root_queue_push; the run shows the guess is correct


  • Step into _dispatch_root_queue_push -> _dispatch_root_queue_push_inline -> _dispatch_root_queue_poke -> _dispatch_root_queue_poke_slow; symbolic breakpoints confirm this is the path taken. Its implementation has two main steps:

    • Register the callback via _dispatch_root_queues_init
    • Create threads in a do-while loop using pthread_create (a minimal pthread_create example follows the code below)
DISPATCH_NOINLINE
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
    int remaining = n;
    int r = ENOSYS;

    _dispatch_root_queues_init();//the key step
    
    ...
    //create threads in a do-while loop
    do {
        _dispatch_retain(dq); // released in _dispatch_worker_thread
        while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
            if (r != EAGAIN) {
                (void)dispatch_assume_zero(r);
            }
            _dispatch_temporary_resource_shortage();
        }
    } while (--remaining);
    
    ...
}
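So the worker threads that eventually run async blocks are plain pthreads. As a small standalone reminder of the pthread_create API used above (this is ordinary pthreads code, not libdispatch internals):

#include <pthread.h>
#include <stdio.h>

static void *worker(void *ctx) {   // plays the role of _dispatch_worker_thread
    printf("worker thread running: %s\n", (const char *)ctx);
    return NULL;
}

int main(void) {
    pthread_t tid;
    // Same API the do-while loop above uses; a non-zero return (e.g. EAGAIN) means the thread was not created.
    if (pthread_create(&tid, NULL, worker, "hello") != 0) {
        return 1;
    }
    pthread_join(tid, NULL);
    return 0;
}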

_dispatch_root_queues_init

  • Step into _dispatch_root_queues_init: it is a dispatch_once_f singleton (see the singleton analysis later in this article; not covered here), and the func passed in is _dispatch_root_queues_init_once.
 DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_root_queues_init(void)
{
    dispatch_once_f(&_dispatch_root_queues_pred, NULL, _dispatch_root_queues_init_once);
}

Step into _dispatch_root_queues_init_once: internally, the call handler for its various work items is always _dispatch_worker_thread2.
The call path for the block callout is: _dispatch_root_queues_init_once -> _dispatch_worker_thread2 -> _dispatch_root_queue_drain -> _dispatch_continuation_pop_inline -> _dispatch_continuation_invoke_inline -> _dispatch_client_callout -> _dispatch_call_block_and_release.
This path can be confirmed by setting a breakpoint and printing the stack with bt.
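For example, with the earlier dispatch_async demo running, breaking on the callout symbol and dumping the stack in lldb is enough to see those frames:

(lldb) breakpoint set -n _dispatch_client_callout
(lldb) run
(lldb) bt

Once the breakpoint is hit, bt prints a call stack containing the frames listed in the path above.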
Note that the block callout of the singleton differs from the block callout of the async function:

  • In the singleton (dispatch_once), the func in the block callout is _dispatch_Block_invoke(block)
  • In the async function, the func in the block callout is _dispatch_call_block_and_release

2.3 Summary

The low-level behavior of the async function:

  • [Preparation]: the async task is first copied and wrapped, and the callback function func is set
  • [Block callout]: dx_push recurses down and is redirected to the root queue, a thread is then created with pthread_create, and the block callout finally runs via dx_invoke (dx_push and dx_invoke come in pairs)

The flow of the async function analysis is shown in the figure below.
[Figure: dispatch_async flow chart]

3 GCD Sync Functions: Underlying Principle

Step into dispatch_sync: its underlying implementation is built on the barrier function.

DISPATCH_NOINLINE
void
dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
    uintptr_t dc_flags = DC_FLAG_BLOCK;
    if (unlikely(_dispatch_block_has_private_data(work))) {
        return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
    }
    _dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}

3.1 _dispatch_sync_f

DISPATCH_NOINLINE
static void
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
		uintptr_t dc_flags)
{
	_dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
}

3.1.1 _dispatch_sync_f_inline

In _dispatch_sync_f_inline, dq_width == 1 means the queue is serial. Two branches matter:

  • Barrier: _dispatch_barrier_sync_f (covered later in this article), which shows that a sync function is, underneath, a sync barrier function
  • Deadlock: _dispatch_sync_f_slow; if two parties end up waiting for each other, a deadlock results
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	if (likely(dq->dq_width == 1)) {
		return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);//barrier
	}

	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
	}

	dispatch_lane_t dl = upcast(dq)._dl;
	// Global concurrent queues and queues bound to non-dispatch threads
	// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
	if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
		return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);//deadlock path
	}

	if (unlikely(dq->do_targetq->do_targetq)) {
		return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
	}
	_dispatch_introspection_sync_begin(dl);//handle the current info
	_dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
			_dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}
3.1.1.1 _dispatch_barrier_sync_f (covered later in this article): this is how the sync function turns out to be a sync barrier function underneath
3.1.1.2 _dispatch_sync_f_slow: deadlock

Step into _dispatch_sync_f_slow; at this point the current (main) queue is suspended and blocked.

#pragma mark dispatch_sync / dispatch_barrier_sync

DISPATCH_NOINLINE
static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
		dispatch_function_t func, uintptr_t top_dc_flags,
		dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
	dispatch_queue_t top_dq = top_dqu._dq;
	dispatch_queue_t dq = dqu._dq;
	if (unlikely(!dq->do_targetq)) {
		return _dispatch_sync_function_invoke(dq, ctxt, func);
	}

	pthread_priority_t pp = _dispatch_get_priority();
	struct dispatch_sync_context_s dsc = {
		.dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
		.dc_func     = _dispatch_async_and_wait_invoke,
		.dc_ctxt     = &dsc,
		.dc_other    = top_dq,
		.dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
		.dc_voucher  = _voucher_get(),
		.dsc_func    = func,
		.dsc_ctxt    = ctxt,
		.dsc_waiter  = _dispatch_tid_self(),
	};

	_dispatch_trace_item_push(top_dq, &dsc);
	__DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);

	if (dsc.dsc_func == NULL) {
		dispatch_queue_t stop_dq = dsc.dc_other;
		return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
	}

	_dispatch_introspection_sync_begin(top_dq);
	_dispatch_trace_item_pop(top_dq, &dsc);
	_dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags
			DISPATCH_TRACE_ARG(&dsc));
}


When a task is added to a queue, it is pushed onto the (main) queue; step into _dispatch_trace_item_push:

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_item_push(dispatch_queue_class_t dqu, dispatch_object_t _tail)
{
	if (unlikely(DISPATCH_QUEUE_PUSH_ENABLED())) {
		_dispatch_trace_continuation(dqu._dq, _tail._do, DISPATCH_QUEUE_PUSH);
	}

	_dispatch_trace_item_push_inline(dqu._dq, _tail._do);
	_dispatch_introspection_queue_push(dqu, _tail);
}

Step into __DISPATCH_WAIT_FOR_QUEUE__: it reads dq's state and checks it against the waiter recorded for the current task (dsc->dsc_waiter), i.e. whether the queue being waited on is already being drained by this thread.

DISPATCH_NOINLINE
static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
	uint64_t dq_state = _dispatch_wait_prepare(dq);
	if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
		DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
				"dispatch_sync called on queue "
				"already owned by current thread");
	}

	// Blocks submitted to the main thread MUST run on the main thread, and
	// dispatch_async_and_wait also executes on the remote context rather than
	// the current thread.
	//
	// For both these cases we need to save the frame linkage for the sake of
	// _dispatch_async_and_wait_invoke
	_dispatch_thread_frame_save_state(&dsc->dsc_dtf);

	if (_dq_state_is_suspended(dq_state) ||
			_dq_state_is_base_anon(dq_state)) {
		dsc->dc_data = DISPATCH_WLH_ANON;
	} else if (_dq_state_is_base_wlh(dq_state)) {
		dsc->dc_data = (dispatch_wlh_t)dq;
	} else {
		_dispatch_wait_compute_wlh(upcast(dq)._dl, dsc);
	}

	if (dsc->dc_data == DISPATCH_WLH_ANON) {
		dsc->dsc_override_qos_floor = dsc->dsc_override_qos =
				(uint8_t)_dispatch_get_basepri_override_qos_floor();
		_dispatch_thread_event_init(&dsc->dsc_event);
	}
	dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority));
	_dispatch_trace_runtime_event(sync_wait, dq, 0);
	if (dsc->dc_data == DISPATCH_WLH_ANON) {
		_dispatch_thread_event_wait(&dsc->dsc_event); // acquire
	} else {
		_dispatch_event_loop_wait_for_ownership(dsc);
	}
	if (dsc->dc_data == DISPATCH_WLH_ANON) {
		_dispatch_thread_event_destroy(&dsc->dsc_event);
		// If _dispatch_sync_waiter_wake() gave this thread an override,
		// ensure that the root queue sees it.
		if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) {
			_dispatch_set_basepri_override_qos(dsc->dsc_override_qos);
		}
	}
}

Step into _dq_state_drain_locked_by -> _dispatch_lock_is_locked_by:

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{
    // equivalent to _dispatch_lock_owner(lock_value) == tid
    //xor: identical bits give 0, different bits give 1; if the two values are equal the result is 0, and 0 & anything == 0
    //i.e. check whether the task that is about to wait and the task currently executing are the same; informally, whether the waiter and the executor are on the same queue
    return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}

If the waiter and the current executor are on the same queue, i.e. the thread IDs compared here are equal, the result is a deadlock.
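The classic way to hit exactly this check is to dispatch_sync onto the serial queue the code is already running on; the inner block waits for the outer block and vice versa, and libdispatch typically traps with the "dispatch_sync called on queue already owned by current thread" message seen in __DISPATCH_WAIT_FOR_QUEUE__ above (the queue label is arbitrary):

dispatch_queue_t serialQ = dispatch_queue_create("com.demo.serial", DISPATCH_QUEUE_SERIAL);
dispatch_async(serialQ, ^{
    // This block is currently draining serialQ on some thread...
    dispatch_sync(serialQ, ^{
        // ...and the same thread now waits for serialQ to finish draining: mutual waiting, i.e. deadlock.
        NSLog(@"never reached");
    });
});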

Why sync function + concurrent queue executes in order

In _dispatch_sync_invoke_and_complete -> _dispatch_sync_function_invoke_inline there are three main steps:

  • Push the task onto the queue: _dispatch_thread_frame_push
  • Execute the task's block callout: _dispatch_client_callout
  • Pop the task off the queue: _dispatch_thread_frame_pop

So the task is first pushed onto the queue, then its block callout runs, and then it is popped, which is why the tasks execute in order.
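A quick check of this: even on a concurrent queue, dispatch_sync only returns after its block has finished, so the code below always logs 1, 2, 3 in that order (the queue label is arbitrary):

dispatch_queue_t concurrentQ = dispatch_queue_create("com.demo.concurrent", DISPATCH_QUEUE_CONCURRENT);
dispatch_sync(concurrentQ, ^{ NSLog(@"1"); });  // blocks the caller until the block has run
dispatch_sync(concurrentQ, ^{ NSLog(@"2"); });
NSLog(@"3");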

3.2 Summary

The sync function's underlying implementation:

  • Underneath, the sync function is a sync barrier function
  • If the queue currently executing and the queue being waited on are the same, they end up waiting for each other and a deadlock results

The flow of the sync function's implementation is shown in the figure below.
[Figure: dispatch_sync flow chart]

4 GCD Singletons: Underlying Principle

4.1 dispatch_once

static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
    NSLog(@"singleton");
});

Step into dispatch_once: it is implemented on top of dispatch_once_f

  • Parameter 1: onceToken, a static variable; static variables defined at different locations are distinct, so it is unique
  • Parameter 2: the block callback
void
dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
    dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}

4.1.1 dispatch_once_f

DISPATCH_NOINLINE
void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
    dispatch_once_gate_t l = (dispatch_once_gate_t)val;

#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    uintptr_t v = os_atomic_load(&l->dgo_once, acquire);//load
    if (likely(v == DLOCK_ONCE_DONE)) {//already executed, return immediately
        return;
    }
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    if (likely(DISPATCH_ONCE_IS_GEN(v))) {
        return _dispatch_once_mark_done_if_quiesced(l, v);
    }
#endif
#endif
    if (_dispatch_once_gate_tryenter(l)) {//try to enter, i.e. acquire the gate
        return _dispatch_once_callout(l, ctxt, func);
    }
    return _dispatch_once_wait(l);//wait indefinitely
}

In dispatch_once_f, val is the onceToken static variable passed in from outside, and func is _dispatch_Block_invoke(block). The main steps of the singleton are:

  • Cast val, the static variable, to a variable l of type dispatch_once_gate_t
  • Load the current marker v of the task with os_atomic_load
    • If v equals DLOCK_ONCE_DONE, the task has already run, so return immediately
    • If the task has run but the marker still holds a generation value (the DISPATCH_ONCE_USE_QUIESCENT_COUNTER case), _dispatch_once_mark_done_if_quiesced stores the marker again, setting it to DLOCK_ONCE_DONE
    • Otherwise, _dispatch_once_gate_tryenter tries to enter the task, i.e. lock the gate, and then _dispatch_once_callout runs the block callout
  • If another task comes in while the first task is still executing, _dispatch_once_wait makes it wait indefinitely
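A much-simplified model of this gate, written with C11 atomics, may make the enter/wait/done flow easier to follow. This is only an illustration under simplifying assumptions, not libdispatch's implementation: the real _dispatch_once_wait parks the waiting thread instead of spinning, and the actual lock/DONE values differ.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

typedef atomic_uintptr_t my_once_gate;        // plays the role of dispatch_once_gate_t
#define MY_ONCE_UNLOCKED ((uintptr_t)0)       // like DLOCK_ONCE_UNLOCKED
#define MY_ONCE_DONE     ((uintptr_t)~0ul)    // like DLOCK_ONCE_DONE

static void my_once(my_once_gate *gate, void (*fn)(void)) {
    // Fast path: already done, return immediately.
    if (atomic_load_explicit(gate, memory_order_acquire) == MY_ONCE_DONE) return;

    // "tryenter": swap UNLOCKED -> this thread's id, i.e. lock the gate.
    uintptr_t expected = MY_ONCE_UNLOCKED;
    if (atomic_compare_exchange_strong(gate, &expected, (uintptr_t)pthread_self())) {
        fn();                                                             // runs exactly once
        atomic_store_explicit(gate, MY_ONCE_DONE, memory_order_release);  // "mark done" + broadcast
    } else {
        // Another thread holds the gate: wait until it is marked done.
        while (atomic_load_explicit(gate, memory_order_acquire) != MY_ONCE_DONE) {
            sched_yield();  // the real implementation blocks the thread instead of spinning
        }
    }
}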

4.1.2 _dispatch_once_gate_tryenter: acquiring the gate

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
    return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
            (uintptr_t)_dispatch_lock_value_for_self(), relaxed);//compare first, then exchange
}

Its implementation is an os_atomic_cmpxchg: the current value is compared with DLOCK_ONCE_UNLOCKED, and if it matches, it is atomically swapped to the lock value for the current thread, i.e. the gate is locked.

4.1.3 _dispatch_once_callout: the callout

DISPATCH_NOINLINE
static void
_dispatch_once_callout(dispatch_once_gate_t l, void *ctxt,
        dispatch_function_t func)
{
    _dispatch_client_callout(ctxt, func);//invoke the block
    _dispatch_once_gate_broadcast(l);//broadcast: tell the waiters the gate is done, stop waiting
}

_dispatch_once_callout does just two things:

  • _dispatch_client_callout: runs the block callout
  • _dispatch_once_gate_broadcast: broadcasts completion

4.1.3.1 _dispatch_client_callout

#undef _dispatch_client_callout
void
_dispatch_client_callout(void *ctxt, dispatch_function_t f)
{
    @try {
        return f(ctxt);
    }
    @catch (...) {
        objc_terminate();
    }
}

_dispatch_client_callout simply executes the block callout; here f is _dispatch_Block_invoke(block), i.e. the block's invoke function.

4.1.3.2 _dispatch_once_gate_broadcast -> _dispatch_once_mark_done

DISPATCH_ALWAYS_INLINE
static inline uintptr_t
_dispatch_once_mark_done(dispatch_once_gate_t dgo)
{
    //unconditionally exchange the value, marking the gate as DLOCK_ONCE_DONE
    return os_atomic_xchg(&dgo->dgo_once, DLOCK_ONCE_DONE, release);
}

_dispatch_once_gate_broadcast -> _dispatch_once_mark_done simply stores a new value into dgo->dgo_once, setting the task's marker to DLOCK_ONCE_DONE, i.e. marking the gate as done.

4.2 Summary

The key points of the singleton's underlying implementation:

  • [Why it runs only once]: dispatch_once has two important parameters, onceToken and block. onceToken is a static variable and therefore unique; in the lower layer it is wrapped as a dispatch_once_gate_t variable l, which provides atomic access to the marker v that records the task's state. If v equals DLOCK_ONCE_DONE, the task has already been handled once and the call returns immediately
  • [When the block runs]: if the task has not run yet, the gate is locked through an atomic compare-and-exchange (the marker goes from DLOCK_ONCE_UNLOCKED to the current thread's lock value), which guarantees that only one caller executes the block even if it is reached from several places. After the block callout finishes, the gate is unlocked and the task's state is set to DLOCK_ONCE_DONE, so the next call returns immediately instead of executing again
  • [Effect of multiple threads]: if other tasks come in while the current task is executing, they wait indefinitely, because the current task already holds the lock on the gate and the others cannot acquire it
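For completeness, this is the canonical Objective-C use of these guarantees (Manager is a placeholder class name):

+ (instancetype)sharedManager {
    static Manager *shared = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        shared = [[Manager alloc] init];  // runs exactly once, even if many threads race to get here
    });
    return shared;
}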

The singleton's low-level flow chart:
[Figure: dispatch_once flow chart]
