How sk->sk_lock.owned gets set

The read() path

The backtrace below shows a blocking read() on a TCP socket going from SyS_read through tcp_recvmsg() into sk_wait_data(), which ends up in lock_sock_nested(), where sk->sk_lock.owned is set:

[   45.024721] [<c07e8104>] (lock_sock_nested) from [<c07e8adc>] (sk_wait_data+0xa0/0x120)
[   45.032827] [<c07e8adc>] (sk_wait_data) from [<c085711c>] (tcp_recvmsg+0x388/0xac0)
[   45.040589] [<c085711c>] (tcp_recvmsg) from [<c08807c4>] (inet_recvmsg+0xa4/0xcc)
[   45.048179] [<c08807c4>] (inet_recvmsg) from [<c07e3bcc>] (sock_read_iter+0x9c/0xe0)
[   45.056031] [<c07e3bcc>] (sock_read_iter) from [<c03004ec>] (__vfs_read+0xdc/0x12c)
[   45.063797] [<c03004ec>] (__vfs_read) from [<c0301258>] (vfs_read+0x8c/0x118)
[   45.071014] [<c0301258>] (vfs_read) from [<c03020dc>] (SyS_read+0x4c/0xac)
[   45.077969] [<c03020dc>] (SyS_read) from [<c0208580>] (ret_fast_syscall+0x0/0x34)
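
To reach this path it is enough for user space to issue a blocking read() on a connected TCP socket whose receive queue is empty. A minimal sketch (the address and port are made up for illustration):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in addr;
	char buf[128];
	ssize_t n;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(12345);		/* hypothetical test server */
	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* Nothing has been sent yet, so this read() sleeps in sk_wait_data(). */
	n = read(fd, buf, sizeof(buf));
	printf("read returned %zd\n", n);

	close(fd);
	return 0;
}

sk_wait_data(), called by tcp_recvmsg() and reaching lock_sock_nested() via the sk_wait_event() macro, is where the task actually sleeps:
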
/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 * @skb:   last skb seen on sk_receive_queue
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
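
For context, the caller seen in the trace, tcp_recvmsg(), only sleeps when it has not yet copied enough data. The relevant fragment looks roughly like this (simplified from net/ipv4/tcp.c of the same kernel era; details vary by version):

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else {
			sk_wait_data(sk, &timeo, last);
		}

The wait condition handed to sk_wait_event() compares the tail of the receive queue against the last skb already seen, using skb_peek_tail():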


/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}
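
The cast of list_ to struct sk_buff * works because sk_buff_head deliberately starts with the same two pointers as struct sk_buff, so the list head doubles as the sentinel of the circular list: on an empty queue, list_->prev points back at the head itself and skb_peek_tail() returns NULL. The layout, as declared in include/linux/skbuff.h (remaining fields shown for completeness), is:

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

Back in sk_wait_data(), the blocking itself is done by the sk_wait_event() macro:
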
#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		sched_annotate_sleep();						\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
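
Hand-expanding the macro for the call in sk_wait_data() makes the locking dance explicit (this is an illustration, not literal preprocessor output):

	rc = ({
		int __rc;
		release_sock(sk);	/* give up ownership; queued backlog is processed here */
		__rc = (skb_peek_tail(&sk->sk_receive_queue) != skb);
		if (!__rc)
			*timeo = schedule_timeout(*timeo);	/* sleep until data, signal or timeout */
		sched_annotate_sleep();
		lock_sock(sk);		/* re-take ownership: the lock_sock_nested() in the trace */
		__rc = (skb_peek_tail(&sk->sk_receive_queue) != skb);
		__rc;
	});

lock_sock() itself is just a thin wrapper around lock_sock_nested():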

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}
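
Its counterpart release_sock(), which sk_wait_event() calls before sleeping, is where the flag goes back to 0 and any task waiting in __lock_sock() is woken. Approximately (simplified from net/core/sock.c of the same era; treat the details as a sketch):

void release_sock(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);		/* run packets queued by softirq while we owned the socket */
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);
	sock_release_ownership(sk);		/* sk->sk_lock.owned = 0 */
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);	/* wake a waiter stuck in __lock_sock() */
	spin_unlock_bh(&sk->sk_lock.slock);
}

lock_sock_nested() is where sk->sk_lock.owned is actually set to 1: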


void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);	/* someone else owns the socket: sleep until it is released */
	sk->sk_lock.owned = 1;		/* take ownership; the flag itself is protected by slock */
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
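
__lock_sock(), not quoted above, is the slow path taken when another task already owns the socket: it sleeps on sk_lock.wq and re-checks ownership each time it is woken. A sketch of its shape in this kernel era (approximate; see net/core/sock.c for the authoritative code):

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))	/* owner cleared the flag in release_sock() */
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

So sk->sk_lock.owned is set under sk_lock.slock in lock_sock_nested() and cleared again in release_sock(); the slock spinlock only protects the flag and the backlog, while "ownership" behaves like a mutex held across the whole syscall.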

Reprinted from blog.csdn.net/yinming4u/article/details/84313320