Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/ (last synced 2026-04-17 22:23:45 -04:00).
skmsg: Introduce a spinlock to protect ingress_msg
Currently we rely on lock_sock to protect ingress_msg, it is too big for this, we can actually just use a spinlock to protect this list like protecting other skb queues. __tcp_bpf_recvmsg() is still special because of peeking, it still has to use lock_sock. Signed-off-by: Cong Wang <cong.wang@bytedance.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Jakub Sitnicki <jakub@cloudflare.com> Acked-by: John Fastabend <john.fastabend@gmail.com> Link: https://lore.kernel.org/bpf/20210331023237.41094-3-xiyou.wangcong@gmail.com
This commit is contained in:
committed by
Alexei Starovoitov
parent
37f0e514db
commit
b01fd6e802
@@ -89,6 +89,7 @@ struct sk_psock {
|
||||
#endif
|
||||
struct sk_buff_head ingress_skb;
|
||||
struct list_head ingress_msg;
|
||||
spinlock_t ingress_lock;
|
||||
unsigned long state;
|
||||
struct list_head link;
|
||||
spinlock_t link_lock;
|
||||
@@ -284,7 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk)
|
||||
static inline void sk_psock_queue_msg(struct sk_psock *psock,
|
||||
struct sk_msg *msg)
|
||||
{
|
||||
spin_lock_bh(&psock->ingress_lock);
|
||||
list_add_tail(&msg->list, &psock->ingress_msg);
|
||||
spin_unlock_bh(&psock->ingress_lock);
|
||||
}
|
||||
|
||||
static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
|
||||
{
|
||||
struct sk_msg *msg;
|
||||
|
||||
spin_lock_bh(&psock->ingress_lock);
|
||||
msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
|
||||
if (msg)
|
||||
list_del(&msg->list);
|
||||
spin_unlock_bh(&psock->ingress_lock);
|
||||
return msg;
|
||||
}
|
||||
|
||||
static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
|
||||
{
|
||||
struct sk_msg *msg;
|
||||
|
||||
spin_lock_bh(&psock->ingress_lock);
|
||||
msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
|
||||
spin_unlock_bh(&psock->ingress_lock);
|
||||
return msg;
|
||||
}
|
||||
|
||||
static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
|
||||
struct sk_msg *msg)
|
||||
{
|
||||
struct sk_msg *ret;
|
||||
|
||||
spin_lock_bh(&psock->ingress_lock);
|
||||
if (list_is_last(&msg->list, &psock->ingress_msg))
|
||||
ret = NULL;
|
||||
else
|
||||
ret = list_next_entry(msg, list);
|
||||
spin_unlock_bh(&psock->ingress_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
|
||||
@@ -292,6 +331,13 @@ static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
|
||||
return psock ? list_empty(&psock->ingress_msg) : true;
|
||||
}
|
||||
|
||||
static inline void kfree_sk_msg(struct sk_msg *msg)
|
||||
{
|
||||
if (msg->skb)
|
||||
consume_skb(msg->skb);
|
||||
kfree(msg);
|
||||
}
|
||||
|
||||
static inline void sk_psock_report_error(struct sk_psock *psock, int err)
|
||||
{
|
||||
struct sock *sk = psock->sk;
|
||||
|
||||
Reference in New Issue
Block a user