net: add sk_drops_read(), sk_drops_inc() and sk_drops_reset() helpers
We want to split sk->sk_drops in the future to reduce potential
contention on this field.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250826125031.1578842-2-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit f86f42ed2c
parent c2a756891b
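The helpers keep the sk_drops representation private to a tiny API, so a later
patch can change the storage without touching the call sites converted below.
A rough sketch of one possible follow-up, assuming a hypothetical per-CPU field
sk->sk_drops_pcpu (not part of this patch, illustration only), could look like:

	/* Illustrative only: back the counter with per-CPU storage so that
	 * concurrent droppers do not bounce a shared cache line. The field
	 * sk_drops_pcpu is hypothetical and does not exist in this patch.
	 */
	static inline void sk_drops_inc(struct sock *sk)
	{
		this_cpu_inc(*sk->sk_drops_pcpu);
	}

	static inline int sk_drops_read(const struct sock *sk)
	{
		int cpu, total = 0;

		/* Readers pay the summing cost; the drop path stays cheap. */
		for_each_possible_cpu(cpu)
			total += *per_cpu_ptr(sk->sk_drops_pcpu, cpu);
		return total;
	}

Because every caller already goes through sk_drops_inc()/sk_drops_read()/
sk_drops_reset() after this patch, such a change would not require touching
the converted call sites again.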
@@ -2682,11 +2682,26 @@ struct sock_skb_cb {
 #define sock_skb_cb_check_size(size) \
 	BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
 
+static inline void sk_drops_inc(struct sock *sk)
+{
+	atomic_inc(&sk->sk_drops);
+}
+
+static inline int sk_drops_read(const struct sock *sk)
+{
+	return atomic_read(&sk->sk_drops);
+}
+
+static inline void sk_drops_reset(struct sock *sk)
+{
+	atomic_set(&sk->sk_drops, 0);
+}
+
 static inline void
 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
 {
 	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
-				      atomic_read(&sk->sk_drops) : 0;
+				      sk_drops_read(sk) : 0;
 }
 
 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
@@ -2612,7 +2612,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
  */
 static inline void tcp_listendrop(const struct sock *sk)
 {
-	atomic_inc(&((struct sock *)sk)->sk_drops);
+	sk_drops_inc((struct sock *)sk);
 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 }
 
@@ -345,7 +345,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
 		spin_unlock_bh(&sk_queue->lock);
 	}
 
-	atomic_inc(&sk->sk_drops);
+	sk_drops_inc(sk);
 	return err;
 }
 EXPORT_SYMBOL(__sk_queue_drop_skb);
@@ -491,13 +491,13 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
-		atomic_inc(&sk->sk_drops);
+		sk_drops_inc(sk);
 		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
 	}
 
 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
-		atomic_inc(&sk->sk_drops);
+		sk_drops_inc(sk);
 		return -ENOBUFS;
 	}
 
@@ -562,7 +562,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 	skb->dev = NULL;
 
 	if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
-		atomic_inc(&sk->sk_drops);
+		sk_drops_inc(sk);
 		reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
 		goto discard_and_relse;
 	}
@@ -585,7 +585,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 			reason = SKB_DROP_REASON_PFMEMALLOC;
 		if (err == -ENOBUFS)
 			reason = SKB_DROP_REASON_SOCKET_BACKLOG;
-		atomic_inc(&sk->sk_drops);
+		sk_drops_inc(sk);
 		goto discard_and_relse;
 	}
 
@@ -2505,7 +2505,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 	newsk->sk_wmem_queued = 0;
 	newsk->sk_forward_alloc = 0;
 	newsk->sk_reserved_mem = 0;
-	atomic_set(&newsk->sk_drops, 0);
+	sk_drops_reset(newsk);
 	newsk->sk_send_head = NULL;
 	newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 	atomic_set(&newsk->sk_zckey, 0);
@@ -3713,7 +3713,7 @@ void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
 	 */
 	smp_wmb();
 	refcount_set(&sk->sk_refcnt, 1);
-	atomic_set(&sk->sk_drops, 0);
+	sk_drops_reset(sk);
 }
 EXPORT_SYMBOL(sock_init_data_uid);
 
@@ -3973,7 +3973,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
 	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
 	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
-	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+	mem[SK_MEMINFO_DROPS] = sk_drops_read(sk);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -1119,7 +1119,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
 		from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
 		0, sock_i_ino(sp),
 		refcount_read(&sp->sk_refcnt), sp,
-		atomic_read(&sp->sk_drops));
+		sk_drops_read(sp));
 }
 
 static int ping_v4_seq_show(struct seq_file *seq, void *v)
@@ -178,7 +178,7 @@ static int raw_v4_input(struct net *net, struct sk_buff *skb,
 
 		if (atomic_read(&sk->sk_rmem_alloc) >=
 		    READ_ONCE(sk->sk_rcvbuf)) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			continue;
 		}
 
@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 int raw_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
-		atomic_inc(&sk->sk_drops);
+		sk_drops_inc(sk);
 		sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
 		return NET_RX_DROP;
 	}
@@ -1045,7 +1045,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 		0, 0L, 0,
 		from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
 		0, sock_i_ino(sp),
-		refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
+		refcount_read(&sp->sk_refcnt), sp, sk_drops_read(sp));
 }
 
 static int raw_seq_show(struct seq_file *seq, void *v)
@@ -1787,7 +1787,7 @@ uncharge_drop:
 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 
 drop:
-	atomic_inc(&sk->sk_drops);
+	sk_drops_inc(sk);
 	busylock_release(busy);
 	return err;
 }
@@ -1852,7 +1852,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
 					IS_UDPLITE(sk));
 			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
 					IS_UDPLITE(sk));
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			__skb_unlink(skb, rcvq);
 			*total += skb->truesize;
 			kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
@@ -2008,7 +2008,7 @@ try_again:
 
 		__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
 		__UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
-		atomic_inc(&sk->sk_drops);
+		sk_drops_inc(sk);
 		kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
 		goto try_again;
 	}
@@ -2078,7 +2078,7 @@ try_again:
 
 	if (unlikely(err)) {
 		if (!peeking) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			UDP_INC_STATS(sock_net(sk),
 				      UDP_MIB_INERRORS, is_udplite);
 		}
@@ -2449,7 +2449,7 @@ csum_error:
 	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
 	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	atomic_inc(&sk->sk_drops);
+	sk_drops_inc(sk);
 	sk_skb_reason_drop(sk, skb, drop_reason);
 	return -1;
 }
@@ -2534,7 +2534,7 @@ start_lookup:
 		nskb = skb_clone(skb, GFP_ATOMIC);
 
 		if (unlikely(!nskb)) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
 					IS_UDPLITE(sk));
 			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
@@ -3386,7 +3386,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
 		from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
 		0, sock_i_ino(sp),
 		refcount_read(&sp->sk_refcnt), sp,
-		atomic_read(&sp->sk_drops));
+		sk_drops_read(sp));
 }
 
 int udp4_seq_show(struct seq_file *seq, void *v)
@@ -1068,5 +1068,5 @@ void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
 		   0,
 		   sock_i_ino(sp),
 		   refcount_read(&sp->sk_refcnt), sp,
-		   atomic_read(&sp->sk_drops));
+		   sk_drops_read(sp));
 }
@@ -163,7 +163,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 
 		if (atomic_read(&sk->sk_rmem_alloc) >=
 		    READ_ONCE(sk->sk_rcvbuf)) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			continue;
 		}
 
@@ -361,7 +361,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
 	    skb_checksum_complete(skb)) {
-		atomic_inc(&sk->sk_drops);
+		sk_drops_inc(sk);
 		sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
 		return NET_RX_DROP;
 	}
@@ -389,7 +389,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 	struct raw6_sock *rp = raw6_sk(sk);
 
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
-		atomic_inc(&sk->sk_drops);
+		sk_drops_inc(sk);
 		sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
 		return NET_RX_DROP;
 	}
@@ -414,7 +414,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 
 	if (inet_test_bit(HDRINCL, sk)) {
 		if (skb_checksum_complete(skb)) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
 			return NET_RX_DROP;
 		}
@@ -524,7 +524,7 @@ try_again:
 	}
 	if (unlikely(err)) {
 		if (!peeking) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 		}
 		kfree_skb(skb);
@@ -908,7 +908,7 @@ csum_error:
 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	atomic_inc(&sk->sk_drops);
+	sk_drops_inc(sk);
 	sk_skb_reason_drop(sk, skb, drop_reason);
 	return -1;
 }
@@ -1013,7 +1013,7 @@ start_lookup:
 		}
 		nskb = skb_clone(skb, GFP_ATOMIC);
 		if (unlikely(!nskb)) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
 					 IS_UDPLITE(sk));
 			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
@@ -1187,7 +1187,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 
 	IUCV_SKB_CB(skb)->offset = 0;
 	if (sk_filter(sk, skb)) {
-		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
+		sk_drops_inc(sk);	/* skb rejected by filter */
 		kfree_skb(skb);
 		return;
 	}
@@ -2011,7 +2011,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 	skb_reset_network_header(skb);
 	IUCV_SKB_CB(skb)->offset = 0;
 	if (sk_filter(sk, skb)) {
-		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
+		sk_drops_inc(sk);	/* skb rejected by filter */
 		kfree_skb(skb);
 		return NET_RX_SUCCESS;
 	}
@@ -356,7 +356,7 @@ static void netlink_overrun(struct sock *sk)
 			sk_error_report(sk);
 		}
 	}
-	atomic_inc(&sk->sk_drops);
+	sk_drops_inc(sk);
 }
 
 static void netlink_rcv_wake(struct sock *sk)
@@ -2711,7 +2711,7 @@ static int netlink_native_seq_show(struct seq_file *seq, void *v)
 			   sk_wmem_alloc_get(s),
 			   READ_ONCE(nlk->cb_running),
 			   refcount_read(&s->sk_refcnt),
-			   atomic_read(&s->sk_drops),
+			   sk_drops_read(s),
 			   sock_i_ino(s)
 			);
 
@@ -2265,7 +2265,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 drop_n_acct:
 	atomic_inc(&po->tp_drops);
-	atomic_inc(&sk->sk_drops);
+	sk_drops_inc(sk);
 	drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
 
 drop_n_restore:
@@ -376,7 +376,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	case PNS_PEP_CTRL_REQ:
 		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			break;
 		}
 		__skb_pull(skb, 4);
@@ -397,7 +397,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 		}
 
 		if (pn->rx_credits == 0) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			err = -ENOBUFS;
 			break;
 		}
@@ -567,7 +567,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
 		}
 
 		if (pn->rx_credits == 0) {
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			err = NET_RX_DROP;
 			break;
 		}
@@ -587,7 +587,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
 			   from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
 			   sock_i_ino(sk),
 			   refcount_read(&sk->sk_refcnt), sk,
-			   atomic_read(&sk->sk_drops));
+			   sk_drops_read(sk));
 	}
 	seq_pad(seq, '\n');
 	return 0;
@@ -173,7 +173,7 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
 		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
 		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
 		mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
-		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+		mem[SK_MEMINFO_DROPS] = sk_drops_read(sk);
 
 		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
 			goto errout;
@@ -2366,7 +2366,7 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
 	else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
 		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
 				   "err_overload2!");
-		atomic_inc(&sk->sk_drops);
+		sk_drops_inc(sk);
 		err = TIPC_ERR_OVERLOAD;
 	}
 
@@ -2458,7 +2458,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
 			/* Overload => reject message back to sender */
 			onode = tipc_own_addr(sock_net(sk));
-			atomic_inc(&sk->sk_drops);
+			sk_drops_inc(sk);
 			if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
 				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
 						      "@sk_enqueue!");
@@ -3657,7 +3657,7 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
 			skb_queue_len(&sk->sk_write_queue)) ||
 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
-			atomic_read(&sk->sk_drops)))
+			sk_drops_read(sk)))
 		goto stat_msg_cancel;
 
 	if (tsk->cong_link_cnt &&