tcp: move tcp_clean_acked to tcp_sock_read_tx group

tp->tcp_clean_acked is fetched on the tx path when snd_una is updated.

This field thus belongs to the tcp_sock_read_tx group.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250919204856.2977245-7-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Eric Dumazet, 2025-09-19 20:48:54 +00:00, committed by Jakub Kicinski
commit a105ea47a4 (parent 969904dcd7)
3 changed files with 7 additions and 7 deletions
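For context, the callback is only consulted on the ACK/tx-completion path, right after snd_una has been advanced, so grouping it with the other tx read-mostly fields keeps that fetch on an already-hot cache line. The userspace model below is only an illustrative sketch of that access pattern; the struct, helper names and the TLS callback (tcp_sock_model, ack_advances_snd_una, tls_clean_acked) are invented for the example and are not the kernel code path.

/*
 * Userspace model (not kernel code) of why tp->tcp_clean_acked is a
 * tx-path read: the callback is only looked at once an incoming ACK
 * advances snd_una, i.e. while the tx bookkeeping fields are already hot.
 */
#include <stdio.h>

struct sock;	/* stand-in for the real struct sock */

struct tcp_sock_model {
	/* fields read mostly on the tx path, kept together */
	unsigned int snd_una;	/* oldest byte not yet acknowledged */
	void (*tcp_clean_acked)(struct sock *sk, unsigned int acked_seq);
};

static void tls_clean_acked(struct sock *sk, unsigned int acked_seq)
{
	(void)sk;
	printf("TLS device may release records up to seq %u\n", acked_seq);
}

/* Roughly what happens once an ACK moves snd_una forward. */
static void ack_advances_snd_una(struct tcp_sock_model *tp, unsigned int ack)
{
	tp->snd_una = ack;		/* snd_una just written...          */
	if (tp->tcp_clean_acked)	/* ...callback fetched right next to it */
		tp->tcp_clean_acked(NULL, tp->snd_una);
}

int main(void)
{
	struct tcp_sock_model tp = {
		.snd_una = 1000,
		.tcp_clean_acked = tls_clean_acked,
	};

	ack_advances_snd_una(&tp, 2000);
	return 0;
}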

Documentation/networking/net_cachelines/tcp_sock.rst

@@ -27,7 +27,7 @@ u32 dsack_dups
 u32 snd_una read_mostly read_write tcp_wnd_end,tcp_urg_mode,tcp_minshall_check,tcp_cwnd_validate(tx);tcp_ack,tcp_may_update_window,tcp_clean_rtx_queue(write),tcp_ack_tstamp(rx)
 u32 snd_sml read_write tcp_minshall_check,tcp_minshall_update
 u32 rcv_tstamp read_write read_write tcp_ack
-void * tcp_clean_acked read_mostly tcp_ack
+void * tcp_clean_acked read_mostly tcp_ack
 u32 lsndtime read_write tcp_slow_start_after_idle_check,tcp_event_data_sent
 u32 last_oow_ack_time
 u32 compressed_ack_rcv_nxt

include/linux/tcp.h

@@ -215,6 +215,9 @@ struct tcp_sock {
 	u16 gso_segs; /* Max number of segs per GSO packet */
 	/* from STCP, retrans queue hinting */
 	struct sk_buff *retransmit_skb_hint;
+#if defined(CONFIG_TLS_DEVICE)
+	void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
+#endif
 	__cacheline_group_end(tcp_sock_read_tx);
 	/* TXRX read-mostly hotpath cache lines */
@@ -250,9 +253,6 @@ struct tcp_sock {
 	struct minmax rtt_min;
 	/* OOO segments go in this rbtree. Socket lock must be held. */
 	struct rb_root out_of_order_queue;
-#if defined(CONFIG_TLS_DEVICE)
-	void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
-#endif
 	__cacheline_group_end(tcp_sock_read_rx);
 	/* TX read-write hotpath cache lines */

net/ipv4/tcp_input.c

@@ -5101,6 +5101,9 @@ static void __init tcp_struct_check(void)
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat);
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs);
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint);
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, tcp_clean_acked);
+#endif
 	/* TXRX read-mostly hotpath cache lines */
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset);
@@ -5124,9 +5127,6 @@ static void __init tcp_struct_check(void)
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min);
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue);
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh);
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
-	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tcp_clean_acked);
-#endif
 	/* TX read-write hotpath cache lines */
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out);
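The tcp_struct_check() hunks above only move the layout assertion for tcp_clean_acked to the tx group. As a rough illustration of the underlying idea (zero-size begin/end markers bounding a group, plus an offsetof-based build-time check), here is a self-contained userspace sketch; demo_sock and ASSERT_IN_GROUP are made-up names and this is not the kernel's CACHELINE_ASSERT_GROUP_MEMBER() implementation.

/*
 * Userspace sketch of the layout-checking idea: zero-size marker fields
 * bound a group, and a static assertion verifies a member's offset falls
 * between them, so an accidental field move is caught at build time.
 */
#include <assert.h>
#include <stddef.h>

struct demo_sock {
	char read_tx_start[0];	/* group begin marker (GNU C zero-size array) */
	unsigned int snd_una;
	void (*tcp_clean_acked)(void *sk, unsigned int acked_seq);
	char read_tx_end[0];	/* group end marker */
	unsigned long unrelated_rx_field;	/* outside the read_tx group */
};

/* The member must sit between the group's begin and end markers. */
#define ASSERT_IN_GROUP(type, grp, member)				        \
	static_assert(offsetof(type, member) >= offsetof(type, grp##_start) && \
		      offsetof(type, member) <  offsetof(type, grp##_end),     \
		      #member " must stay inside group " #grp)

ASSERT_IN_GROUP(struct demo_sock, read_tx, snd_una);
ASSERT_IN_GROUP(struct demo_sock, read_tx, tcp_clean_acked);

int main(void)
{
	return 0;
}

If tcp_clean_acked were moved below read_tx_end in this sketch, the assertion would fail to compile, which is the same way the real checks catch a field that lands in the wrong cacheline group.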