net: macb: Fix tx_ptr_lock locking

macb_start_xmit and macb_tx_poll can be called with bottom-halves
disabled (e.g. from softirq) as well as with interrupts disabled (with
netpoll). Because of this, all other functions taking tx_ptr_lock must
use spin_lock_irqsave.

Fixes: 138badbc21 ("net: macb: use NAPI for TX completion path")
Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Link: https://patch.msgid.link/20250829143521.1686062-1-sean.anderson@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Author: Sean Anderson <sean.anderson@linux.dev>
Date: 2025-08-29 10:35:21 -04:00
Committed by: Jakub Kicinski
Parent: b434a3772d
Commit: 6bc8a5098b

View File

@@ -1223,12 +1223,13 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
 {
 	struct macb *bp = queue->bp;
 	u16 queue_index = queue - bp->queues;
+	unsigned long flags;
 	unsigned int tail;
 	unsigned int head;
 	int packets = 0;
 	u32 bytes = 0;
 
-	spin_lock(&queue->tx_ptr_lock);
+	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
 	head = queue->tx_head;
 	for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
 		struct macb_tx_skb *tx_skb;
@@ -1291,7 +1292,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
 		    CIRC_CNT(queue->tx_head, queue->tx_tail,
 			     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
 			netif_wake_subqueue(bp->dev, queue_index);
-	spin_unlock(&queue->tx_ptr_lock);
+	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 
 	return packets;
 }
@@ -1707,8 +1708,9 @@ static void macb_tx_restart(struct macb_queue *queue)
 {
 	struct macb *bp = queue->bp;
 	unsigned int head_idx, tbqp;
+	unsigned long flags;
 
-	spin_lock(&queue->tx_ptr_lock);
+	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
 
 	if (queue->tx_head == queue->tx_tail)
 		goto out_tx_ptr_unlock;
@@ -1720,19 +1722,20 @@ static void macb_tx_restart(struct macb_queue *queue)
 	if (tbqp == head_idx)
 		goto out_tx_ptr_unlock;
 
-	spin_lock_irq(&bp->lock);
+	spin_lock(&bp->lock);
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
-	spin_unlock_irq(&bp->lock);
+	spin_unlock(&bp->lock);
 
 out_tx_ptr_unlock:
-	spin_unlock(&queue->tx_ptr_lock);
+	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 }
 
 static bool macb_tx_complete_pending(struct macb_queue *queue)
 {
 	bool retval = false;
+	unsigned long flags;
 
-	spin_lock(&queue->tx_ptr_lock);
+	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
 	if (queue->tx_head != queue->tx_tail) {
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
@@ -1740,7 +1743,7 @@ static bool macb_tx_complete_pending(struct macb_queue *queue)
 		if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
 			retval = true;
 	}
-	spin_unlock(&queue->tx_ptr_lock);
+	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 
 	return retval;
 }
@@ -2308,6 +2311,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct macb_queue *queue = &bp->queues[queue_index];
 	unsigned int desc_cnt, nr_frags, frag_size, f;
 	unsigned int hdrlen;
+	unsigned long flags;
 	bool is_lso;
 	netdev_tx_t ret = NETDEV_TX_OK;
 
@@ -2368,7 +2372,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
 	}
 
-	spin_lock_bh(&queue->tx_ptr_lock);
+	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
 
 	/* This is a hard error, log it. */
 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
@@ -2392,15 +2396,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
 			     skb->len);
 
-	spin_lock_irq(&bp->lock);
+	spin_lock(&bp->lock);
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
-	spin_unlock_irq(&bp->lock);
+	spin_unlock(&bp->lock);
 
 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
 		netif_stop_subqueue(dev, queue_index);
 
 unlock:
-	spin_unlock_bh(&queue->tx_ptr_lock);
+	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 
 	return ret;
 }