Skip to content

Commit 8ea87c9

Browse files
committed
net: macb: add TX stall watchdog as defence-in-depth safety net
Patches 1/3 and 2/3 address two candidate races that could lead to a TCOMP completion being missed on PCIe-attached macb instances. This patch adds a defence-in-depth safety net, in case a further race remains that we have not identified. The watchdog is a per-queue delayed_work that runs once per second. Movement is tracked via a tx_stall_tail_moved boolean: macb_tx_complete() sets it under tx_ptr_lock whenever tx_tail advances, and the watchdog clears it under the same lock at each tick. If the ring is non-empty (tx_head != tx_tail) and the boolean was still false at the next tick, the watchdog calls macb_tx_restart(). A boolean is used in preference to snapshotting tx_tail and comparing across ticks, because per-queue ring indices are bounded and reused; under sustained load a snapshot comparison can false-positive when the index happens to land on the same value between two ticks. Both writes share tx_ptr_lock with the existing tx_head / tx_tail updates, so no atomic is required. No new recovery logic is introduced. macb_tx_restart() already exists in this file, is correctly locked (tx_ptr_lock, bp->lock), and verifies that the hardware's TBQP is behind the driver's head index before re-asserting TSTART. On a healthy ring it is a no-op at the hardware level; the watchdog only supplies the missing trigger. On a healthy queue the per-tick cost is one spin_lock_irqsave() / spin_unlock_irqrestore(), one branch, and one byte store. The delayed_work is only scheduled between macb_open() and macb_close(), and is cancelled synchronously on close. Context for submission: on our 24-node Raspberry Pi 5 fleet, before this series, an out-of-band user-space watchdog (monitoring tx_packets from /sys/class/net/.../statistics and toggling the link down/up when it froze) was required to keep nodes usable. We include this kernel-side watchdog as a cleaner in-kernel equivalent for any residual stall that patches 1 and 2 do not cover. 
We are willing to drop this patch if the consensus is that patches 1 and 2 should stand alone. Link: cilium/cilium#43198 Link: https://bugs.launchpad.net/ubuntu/+source/linux-raspi/+bug/2133877 Signed-off-by: Lukasz Raczylo <lukasz@raczylo.com>
1 parent 3ccf780 commit 8ea87c9

2 files changed

Lines changed: 76 additions & 0 deletions

File tree

drivers/net/ethernet/cadence/macb.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1294,6 +1294,17 @@ struct macb_queue {
12941294
struct work_struct tx_error_task;
12951295
bool txubr_pending;
12961296
bool tx_pending;
1297+
1298+
/* TX stall watchdog -- see macb_tx_stall_watchdog() in macb_main.c.
1299+
* tx_stall_tail_moved is set by macb_tx_complete() under tx_ptr_lock
1300+
* whenever tx_tail advances, and cleared by the watchdog tick on the
1301+
* same lock. A bool avoids the index-aliasing false-positive that a
1302+
* snapshot-of-tx_tail comparison would have when the ring index space
1303+
* happens to wrap to the same value between two ticks.
1304+
*/
1305+
struct delayed_work tx_stall_watchdog_work;
1306+
bool tx_stall_tail_moved;
1307+
12971308
struct napi_struct napi_tx;
12981309

12991310
dma_addr_t rx_ring_dma;

drivers/net/ethernet/cadence/macb_main.c

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1505,6 +1505,8 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
15051505
packets, bytes);
15061506

15071507
queue->tx_tail = tail;
1508+
if (packets)
1509+
queue->tx_stall_tail_moved = true;
15081510
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
15091511
CIRC_CNT(queue->tx_head, queue->tx_tail,
15101512
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
@@ -2028,6 +2030,63 @@ static int macb_tx_poll(struct napi_struct *napi, int budget)
20282030
return work_done;
20292031
}
20302032

2033+
#define MACB_TX_STALL_INTERVAL_MS 1000
2034+
2035+
/* TX stall watchdog.
2036+
*
2037+
* Defence-in-depth against lost TCOMP interrupts. macb already has a
2038+
* recovery chain (tx_pending -> txubr_pending -> macb_tx_restart())
2039+
* that fires on TCOMP; if TCOMP itself is lost the TX ring stalls
2040+
* silently until something else kicks TSTART. This watchdog runs
2041+
* once per second per queue and calls macb_tx_restart() if the ring
2042+
* is non-empty and tx_tail has not advanced since the previous tick.
2043+
*
2044+
* Movement is tracked via the tx_stall_tail_moved boolean rather
2045+
* than by snapshotting tx_tail. Per-queue ring indices are bounded
2046+
* (and reused), so a snapshot comparison can false-positive when the
2047+
* index happens to land on the same value between two ticks under
2048+
* sustained load. The boolean is set by macb_tx_complete() whenever
2049+
* tx_tail advances and cleared by this watchdog after each tick;
2050+
* both writes are under tx_ptr_lock, so no atomic is required.
2051+
*
2052+
* macb_tx_restart() already checks the hardware's TBQP against the
2053+
* driver's head index before re-asserting TSTART, so on a healthy
2054+
* ring this is a no-op at the hardware level. The watchdog only
2055+
* adds the missing trigger.
2056+
*/
2057+
static void macb_tx_stall_watchdog(struct work_struct *work)
2058+
{
2059+
struct macb_queue *queue = container_of(to_delayed_work(work),
2060+
struct macb_queue,
2061+
tx_stall_watchdog_work);
2062+
struct macb *bp = queue->bp;
2063+
unsigned int cur_tail, cur_head;
2064+
bool stalled = false;
2065+
unsigned long flags;
2066+
2067+
if (!netif_running(bp->dev))
2068+
return;
2069+
2070+
spin_lock_irqsave(&queue->tx_ptr_lock, flags);
2071+
cur_tail = queue->tx_tail;
2072+
cur_head = queue->tx_head;
2073+
if (cur_head != cur_tail && !queue->tx_stall_tail_moved)
2074+
stalled = true;
2075+
queue->tx_stall_tail_moved = false;
2076+
spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
2077+
2078+
if (stalled) {
2079+
netdev_warn_once(bp->dev,
2080+
"TX stall detected on queue %u (tail=%u head=%u); re-kicking TSTART\n",
2081+
(unsigned int)(queue - bp->queues),
2082+
cur_tail, cur_head);
2083+
macb_tx_restart(queue);
2084+
}
2085+
2086+
schedule_delayed_work(&queue->tx_stall_watchdog_work,
2087+
msecs_to_jiffies(MACB_TX_STALL_INTERVAL_MS));
2088+
}
2089+
20312090
static void macb_hresp_error_task(struct work_struct *work)
20322091
{
20332092
struct macb *bp = from_work(bp, work, hresp_err_bh_work);
@@ -3294,6 +3353,9 @@ static int macb_open(struct net_device *dev)
32943353
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
32953354
napi_enable(&queue->napi_rx);
32963355
napi_enable(&queue->napi_tx);
3356+
queue->tx_stall_tail_moved = true;
3357+
schedule_delayed_work(&queue->tx_stall_watchdog_work,
3358+
msecs_to_jiffies(MACB_TX_STALL_INTERVAL_MS));
32973359
}
32983360

32993361
macb_init_hw(bp);
@@ -3340,6 +3402,7 @@ static int macb_close(struct net_device *dev)
33403402
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
33413403
napi_disable(&queue->napi_rx);
33423404
napi_disable(&queue->napi_tx);
3405+
cancel_delayed_work_sync(&queue->tx_stall_watchdog_work);
33433406
netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
33443407
}
33453408

@@ -4938,6 +5001,8 @@ static int macb_init(struct platform_device *pdev)
49385001
}
49395002

49405003
INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
5004+
INIT_DELAYED_WORK(&queue->tx_stall_watchdog_work,
5005+
macb_tx_stall_watchdog);
49415006
q++;
49425007
}
49435008

0 commit comments

Comments
 (0)