Skip to content

Commit e4bdd11

Browse files
tlebnbuchwitz
authored and committed
net: macb: move Rx buffers alloc from link up to open
mog_alloc_rx_buffers(), getting called at open, does not do rx buffer alloc on GEM. The bulk of the work is done by gem_rx_refill() filling up all slots with valid buffers. gem_rx_refill() is called at link up by gem_init_rings() == bp->macbgem_ops.mog_init_rings(). Move operation to macb_open(), mostly to allow it to fail early and loudly rather than init the device with Rx mostly broken. About `bool fail_early`: - When called from macb_open(), ring init fails as soon as a queue cannot be refilled. - When called from macb_hresp_error_task(), we do our best to reinit the device: we still iterate over all queues and try refilling all even if a previous queue failed. Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com> Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
1 parent f02621a commit e4bdd11

File tree

2 files changed

+28
-9
lines changed

2 files changed

+28
-9
lines changed

drivers/net/ethernet/cadence/macb.h

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -1218,7 +1218,7 @@ struct macb_queue;
12181218
struct macb_or_gem_ops {
12191219
int (*mog_alloc_rx_buffers)(struct macb *bp);
12201220
void (*mog_free_rx_buffers)(struct macb *bp);
1221-
void (*mog_init_rings)(struct macb *bp);
1221+
int (*mog_init_rings)(struct macb *bp, bool fail_early);
12221222
int (*mog_rx)(struct macb_queue *queue, struct napi_struct *napi,
12231223
int budget);
12241224
};

drivers/net/ethernet/cadence/macb_main.c

Lines changed: 27 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -1424,13 +1424,14 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
14241424
return packets;
14251425
}
14261426

1427-
static void gem_rx_refill(struct macb_queue *queue)
1427+
static int gem_rx_refill(struct macb_queue *queue)
14281428
{
14291429
unsigned int entry;
14301430
struct sk_buff *skb;
14311431
dma_addr_t paddr;
14321432
struct macb *bp = queue->bp;
14331433
struct macb_dma_desc *desc;
1434+
int err = 0;
14341435

14351436
while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
14361437
bp->rx_ring_size) > 0) {
@@ -1447,6 +1448,7 @@ static void gem_rx_refill(struct macb_queue *queue)
14471448
if (unlikely(!skb)) {
14481449
netdev_err(bp->dev,
14491450
"Unable to allocate sk_buff\n");
1451+
err = -ENOMEM;
14501452
break;
14511453
}
14521454

@@ -1485,6 +1487,7 @@ static void gem_rx_refill(struct macb_queue *queue)
14851487

14861488
netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
14871489
queue, queue->rx_prepared_head, queue->rx_tail);
1490+
return err;
14881491
}
14891492

14901493
/* Mark DMA descriptors from begin up to and not including end as unused */
@@ -1942,7 +1945,7 @@ static void macb_hresp_error_task(struct work_struct *work)
19421945
netif_tx_stop_all_queues(dev);
19431946
netif_carrier_off(dev);
19441947

1945-
bp->macbgem_ops.mog_init_rings(bp);
1948+
bp->macbgem_ops.mog_init_rings(bp, false);
19461949

19471950
/* Initialize TX and RX buffers */
19481951
macb_init_buffers(bp);
@@ -2730,8 +2733,6 @@ static int macb_alloc_consistent(struct macb *bp)
27302733
if (!queue->tx_skb)
27312734
goto out_err;
27322735
}
2733-
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2734-
goto out_err;
27352736

27362737
/* Required for tie off descriptor for PM cases */
27372738
if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
@@ -2743,6 +2744,11 @@ static int macb_alloc_consistent(struct macb *bp)
27432744
goto out_err;
27442745
}
27452746

2747+
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2748+
goto out_err;
2749+
if (bp->macbgem_ops.mog_init_rings(bp, true))
2750+
goto out_err;
2751+
27462752
return 0;
27472753

27482754
out_err:
@@ -2763,11 +2769,13 @@ static void macb_init_tieoff(struct macb *bp)
27632769
desc->ctrl = 0;
27642770
}
27652771

2766-
static void gem_init_rings(struct macb *bp)
2772+
static int gem_init_rings(struct macb *bp, bool fail_early)
27672773
{
27682774
struct macb_queue *queue;
27692775
struct macb_dma_desc *desc = NULL;
2776+
int last_err = 0;
27702777
unsigned int q;
2778+
int err;
27712779
int i;
27722780

27732781
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -2783,13 +2791,24 @@ static void gem_init_rings(struct macb *bp)
27832791
queue->rx_tail = 0;
27842792
queue->rx_prepared_head = 0;
27852793

2786-
gem_rx_refill(queue);
2794+
/* We get called in two cases:
2795+
* - open: we can propagate alloc errors (so fail early),
2796+
* - HRESP error: cannot propagate, we attempt to reinit
2797+
* all queues in case of failure.
2798+
*/
2799+
err = gem_rx_refill(queue);
2800+
if (err) {
2801+
last_err = err;
2802+
if (fail_early)
2803+
break;
2804+
}
27872805
}
27882806

27892807
macb_init_tieoff(bp);
2808+
return last_err;
27902809
}
27912810

2792-
static void macb_init_rings(struct macb *bp)
2811+
static int macb_init_rings(struct macb *bp, bool fail_early)
27932812
{
27942813
int i;
27952814
struct macb_dma_desc *desc = NULL;
@@ -2806,6 +2825,7 @@ static void macb_init_rings(struct macb *bp)
28062825
desc->ctrl |= MACB_BIT(TX_WRAP);
28072826

28082827
macb_init_tieoff(bp);
2828+
return 0;
28092829
}
28102830

28112831
static void macb_reset_hw(struct macb *bp)
@@ -3173,7 +3193,6 @@ static int macb_open(struct net_device *dev)
31733193
goto pm_exit;
31743194
}
31753195

3176-
bp->macbgem_ops.mog_init_rings(bp);
31773196
macb_init_buffers(bp);
31783197

31793198
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {

0 commit comments

Comments
 (0)