Skip to content

Commit 96d8790

Browse files
vlrplnbuchwitz
authored and committed
net: macb: generalize tx buffer handling
Rename struct macb_tx_buff member skb to ptr and introduce macb_tx_buff_type to identify the buffer type macb_tx_buff represents. Currently the buffer can only be MACB_TYPE_SKB, so the sk_buff case is handled unconditionally in the tx path. The remaining type handling will be handled by subsequent patches. This is the last preparatory step for XDP xmit support. Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
1 parent cbe6575 commit 96d8790

File tree

2 files changed

+43
-27
lines changed

2 files changed

+43
-27
lines changed

drivers/net/ethernet/cadence/macb.h

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -988,19 +988,28 @@ struct macb_dma_desc_ptp {
988988
/* Scaled PPM fraction */
989989
#define PPM_FRACTION 16
990990

991-
/* struct macb_tx_buff - data about an skb which is being transmitted
992-
* @skb: skb currently being transmitted, only set for the last buffer
993-
* of the frame
994-
* @mapping: DMA address of the skb's fragment buffer
991+
enum macb_tx_buff_type {
992+
MACB_TYPE_SKB,
993+
MACB_TYPE_XDP_TX,
994+
MACB_TYPE_XDP_NDO,
995+
};
996+
997+
/* struct macb_tx_buff - data about an skb or xdp frame which is being
998+
* transmitted.
999+
* @ptr: pointer to skb or xdp frame being transmitted, only set
1000+
* for the last buffer for sk_buff
1001+
* @mapping: DMA address of the skb's fragment or xdp buffer
9951002
* @size: size of the DMA mapped buffer
9961003
* @mapped_as_page: true when buffer was mapped with skb_frag_dma_map(),
9971004
* false when buffer was mapped with dma_map_single()
1005+
* @type: type of buffer (MACB_TYPE_SKB, MACB_TYPE_XDP_TX, MACB_TYPE_XDP_NDO)
9981006
*/
9991007
struct macb_tx_buff {
1000-
struct sk_buff *skb;
1001-
dma_addr_t mapping;
1002-
size_t size;
1003-
bool mapped_as_page;
1008+
void *ptr;
1009+
dma_addr_t mapping;
1010+
size_t size;
1011+
bool mapped_as_page;
1012+
enum macb_tx_buff_type type;
10041013
};
10051014

10061015
/* Hardware-collected statistics. Used when updating the network

drivers/net/ethernet/cadence/macb_main.c

Lines changed: 26 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1230,7 +1230,8 @@ static int macb_halt_tx(struct macb *bp)
12301230
bp, TSR);
12311231
}
12321232

1233-
static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff, int budget)
1233+
static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff,
1234+
int budget)
12341235
{
12351236
if (tx_buff->mapping) {
12361237
if (tx_buff->mapped_as_page)
@@ -1242,9 +1243,9 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff, int bud
12421243
tx_buff->mapping = 0;
12431244
}
12441245

1245-
if (tx_buff->skb) {
1246-
napi_consume_skb(tx_buff->skb, budget);
1247-
tx_buff->skb = NULL;
1246+
if (tx_buff->ptr) {
1247+
napi_consume_skb(tx_buff->ptr, budget);
1248+
tx_buff->ptr = NULL;
12481249
}
12491250
}
12501251

@@ -1335,15 +1336,15 @@ static void macb_tx_error_task(struct work_struct *work)
13351336
desc = macb_tx_desc(queue, tail);
13361337
ctrl = desc->ctrl;
13371338
tx_buff = macb_tx_buff(queue, tail);
1338-
skb = tx_buff->skb;
1339+
skb = tx_buff->ptr;
13391340

13401341
if (ctrl & MACB_BIT(TX_USED)) {
13411342
/* skb is set for the last buffer of the frame */
13421343
while (!skb) {
13431344
macb_tx_unmap(bp, tx_buff, 0);
13441345
tail++;
13451346
tx_buff = macb_tx_buff(queue, tail);
1346-
skb = tx_buff->skb;
1347+
skb = tx_buff->ptr;
13471348
}
13481349

13491350
/* ctrl still refers to the first buffer descriptor
@@ -1440,20 +1441,22 @@ static bool ptp_one_step_sync(struct sk_buff *skb)
14401441
static int macb_tx_complete(struct macb_queue *queue, int budget)
14411442
{
14421443
struct macb *bp = queue->bp;
1443-
u16 queue_index = queue - bp->queues;
14441444
unsigned long flags;
14451445
unsigned int tail;
14461446
unsigned int head;
1447+
u16 queue_index;
14471448
int packets = 0;
14481449
u32 bytes = 0;
14491450

1451+
queue_index = queue - bp->queues;
1452+
14501453
spin_lock_irqsave(&queue->tx_ptr_lock, flags);
14511454
head = queue->tx_head;
14521455
for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
1453-
struct macb_tx_buff *tx_buff;
1454-
struct sk_buff *skb;
1455-
struct macb_dma_desc *desc;
1456-
u32 ctrl;
1456+
struct macb_tx_buff *tx_buff;
1457+
struct macb_dma_desc *desc;
1458+
struct sk_buff *skb;
1459+
u32 ctrl;
14571460

14581461
desc = macb_tx_desc(queue, tail);
14591462

@@ -1471,7 +1474,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
14711474
/* Process all buffers of the current transmitted frame */
14721475
for (;; tail++) {
14731476
tx_buff = macb_tx_buff(queue, tail);
1474-
skb = tx_buff->skb;
1477+
skb = tx_buff->ptr;
14751478

14761479
/* First, update TX stats if needed */
14771480
if (skb) {
@@ -2408,7 +2411,8 @@ static unsigned int macb_tx_map(struct macb *bp,
24082411
goto dma_error;
24092412

24102413
/* Save info to properly release resources */
2411-
tx_buff->skb = NULL;
2414+
tx_buff->ptr = NULL;
2415+
tx_buff->type = MACB_TYPE_SKB;
24122416
tx_buff->mapping = mapping;
24132417
tx_buff->size = size;
24142418
tx_buff->mapped_as_page = false;
@@ -2437,7 +2441,8 @@ static unsigned int macb_tx_map(struct macb *bp,
24372441
goto dma_error;
24382442

24392443
/* Save info to properly release resources */
2440-
tx_buff->skb = NULL;
2444+
tx_buff->ptr = NULL;
2445+
tx_buff->type = MACB_TYPE_SKB;
24412446
tx_buff->mapping = mapping;
24422447
tx_buff->size = size;
24432448
tx_buff->mapped_as_page = true;
@@ -2456,7 +2461,8 @@ static unsigned int macb_tx_map(struct macb *bp,
24562461
}
24572462

24582463
/* This is the last buffer of the frame: save socket buffer */
2459-
tx_buff->skb = skb;
2464+
tx_buff->ptr = skb;
2465+
tx_buff->type = MACB_TYPE_SKB;
24602466

24612467
/* Update TX ring: update buffer descriptors in reverse order
24622468
* to avoid race condition
@@ -5377,8 +5383,9 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
53775383
netif_stop_queue(dev);
53785384

53795385
/* Store packet information (to free when Tx completed) */
5380-
lp->rm9200_txq[desc].skb = skb;
5386+
lp->rm9200_txq[desc].ptr = skb;
53815387
lp->rm9200_txq[desc].size = skb->len;
5388+
lp->rm9200_txq[desc].type = MACB_TYPE_SKB;
53825389
lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
53835390
skb->len, DMA_TO_DEVICE);
53845391
if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
@@ -5470,9 +5477,9 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
54705477
dev->stats.tx_errors++;
54715478

54725479
desc = 0;
5473-
if (lp->rm9200_txq[desc].skb) {
5474-
dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
5475-
lp->rm9200_txq[desc].skb = NULL;
5480+
if (lp->rm9200_txq[desc].ptr) {
5481+
dev_consume_skb_irq(lp->rm9200_txq[desc].ptr);
5482+
lp->rm9200_txq[desc].ptr = NULL;
54765483
dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
54775484
lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
54785485
dev->stats.tx_packets++;

0 commit comments

Comments (0)