Skip to content

Commit 069e1f8

Browse files
vlrplnbuchwitz
authored and committed
net: macb: make macb_tx_skb generic
The macb_tx_skb structure is renamed to macb_tx_buff with no functional changes. This is a preparatory step for adding XDP xmit support. Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
1 parent ca83bb3 commit 069e1f8

File tree

2 files changed

+56
-59
lines changed

2 files changed

+56
-59
lines changed

drivers/net/ethernet/cadence/macb.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -988,15 +988,15 @@ struct macb_dma_desc_ptp {
988988
/* Scaled PPM fraction */
989989
#define PPM_FRACTION 16
990990

991-
/* struct macb_tx_skb - data about an skb which is being transmitted
991+
/* struct macb_tx_buff - data about an skb which is being transmitted
992992
* @skb: skb currently being transmitted, only set for the last buffer
993993
* of the frame
994994
* @mapping: DMA address of the skb's fragment buffer
995995
* @size: size of the DMA mapped buffer
996996
* @mapped_as_page: true when buffer was mapped with skb_frag_dma_map(),
997997
* false when buffer was mapped with dma_map_single()
998998
*/
999-
struct macb_tx_skb {
999+
struct macb_tx_buff {
10001000
struct sk_buff *skb;
10011001
dma_addr_t mapping;
10021002
size_t size;
@@ -1290,7 +1290,7 @@ struct macb_queue {
12901290
spinlock_t tx_ptr_lock;
12911291
unsigned int tx_head, tx_tail;
12921292
struct macb_dma_desc *tx_ring;
1293-
struct macb_tx_skb *tx_skb;
1293+
struct macb_tx_buff *tx_buff;
12941294
dma_addr_t tx_ring_dma;
12951295
struct work_struct tx_error_task;
12961296
bool txubr_pending;
@@ -1376,7 +1376,7 @@ struct macb {
13761376
int phy_reset_ms;
13771377

13781378
/* AT91RM9200 transmit queue (1 on wire + 1 queued) */
1379-
struct macb_tx_skb rm9200_txq[2];
1379+
struct macb_tx_buff rm9200_txq[2];
13801380
unsigned int max_tx_length;
13811381

13821382
u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];

drivers/net/ethernet/cadence/macb_main.c

Lines changed: 52 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -191,10 +191,10 @@ static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
191191
return &queue->tx_ring[index];
192192
}
193193

194-
static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
195-
unsigned int index)
194+
static struct macb_tx_buff *macb_tx_buff(struct macb_queue *queue,
195+
unsigned int index)
196196
{
197-
return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
197+
return &queue->tx_buff[macb_tx_ring_wrap(queue->bp, index)];
198198
}
199199

200200
static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
@@ -1136,21 +1136,21 @@ static int macb_halt_tx(struct macb *bp)
11361136
bp, TSR);
11371137
}
11381138

1139-
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
1139+
static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff, int budget)
11401140
{
1141-
if (tx_skb->mapping) {
1142-
if (tx_skb->mapped_as_page)
1143-
dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
1144-
tx_skb->size, DMA_TO_DEVICE);
1141+
if (tx_buff->mapping) {
1142+
if (tx_buff->mapped_as_page)
1143+
dma_unmap_page(&bp->pdev->dev, tx_buff->mapping,
1144+
tx_buff->size, DMA_TO_DEVICE);
11451145
else
1146-
dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
1147-
tx_skb->size, DMA_TO_DEVICE);
1148-
tx_skb->mapping = 0;
1146+
dma_unmap_single(&bp->pdev->dev, tx_buff->mapping,
1147+
tx_buff->size, DMA_TO_DEVICE);
1148+
tx_buff->mapping = 0;
11491149
}
11501150

1151-
if (tx_skb->skb) {
1152-
napi_consume_skb(tx_skb->skb, budget);
1153-
tx_skb->skb = NULL;
1151+
if (tx_buff->skb) {
1152+
napi_consume_skb(tx_buff->skb, budget);
1153+
tx_buff->skb = NULL;
11541154
}
11551155
}
11561156

@@ -1200,7 +1200,7 @@ static void macb_tx_error_task(struct work_struct *work)
12001200
u32 queue_index;
12011201
u32 packets = 0;
12021202
u32 bytes = 0;
1203-
struct macb_tx_skb *tx_skb;
1203+
struct macb_tx_buff *tx_buff;
12041204
struct macb_dma_desc *desc;
12051205
struct sk_buff *skb;
12061206
unsigned int tail;
@@ -1240,16 +1240,16 @@ static void macb_tx_error_task(struct work_struct *work)
12401240

12411241
desc = macb_tx_desc(queue, tail);
12421242
ctrl = desc->ctrl;
1243-
tx_skb = macb_tx_skb(queue, tail);
1244-
skb = tx_skb->skb;
1243+
tx_buff = macb_tx_buff(queue, tail);
1244+
skb = tx_buff->skb;
12451245

12461246
if (ctrl & MACB_BIT(TX_USED)) {
12471247
/* skb is set for the last buffer of the frame */
12481248
while (!skb) {
1249-
macb_tx_unmap(bp, tx_skb, 0);
1249+
macb_tx_unmap(bp, tx_buff, 0);
12501250
tail++;
1251-
tx_skb = macb_tx_skb(queue, tail);
1252-
skb = tx_skb->skb;
1251+
tx_buff = macb_tx_buff(queue, tail);
1252+
skb = tx_buff->skb;
12531253
}
12541254

12551255
/* ctrl still refers to the first buffer descriptor
@@ -1278,7 +1278,7 @@ static void macb_tx_error_task(struct work_struct *work)
12781278
desc->ctrl = ctrl | MACB_BIT(TX_USED);
12791279
}
12801280

1281-
macb_tx_unmap(bp, tx_skb, 0);
1281+
macb_tx_unmap(bp, tx_buff, 0);
12821282
}
12831283

12841284
netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
@@ -1356,7 +1356,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
13561356
spin_lock_irqsave(&queue->tx_ptr_lock, flags);
13571357
head = queue->tx_head;
13581358
for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
1359-
struct macb_tx_skb *tx_skb;
1359+
struct macb_tx_buff *tx_buff;
13601360
struct sk_buff *skb;
13611361
struct macb_dma_desc *desc;
13621362
u32 ctrl;
@@ -1376,8 +1376,8 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
13761376

13771377
/* Process all buffers of the current transmitted frame */
13781378
for (;; tail++) {
1379-
tx_skb = macb_tx_skb(queue, tail);
1380-
skb = tx_skb->skb;
1379+
tx_buff = macb_tx_buff(queue, tail);
1380+
skb = tx_buff->skb;
13811381

13821382
/* First, update TX stats if needed */
13831383
if (skb) {
@@ -1397,7 +1397,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
13971397
}
13981398

13991399
/* Now we can safely release resources */
1400-
macb_tx_unmap(bp, tx_skb, budget);
1400+
macb_tx_unmap(bp, tx_buff, budget);
14011401

14021402
/* skb is set only for the last buffer of the frame.
14031403
* WARNING: at this point skb has been freed by
@@ -2279,13 +2279,13 @@ static unsigned int macb_tx_map(struct macb *bp,
22792279
unsigned int hdrlen)
22802280
{
22812281
dma_addr_t mapping;
2282+
unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
22822283
unsigned int len, entry, i, tx_head = queue->tx_head;
2283-
struct macb_tx_skb *tx_skb = NULL;
2284+
u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
2285+
struct macb_tx_buff *tx_buff = NULL;
2286+
unsigned int eof = 1, mss_mfs = 0;
22842287
struct macb_dma_desc *desc;
22852288
unsigned int offset, size, count = 0;
2286-
unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
2287-
unsigned int eof = 1, mss_mfs = 0;
2288-
u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
22892289

22902290
/* LSO */
22912291
if (skb_shinfo(skb)->gso_size != 0) {
@@ -2305,8 +2305,7 @@ static unsigned int macb_tx_map(struct macb *bp,
23052305

23062306
offset = 0;
23072307
while (len) {
2308-
entry = macb_tx_ring_wrap(bp, tx_head);
2309-
tx_skb = &queue->tx_skb[entry];
2308+
tx_buff = macb_tx_buff(queue, tx_head);
23102309

23112310
mapping = dma_map_single(&bp->pdev->dev,
23122311
skb->data + offset,
@@ -2315,10 +2314,10 @@ static unsigned int macb_tx_map(struct macb *bp,
23152314
goto dma_error;
23162315

23172316
/* Save info to properly release resources */
2318-
tx_skb->skb = NULL;
2319-
tx_skb->mapping = mapping;
2320-
tx_skb->size = size;
2321-
tx_skb->mapped_as_page = false;
2317+
tx_buff->skb = NULL;
2318+
tx_buff->mapping = mapping;
2319+
tx_buff->size = size;
2320+
tx_buff->mapped_as_page = false;
23222321

23232322
len -= size;
23242323
offset += size;
@@ -2336,19 +2335,18 @@ static unsigned int macb_tx_map(struct macb *bp,
23362335
offset = 0;
23372336
while (len) {
23382337
size = min(len, bp->max_tx_length);
2339-
entry = macb_tx_ring_wrap(bp, tx_head);
2340-
tx_skb = &queue->tx_skb[entry];
2338+
tx_buff = macb_tx_buff(queue, tx_head);
23412339

23422340
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
23432341
offset, size, DMA_TO_DEVICE);
23442342
if (dma_mapping_error(&bp->pdev->dev, mapping))
23452343
goto dma_error;
23462344

23472345
/* Save info to properly release resources */
2348-
tx_skb->skb = NULL;
2349-
tx_skb->mapping = mapping;
2350-
tx_skb->size = size;
2351-
tx_skb->mapped_as_page = true;
2346+
tx_buff->skb = NULL;
2347+
tx_buff->mapping = mapping;
2348+
tx_buff->size = size;
2349+
tx_buff->mapped_as_page = true;
23522350

23532351
len -= size;
23542352
offset += size;
@@ -2358,13 +2356,13 @@ static unsigned int macb_tx_map(struct macb *bp,
23582356
}
23592357

23602358
/* Should never happen */
2361-
if (unlikely(!tx_skb)) {
2359+
if (unlikely(!tx_buff)) {
23622360
netdev_err(bp->dev, "BUG! empty skb!\n");
23632361
return 0;
23642362
}
23652363

23662364
/* This is the last buffer of the frame: save socket buffer */
2367-
tx_skb->skb = skb;
2365+
tx_buff->skb = skb;
23682366

23692367
/* Update TX ring: update buffer descriptors in reverse order
23702368
* to avoid race condition
@@ -2396,11 +2394,10 @@ static unsigned int macb_tx_map(struct macb *bp,
23962394

23972395
do {
23982396
i--;
2399-
entry = macb_tx_ring_wrap(bp, i);
2400-
tx_skb = &queue->tx_skb[entry];
2401-
desc = macb_tx_desc(queue, entry);
2397+
tx_buff = macb_tx_buff(queue, i);
2398+
desc = macb_tx_desc(queue, i);
24022399

2403-
ctrl = (u32)tx_skb->size;
2400+
ctrl = (u32)tx_buff->size;
24042401
if (eof) {
24052402
ctrl |= MACB_BIT(TX_LAST);
24062403
eof = 0;
@@ -2423,7 +2420,7 @@ static unsigned int macb_tx_map(struct macb *bp,
24232420
ctrl |= MACB_BF(MSS_MFS, mss_mfs);
24242421

24252422
/* Set TX buffer descriptor */
2426-
macb_set_addr(bp, desc, tx_skb->mapping);
2423+
macb_set_addr(bp, desc, tx_buff->mapping);
24272424
/* desc->addr must be visible to hardware before clearing
24282425
* 'TX_USED' bit in desc->ctrl.
24292426
*/
@@ -2439,9 +2436,9 @@ static unsigned int macb_tx_map(struct macb *bp,
24392436
netdev_err(bp->dev, "TX DMA map failed\n");
24402437

24412438
for (i = queue->tx_head; i != tx_head; i++) {
2442-
tx_skb = macb_tx_skb(queue, i);
2439+
tx_buff = macb_tx_buff(queue, i);
24432440

2444-
macb_tx_unmap(bp, tx_skb, 0);
2441+
macb_tx_unmap(bp, tx_buff, 0);
24452442
}
24462443

24472444
return 0;
@@ -2771,8 +2768,8 @@ static void macb_free_consistent(struct macb *bp)
27712768
dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
27722769

27732770
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2774-
kfree(queue->tx_skb);
2775-
queue->tx_skb = NULL;
2771+
kfree(queue->tx_buff);
2772+
queue->tx_buff = NULL;
27762773
queue->tx_ring = NULL;
27772774
queue->rx_ring = NULL;
27782775
}
@@ -2850,9 +2847,9 @@ static int macb_alloc_consistent(struct macb *bp)
28502847
queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
28512848
queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
28522849

2853-
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2854-
queue->tx_skb = kmalloc(size, GFP_KERNEL);
2855-
if (!queue->tx_skb)
2850+
size = bp->tx_ring_size * sizeof(struct macb_tx_buff);
2851+
queue->tx_buff = kmalloc(size, GFP_KERNEL);
2852+
if (!queue->tx_buff)
28562853
goto out_err;
28572854
}
28582855

0 commit comments

Comments
 (0)