@@ -1142,11 +1142,13 @@ static void macb_tx_release_buff(void *buff, enum macb_tx_buff_type type, int budget)
 {
 	if (type == MACB_TYPE_SKB) {
 		napi_consume_skb(buff, budget);
-	} else {
+	} else if (type == MACB_TYPE_XDP_TX) {
 		if (!budget)
 			xdp_return_frame(buff);
 		else
 			xdp_return_frame_rx_napi(buff);
+	} else {
+		xdp_return_frame(buff);
 	}
 }
 
@@ -1608,20 +1610,24 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
 }
 
 static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
-				 struct net_device *dev, dma_addr_t addr)
+				 struct net_device *dev, bool dma_map,
+				 dma_addr_t addr)
 {
+	enum macb_tx_buff_type buff_type;
 	struct macb_tx_buff *tx_buff;
 	int cpu = smp_processor_id();
 	struct macb_dma_desc *desc;
 	struct macb_queue *queue;
 	unsigned int next_head;
 	unsigned long flags;
+	dma_addr_t mapping;
 	u16 queue_index;
 	int err = 0;
 	u32 ctrl;
 
 	queue_index = cpu % bp->num_queues;
 	queue = &bp->queues[queue_index];
+	buff_type = dma_map ? MACB_TYPE_XDP_NDO : MACB_TYPE_XDP_TX;
 
 	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
 
@@ -1634,14 +1640,23 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
 		goto unlock;
 	}
 
-	/* progs can adjust the head. Sync and set the adjusted one.
-	 * This also implicitly takes into account ip alignment,
-	 * if present.
-	 */
-	addr += xdpf->headroom + sizeof(*xdpf);
-
-	dma_sync_single_for_device(&bp->pdev->dev, addr,
-				   xdpf->len, DMA_BIDIRECTIONAL);
+	if (dma_map) {
+		mapping = dma_map_single(&bp->pdev->dev,
+					 xdpf->data,
+					 xdpf->len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+			err = -ENOMEM;
+			goto unlock;
+		}
+	} else {
+		/* progs can adjust the head. Sync and set the adjusted one.
+		 * This also implicitly takes into account ip alignment,
+		 * if present.
+		 */
+		mapping = addr + xdpf->headroom + sizeof(*xdpf);
+		dma_sync_single_for_device(&bp->pdev->dev, mapping,
+					   xdpf->len, DMA_BIDIRECTIONAL);
+	}
 
 	next_head = queue->tx_head + 1;
 
@@ -1652,8 +1667,8 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
 	desc = macb_tx_desc(queue, queue->tx_head);
 	tx_buff = macb_tx_buff(queue, queue->tx_head);
 	tx_buff->ptr = xdpf;
-	tx_buff->type = MACB_TYPE_XDP_TX;
-	tx_buff->mapping = 0;
+	tx_buff->type = buff_type;
+	tx_buff->mapping = dma_map ? mapping : 0;
 	tx_buff->size = xdpf->len;
 	tx_buff->mapped_as_page = false;
 
@@ -1664,7 +1679,7 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
 		ctrl |= MACB_BIT(TX_WRAP);
 
 	/* Set TX buffer descriptor */
-	macb_set_addr(bp, desc, addr);
+	macb_set_addr(bp, desc, mapping);
 	/* desc->addr must be visible to hardware before clearing
 	 * 'TX_USED' bit in desc->ctrl.
 	 */
@@ -1688,6 +1703,32 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
 	return err;
 }
 
+static int gem_xdp_xmit(struct net_device *dev, int num_frame,
+			struct xdp_frame **frames, u32 flags)
+{
+	struct macb *bp = netdev_priv(dev);
+	u32 xmitted = 0;
+	int i;
+
+	if (!macb_is_gem(bp))
+		return -EOPNOTSUPP;
+
+	if (unlikely(!netif_carrier_ok(dev)))
+		return -ENETDOWN;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	for (i = 0; i < num_frame; i++) {
+		if (macb_xdp_submit_frame(bp, frames[i], dev, true, 0))
+			break;
+
+		xmitted++;
+	}
+
+	return xmitted;
+}
+
 static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
 		       unsigned int *len, unsigned int *headroom,
 		       dma_addr_t addr)
@@ -1725,7 +1766,7 @@ static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
 	case XDP_TX:
 		xdpf = xdp_convert_buff_to_frame(&xdp);
 		if (unlikely(!xdpf) || macb_xdp_submit_frame(queue->bp, xdpf,
-							     dev, addr)) {
+							     dev, false, addr)) {
 			act = XDP_DROP;
 			break;
 		}
@@ -5066,6 +5107,7 @@ static const struct net_device_ops macb_netdev_ops = {
 	.ndo_hwtstamp_get = macb_hwtstamp_get,
 	.ndo_setup_tc = macb_setup_tc,
 	.ndo_bpf = gem_xdp,
+	.ndo_xdp_xmit = gem_xdp_xmit,
 };
 
 /* Configure peripheral capabilities according to device tree
@@ -6361,7 +6403,8 @@ static int macb_probe(struct platform_device *pdev)
 		bp->rx_ip_align = NET_IP_ALIGN;
 
 		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
-				    NETDEV_XDP_ACT_REDIRECT;
+				    NETDEV_XDP_ACT_REDIRECT |
+				    NETDEV_XDP_ACT_NDO_XMIT;
 	}
 
 	netif_carrier_off(dev);
0 commit comments