@@ -1235,11 +1235,13 @@ static void macb_tx_release_buff(void *buff, enum macb_tx_buff_type type, int bu
12351235{
12361236 if (type == MACB_TYPE_SKB ) {
12371237 napi_consume_skb (buff , budget );
1238- } else {
1238+ } else if ( type == MACB_TYPE_XDP_TX ) {
12391239 if (!budget )
12401240 xdp_return_frame (buff );
12411241 else
12421242 xdp_return_frame_rx_napi (buff );
1243+ } else {
1244+ xdp_return_frame (buff );
12431245 }
12441246}
12451247
@@ -1701,20 +1703,24 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
17011703}
17021704
/* NOTE(review): annotated diff fragment — the hunks below are
 * non-contiguous, so parts of this function's body are not visible here.
 *
 * macb_xdp_submit_frame() queues one xdp_frame on a per-CPU TX queue.
 * This change adds a 'dma_map' flag: true for ndo_xdp_xmit (redirected)
 * frames, which must be DMA-mapped here; false for XDP_TX frames, whose
 * RX buffer is already mapped and only needs a sync.
 */
17031705static int macb_xdp_submit_frame (struct macb * bp , struct xdp_frame * xdpf ,
1704- struct net_device * dev , dma_addr_t addr )
1706+ struct net_device * dev , bool dma_map ,
1707+ dma_addr_t addr )
17051708{
1709+ enum macb_tx_buff_type buff_type ;
17061710 struct macb_tx_buff * tx_buff ;
17071711 int cpu = smp_processor_id ();
17081712 struct macb_dma_desc * desc ;
17091713 struct macb_queue * queue ;
17101714 unsigned int next_head ;
17111715 unsigned long flags ;
1716+ dma_addr_t mapping ;
17121717 u16 queue_index ;
17131718 int err = 0 ;
17141719 u32 ctrl ;
17151720
17161721 queue_index = cpu % bp -> num_queues ;
17171722 queue = & bp -> queues [queue_index ];
/* Ring-slot type is decided once up front from the submission path. */
1723+ buff_type = dma_map ? MACB_TYPE_XDP_NDO : MACB_TYPE_XDP_TX ;
17181724
17191725 spin_lock_irqsave (& queue -> tx_ptr_lock , flags );
17201726
@@ -1727,14 +1733,23 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
17271733 goto unlock ;
17281734 }
17291735
/* NDO path: create a fresh device mapping for the frame payload and
 * fail the submit if it cannot be created. XDP_TX path: compute the
 * device address from the RX buffer base and sync the adjusted region.
 */
1730- /* progs can adjust the head. Sync and set the adjusted one.
1731- * This also implicitly takes into account ip alignment,
1732- * if present.
1733- */
1734- addr += xdpf -> headroom + sizeof (* xdpf );
1735-
1736- dma_sync_single_for_device (& bp -> pdev -> dev , addr ,
1737- xdpf -> len , DMA_BIDIRECTIONAL );
1736+ if (dma_map ) {
1737+ mapping = dma_map_single (& bp -> pdev -> dev ,
1738+ xdpf -> data ,
1739+ xdpf -> len , DMA_TO_DEVICE );
1740+ if (unlikely (dma_mapping_error (& bp -> pdev -> dev , mapping ))) {
1741+ err = - ENOMEM ;
1742+ goto unlock ;
1743+ }
1744+ } else {
1745+ /* progs can adjust the head. Sync and set the adjusted one.
1746+ * This also implicitly takes into account ip alignment,
1747+ * if present.
1748+ */
1749+ mapping = addr + xdpf -> headroom + sizeof (* xdpf );
1750+ dma_sync_single_for_device (& bp -> pdev -> dev , mapping ,
1751+ xdpf -> len , DMA_BIDIRECTIONAL );
1752+ }
17381753
17391754 next_head = queue -> tx_head + 1 ;
17401755
@@ -1745,8 +1760,8 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
17451760 desc = macb_tx_desc (queue , queue -> tx_head );
17461761 tx_buff = macb_tx_buff (queue , queue -> tx_head );
17471762 tx_buff -> ptr = xdpf ;
/* Record the mapping only for NDO frames; XDP_TX buffers stay owned by
 * the RX ring, so mapping = 0 there.
 * (review: presumably the TX-completion path unmaps based on
 * tx_buff->mapping being non-zero — confirm against macb_tx_unmap.)
 */
1748- tx_buff -> type = MACB_TYPE_XDP_TX ;
1749- tx_buff -> mapping = 0 ;
1763+ tx_buff -> type = buff_type ;
1764+ tx_buff -> mapping = dma_map ? mapping : 0 ;
17501765 tx_buff -> size = xdpf -> len ;
17511766 tx_buff -> mapped_as_page = false;
17521767
@@ -1757,7 +1772,7 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
17571772 ctrl |= MACB_BIT (TX_WRAP );
17581773
17591774 /* Set TX buffer descriptor */
/* The descriptor now carries the unified 'mapping' address, valid for
 * both submission paths.
 */
1760- macb_set_addr (bp , desc , addr );
1775+ macb_set_addr (bp , desc , mapping );
17611776 /* desc->addr must be visible to hardware before clearing
17621777 * 'TX_USED' bit in desc->ctrl.
17631778 */
@@ -1782,6 +1797,32 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
17821797 return err ;
17831798}
17841799
1800+ static int gem_xdp_xmit (struct net_device * dev , int num_frame ,
1801+ struct xdp_frame * * frames , u32 flags )
1802+ {
1803+ struct macb * bp = netdev_priv (dev );
1804+ u32 xmitted = 0 ;
1805+ int i ;
1806+
1807+ if (!macb_is_gem (bp ))
1808+ return - EOPNOTSUPP ;
1809+
1810+ if (unlikely (!netif_carrier_ok (dev )))
1811+ return - ENETDOWN ;
1812+
1813+ if (unlikely (flags & ~XDP_XMIT_FLAGS_MASK ))
1814+ return - EINVAL ;
1815+
1816+ for (i = 0 ; i < num_frame ; i ++ ) {
1817+ if (macb_xdp_submit_frame (bp , frames [i ], dev , true, 0 ))
1818+ break ;
1819+
1820+ xmitted ++ ;
1821+ }
1822+
1823+ return xmitted ;
1824+ }
1825+
/* NOTE(review): diff fragment — only the signature and the XDP_TX case of
 * this function are visible; the body in between is not shown here.
 */
17851826static u32 gem_xdp_run (struct macb_queue * queue , void * buff_head ,
17861827 unsigned int * len , unsigned int * headroom ,
17871828 dma_addr_t addr )
@@ -1819,7 +1860,7 @@ static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
18191860 case XDP_TX :
18201861 xdpf = xdp_convert_buff_to_frame (& xdp );
/* XDP_TX reuses the RX buffer's existing DMA mapping, so dma_map=false:
 * the submit path only syncs the adjusted region instead of remapping.
 */
18211862 if (unlikely (!xdpf ) || macb_xdp_submit_frame (queue -> bp , xdpf ,
1822- dev , addr )) {
1863+ dev , false, addr )) {
18231864 act = XDP_DROP ;
18241865 break ;
18251866 }
@@ -5162,6 +5203,7 @@ static const struct net_device_ops macb_netdev_ops = {
51625203 .ndo_hwtstamp_get = macb_hwtstamp_get ,
51635204 .ndo_setup_tc = macb_setup_tc ,
51645205 .ndo_bpf = gem_xdp ,
/* Wire up the XDP redirect transmit path added in this change. */
5206+ .ndo_xdp_xmit = gem_xdp_xmit ,
51655207};
51665208
51675209/* Configure peripheral capabilities according to device tree
@@ -6459,7 +6501,8 @@ static int macb_probe(struct platform_device *pdev)
64596501 bp -> rx_headroom += NET_IP_ALIGN ;
64606502
/* Advertise NDO_XMIT alongside BASIC/REDIRECT now that ndo_xdp_xmit is
 * implemented, so the XDP core allows redirecting frames to this device.
 */
64616503 dev -> xdp_features = NETDEV_XDP_ACT_BASIC |
6462- NETDEV_XDP_ACT_REDIRECT ;
6504+ NETDEV_XDP_ACT_REDIRECT |
6505+ NETDEV_XDP_ACT_NDO_XMIT ;
64636506 }
64646507
64656508 netif_carrier_off (dev );
0 commit comments