 #define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
 
 /* XDP */
-#define AM65_CPSW_XDP_CONSUMED 2
-#define AM65_CPSW_XDP_REDIRECT 1
+#define AM65_CPSW_XDP_CONSUMED BIT(1)
+#define AM65_CPSW_XDP_REDIRECT BIT(0)
 #define AM65_CPSW_XDP_PASS 0
 
 /* Include headroom compatible with both skb and xdpf */
-#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))
 
 static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				       const u8 *dev_addr)
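
For orientation, a worked example that is not part of the patch (the actual values depend on architecture and config): with XDP_PACKET_HEADROOM = 256 and NET_IP_ALIGN = 2, AM65_CPSW_HEADROOM_NA comes to 258 bytes, and AM65_CPSW_HEADROOM rounds that up to 264 so the headroom handed to xdp_prepare_buff() further down keeps the packet start long-aligned.
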
@@ -933,7 +934,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
 	if (unlikely(!host_desc)) {
 		ndev->stats.tx_dropped++;
-		return -ENOMEM;
+		return AM65_CPSW_XDP_CONSUMED;	/* drop */
 	}
 
 	am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
@@ -942,7 +943,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 			       pkt_len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
 		ndev->stats.tx_dropped++;
-		ret = -ENOMEM;
+		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
 		goto pool_free;
 	}
 
@@ -977,6 +978,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 		/* Inform BQL */
 		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
 		ndev->stats.tx_errors++;
+		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
 		goto dma_unmap;
 	}
 
@@ -996,14 +998,17 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 			     int desc_idx, int cpu, int *len)
 {
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+	struct am65_cpsw_ndev_priv *ndev_priv;
 	struct net_device *ndev = port->ndev;
+	struct am65_cpsw_ndev_stats *stats;
 	int ret = AM65_CPSW_XDP_CONSUMED;
 	struct am65_cpsw_tx_chn *tx_chn;
 	struct netdev_queue *netif_txq;
 	struct xdp_frame *xdpf;
 	struct bpf_prog *prog;
 	struct page *page;
 	u32 act;
+	int err;
 
 	prog = READ_ONCE(port->xdp_prog);
 	if (!prog)
@@ -1013,6 +1018,9 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 	/* XDP prog might have changed packet data and boundaries */
 	*len = xdp->data_end - xdp->data;
 
+	ndev_priv = netdev_priv(ndev);
+	stats = this_cpu_ptr(ndev_priv->stats);
+
 	switch (act) {
 	case XDP_PASS:
 		ret = AM65_CPSW_XDP_PASS;
@@ -1023,31 +1031,36 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 
 		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf))
-			break;
+			goto drop;
 
 		__netif_tx_lock(netif_txq, cpu);
-		ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+		err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
 		__netif_tx_unlock(netif_txq);
-		if (ret)
-			break;
+		if (err)
+			goto drop;
 
-		ndev->stats.rx_bytes += *len;
-		ndev->stats.rx_packets++;
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_bytes += *len;
+		stats->rx_packets++;
+		u64_stats_update_end(&stats->syncp);
 		ret = AM65_CPSW_XDP_CONSUMED;
 		goto out;
 	case XDP_REDIRECT:
 		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
-			break;
+			goto drop;
 
-		ndev->stats.rx_bytes += *len;
-		ndev->stats.rx_packets++;
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_bytes += *len;
+		stats->rx_packets++;
+		u64_stats_update_end(&stats->syncp);
 		ret = AM65_CPSW_XDP_REDIRECT;
 		goto out;
 	default:
 		bpf_warn_invalid_xdp_action(ndev, prog, act);
 		fallthrough;
 	case XDP_ABORTED:
+drop:
 		trace_xdp_exception(ndev, prog, act);
 		fallthrough;
 	case XDP_DROP:
@@ -1056,7 +1069,6 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 
 	page = virt_to_head_page(xdp->data);
 	am65_cpsw_put_page(rx_chn, page, true, desc_idx);
-
 out:
 	return ret;
 }
@@ -1095,7 +1107,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
 }
 
 static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
-				     u32 flow_idx, int cpu)
+				     u32 flow_idx, int cpu, int *xdp_state)
 {
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -1114,6 +1126,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	void **swdata;
 	u32 *psdata;
 
+	*xdp_state = AM65_CPSW_XDP_PASS;
 	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
 	if (ret) {
 		if (ret != -ENODATA)
@@ -1161,15 +1174,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	}
 
 	if (port->xdp_prog) {
-		xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
-
-		xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
+		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq);
+		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
				 pkt_len, false);
-
-		ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
-					cpu, &pkt_len);
-		if (ret != AM65_CPSW_XDP_PASS)
-			return ret;
+		*xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+					       cpu, &pkt_len);
+		if (*xdp_state != AM65_CPSW_XDP_PASS)
+			goto allocate;
 
 		/* Compute additional headroom to be reserved */
 		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
@@ -1193,9 +1204,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	stats->rx_bytes += pkt_len;
 	u64_stats_update_end(&stats->syncp);
 
+allocate:
 	new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
-	if (unlikely(!new_page))
+	if (unlikely(!new_page)) {
+		dev_err(dev, "page alloc failed\n");
 		return -ENOMEM;
+	}
+
 	rx_chn->pages[desc_idx] = new_page;
 
 	if (netif_dormant(ndev)) {
@@ -1229,29 +1244,29 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
 	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
 	int flow = AM65_CPSW_MAX_RX_FLOWS;
 	int cpu = smp_processor_id();
-	bool xdp_redirect = false;
+	int xdp_state_or = 0;
 	int cur_budget, ret;
+	int xdp_state;
 	int num_rx = 0;
 
 	/* process every flow */
 	while (flow--) {
 		cur_budget = budget - num_rx;
 
 		while (cur_budget--) {
-			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
-			if (ret) {
-				if (ret == AM65_CPSW_XDP_REDIRECT)
-					xdp_redirect = true;
+			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu,
							&xdp_state);
+			xdp_state_or |= xdp_state;
+			if (ret)
 				break;
-			}
 			num_rx++;
 		}
 
 		if (num_rx >= budget)
 			break;
 	}
 
-	if (xdp_redirect)
+	if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
 		xdp_do_flush();
 
 	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
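
Because the XDP state values are now single bits (AM65_CPSW_XDP_REDIRECT = BIT(0), AM65_CPSW_XDP_CONSUMED = BIT(1), AM65_CPSW_XDP_PASS = 0), the per-packet results can simply be OR-accumulated: for example, a NAPI cycle in which one frame is redirected and another is consumed by XDP_TX leaves xdp_state_or = 0x3, and the single xdp_state_or & AM65_CPSW_XDP_REDIRECT test still triggers the one deferred xdp_do_flush().
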
@@ -1918,12 +1933,13 @@ static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
 static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
				   struct xdp_frame **frames, u32 flags)
 {
+	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 	struct am65_cpsw_tx_chn *tx_chn;
 	struct netdev_queue *netif_txq;
 	int cpu = smp_processor_id();
 	int i, nxmit = 0;
 
-	tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+	tx_chn = &common->tx_chns[cpu % common->tx_ch_num];
 	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
 
 	__netif_tx_lock(netif_txq, cpu);
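
As an illustration of the queue selection change (example numbers, not taken from the patch): if only common->tx_ch_num = 2 TX channels were actually allocated on an 8-CPU system, cpu % AM65_CPSW_MAX_TX_QUEUES could previously select tx_chns[5], a channel that may never have been set up, whereas cpu % common->tx_ch_num maps CPU 5 to the valid tx_chns[1].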