14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27ac6653aSJeff Kirsher /*******************************************************************************
37ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
47ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
57ac6653aSJeff Kirsher 
6286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
77ac6653aSJeff Kirsher 
87ac6653aSJeff Kirsher 
97ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   Documentation available at:
127ac6653aSJeff Kirsher 	http://www.stlinux.com
137ac6653aSJeff Kirsher   Support available at:
147ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
157ac6653aSJeff Kirsher *******************************************************************************/
167ac6653aSJeff Kirsher 
176a81c26fSViresh Kumar #include <linux/clk.h>
187ac6653aSJeff Kirsher #include <linux/kernel.h>
197ac6653aSJeff Kirsher #include <linux/interrupt.h>
207ac6653aSJeff Kirsher #include <linux/ip.h>
217ac6653aSJeff Kirsher #include <linux/tcp.h>
227ac6653aSJeff Kirsher #include <linux/skbuff.h>
237ac6653aSJeff Kirsher #include <linux/ethtool.h>
247ac6653aSJeff Kirsher #include <linux/if_ether.h>
257ac6653aSJeff Kirsher #include <linux/crc32.h>
267ac6653aSJeff Kirsher #include <linux/mii.h>
2701789349SJiri Pirko #include <linux/if.h>
287ac6653aSJeff Kirsher #include <linux/if_vlan.h>
297ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
307ac6653aSJeff Kirsher #include <linux/slab.h>
315ec55823SJoakim Zhang #include <linux/pm_runtime.h>
327ac6653aSJeff Kirsher #include <linux/prefetch.h>
33db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
3450fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
357ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
367ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
3750fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
38891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
39eeef2f6bSJose Abreu #include <linux/phylink.h>
40b7766206SJose Abreu #include <linux/udp.h>
415fabb012SOng Boon Leong #include <linux/bpf_trace.h>
42a9ca9f9cSYunsheng Lin #include <net/page_pool/helpers.h>
434dbbe8ddSJose Abreu #include <net/pkt_cls.h>
44bba2556eSOng Boon Leong #include <net/xdp_sock_drv.h>
45891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
46286a8372SGiuseppe CAVALLARO #include "stmmac.h"
475fabb012SOng Boon Leong #include "stmmac_xdp.h"
48c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
495790cf3cSMathieu Olivari #include <linux/of_mdio.h>
5019d857c9SPhil Reid #include "dwmac1000.h"
517d9e6c5aSJose Abreu #include "dwxgmac2.h"
5242de047dSJose Abreu #include "hwif.h"
537ac6653aSJeff Kirsher 
54a6da2bbbSHolger Assmann /* As long as the interface is active, we keep the timestamping counter enabled
55a6da2bbbSHolger Assmann  * with fine resolution and binary rollover. This avoid non-monotonic behavior
56a6da2bbbSHolger Assmann  * (clock jumps) when changing timestamping settings at runtime.
57a6da2bbbSHolger Assmann  */
58a6da2bbbSHolger Assmann #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59a6da2bbbSHolger Assmann 				 PTP_TCR_TSCTRLSSR)
60a6da2bbbSHolger Assmann 
618d558f02SJose Abreu #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62f748be53SAlexandre TORGUE #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
637ac6653aSJeff Kirsher 
647ac6653aSJeff Kirsher /* Module parameters */
6532ceabcaSGiuseppe CAVALLARO #define TX_TIMEO	5000
667ac6653aSJeff Kirsher static int watchdog = TX_TIMEO;
67d3757ba4SJoe Perches module_param(watchdog, int, 0644);
6832ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
697ac6653aSJeff Kirsher 
7032ceabcaSGiuseppe CAVALLARO static int debug = -1;
71d3757ba4SJoe Perches module_param(debug, int, 0644);
7232ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
737ac6653aSJeff Kirsher 
7447d1f71fSstephen hemminger static int phyaddr = -1;
75d3757ba4SJoe Perches module_param(phyaddr, int, 0444);
767ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address");
777ac6653aSJeff Kirsher 
788531c808SChristian Marangi #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
798531c808SChristian Marangi #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
807ac6653aSJeff Kirsher 
81132c32eeSOng Boon Leong /* Limit to make sure XDP TX and slow path can coexist */
82132c32eeSOng Boon Leong #define STMMAC_XSK_TX_BUDGET_MAX	256
83132c32eeSOng Boon Leong #define STMMAC_TX_XSK_AVAIL		16
84bba2556eSOng Boon Leong #define STMMAC_RX_FILL_BATCH		16
85bba2556eSOng Boon Leong 
865fabb012SOng Boon Leong #define STMMAC_XDP_PASS		0
875fabb012SOng Boon Leong #define STMMAC_XDP_CONSUMED	BIT(0)
88be8b38a7SOng Boon Leong #define STMMAC_XDP_TX		BIT(1)
898b278a5bSOng Boon Leong #define STMMAC_XDP_REDIRECT	BIT(2)
905fabb012SOng Boon Leong 
91e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO;
92d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644);
937ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
947ac6653aSJeff Kirsher 
957ac6653aSJeff Kirsher static int pause = PAUSE_TIME;
96d3757ba4SJoe Perches module_param(pause, int, 0644);
977ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time");
987ac6653aSJeff Kirsher 
997ac6653aSJeff Kirsher #define TC_DEFAULT 64
1007ac6653aSJeff Kirsher static int tc = TC_DEFAULT;
101d3757ba4SJoe Perches module_param(tc, int, 0644);
1027ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value");
1037ac6653aSJeff Kirsher 
104d916701cSGiuseppe CAVALLARO #define	DEFAULT_BUFSIZE	1536
105d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE;
106d3757ba4SJoe Perches module_param(buf_sz, int, 0644);
1077ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size");
1087ac6653aSJeff Kirsher 
10922ad3838SGiuseppe Cavallaro #define	STMMAC_RX_COPYBREAK	256
11022ad3838SGiuseppe Cavallaro 
1117ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
1127ac6653aSJeff Kirsher 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
1137ac6653aSJeff Kirsher 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
1147ac6653aSJeff Kirsher 
115d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER	1000
116d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117d3757ba4SJoe Perches module_param(eee_timer, int, 0644);
118d765955dSGiuseppe CAVALLARO MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119388e201dSVineetha G. Jaya Kumaran #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120d765955dSGiuseppe CAVALLARO 
12122d3efe5SPavel Machek /* By default the driver will use the ring mode to manage tx and rx descriptors,
12222d3efe5SPavel Machek  * but allow user to force to use the chain instead of the ring
1234a7d666aSGiuseppe CAVALLARO  */
1244a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode;
125d3757ba4SJoe Perches module_param(chain_mode, int, 0444);
1264a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
1274a7d666aSGiuseppe CAVALLARO 
1287ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
1298532f613SOng Boon Leong /* For MSI interrupts handling */
1308532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
1318532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
1328532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
1338532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134f9ec5723SChristian Marangi static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135f9ec5723SChristian Marangi static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136f9ec5723SChristian Marangi static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137132c32eeSOng Boon Leong static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138132c32eeSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
1393a6c12a0SXiaoliang Yang static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1403a6c12a0SXiaoliang Yang 					  u32 rxmode, u32 chan);
1417ac6653aSJeff Kirsher 
14250fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
143481a7d15SJiping Ma static const struct net_device_ops stmmac_netdev_ops;
1448d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev);
145466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev);
146bfab27a1SGiuseppe CAVALLARO #endif
147bfab27a1SGiuseppe CAVALLARO 
148d5a05e69SVincent Whitchurch #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
1499125cdd1SGiuseppe CAVALLARO 
/* stmmac_bus_clks_config - gate or ungate the MAC bus clocks.
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 *
 * Enables (or disables) stmmac_clk and pclk, then forwards the request to
 * the optional platform clks_config() callback.  On any enable failure the
 * clocks already enabled are rolled back and the error is returned; the
 * disable path always succeeds (the platform callback's result is ignored
 * there, matching the existing contract).
 */
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret;

	if (!enabled) {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
		return 0;
	}

	ret = clk_prepare_enable(priv->plat->stmmac_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(priv->plat->pclk);
	if (ret)
		goto err_pclk;

	if (priv->plat->clks_config) {
		ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
		if (ret)
			goto err_bsp;
	}

	return 0;

err_bsp:
	clk_disable_unprepare(priv->plat->pclk);
err_pclk:
	clk_disable_unprepare(priv->plat->stmmac_clk);
	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
1815ec55823SJoakim Zhang 
1827ac6653aSJeff Kirsher /**
1837ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
184732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and set a default in case of
185732fdf0eSGiuseppe CAVALLARO  * errors.
1867ac6653aSJeff Kirsher  */
stmmac_verify_args(void)1877ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1887ac6653aSJeff Kirsher {
1897ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1907ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
191d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1937ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1947ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1957ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1967ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1977ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1987ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
199d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
200d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
2017ac6653aSJeff Kirsher }
2027ac6653aSJeff Kirsher 
__stmmac_disable_all_queues(struct stmmac_priv * priv)203bba2556eSOng Boon Leong static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204c22a3f48SJoao Pinto {
205c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2068fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2078fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208c22a3f48SJoao Pinto 	u32 queue;
209c22a3f48SJoao Pinto 
2108fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
2118fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
212c22a3f48SJoao Pinto 
213132c32eeSOng Boon Leong 		if (stmmac_xdp_is_enabled(priv) &&
214132c32eeSOng Boon Leong 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215132c32eeSOng Boon Leong 			napi_disable(&ch->rxtx_napi);
216132c32eeSOng Boon Leong 			continue;
217132c32eeSOng Boon Leong 		}
218132c32eeSOng Boon Leong 
2194ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
2204ccb4585SJose Abreu 			napi_disable(&ch->rx_napi);
2214ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
2224ccb4585SJose Abreu 			napi_disable(&ch->tx_napi);
223c22a3f48SJoao Pinto 	}
224c22a3f48SJoao Pinto }
225c22a3f48SJoao Pinto 
226c22a3f48SJoao Pinto /**
227bba2556eSOng Boon Leong  * stmmac_disable_all_queues - Disable all queues
228bba2556eSOng Boon Leong  * @priv: driver private structure
229bba2556eSOng Boon Leong  */
stmmac_disable_all_queues(struct stmmac_priv * priv)230bba2556eSOng Boon Leong static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231bba2556eSOng Boon Leong {
232bba2556eSOng Boon Leong 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q;
234bba2556eSOng Boon Leong 	u32 queue;
235bba2556eSOng Boon Leong 
236bba2556eSOng Boon Leong 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237bba2556eSOng Boon Leong 	for (queue = 0; queue < rx_queues_cnt; queue++) {
2388531c808SChristian Marangi 		rx_q = &priv->dma_conf.rx_queue[queue];
239bba2556eSOng Boon Leong 		if (rx_q->xsk_pool) {
240bba2556eSOng Boon Leong 			synchronize_rcu();
241bba2556eSOng Boon Leong 			break;
242bba2556eSOng Boon Leong 		}
243bba2556eSOng Boon Leong 	}
244bba2556eSOng Boon Leong 
245bba2556eSOng Boon Leong 	__stmmac_disable_all_queues(priv);
246bba2556eSOng Boon Leong }
247bba2556eSOng Boon Leong 
248bba2556eSOng Boon Leong /**
249c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
250c22a3f48SJoao Pinto  * @priv: driver private structure
251c22a3f48SJoao Pinto  */
stmmac_enable_all_queues(struct stmmac_priv * priv)252c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253c22a3f48SJoao Pinto {
254c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2558fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2568fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257c22a3f48SJoao Pinto 	u32 queue;
258c22a3f48SJoao Pinto 
2598fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
2608fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
261c22a3f48SJoao Pinto 
262132c32eeSOng Boon Leong 		if (stmmac_xdp_is_enabled(priv) &&
263132c32eeSOng Boon Leong 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264132c32eeSOng Boon Leong 			napi_enable(&ch->rxtx_napi);
265132c32eeSOng Boon Leong 			continue;
266132c32eeSOng Boon Leong 		}
267132c32eeSOng Boon Leong 
2684ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
2694ccb4585SJose Abreu 			napi_enable(&ch->rx_napi);
2704ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
2714ccb4585SJose Abreu 			napi_enable(&ch->tx_napi);
272c22a3f48SJoao Pinto 	}
273c22a3f48SJoao Pinto }
274c22a3f48SJoao Pinto 
stmmac_service_event_schedule(struct stmmac_priv * priv)27534877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
27634877a15SJose Abreu {
27734877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
27834877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
27934877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
28034877a15SJose Abreu }
28134877a15SJose Abreu 
stmmac_global_err(struct stmmac_priv * priv)28234877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv)
28334877a15SJose Abreu {
28434877a15SJose Abreu 	netif_carrier_off(priv->dev);
28534877a15SJose Abreu 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
28634877a15SJose Abreu 	stmmac_service_event_schedule(priv);
28734877a15SJose Abreu }
28834877a15SJose Abreu 
289c22a3f48SJoao Pinto /**
29032ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
29132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
29232ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
29332ceabcaSGiuseppe CAVALLARO  * clock input.
29432ceabcaSGiuseppe CAVALLARO  * Note:
29532ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
29632ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
29732ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
29832ceabcaSGiuseppe CAVALLARO  *	documentation). Viceversa the driver will try to set the MDC
29932ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
30032ceabcaSGiuseppe CAVALLARO  */
stmmac_clk_csr_set(struct stmmac_priv * priv)301cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302cd7201f4SGiuseppe CAVALLARO {
303cd7201f4SGiuseppe CAVALLARO 	u32 clk_rate;
304cd7201f4SGiuseppe CAVALLARO 
305f573c0b9Sjpinto 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306cd7201f4SGiuseppe CAVALLARO 
307cd7201f4SGiuseppe CAVALLARO 	/* Platform provided default clk_csr would be assumed valid
308ceb69499SGiuseppe CAVALLARO 	 * for all other cases except for the below mentioned ones.
309ceb69499SGiuseppe CAVALLARO 	 * For values higher than the IEEE 802.3 specified frequency
310ceb69499SGiuseppe CAVALLARO 	 * we can not estimate the proper divider as it is not known
311ceb69499SGiuseppe CAVALLARO 	 * the frequency of clk_csr_i. So we do not change the default
312ceb69499SGiuseppe CAVALLARO 	 * divider.
313ceb69499SGiuseppe CAVALLARO 	 */
314cd7201f4SGiuseppe CAVALLARO 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315cd7201f4SGiuseppe CAVALLARO 		if (clk_rate < CSR_F_35M)
316cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_20_35M;
317cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_35_60M;
319cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_60_100M;
321cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_100_150M;
323cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_150_250M;
32508dad2f4SJesper Nilsson 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_250_300M;
327ceb69499SGiuseppe CAVALLARO 	}
3289f93ac8dSLABBE Corentin 
329d8daff28SBartosz Golaszewski 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
3309f93ac8dSLABBE Corentin 		if (clk_rate > 160000000)
3319f93ac8dSLABBE Corentin 			priv->clk_csr = 0x03;
3329f93ac8dSLABBE Corentin 		else if (clk_rate > 80000000)
3339f93ac8dSLABBE Corentin 			priv->clk_csr = 0x02;
3349f93ac8dSLABBE Corentin 		else if (clk_rate > 40000000)
3359f93ac8dSLABBE Corentin 			priv->clk_csr = 0x01;
3369f93ac8dSLABBE Corentin 		else
3379f93ac8dSLABBE Corentin 			priv->clk_csr = 0;
3389f93ac8dSLABBE Corentin 	}
3397d9e6c5aSJose Abreu 
3407d9e6c5aSJose Abreu 	if (priv->plat->has_xgmac) {
3417d9e6c5aSJose Abreu 		if (clk_rate > 400000000)
3427d9e6c5aSJose Abreu 			priv->clk_csr = 0x5;
3437d9e6c5aSJose Abreu 		else if (clk_rate > 350000000)
3447d9e6c5aSJose Abreu 			priv->clk_csr = 0x4;
3457d9e6c5aSJose Abreu 		else if (clk_rate > 300000000)
3467d9e6c5aSJose Abreu 			priv->clk_csr = 0x3;
3477d9e6c5aSJose Abreu 		else if (clk_rate > 250000000)
3487d9e6c5aSJose Abreu 			priv->clk_csr = 0x2;
3497d9e6c5aSJose Abreu 		else if (clk_rate > 150000000)
3507d9e6c5aSJose Abreu 			priv->clk_csr = 0x1;
3517d9e6c5aSJose Abreu 		else
3527d9e6c5aSJose Abreu 			priv->clk_csr = 0x0;
3537d9e6c5aSJose Abreu 	}
354cd7201f4SGiuseppe CAVALLARO }
355cd7201f4SGiuseppe CAVALLARO 
print_pkt(unsigned char * buf,int len)3567ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len)
3577ac6653aSJeff Kirsher {
358424c4f78SAndy Shevchenko 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359424c4f78SAndy Shevchenko 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
3607ac6653aSJeff Kirsher }
3617ac6653aSJeff Kirsher 
stmmac_tx_avail(struct stmmac_priv * priv,u32 queue)362ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
3637ac6653aSJeff Kirsher {
3648531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365a6a3e026SLABBE Corentin 	u32 avail;
366e3ad57c9SGiuseppe Cavallaro 
367ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
368ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369e3ad57c9SGiuseppe Cavallaro 	else
3708531c808SChristian Marangi 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371e3ad57c9SGiuseppe Cavallaro 
372e3ad57c9SGiuseppe Cavallaro 	return avail;
373e3ad57c9SGiuseppe Cavallaro }
374e3ad57c9SGiuseppe Cavallaro 
37554139cf3SJoao Pinto /**
37654139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
37754139cf3SJoao Pinto  * @priv: driver private structure
37854139cf3SJoao Pinto  * @queue: RX queue index
37954139cf3SJoao Pinto  */
stmmac_rx_dirty(struct stmmac_priv * priv,u32 queue)38054139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381e3ad57c9SGiuseppe Cavallaro {
3828531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383a6a3e026SLABBE Corentin 	u32 dirty;
384e3ad57c9SGiuseppe Cavallaro 
38554139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
38654139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387e3ad57c9SGiuseppe Cavallaro 	else
3888531c808SChristian Marangi 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389e3ad57c9SGiuseppe Cavallaro 
390e3ad57c9SGiuseppe Cavallaro 	return dirty;
3917ac6653aSJeff Kirsher }
3927ac6653aSJeff Kirsher 
stmmac_lpi_entry_timer_config(struct stmmac_priv * priv,bool en)393be1c7eaeSVineetha G. Jaya Kumaran static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394be1c7eaeSVineetha G. Jaya Kumaran {
395be1c7eaeSVineetha G. Jaya Kumaran 	int tx_lpi_timer;
396be1c7eaeSVineetha G. Jaya Kumaran 
397be1c7eaeSVineetha G. Jaya Kumaran 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398be1c7eaeSVineetha G. Jaya Kumaran 	priv->eee_sw_timer_en = en ? 0 : 1;
399be1c7eaeSVineetha G. Jaya Kumaran 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400be1c7eaeSVineetha G. Jaya Kumaran 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401be1c7eaeSVineetha G. Jaya Kumaran }
402be1c7eaeSVineetha G. Jaya Kumaran 
40332ceabcaSGiuseppe CAVALLARO /**
404732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter in LPI mode
40532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
406732fdf0eSGiuseppe CAVALLARO  * Description: this function is to verify and enter in LPI mode in case of
407732fdf0eSGiuseppe CAVALLARO  * EEE.
40832ceabcaSGiuseppe CAVALLARO  */
stmmac_enable_eee_mode(struct stmmac_priv * priv)409c74ead22SJisheng Zhang static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410d765955dSGiuseppe CAVALLARO {
411ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412ce736788SJoao Pinto 	u32 queue;
413ce736788SJoao Pinto 
414ce736788SJoao Pinto 	/* check if all TX queues have the work finished */
415ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4168531c808SChristian Marangi 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417ce736788SJoao Pinto 
418ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
419c74ead22SJisheng Zhang 			return -EBUSY; /* still unfinished work */
420ce736788SJoao Pinto 	}
421ce736788SJoao Pinto 
422d765955dSGiuseppe CAVALLARO 	/* Check and enter in LPI mode */
423ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
424c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
4259d0c0d5eSBartosz Golaszewski 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426c74ead22SJisheng Zhang 	return 0;
427d765955dSGiuseppe CAVALLARO }
428d765955dSGiuseppe CAVALLARO 
42932ceabcaSGiuseppe CAVALLARO /**
430732fdf0eSGiuseppe CAVALLARO  * stmmac_disable_eee_mode - disable and exit from LPI mode
43132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
43232ceabcaSGiuseppe CAVALLARO  * Description: this function is to exit and disable EEE in case of
43332ceabcaSGiuseppe CAVALLARO  * LPI state is true. This is called by the xmit.
43432ceabcaSGiuseppe CAVALLARO  */
stmmac_disable_eee_mode(struct stmmac_priv * priv)435d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436d765955dSGiuseppe CAVALLARO {
437be1c7eaeSVineetha G. Jaya Kumaran 	if (!priv->eee_sw_timer_en) {
438be1c7eaeSVineetha G. Jaya Kumaran 		stmmac_lpi_entry_timer_config(priv, 0);
439be1c7eaeSVineetha G. Jaya Kumaran 		return;
440be1c7eaeSVineetha G. Jaya Kumaran 	}
441be1c7eaeSVineetha G. Jaya Kumaran 
442c10d4c82SJose Abreu 	stmmac_reset_eee_mode(priv, priv->hw);
443d765955dSGiuseppe CAVALLARO 	del_timer_sync(&priv->eee_ctrl_timer);
444d765955dSGiuseppe CAVALLARO 	priv->tx_path_in_lpi_mode = false;
445d765955dSGiuseppe CAVALLARO }
446d765955dSGiuseppe CAVALLARO 
447d765955dSGiuseppe CAVALLARO /**
448732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449d0ea5cbdSJesse Brandeburg  * @t:  timer_list struct containing private info
450d765955dSGiuseppe CAVALLARO  * Description:
45132ceabcaSGiuseppe CAVALLARO  *  if there is no data transfer and if we are not in LPI state,
452d765955dSGiuseppe CAVALLARO  *  then MAC Transmitter can be moved to LPI state.
453d765955dSGiuseppe CAVALLARO  */
stmmac_eee_ctrl_timer(struct timer_list * t)454e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t)
455d765955dSGiuseppe CAVALLARO {
456e99e88a9SKees Cook 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457d765955dSGiuseppe CAVALLARO 
458c74ead22SJisheng Zhang 	if (stmmac_enable_eee_mode(priv))
459388e201dSVineetha G. Jaya Kumaran 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460d765955dSGiuseppe CAVALLARO }
461d765955dSGiuseppe CAVALLARO 
462d765955dSGiuseppe CAVALLARO /**
463732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_init - init EEE
46432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
465d765955dSGiuseppe CAVALLARO  * Description:
466732fdf0eSGiuseppe CAVALLARO  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467732fdf0eSGiuseppe CAVALLARO  *  can also manage EEE, this function enable the LPI state and start related
468732fdf0eSGiuseppe CAVALLARO  *  timer.
469d765955dSGiuseppe CAVALLARO  */
stmmac_eee_init(struct stmmac_priv * priv)470d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv)
471d765955dSGiuseppe CAVALLARO {
472388e201dSVineetha G. Jaya Kumaran 	int eee_tw_timer = priv->eee_tw_timer;
473879626e3SJerome Brunet 
474f5351ef7SGiuseppe CAVALLARO 	/* Using PCS we cannot dial with the phy registers at this stage
475f5351ef7SGiuseppe CAVALLARO 	 * so we do not support extra feature like EEE.
476f5351ef7SGiuseppe CAVALLARO 	 */
477a47b9e15SDejin Zheng 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478a47b9e15SDejin Zheng 	    priv->hw->pcs == STMMAC_PCS_RTBI)
47974371272SJose Abreu 		return false;
480f5351ef7SGiuseppe CAVALLARO 
48174371272SJose Abreu 	/* Check if MAC core supports the EEE feature. */
48274371272SJose Abreu 	if (!priv->dma_cap.eee)
48374371272SJose Abreu 		return false;
484d765955dSGiuseppe CAVALLARO 
48529555fa3SThierry Reding 	mutex_lock(&priv->lock);
48674371272SJose Abreu 
48774371272SJose Abreu 	/* Check if it needs to be deactivated */
488177d935aSJon Hunter 	if (!priv->eee_active) {
489177d935aSJon Hunter 		if (priv->eee_enabled) {
49038ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "disable EEE\n");
491be1c7eaeSVineetha G. Jaya Kumaran 			stmmac_lpi_entry_timer_config(priv, 0);
49283bf79b6SGiuseppe CAVALLARO 			del_timer_sync(&priv->eee_ctrl_timer);
493388e201dSVineetha G. Jaya Kumaran 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494d4aeaed8SWong Vee Khee 			if (priv->hw->xpcs)
495d4aeaed8SWong Vee Khee 				xpcs_config_eee(priv->hw->xpcs,
496d4aeaed8SWong Vee Khee 						priv->plat->mult_fact_100ns,
497d4aeaed8SWong Vee Khee 						false);
498177d935aSJon Hunter 		}
4990867bb97SJon Hunter 		mutex_unlock(&priv->lock);
50074371272SJose Abreu 		return false;
50174371272SJose Abreu 	}
50274371272SJose Abreu 
50374371272SJose Abreu 	if (priv->eee_active && !priv->eee_enabled) {
50474371272SJose Abreu 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
50574371272SJose Abreu 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506388e201dSVineetha G. Jaya Kumaran 				     eee_tw_timer);
507656ed8b0SWong Vee Khee 		if (priv->hw->xpcs)
508656ed8b0SWong Vee Khee 			xpcs_config_eee(priv->hw->xpcs,
509656ed8b0SWong Vee Khee 					priv->plat->mult_fact_100ns,
510656ed8b0SWong Vee Khee 					true);
51183bf79b6SGiuseppe CAVALLARO 	}
51274371272SJose Abreu 
513be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514be1c7eaeSVineetha G. Jaya Kumaran 		del_timer_sync(&priv->eee_ctrl_timer);
515be1c7eaeSVineetha G. Jaya Kumaran 		priv->tx_path_in_lpi_mode = false;
516be1c7eaeSVineetha G. Jaya Kumaran 		stmmac_lpi_entry_timer_config(priv, 1);
517be1c7eaeSVineetha G. Jaya Kumaran 	} else {
518be1c7eaeSVineetha G. Jaya Kumaran 		stmmac_lpi_entry_timer_config(priv, 0);
519be1c7eaeSVineetha G. Jaya Kumaran 		mod_timer(&priv->eee_ctrl_timer,
520be1c7eaeSVineetha G. Jaya Kumaran 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521be1c7eaeSVineetha G. Jaya Kumaran 	}
522388e201dSVineetha G. Jaya Kumaran 
52329555fa3SThierry Reding 	mutex_unlock(&priv->lock);
52438ddc59dSLABBE Corentin 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
52574371272SJose Abreu 	return true;
526d765955dSGiuseppe CAVALLARO }
527d765955dSGiuseppe CAVALLARO 
528732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps
52932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
530ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
531891434b1SRayagond Kokatanur  * @skb : the socket buffer
532891434b1SRayagond Kokatanur  * Description :
533891434b1SRayagond Kokatanur  * This function will read timestamp from the descriptor & pass it to stack.
534891434b1SRayagond Kokatanur  * and also perform some sanity checks.
535891434b1SRayagond Kokatanur  */
stmmac_get_tx_hwtstamp(struct stmmac_priv * priv,struct dma_desc * p,struct sk_buff * skb)536891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *p, struct sk_buff *skb)
538891434b1SRayagond Kokatanur {
539891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps shhwtstamp;
54025e80cd0SJose Abreu 	bool found = false;
541df103170SNathan Chancellor 	u64 ns = 0;
542891434b1SRayagond Kokatanur 
543891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en)
544891434b1SRayagond Kokatanur 		return;
545891434b1SRayagond Kokatanur 
546ceb69499SGiuseppe CAVALLARO 	/* exit if skb doesn't support hw tstamp */
54775e4364fSdamuzi000 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548891434b1SRayagond Kokatanur 		return;
549891434b1SRayagond Kokatanur 
550891434b1SRayagond Kokatanur 	/* check tx tstamp status */
55142de047dSJose Abreu 	if (stmmac_get_tx_timestamp_status(priv, p)) {
55242de047dSJose Abreu 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
55325e80cd0SJose Abreu 		found = true;
55425e80cd0SJose Abreu 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
55525e80cd0SJose Abreu 		found = true;
55625e80cd0SJose Abreu 	}
557891434b1SRayagond Kokatanur 
55825e80cd0SJose Abreu 	if (found) {
559c6d5f193SKurt Kanzenbach 		ns -= priv->plat->cdc_error_adj;
5603600be5fSVoon Weifeng 
561891434b1SRayagond Kokatanur 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562891434b1SRayagond Kokatanur 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563ba1ffd74SGiuseppe CAVALLARO 
56433d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565891434b1SRayagond Kokatanur 		/* pass tstamp to stack */
566891434b1SRayagond Kokatanur 		skb_tstamp_tx(skb, &shhwtstamp);
567ba1ffd74SGiuseppe CAVALLARO 	}
568891434b1SRayagond Kokatanur }
569891434b1SRayagond Kokatanur 
570732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
57132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
572ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
573ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
574891434b1SRayagond Kokatanur  * @skb : the socket buffer
575891434b1SRayagond Kokatanur  * Description :
576891434b1SRayagond Kokatanur  * This function will read the received packet's timestamp from the descriptor
577891434b1SRayagond Kokatanur  * and pass it to the stack. It also performs some sanity checks.
578891434b1SRayagond Kokatanur  */
stmmac_get_rx_hwtstamp(struct stmmac_priv * priv,struct dma_desc * p,struct dma_desc * np,struct sk_buff * skb)579ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
581891434b1SRayagond Kokatanur {
582891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
58398870943SJose Abreu 	struct dma_desc *desc = p;
584df103170SNathan Chancellor 	u64 ns = 0;
585891434b1SRayagond Kokatanur 
586891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
587891434b1SRayagond Kokatanur 		return;
588ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
5897d9e6c5aSJose Abreu 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
59098870943SJose Abreu 		desc = np;
591891434b1SRayagond Kokatanur 
59298870943SJose Abreu 	/* Check if timestamp is available */
59342de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
59442de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
5953600be5fSVoon Weifeng 
596c6d5f193SKurt Kanzenbach 		ns -= priv->plat->cdc_error_adj;
5973600be5fSVoon Weifeng 
59833d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
600891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602ba1ffd74SGiuseppe CAVALLARO 	} else  {
60333d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604ba1ffd74SGiuseppe CAVALLARO 	}
605891434b1SRayagond Kokatanur }
606891434b1SRayagond Kokatanur 
607891434b1SRayagond Kokatanur /**
608d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_set - control hardware timestamping.
609891434b1SRayagond Kokatanur  *  @dev: device pointer.
6108d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
612891434b1SRayagond Kokatanur  *  Description:
613891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing(TX)
614891434b1SRayagond Kokatanur  *  and incoming(RX) packets time stamping based on user input.
615891434b1SRayagond Kokatanur  *  Return Value:
616891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
617891434b1SRayagond Kokatanur  */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	/* Without a basic or advanced timestamping unit there is nothing
	 * to configure; make sure both directions are marked disabled.
	 */
	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* Advanced (PTPv2-capable) timestamping unit: map each requested RX
	 * filter onto the matching PTP_TCR_* control bits. The rx_filter
	 * field is rewritten to tell userspace what is actually provided.
	 */
	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			/* older cores need the event-enable bit set explicitly */
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* Basic (IEEE 1588-2002) unit: either no RX timestamping or
		 * a single coarse mode, reported as PTP v1 L4 events.
		 */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	/* Keep the timestamping block running; filter bits are only added
	 * when at least one direction is enabled.
	 */
	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* Remember the accepted config so stmmac_hwtstamp_get() can report
	 * it and so the flags can be restored after suspend/resume.
	 */
	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
803d6228b7cSArtem Panfilov 
804d6228b7cSArtem Panfilov /**
805d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_get - read hardware timestamping.
806d6228b7cSArtem Panfilov  *  @dev: device pointer.
807d6228b7cSArtem Panfilov  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808d6228b7cSArtem Panfilov  *  a proprietary structure used to pass information to the driver.
809d6228b7cSArtem Panfilov  *  Description:
810d6228b7cSArtem Panfilov  *  This function obtains the current hardware timestamping settings
811d0ea5cbdSJesse Brandeburg  *  as requested.
812d6228b7cSArtem Panfilov  */
stmmac_hwtstamp_get(struct net_device * dev,struct ifreq * ifr)813d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814d6228b7cSArtem Panfilov {
815d6228b7cSArtem Panfilov 	struct stmmac_priv *priv = netdev_priv(dev);
816d6228b7cSArtem Panfilov 	struct hwtstamp_config *config = &priv->tstamp_config;
817d6228b7cSArtem Panfilov 
818d6228b7cSArtem Panfilov 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819d6228b7cSArtem Panfilov 		return -EOPNOTSUPP;
820d6228b7cSArtem Panfilov 
821d6228b7cSArtem Panfilov 	return copy_to_user(ifr->ifr_data, config,
822d6228b7cSArtem Panfilov 			    sizeof(*config)) ? -EFAULT : 0;
823891434b1SRayagond Kokatanur }
824891434b1SRayagond Kokatanur 
82532ceabcaSGiuseppe CAVALLARO /**
826a6da2bbbSHolger Assmann  * stmmac_init_tstamp_counter - init hardware timestamping counter
827a6da2bbbSHolger Assmann  * @priv: driver private structure
828a6da2bbbSHolger Assmann  * @systime_flags: timestamping flags
829a6da2bbbSHolger Assmann  * Description:
830a6da2bbbSHolger Assmann  * Initialize hardware counter for packet timestamping.
831a6da2bbbSHolger Assmann  * This is valid as long as the interface is open and not suspended.
832a6da2bbbSHolger Assmann  * Will be rerun after resuming from suspend, case in which the timestamping
833a6da2bbbSHolger Assmann  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834a6da2bbbSHolger Assmann  */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool is_xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 curr_time;
	u64 freq_div_ratio;
	u32 sub_sec_inc = 0;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   is_xmac, &sub_sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sub_sec_inc;

	/* calculate default added value:
	 * addend = (2^32)/freq_div_ratio
	 * where freq_div_ratio = 1e9ns/sub_sec_inc
	 */
	freq_div_ratio = div_u64(1000000000ULL, sub_sec_inc);
	priv->default_addend = div_u64(freq_div_ratio << 32,
				       priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&curr_time);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)curr_time.tv_sec,
			    curr_time.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875a6da2bbbSHolger Assmann 
876a6da2bbbSHolger Assmann /**
877732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
87832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
879732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
88032ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
881732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
88232ceabcaSGiuseppe CAVALLARO  */
stmmac_init_ptp(struct stmmac_priv * priv)88392ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
884891434b1SRayagond Kokatanur {
8857d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886a6da2bbbSHolger Assmann 	int ret;
8877d9e6c5aSJose Abreu 
88894c82de4SMohammad Athari Bin Ismail 	if (priv->plat->ptp_clk_freq_config)
88994c82de4SMohammad Athari Bin Ismail 		priv->plat->ptp_clk_freq_config(priv);
89094c82de4SMohammad Athari Bin Ismail 
891a6da2bbbSHolger Assmann 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892a6da2bbbSHolger Assmann 	if (ret)
893a6da2bbbSHolger Assmann 		return ret;
89492ba6888SRayagond Kokatanur 
895891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
8967d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
8977d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
898be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
899be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
9027cd01399SVince Bridgers 
903be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
904be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
9057cd01399SVince Bridgers 
906be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
907be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
908be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909891434b1SRayagond Kokatanur 
910891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
911891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
91292ba6888SRayagond Kokatanur 
91326cfb838SJohannes Zink 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
91426cfb838SJohannes Zink 		stmmac_hwtstamp_correct_latency(priv, priv);
91526cfb838SJohannes Zink 
916c30a70d3SGiuseppe CAVALLARO 	return 0;
91792ba6888SRayagond Kokatanur }
91892ba6888SRayagond Kokatanur 
/* stmmac_release_ptp - release PTP resources
 * @priv: driver private structure
 * Description: counterpart of stmmac_init_ptp(); gates the PTP reference
 * clock and unregisters the PTP clock driver.
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
924891434b1SRayagond Kokatanur 
9257ac6653aSJeff Kirsher /**
92629feff39SJoao Pinto  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
92729feff39SJoao Pinto  *  @priv: driver private structure
928d0ea5cbdSJesse Brandeburg  *  @duplex: duplex passed to the next function
92929feff39SJoao Pinto  *  Description: It is used for configuring the flow control in all queues
93029feff39SJoao Pinto  */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_queues = priv->plat->tx_queues_to_use;

	/* Apply the cached flow-control/pause settings to every TX queue */
	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_queues);
}
93829feff39SJoao Pinto 
static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	struct mac_device_info *hw = priv->hw;

	/* An attached XPCS takes precedence over a Lynx PCS */
	if (hw->xpcs)
		return &hw->xpcs->pcs;

	/* NULL when no Lynx PCS was probed, i.e. no PCS at all */
	return hw->lynx_pcs;
}
95272e94511SRussell King (Oracle) 
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Intentionally empty: nothing to do here, xpcs_config() handles
	 * everything needed for this phylink callback.
	 */
}
958eeef2f6bSJose Abreu 
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;

	if (is_up && fpe_cfg->hs_enable) {
		/* Link is up with the FPE handshake enabled: start the
		 * verification exchange by sending a verify mPacket.
		 */
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
					MPACKET_VERIFY);
	} else {
		/* Link down (or handshake disabled): reset both the local
		 * and the link-partner FPE state machines.
		 */
		fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
		fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
	}
}
9745a558611SOng Boon Leong 
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	/* Disable the MAC first, then tear down the EEE state */
	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	/* re-evaluate EEE now that eee_active is false */
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	/* Only touch FPE state when the hardware supports frame preemption */
	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}
9899ad372fcSJose Abreu 
/* stmmac_mac_link_up - phylink link-up callback
 * Programs the MAC control register for the negotiated speed/duplex,
 * applies flow control, re-enables the MAC and brings up EEE/FPE state.
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 old_ctrl, ctrl;

	/* Some platforms must power the SerDes up only after PHY link-up */
	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup)
		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	/* Translate the negotiated speed into the interface-specific
	 * MAC_CTRL_REG speed bits; an unsupported speed aborts the update.
	 */
	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	/* Let the platform glue apply any speed-dependent fixups */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		priv->flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_TX;
	else
		priv->flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex);

	/* Skip the register write when nothing actually changed */
	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	/* Re-enable the MAC and bring EEE up when both PHY and MAC allow it */
	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active =
			phy_init_eee(phy, !(priv->plat->flags &
				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);
}
11069ad372fcSJose Abreu 
/* Callbacks phylink uses to drive this MAC; registered via phylink_create()
 * in stmmac_phy_setup().
 */
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};
1113eeef2f6bSJose Abreu 
111429feff39SJoao Pinto /**
1115732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
111632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
111732ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
111832ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
111932ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
112032ceabcaSGiuseppe CAVALLARO  */
stmmac_check_pcs_mode(struct stmmac_priv * priv)1121e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122e58bb43fSGiuseppe CAVALLARO {
1123a014c355SRussell King (Oracle) 	int interface = priv->plat->mac_interface;
1124e58bb43fSGiuseppe CAVALLARO 
1125e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
11260d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
11270d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
11280d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
11290d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
113038ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
11313fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
11320d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
113338ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
11343fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135e58bb43fSGiuseppe CAVALLARO 		}
1136e58bb43fSGiuseppe CAVALLARO 	}
1137e58bb43fSGiuseppe CAVALLARO }
1138e58bb43fSGiuseppe CAVALLARO 
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success, -ENODEV if no PHY could be found, or the error returned
 *  by the phylink connect call.
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	int ret;

	/* Nothing to attach when phylink is not expecting a PHY
	 * (e.g. a fixed link or in-band-managed setup).
	 */
	if (!phylink_expects_phy(priv->phylink))
		return 0;

	/* Prefer the platform-provided port node, fall back to the
	 * device's own firmware node.
	 */
	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		/* Look the PHY up on our own MDIO bus at the platform
		 * provided address.
		 */
		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		/* Drop the reference taken by fwnode_get_phy_node();
		 * phylink_fwnode_phy_connect() re-resolves the PHY from
		 * @fwnode itself.
		 */
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
	}

	/* Without MAC-level PMT, Wake-on-LAN capability comes from the PHY */
	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
		device_set_wakeup_enable(priv->device, !!wol.wolopts);
	}

	return ret;
}
120074371272SJose Abreu 
/**
 * stmmac_phy_setup - create and configure the phylink instance
 * @priv: driver private structure
 * Description: fills in priv->phylink_config (supported interfaces, MAC
 * capabilities, optional max-speed limit) and creates the phylink instance
 * bound to &stmmac_phylink_mac_ops, storing it in priv->phylink.
 * Return value: 0 on success, a negative errno from phylink_create()
 * otherwise.
 */
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data;
	int mode = priv->plat->phy_interface;
	struct fwnode_handle *fwnode;
	struct phylink *phylink;
	int max_speed;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	/* PM of the attached PHY is handled by this MAC driver */
	priv->phylink_config.mac_managed_pm = true;

	mdio_bus_data = priv->plat->mdio_bus_data;
	if (mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	/* Set the platform/firmware specified interface mode. Note, phylink
	 * deals with the PHY interface mode, not the MAC interface mode.
	 */
	__set_bit(mode, priv->phylink_config.supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		xpcs_get_interfaces(priv->hw->xpcs,
				    priv->phylink_config.supported_interfaces);

	/* Get the MAC specific capabilities (fills priv->hw->link.caps) */
	stmmac_mac_phylink_get_caps(priv);

	priv->phylink_config.mac_capabilities = priv->hw->link.caps;

	/* Optionally clamp the advertised speeds to the platform limit */
	max_speed = priv->plat->max_speed;
	if (max_speed)
		phylink_limit_mac_speed(&priv->phylink_config, max_speed);

	/* Same fwnode fallback order as stmmac_init_phy() */
	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}
12497ac6653aSJeff Kirsher 
stmmac_display_rx_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1250ba39b344SChristian Marangi static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1251ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf)
1252c24602efSGiuseppe CAVALLARO {
125354139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1254bfaf91caSJoakim Zhang 	unsigned int desc_size;
125571fedb01SJoao Pinto 	void *head_rx;
125654139cf3SJoao Pinto 	u32 queue;
125754139cf3SJoao Pinto 
125854139cf3SJoao Pinto 	/* Display RX rings */
125954139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
1260ba39b344SChristian Marangi 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
126154139cf3SJoao Pinto 
126254139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1263d0225e7dSAlexandre TORGUE 
1264bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
126554139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
1266bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1267bfaf91caSJoakim Zhang 		} else {
126854139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
1269bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1270bfaf91caSJoakim Zhang 		}
127171fedb01SJoao Pinto 
127271fedb01SJoao Pinto 		/* Display RX ring */
1273ba39b344SChristian Marangi 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1274bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
12755bacd778SLABBE Corentin 	}
127654139cf3SJoao Pinto }
1277d0225e7dSAlexandre TORGUE 
stmmac_display_tx_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1278ba39b344SChristian Marangi static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1279ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf)
128071fedb01SJoao Pinto {
1281ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1282bfaf91caSJoakim Zhang 	unsigned int desc_size;
128371fedb01SJoao Pinto 	void *head_tx;
1284ce736788SJoao Pinto 	u32 queue;
1285ce736788SJoao Pinto 
1286ce736788SJoao Pinto 	/* Display TX rings */
1287ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1288ba39b344SChristian Marangi 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1289ce736788SJoao Pinto 
1290ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
129171fedb01SJoao Pinto 
1292bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
1293ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
1294bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1295bfaf91caSJoakim Zhang 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1296579a25a8SJose Abreu 			head_tx = (void *)tx_q->dma_entx;
1297bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_edesc);
1298bfaf91caSJoakim Zhang 		} else {
1299ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
1300bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1301bfaf91caSJoakim Zhang 		}
130271fedb01SJoao Pinto 
1303ba39b344SChristian Marangi 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1304bfaf91caSJoakim Zhang 				    tx_q->dma_tx_phy, desc_size);
1305c24602efSGiuseppe CAVALLARO 	}
1306ce736788SJoao Pinto }
1307c24602efSGiuseppe CAVALLARO 
/* Dump all descriptor rings: RX queues first, then TX queues. */
static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	stmmac_display_rx_rings(priv, dma_conf);
	stmmac_display_tx_rings(priv, dma_conf);
}
131771fedb01SJoao Pinto 
stmmac_set_bfsize(int mtu,int bufsize)1318286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1319286a8372SGiuseppe CAVALLARO {
1320286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1321286a8372SGiuseppe CAVALLARO 
1322b2f3a481SJose Abreu 	if (mtu >= BUF_SIZE_8KiB)
1323b2f3a481SJose Abreu 		ret = BUF_SIZE_16KiB;
1324b2f3a481SJose Abreu 	else if (mtu >= BUF_SIZE_4KiB)
1325286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1326286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1327286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1328d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1329286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1330286a8372SGiuseppe CAVALLARO 	else
1331d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1332286a8372SGiuseppe CAVALLARO 
1333286a8372SGiuseppe CAVALLARO 	return ret;
1334286a8372SGiuseppe CAVALLARO }
1335286a8372SGiuseppe CAVALLARO 
133632ceabcaSGiuseppe CAVALLARO /**
133771fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
133832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
1339ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
134054139cf3SJoao Pinto  * @queue: RX queue index
134171fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
134232ceabcaSGiuseppe CAVALLARO  * in case of both basic and extended descriptors are used.
134332ceabcaSGiuseppe CAVALLARO  */
stmmac_clear_rx_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1344ba39b344SChristian Marangi static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1345ba39b344SChristian Marangi 					struct stmmac_dma_conf *dma_conf,
1346ba39b344SChristian Marangi 					u32 queue)
1347c24602efSGiuseppe CAVALLARO {
1348ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
13495bacd778SLABBE Corentin 	int i;
1350c24602efSGiuseppe CAVALLARO 
135171fedb01SJoao Pinto 	/* Clear the RX descriptors */
1352ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++)
13535bacd778SLABBE Corentin 		if (priv->extend_desc)
135442de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
13555bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1356ba39b344SChristian Marangi 					(i == dma_conf->dma_rx_size - 1),
1357ba39b344SChristian Marangi 					dma_conf->dma_buf_sz);
13585bacd778SLABBE Corentin 		else
135942de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
13605bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1361ba39b344SChristian Marangi 					(i == dma_conf->dma_rx_size - 1),
1362ba39b344SChristian Marangi 					dma_conf->dma_buf_sz);
136371fedb01SJoao Pinto }
136471fedb01SJoao Pinto 
136571fedb01SJoao Pinto /**
136671fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
136771fedb01SJoao Pinto  * @priv: driver private structure
1368ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1369ce736788SJoao Pinto  * @queue: TX queue index.
137071fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
137171fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
137271fedb01SJoao Pinto  */
stmmac_clear_tx_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1373ba39b344SChristian Marangi static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1374ba39b344SChristian Marangi 					struct stmmac_dma_conf *dma_conf,
1375ba39b344SChristian Marangi 					u32 queue)
137671fedb01SJoao Pinto {
1377ba39b344SChristian Marangi 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
137871fedb01SJoao Pinto 	int i;
137971fedb01SJoao Pinto 
138071fedb01SJoao Pinto 	/* Clear the TX descriptors */
1381ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1382ba39b344SChristian Marangi 		int last = (i == (dma_conf->dma_tx_size - 1));
1383579a25a8SJose Abreu 		struct dma_desc *p;
1384579a25a8SJose Abreu 
13855bacd778SLABBE Corentin 		if (priv->extend_desc)
1386579a25a8SJose Abreu 			p = &tx_q->dma_etx[i].basic;
1387579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1388579a25a8SJose Abreu 			p = &tx_q->dma_entx[i].basic;
13895bacd778SLABBE Corentin 		else
1390579a25a8SJose Abreu 			p = &tx_q->dma_tx[i];
1391579a25a8SJose Abreu 
1392579a25a8SJose Abreu 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1393579a25a8SJose Abreu 	}
1394c24602efSGiuseppe CAVALLARO }
1395c24602efSGiuseppe CAVALLARO 
1396732fdf0eSGiuseppe CAVALLARO /**
139771fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
139871fedb01SJoao Pinto  * @priv: driver private structure
1399ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
140071fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
140171fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
140271fedb01SJoao Pinto  */
stmmac_clear_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1403ba39b344SChristian Marangi static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1404ba39b344SChristian Marangi 				     struct stmmac_dma_conf *dma_conf)
140571fedb01SJoao Pinto {
140654139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1407ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
140854139cf3SJoao Pinto 	u32 queue;
140954139cf3SJoao Pinto 
141071fedb01SJoao Pinto 	/* Clear the RX descriptors */
141154139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
1412ba39b344SChristian Marangi 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
141371fedb01SJoao Pinto 
141471fedb01SJoao Pinto 	/* Clear the TX descriptors */
1415ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1416ba39b344SChristian Marangi 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
141771fedb01SJoao Pinto }
141871fedb01SJoao Pinto 
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag (NOTE(review): not used in this path — the pool
 *         allocation below always uses GFP_ATOMIC; confirm intentional)
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* Restrict pages to the 32-bit zone when the DMA engine cannot
	 * address wider than 32 bits.
	 */
	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	/* Allocate the primary page only if this slot doesn't have one yet */
	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	/* With split-header (sph) enabled, a second page receives the
	 * payload; program its DMA address into the descriptor.
	 */
	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	/* 16KiB buffers need the extra desc3 setup on some descriptor modes */
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
1469c24602efSGiuseppe CAVALLARO 
147071fedb01SJoao Pinto /**
147171fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
147271fedb01SJoao Pinto  * @priv: private structure
1473ba39b344SChristian Marangi  * @rx_q: RX queue
147471fedb01SJoao Pinto  * @i: buffer index.
147571fedb01SJoao Pinto  */
stmmac_free_rx_buffer(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,int i)1476ba39b344SChristian Marangi static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1477ba39b344SChristian Marangi 				  struct stmmac_rx_queue *rx_q,
1478ba39b344SChristian Marangi 				  int i)
147956329137SBartlomiej Zolnierkiewicz {
14802af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
148154139cf3SJoao Pinto 
14822af6106aSJose Abreu 	if (buf->page)
1483458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
14842af6106aSJose Abreu 	buf->page = NULL;
148567afd6d1SJose Abreu 
148667afd6d1SJose Abreu 	if (buf->sec_page)
1487458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
148867afd6d1SJose Abreu 	buf->sec_page = NULL;
148956329137SBartlomiej Zolnierkiewicz }
149056329137SBartlomiej Zolnierkiewicz 
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 * Description: unmaps the DMA buffer of one TX slot and releases whatever
 * backs it (skb, XDP frame, or XSK frame accounting), then resets the
 * per-slot bookkeeping.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	/* XDP_TX buffers were mapped by the RX page pool, not here, so
	 * skip the unmap for them.
	 */
	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Return XDP frames (both TX and NDO redirect) to their origin */
	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	/* XSK zero-copy frames are completed in bulk later; just count */
	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
153771fedb01SJoao Pinto 
153871fedb01SJoao Pinto /**
15394298255fSOng Boon Leong  * dma_free_rx_skbufs - free RX dma buffers
15404298255fSOng Boon Leong  * @priv: private structure
1541ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
15424298255fSOng Boon Leong  * @queue: RX queue index
15434298255fSOng Boon Leong  */
dma_free_rx_skbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1544ba39b344SChristian Marangi static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1545ba39b344SChristian Marangi 			       struct stmmac_dma_conf *dma_conf,
1546ba39b344SChristian Marangi 			       u32 queue)
15474298255fSOng Boon Leong {
1548ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
15494298255fSOng Boon Leong 	int i;
15504298255fSOng Boon Leong 
1551ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1552ba39b344SChristian Marangi 		stmmac_free_rx_buffer(priv, rx_q, i);
15534298255fSOng Boon Leong }
15544298255fSOng Boon Leong 
/* Populate every RX descriptor of one queue with a fresh page-pool buffer.
 * Returns 0 on success or a negative errno from stmmac_init_rx_buffers().
 */
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
				   struct stmmac_dma_conf *dma_conf,
				   u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int ret, i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		/* Basic and extended rings store descriptors differently */
		struct dma_desc *p = priv->extend_desc ?
				     &rx_q->dma_erx[i].basic :
				     &rx_q->dma_rx[i];

		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}
15814298255fSOng Boon Leong 
15824298255fSOng Boon Leong /**
1583bba2556eSOng Boon Leong  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1584bba2556eSOng Boon Leong  * @priv: private structure
1585ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1586bba2556eSOng Boon Leong  * @queue: RX queue index
1587bba2556eSOng Boon Leong  */
dma_free_rx_xskbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1588ba39b344SChristian Marangi static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1589ba39b344SChristian Marangi 				struct stmmac_dma_conf *dma_conf,
1590ba39b344SChristian Marangi 				u32 queue)
1591bba2556eSOng Boon Leong {
1592ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1593bba2556eSOng Boon Leong 	int i;
1594bba2556eSOng Boon Leong 
1595ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1596bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1597bba2556eSOng Boon Leong 
1598bba2556eSOng Boon Leong 		if (!buf->xdp)
1599bba2556eSOng Boon Leong 			continue;
1600bba2556eSOng Boon Leong 
1601bba2556eSOng Boon Leong 		xsk_buff_free(buf->xdp);
1602bba2556eSOng Boon Leong 		buf->xdp = NULL;
1603bba2556eSOng Boon Leong 	}
1604bba2556eSOng Boon Leong }
1605bba2556eSOng Boon Leong 
/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: fills every RX descriptor of one queue with a zero-copy
 * buffer taken from the queue's XSK pool and programs its DMA address.
 * Return value: 0 on success, -ENOMEM when the pool runs out of buffers.
 */
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure no size violations.
	 */
	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		/* Basic and extended rings store descriptors differently */
		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}
1642bba2556eSOng Boon Leong 
/* Return the XSK buffer pool bound to @queue, or NULL unless XDP is
 * enabled and the queue runs in AF_XDP zero-copy mode.
 */
static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		return xsk_get_pool_from_qid(priv->dev, queue);

	return NULL;
}
1650bba2556eSOng Boon Leong 
16519c63faaaSJoakim Zhang /**
1652de0b90e5SOng Boon Leong  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1653de0b90e5SOng Boon Leong  * @priv: driver private structure
1654ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1655de0b90e5SOng Boon Leong  * @queue: RX queue index
16565bacd778SLABBE Corentin  * @flags: gfp flag.
165771fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
16585bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1659286a8372SGiuseppe CAVALLARO  * modes.
16607ac6653aSJeff Kirsher  */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	/* Start from a clean descriptor ring before attaching buffers */
	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Drop any previously registered memory model so the queue can be
	 * re-initialized below with either XSK or page_pool backing.
	 */
	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	/* Non-NULL only when XDP zero-copy is enabled for this queue */
	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 0);
	}

	return 0;
}
1720de0b90e5SOng Boon Leong 
static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	/* Unwind every queue set up so far, including the one that failed
	 * part-way through (queue must stay signed for this to terminate).
	 */
	for (; queue >= 0; queue--) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;
	}

	return ret;
}
175971fedb01SJoao Pinto 
176071fedb01SJoao Pinto /**
1761de0b90e5SOng Boon Leong  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1762de0b90e5SOng Boon Leong  * @priv: driver private structure
1763ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1764de0b90e5SOng Boon Leong  * @queue: TX queue index
176571fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
176671fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
176771fedb01SJoao Pinto  * modes.
176871fedb01SJoao Pinto  */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses.
	 * Note: with TBS available (and no extended descriptors) the ring is
	 * deliberately not chain-initialized here.
	 */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	/* Non-NULL only when XDP zero-copy is enabled for this queue */
	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		/* Pick the basic descriptor view regardless of which of the
		 * three ring layouts (extended / TBS / basic) is in use.
		 */
		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		/* Reset the per-descriptor software bookkeeping */
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}
18157ac6653aSJeff Kirsher 
init_dma_tx_desc_rings(struct net_device * dev,struct stmmac_dma_conf * dma_conf)1816ba39b344SChristian Marangi static int init_dma_tx_desc_rings(struct net_device *dev,
1817ba39b344SChristian Marangi 				  struct stmmac_dma_conf *dma_conf)
1818de0b90e5SOng Boon Leong {
1819de0b90e5SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
1820de0b90e5SOng Boon Leong 	u32 tx_queue_cnt;
1821de0b90e5SOng Boon Leong 	u32 queue;
1822de0b90e5SOng Boon Leong 
1823de0b90e5SOng Boon Leong 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1824de0b90e5SOng Boon Leong 
1825de0b90e5SOng Boon Leong 	for (queue = 0; queue < tx_queue_cnt; queue++)
1826ba39b344SChristian Marangi 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1827de0b90e5SOng Boon Leong 
182871fedb01SJoao Pinto 	return 0;
182971fedb01SJoao Pinto }
183071fedb01SJoao Pinto 
183171fedb01SJoao Pinto /**
183271fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
183371fedb01SJoao Pinto  * @dev: net device structure
1834ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
183571fedb01SJoao Pinto  * @flags: gfp flag.
183671fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
183771fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
183871fedb01SJoao Pinto  * modes.
183971fedb01SJoao Pinto  */
static int init_dma_desc_rings(struct net_device *dev,
			       struct stmmac_dma_conf *dma_conf,
			       gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = init_dma_rx_desc_rings(dev, dma_conf, flags);

	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev, dma_conf);

	stmmac_clear_descriptors(priv, dma_conf);

	/* Dump the rings when HW debugging messages are enabled */
	if (netif_msg_hw(priv))
		stmmac_display_rings(priv, dma_conf);

	return ret;
}
18607ac6653aSJeff Kirsher 
186171fedb01SJoao Pinto /**
186271fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
186371fedb01SJoao Pinto  * @priv: private structure
1864ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1865ce736788SJoao Pinto  * @queue: TX queue index
186671fedb01SJoao Pinto  */
static void dma_free_tx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int entry;

	/* Reset the XSK completion counter before releasing the buffers */
	tx_q->xsk_frames_done = 0;

	for (entry = 0; entry < dma_conf->dma_tx_size; entry++)
		stmmac_free_tx_buffer(priv, dma_conf, queue, entry);

	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
		/* Report completed frames back to the pool and detach it */
		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
		tx_q->xsk_frames_done = 0;
		tx_q->xsk_pool = NULL;
	}
}
18857ac6653aSJeff Kirsher 
1886732fdf0eSGiuseppe CAVALLARO /**
18874ec236c7SFugang Duan  * stmmac_free_tx_skbufs - free TX skb buffers
18884ec236c7SFugang Duan  * @priv: private structure
18894ec236c7SFugang Duan  */
stmmac_free_tx_skbufs(struct stmmac_priv * priv)18904ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
18914ec236c7SFugang Duan {
18924ec236c7SFugang Duan 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
18934ec236c7SFugang Duan 	u32 queue;
18944ec236c7SFugang Duan 
18954ec236c7SFugang Duan 	for (queue = 0; queue < tx_queue_cnt; queue++)
1896ba39b344SChristian Marangi 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
18974ec236c7SFugang Duan }
18984ec236c7SFugang Duan 
18994ec236c7SFugang Duan /**
1900da5ec7f2SOng Boon Leong  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
190154139cf3SJoao Pinto  * @priv: private structure
1902ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1903da5ec7f2SOng Boon Leong  * @queue: RX queue index
190454139cf3SJoao Pinto  */
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

	/* Release the DMA RX socket buffers */
	if (rx_q->xsk_pool)
		dma_free_rx_xskbufs(priv, dma_conf, queue);
	else
		dma_free_rx_skbufs(priv, dma_conf, queue);

	rx_q->buf_alloc_num = 0;
	rx_q->xsk_pool = NULL;

	/* Free DMA regions of consistent memory previously allocated */
	if (priv->extend_desc)
		dma_free_coherent(priv->device,
				  dma_conf->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  rx_q->dma_erx, rx_q->dma_rx_phy);
	else
		dma_free_coherent(priv->device,
				  dma_conf->dma_rx_size *
				  sizeof(struct dma_desc),
				  rx_q->dma_rx, rx_q->dma_rx_phy);

	/* Unregister only if registration actually happened */
	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
		xdp_rxq_info_unreg(&rx_q->xdp_rxq);

	kfree(rx_q->buf_pool);
	if (rx_q->page_pool)
		page_pool_destroy(rx_q->page_pool);
}
1937da5ec7f2SOng Boon Leong 
free_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1938ba39b344SChristian Marangi static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1939ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
1940da5ec7f2SOng Boon Leong {
1941da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
1942da5ec7f2SOng Boon Leong 	u32 queue;
1943da5ec7f2SOng Boon Leong 
1944da5ec7f2SOng Boon Leong 	/* Free RX queue resources */
1945da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++)
1946ba39b344SChristian Marangi 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
194754139cf3SJoao Pinto }
194854139cf3SJoao Pinto 
194954139cf3SJoao Pinto /**
1950da5ec7f2SOng Boon Leong  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1951ce736788SJoao Pinto  * @priv: private structure
1952ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1953da5ec7f2SOng Boon Leong  * @queue: TX queue index
1954ce736788SJoao Pinto  */
static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	size_t desc_size;
	void *ring;

	/* Release the DMA TX socket buffers */
	dma_free_tx_skbufs(priv, dma_conf, queue);

	/* The ring was allocated with one of three descriptor layouts;
	 * pick the matching element size and base address.
	 */
	if (priv->extend_desc) {
		desc_size = sizeof(struct dma_extended_desc);
		ring = tx_q->dma_etx;
	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
		desc_size = sizeof(struct dma_edesc);
		ring = tx_q->dma_entx;
	} else {
		desc_size = sizeof(struct dma_desc);
		ring = tx_q->dma_tx;
	}

	dma_free_coherent(priv->device, desc_size * dma_conf->dma_tx_size,
			  ring, tx_q->dma_tx_phy);

	kfree(tx_q->tx_skbuff_dma);
	kfree(tx_q->tx_skbuff);
}
1984da5ec7f2SOng Boon Leong 
free_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1985ba39b344SChristian Marangi static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1986ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
1987da5ec7f2SOng Boon Leong {
1988da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
1989da5ec7f2SOng Boon Leong 	u32 queue;
1990da5ec7f2SOng Boon Leong 
1991da5ec7f2SOng Boon Leong 	/* Free TX queue resources */
1992da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++)
1993ba39b344SChristian Marangi 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1994ce736788SJoao Pinto }
1995ce736788SJoao Pinto 
1996ce736788SJoao Pinto /**
1997da5ec7f2SOng Boon Leong  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1998732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1999ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
2000da5ec7f2SOng Boon Leong  * @queue: RX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX path of the given queue:
 * the page pool, the buffer bookkeeping array, the descriptor ring and the
 * XDP rxq info registration.
2005732fdf0eSGiuseppe CAVALLARO  */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int num_pages;
	unsigned int napi_id;
	int ret;

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;

	/* Size the page pool: one ring entry per buffer, with page order
	 * large enough to hold dma_buf_sz bytes.
	 */
	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = dma_conf->dma_rx_size;
	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
	pp_params.order = ilog2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	/* XDP may retransmit a buffer, so it needs bidirectional mapping */
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	pp_params.offset = stmmac_rx_offset(priv);
	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);

	rx_q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_q->page_pool)) {
		ret = PTR_ERR(rx_q->page_pool);
		rx_q->page_pool = NULL;
		return ret;
	}

	/* NOTE(review): the early returns below leave earlier allocations in
	 * place; presumably the caller's error path frees them — see
	 * alloc_dma_rx_desc_resources()/__free_dma_rx_desc_resources().
	 */
	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
				 sizeof(*rx_q->buf_pool),
				 GFP_KERNEL);
	if (!rx_q->buf_pool)
		return -ENOMEM;

	/* Descriptor ring itself, extended or basic layout */
	if (priv->extend_desc) {
		rx_q->dma_erx = dma_alloc_coherent(priv->device,
						   dma_conf->dma_rx_size *
						   sizeof(struct dma_extended_desc),
						   &rx_q->dma_rx_phy,
						   GFP_KERNEL);
		if (!rx_q->dma_erx)
			return -ENOMEM;

	} else {
		rx_q->dma_rx = dma_alloc_coherent(priv->device,
						  dma_conf->dma_rx_size *
						  sizeof(struct dma_desc),
						  &rx_q->dma_rx_phy,
						  GFP_KERNEL);
		if (!rx_q->dma_rx)
			return -ENOMEM;
	}

	/* Zero-copy queues are serviced by the combined rx/tx NAPI context */
	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		napi_id = ch->rxtx_napi.napi_id;
	else
		napi_id = ch->rx_napi.napi_id;

	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
			       rx_q->queue_index,
			       napi_id);
	if (ret) {
		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		return -EINVAL;
	}

	return 0;
}
2079da5ec7f2SOng Boon Leong 
alloc_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2080ba39b344SChristian Marangi static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2081ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
2082da5ec7f2SOng Boon Leong {
2083da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
2084da5ec7f2SOng Boon Leong 	u32 queue;
2085da5ec7f2SOng Boon Leong 	int ret;
2086da5ec7f2SOng Boon Leong 
2087da5ec7f2SOng Boon Leong 	/* RX queues buffers and DMA */
2088da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++) {
2089ba39b344SChristian Marangi 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2090da5ec7f2SOng Boon Leong 		if (ret)
2091da5ec7f2SOng Boon Leong 			goto err_dma;
209254139cf3SJoao Pinto 	}
209371fedb01SJoao Pinto 
209471fedb01SJoao Pinto 	return 0;
209571fedb01SJoao Pinto 
209671fedb01SJoao Pinto err_dma:
2097ba39b344SChristian Marangi 	free_dma_rx_desc_resources(priv, dma_conf);
209854139cf3SJoao Pinto 
209971fedb01SJoao Pinto 	return ret;
210071fedb01SJoao Pinto }
210171fedb01SJoao Pinto 
210271fedb01SJoao Pinto /**
2103da5ec7f2SOng Boon Leong  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
210471fedb01SJoao Pinto  * @priv: private structure
2105ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
2106da5ec7f2SOng Boon Leong  * @queue: TX queue index
 * Description: according to which descriptor can be used (extend, TBS or
 * basic) this function allocates the resources for the TX path of the
 * given queue: the per-descriptor bookkeeping arrays and the descriptor
 * ring itself.
211171fedb01SJoao Pinto  */
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	size_t size;
	void *addr;

	tx_q->queue_index = queue;
	tx_q->priv_data = priv;

	/* Per-descriptor software bookkeeping arrays */
	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
				      sizeof(*tx_q->tx_skbuff_dma),
				      GFP_KERNEL);
	if (!tx_q->tx_skbuff_dma)
		return -ENOMEM;

	/* NOTE(review): early returns leave earlier allocations in place;
	 * presumably the caller's err_dma path frees them — see
	 * alloc_dma_tx_desc_resources()/__free_dma_tx_desc_resources().
	 */
	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
				  sizeof(struct sk_buff *),
				  GFP_KERNEL);
	if (!tx_q->tx_skbuff)
		return -ENOMEM;

	/* Element size depends on the descriptor layout in use:
	 * extended, TBS (enhanced) or basic.
	 */
	if (priv->extend_desc)
		size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		size = sizeof(struct dma_edesc);
	else
		size = sizeof(struct dma_desc);

	size *= dma_conf->dma_tx_size;

	addr = dma_alloc_coherent(priv->device, size,
				  &tx_q->dma_tx_phy, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	/* Store the ring base in the pointer matching its layout */
	if (priv->extend_desc)
		tx_q->dma_etx = addr;
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_q->dma_entx = addr;
	else
		tx_q->dma_tx = addr;

	return 0;
}
2158da5ec7f2SOng Boon Leong 
alloc_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2159ba39b344SChristian Marangi static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2160ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
2161da5ec7f2SOng Boon Leong {
2162da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
2163da5ec7f2SOng Boon Leong 	u32 queue;
2164da5ec7f2SOng Boon Leong 	int ret;
2165da5ec7f2SOng Boon Leong 
2166da5ec7f2SOng Boon Leong 	/* TX queues buffers and DMA */
2167da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++) {
2168ba39b344SChristian Marangi 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2169da5ec7f2SOng Boon Leong 		if (ret)
2170da5ec7f2SOng Boon Leong 			goto err_dma;
21715bacd778SLABBE Corentin 	}
21725bacd778SLABBE Corentin 
21735bacd778SLABBE Corentin 	return 0;
21745bacd778SLABBE Corentin 
217562242260SChristophe Jaillet err_dma:
2176ba39b344SChristian Marangi 	free_dma_tx_desc_resources(priv, dma_conf);
217709f8d696SSrinivas Kandagatla 	return ret;
21785bacd778SLABBE Corentin }
217909f8d696SSrinivas Kandagatla 
218071fedb01SJoao Pinto /**
218171fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
218271fedb01SJoao Pinto  * @priv: private structure
2183ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
218471fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
218571fedb01SJoao Pinto  * this function allocates the resources for TX and RX paths. In case of
218671fedb01SJoao Pinto  * reception, for example, it pre-allocated the RX socket buffer in order to
218771fedb01SJoao Pinto  * allow zero-copy mechanism.
218871fedb01SJoao Pinto  */
static int alloc_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	int ret;

	/* RX Allocation */
	ret = alloc_dma_rx_desc_resources(priv, dma_conf);
	if (ret)
		return ret;

	return alloc_dma_tx_desc_resources(priv, dma_conf);
}
220271fedb01SJoao Pinto 
220371fedb01SJoao Pinto /**
220471fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
220571fedb01SJoao Pinto  * @priv: private structure
2206ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
220771fedb01SJoao Pinto  */
static void free_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	/* Release the DMA TX socket buffers first ... */
	free_dma_tx_desc_resources(priv, dma_conf);

	/* ... and only then the RX side, so that any pending XDP_TX
	 * buffers have been returned before their backing goes away.
	 */
	free_dma_rx_desc_resources(priv, dma_conf);
}
221971fedb01SJoao Pinto 
222071fedb01SJoao Pinto /**
22219eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
22229eb12474Sjpinto  *  @priv: driver private structure
22239eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
22249eb12474Sjpinto  */
stmmac_mac_enable_rx_queues(struct stmmac_priv * priv)22259eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
22269eb12474Sjpinto {
22274f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
22284f6046f5SJoao Pinto 	int queue;
22294f6046f5SJoao Pinto 	u8 mode;
22309eb12474Sjpinto 
22314f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
22324f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2233c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
22344f6046f5SJoao Pinto 	}
22359eb12474Sjpinto }
22369eb12474Sjpinto 
22379eb12474Sjpinto /**
2238ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
2239ae4f0d46SJoao Pinto  * @priv: driver private structure
2240ae4f0d46SJoao Pinto  * @chan: RX channel index
2241ae4f0d46SJoao Pinto  * Description:
2242ae4f0d46SJoao Pinto  * This starts a RX DMA channel
2243ae4f0d46SJoao Pinto  */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	/* Kick the RX DMA engine for this channel through the HW ops */
	stmmac_start_rx(priv, priv->ioaddr, chan);
}
2249ae4f0d46SJoao Pinto 
2250ae4f0d46SJoao Pinto /**
2251ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
2252ae4f0d46SJoao Pinto  * @priv: driver private structure
2253ae4f0d46SJoao Pinto  * @chan: TX channel index
2254ae4f0d46SJoao Pinto  * Description:
2255ae4f0d46SJoao Pinto  * This starts a TX DMA channel
2256ae4f0d46SJoao Pinto  */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	/* Kick the TX DMA engine for this channel through the HW ops */
	stmmac_start_tx(priv, priv->ioaddr, chan);
}
2262ae4f0d46SJoao Pinto 
2263ae4f0d46SJoao Pinto /**
2264ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
2265ae4f0d46SJoao Pinto  * @priv: driver private structure
2266ae4f0d46SJoao Pinto  * @chan: RX channel index
2267ae4f0d46SJoao Pinto  * Description:
2268ae4f0d46SJoao Pinto  * This stops a RX DMA channel
2269ae4f0d46SJoao Pinto  */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	/* Halt the RX DMA engine for this channel through the HW ops */
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}
2275ae4f0d46SJoao Pinto 
2276ae4f0d46SJoao Pinto /**
2277ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
2278ae4f0d46SJoao Pinto  * @priv: driver private structure
2279ae4f0d46SJoao Pinto  * @chan: TX channel index
2280ae4f0d46SJoao Pinto  * Description:
2281ae4f0d46SJoao Pinto  * This stops a TX DMA channel
2282ae4f0d46SJoao Pinto  */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	/* Halt the TX DMA engine for this channel through the HW ops */
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
2288ae4f0d46SJoao Pinto 
stmmac_enable_all_dma_irq(struct stmmac_priv * priv)2289087a7b94SVincent Whitchurch static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2290087a7b94SVincent Whitchurch {
2291087a7b94SVincent Whitchurch 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2292087a7b94SVincent Whitchurch 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2293087a7b94SVincent Whitchurch 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2294087a7b94SVincent Whitchurch 	u32 chan;
2295087a7b94SVincent Whitchurch 
2296087a7b94SVincent Whitchurch 	for (chan = 0; chan < dma_csr_ch; chan++) {
2297087a7b94SVincent Whitchurch 		struct stmmac_channel *ch = &priv->channel[chan];
2298087a7b94SVincent Whitchurch 		unsigned long flags;
2299087a7b94SVincent Whitchurch 
2300087a7b94SVincent Whitchurch 		spin_lock_irqsave(&ch->lock, flags);
2301087a7b94SVincent Whitchurch 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2302087a7b94SVincent Whitchurch 		spin_unlock_irqrestore(&ch->lock, flags);
2303087a7b94SVincent Whitchurch 	}
2304087a7b94SVincent Whitchurch }
2305087a7b94SVincent Whitchurch 
2306ae4f0d46SJoao Pinto /**
2307ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
2308ae4f0d46SJoao Pinto  * @priv: driver private structure
2309ae4f0d46SJoao Pinto  * Description:
2310ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
2311ae4f0d46SJoao Pinto  */
stmmac_start_all_dma(struct stmmac_priv * priv)2312ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
2313ae4f0d46SJoao Pinto {
2314ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2315ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2316ae4f0d46SJoao Pinto 	u32 chan = 0;
2317ae4f0d46SJoao Pinto 
2318ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2319ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
2320ae4f0d46SJoao Pinto 
2321ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2322ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
2323ae4f0d46SJoao Pinto }
2324ae4f0d46SJoao Pinto 
2325ae4f0d46SJoao Pinto /**
2326ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2327ae4f0d46SJoao Pinto  * @priv: driver private structure
2328ae4f0d46SJoao Pinto  * Description:
2329ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
2330ae4f0d46SJoao Pinto  */
stmmac_stop_all_dma(struct stmmac_priv * priv)2331ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2332ae4f0d46SJoao Pinto {
2333ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2334ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2335ae4f0d46SJoao Pinto 	u32 chan = 0;
2336ae4f0d46SJoao Pinto 
2337ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2338ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
2339ae4f0d46SJoao Pinto 
2340ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2341ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
2342ae4f0d46SJoao Pinto }
2343ae4f0d46SJoao Pinto 
2344ae4f0d46SJoao Pinto /**
23457ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
234632ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
2347732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
2348732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
23497ac6653aSJeff Kirsher  */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	/* If the platform did not declare FIFO sizes, fall back to the
	 * sizes reported in the HW capabilities.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	/* Select threshold vs Store-And-Forward mode:
	 * - forced threshold mode uses the module-level 'tc' for both paths;
	 * - SF mode is used when forced or when TX checksum offload is on;
	 * - otherwise TX uses threshold 'tc' and RX uses SF.
	 */
	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
		u32 buf_size;

		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				rxfifosz, qmode);

		/* When zero-copy AF_XDP is active on this queue, the DMA
		 * buffer size must match the XSK pool's RX frame size
		 * instead of the driver's generic buffer size.
		 */
		if (rx_q->xsk_pool) {
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      chan);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_conf.dma_buf_sz,
					      chan);
		}
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				txfifosz, qmode);
	}
}
24187ac6653aSJeff Kirsher 
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	struct xsk_buff_pool *pool = tx_q->xsk_pool;
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc = NULL;
	struct xdp_desc xdp_desc;
	bool work_done = true;
	u32 tx_set_ic_bit = 0;

	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	/* Never submit more descriptors than the ring has free */
	budget = min(budget, stmmac_tx_avail(priv, queue));

	while (budget-- > 0) {
		dma_addr_t dma_addr;
		bool set_ic;

		/* We are sharing with slow path and stop XSK TX desc submission when
		 * available TX ring is less than threshold.
		 */
		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
		    !netif_carrier_ok(priv->dev)) {
			work_done = false;
			break;
		}

		if (!xsk_tx_peek_desc(pool, &xdp_desc))
			break;

		/* Pick the descriptor matching the layout in use for
		 * this ring (extended, enhanced-TBS or basic).
		 */
		if (likely(priv->extend_desc))
			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			tx_desc = &tx_q->dma_entx[entry].basic;
		else
			tx_desc = tx_q->dma_tx + entry;

		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;

		/* To return XDP buffer to XSK pool, we simply call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
		tx_q->tx_skbuff_dma[entry].buf = 0;
		tx_q->xdpf[entry] = NULL;

		tx_q->tx_skbuff_dma[entry].map_as_page = false;
		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
		tx_q->tx_skbuff_dma[entry].last_segment = true;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		stmmac_set_desc_addr(priv, tx_desc, dma_addr);

		tx_q->tx_count_frames++;

		/* Interrupt coalescing: set the IC bit only on every
		 * tx_coal_frames-th descriptor, never when coalescing
		 * is disabled for this queue.
		 */
		if (!priv->tx_coal_frames[queue])
			set_ic = false;
		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
			set_ic = true;
		else
			set_ic = false;

		if (set_ic) {
			tx_q->tx_count_frames = 0;
			stmmac_set_tx_ic(priv, tx_desc);
			tx_set_ic_bit++;
		}

		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
				       true, priv->mode, true, true,
				       xdp_desc.len);

		stmmac_enable_dma_transmission(priv, priv->ioaddr);

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
		entry = tx_q->cur_tx;
	}
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->napi_syncp);

	/* tx_desc is non-NULL iff at least one descriptor was queued */
	if (tx_desc) {
		stmmac_flush_tx_descriptors(priv, queue);
		xsk_tx_release(pool);
	}

	/* Return true if all of the 3 conditions are met
	 *  a) TX Budget is still available
	 *  b) work_done = true when XSK TX desc peek is empty (no more
	 *     pending XSK TX for transmission)
	 */
	return !!budget && work_done;
}
2518132c32eeSOng Boon Leong 
static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
{
	/* Nothing to bump in Store-And-Forward mode, or once the
	 * threshold 'tc' has already grown past 256.
	 */
	if (priv->xstats.threshold == SF_DMA_MODE || tc > 256)
		return;

	tc += 64;

	if (priv->plat->force_thresh_dma_mode)
		stmmac_set_dma_operation_mode(priv, tc, tc, chan);
	else
		stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
					      chan);

	priv->xstats.threshold = tc;
}
25333a6c12a0SXiaoliang Yang 
25347ac6653aSJeff Kirsher /**
2535732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
253632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
2538ce736788SJoao Pinto  * @queue: TX queue index
2539732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
25407ac6653aSJeff Kirsher  */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, xmits = 0, count = 0;
	u32 tx_packets = 0, tx_errors = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	tx_q->xsk_frames_done = 0;

	entry = tx_q->dirty_tx;

	/* Try to clean all TX complete frame in 1 shot */
	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		struct dma_desc *p;
		int status;

		/* Pick the completion object for this entry's buffer type:
		 * an xdp_frame for XDP_TX/NDO buffers, an skb for regular
		 * traffic, neither for XSK zero-copy buffers.
		 */
		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdpf = tx_q->xdpf[entry];
			skb = NULL;
		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			xdpf = NULL;
			skb = tx_q->tx_skbuff[entry];
		} else {
			xdpf = NULL;
			skb = NULL;
		}

		/* Resolve the descriptor for the ring layout in use */
		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[entry].basic;
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				tx_errors++;
				if (unlikely(status & tx_err_bump_tc))
					stmmac_bump_dma_threshold(priv, queue);
			} else {
				tx_packets++;
			}
			if (skb)
				stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		/* Unmap the DMA buffer unless it belongs to XDP_TX
		 * (those are returned to the XDP pool below instead).
		 */
		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		/* Return XDP frames to their origin: rx-napi recycling for
		 * XDP_TX, regular return path for ndo_xdp_xmit frames.
		 */
		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdp_return_frame(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
			tx_q->xsk_frames_done++;

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			if (likely(skb)) {
				pkts_compl++;
				bytes_compl += skb->len;
				dev_consume_skb_any(skb);
				tx_q->tx_skbuff[entry] = NULL;
			}
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	/* Restart a stopped netdev queue once enough descriptors are free */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if (tx_q->xsk_pool) {
		bool work_done;

		if (tx_q->xsk_frames_done)
			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);

		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
			xsk_set_tx_need_wakeup(tx_q->xsk_pool);

		/* For XSK TX, we try to send as many as possible.
		 * If XSK work done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to reenable TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
		 */
		work_done = stmmac_xdp_xmit_zc(priv, queue,
					       STMMAC_XSK_TX_BUDGET_MAX);
		if (work_done)
			xmits = budget - 1;
		else
			xmits = budget;
	}

	/* Re-arm the EEE (Energy Efficient Ethernet) LPI timer when the
	 * software timer is in charge of entering LPI.
	 */
	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
	    priv->eee_sw_timer_en) {
		if (stmmac_enable_eee_mode(priv))
			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		stmmac_tx_timer_arm(priv, queue);

	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
	u64_stats_inc(&txq_stats->napi.tx_clean);
	u64_stats_update_end(&txq_stats->napi_syncp);

	priv->xstats.tx_errors += tx_errors;

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	/* Combine decisions from TX clean and XSK TX */
	return max(count, xmits);
}
27167ac6653aSJeff Kirsher 
27177ac6653aSJeff Kirsher /**
2718732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
271932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
27205bacd778SLABBE Corentin  * @chan: channel index
27217ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2722732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
27237ac6653aSJeff Kirsher  */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

	/* Stop the netdev queue first so no new frames are submitted
	 * while the channel is being torn down and re-initialized.
	 */
	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	/* Drop everything still queued, reset descriptors and ring
	 * indexes, then re-program and restart the DMA channel.
	 */
	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
	stmmac_reset_tx_queue(priv, chan);
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->xstats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
27417ac6653aSJeff Kirsher 
274232ceabcaSGiuseppe CAVALLARO /**
27436deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
27446deee222SJoao Pinto  *  @priv: driver private structure
27456deee222SJoao Pinto  *  @txmode: TX operating mode
27466deee222SJoao Pinto  *  @rxmode: RX operating mode
27476deee222SJoao Pinto  *  @chan: channel index
27486deee222SJoao Pinto  *  Description: it is used for configuring of the DMA operation mode in
27496deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
27506deee222SJoao Pinto  *  mode.
27516deee222SJoao Pinto  */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	/* FIFO sizes not provided by the platform come from the HW
	 * capability fields instead.
	 */
	if (!rxfifosz)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (!txfifosz)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}
27746deee222SJoao Pinto 
stmmac_safety_feat_interrupt(struct stmmac_priv * priv)27758bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
27768bf993a5SJose Abreu {
277763a550fcSJose Abreu 	int ret;
27788bf993a5SJose Abreu 
2779c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
27808bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2781c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
27828bf993a5SJose Abreu 		stmmac_global_err(priv);
2783c10d4c82SJose Abreu 		return true;
2784c10d4c82SJose Abreu 	}
2785c10d4c82SJose Abreu 
2786c10d4c82SJose Abreu 	return false;
27878bf993a5SJose Abreu }
27888bf993a5SJose Abreu 
/* Check the DMA interrupt status of one channel and kick the matching
 * NAPI instance(s).  Returns the raw interrupt status.
 */
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
	struct stmmac_channel *ch = &priv->channel[chan];
	struct napi_struct *rx_napi;
	struct napi_struct *tx_napi;
	unsigned long flags;
	int status;

	status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
					     &priv->xstats, chan, dir);

	/* A queue bound to an XSK pool is serviced by the combined
	 * rxtx NAPI instance.
	 */
	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if ((status & handle_rx) && chan < priv->plat->rx_queues_to_use &&
	    napi_schedule_prep(rx_napi)) {
		/* Mask the RX DMA irq until the poll routine re-enables it */
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(rx_napi);
	}

	if ((status & handle_tx) && chan < priv->plat->tx_queues_to_use &&
	    napi_schedule_prep(tx_napi)) {
		/* Mask the TX DMA irq until the poll routine re-enables it */
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(tx_napi);
	}

	return status;
}
28238fce3331SJose Abreu 
28246deee222SJoao Pinto /**
2825732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
282632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
282732ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2828732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedule poll method in case of some
2829732fdf0eSGiuseppe CAVALLARO  * work can be done.
283032ceabcaSGiuseppe CAVALLARO  */
stmmac_dma_interrupt(struct stmmac_priv * priv)28317ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
28327ac6653aSJeff Kirsher {
2833d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
28345a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
28355a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
28365a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2837d62a107aSJoao Pinto 	u32 chan;
28388ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
28398ac60ffbSKees Cook 
28408ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
28418ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
28428ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
284368e5cfafSJoao Pinto 
28445a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
28457e1c520cSOng Boon Leong 		status[chan] = stmmac_napi_check(priv, chan,
28467e1c520cSOng Boon Leong 						 DMA_DIR_RXTX);
2847d62a107aSJoao Pinto 
28485a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
28495a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
28507ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
28513a6c12a0SXiaoliang Yang 			stmmac_bump_dma_threshold(priv, chan);
28525a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
28534e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
28547ac6653aSJeff Kirsher 		}
2855d62a107aSJoao Pinto 	}
2856d62a107aSJoao Pinto }
28577ac6653aSJeff Kirsher 
285832ceabcaSGiuseppe CAVALLARO /**
285932ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
286032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
286132ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
286232ceabcaSGiuseppe CAVALLARO  */
stmmac_mmc_setup(struct stmmac_priv * priv)28631c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
28641c901a46SGiuseppe CAVALLARO {
28651c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
28661c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
28671c901a46SGiuseppe CAVALLARO 
28683b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
28694f795b25SGiuseppe CAVALLARO 
28704f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
28713b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
28721c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
28734f795b25SGiuseppe CAVALLARO 	} else
287438ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
28751c901a46SGiuseppe CAVALLARO }
28761c901a46SGiuseppe CAVALLARO 
2877732fdf0eSGiuseppe CAVALLARO /**
2878732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
287932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
288019e30c14SGiuseppe CAVALLARO  * Description:
288119e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2882e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
288319e30c14SGiuseppe CAVALLARO  *  This can be also used to override the value passed through the
288419e30c14SGiuseppe CAVALLARO  *  platform and necessary for old MAC10/100 and GMAC chips.
2885e7434821SGiuseppe CAVALLARO  */
stmmac_get_hw_features(struct stmmac_priv * priv)2886e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2887e7434821SGiuseppe CAVALLARO {
2888a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2889e7434821SGiuseppe CAVALLARO }
2890e7434821SGiuseppe CAVALLARO 
289132ceabcaSGiuseppe CAVALLARO /**
2892732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
289332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
289432ceabcaSGiuseppe CAVALLARO  * Description:
289532ceabcaSGiuseppe CAVALLARO  * it is to verify if the MAC address is valid, in case of failures it
289632ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
289732ceabcaSGiuseppe CAVALLARO  */
stmmac_check_ether_addr(struct stmmac_priv * priv)2898bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2899bfab27a1SGiuseppe CAVALLARO {
29007f9b8fe5SJakub Kicinski 	u8 addr[ETH_ALEN];
29017f9b8fe5SJakub Kicinski 
2902bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
29037f9b8fe5SJakub Kicinski 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
29047f9b8fe5SJakub Kicinski 		if (is_valid_ether_addr(addr))
29057f9b8fe5SJakub Kicinski 			eth_hw_addr_set(priv->dev, addr);
29067f9b8fe5SJakub Kicinski 		else
2907f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2908af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2909bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2910bfab27a1SGiuseppe CAVALLARO 	}
2911c88460b7SHans de Goede }
2912bfab27a1SGiuseppe CAVALLARO 
291332ceabcaSGiuseppe CAVALLARO /**
2914732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
291532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
291632ceabcaSGiuseppe CAVALLARO  * Description:
291732ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
291832ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
291932ceabcaSGiuseppe CAVALLARO  * in case of these are not passed a default is kept for the MAC or GMAC.
292032ceabcaSGiuseppe CAVALLARO  */
stmmac_init_dma_engine(struct stmmac_priv * priv)29210f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
29220f1f88a8SGiuseppe CAVALLARO {
292347f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
292447f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
292524aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
292654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2927ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
292847f2a9ceSJoao Pinto 	u32 chan = 0;
2929c24602efSGiuseppe CAVALLARO 	int atds = 0;
2930495db273SGiuseppe Cavallaro 	int ret = 0;
29310f1f88a8SGiuseppe CAVALLARO 
2932a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2933a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
293489ab75bfSNiklas Cassel 		return -EINVAL;
29350f1f88a8SGiuseppe CAVALLARO 	}
29360f1f88a8SGiuseppe CAVALLARO 
2937c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2938c24602efSGiuseppe CAVALLARO 		atds = 1;
2939c24602efSGiuseppe CAVALLARO 
2940a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2941495db273SGiuseppe Cavallaro 	if (ret) {
2942495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2943495db273SGiuseppe Cavallaro 		return ret;
2944495db273SGiuseppe Cavallaro 	}
2945495db273SGiuseppe Cavallaro 
29467d9e6c5aSJose Abreu 	/* DMA Configuration */
29477d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
29487d9e6c5aSJose Abreu 
29497d9e6c5aSJose Abreu 	if (priv->plat->axi)
29507d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
29517d9e6c5aSJose Abreu 
2952af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2953087a7b94SVincent Whitchurch 	for (chan = 0; chan < dma_csr_ch; chan++) {
2954af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2955087a7b94SVincent Whitchurch 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2956087a7b94SVincent Whitchurch 	}
2957af8f3fb7SWeifeng Voon 
295847f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
295947f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
29608531c808SChristian Marangi 		rx_q = &priv->dma_conf.rx_queue[chan];
296154139cf3SJoao Pinto 
296224aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
296324aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
296447f2a9ceSJoao Pinto 
296554139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2966bba2556eSOng Boon Leong 				     (rx_q->buf_alloc_num *
2967aa042f60SSong, Yoong Siang 				      sizeof(struct dma_desc));
2968a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2969a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
297047f2a9ceSJoao Pinto 	}
297147f2a9ceSJoao Pinto 
297247f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
297347f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
29748531c808SChristian Marangi 		tx_q = &priv->dma_conf.tx_queue[chan];
2975ce736788SJoao Pinto 
297624aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
297724aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2978f748be53SAlexandre TORGUE 
29790431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2980a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2981a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
298247f2a9ceSJoao Pinto 	}
298324aaed0cSJose Abreu 
2984495db273SGiuseppe Cavallaro 	return ret;
29850f1f88a8SGiuseppe CAVALLARO }
29860f1f88a8SGiuseppe CAVALLARO 
/* (Re)arm the TX coalesce sw timer of @queue, if enabled. */
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 coal_timer = priv->tx_coal_timer[queue];

	/* A zero timeout means timer based TX coalescing is disabled */
	if (!coal_timer)
		return;

	hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(coal_timer),
		      HRTIMER_MODE_REL);
}
29998fce3331SJose Abreu 
3000bfab27a1SGiuseppe CAVALLARO /**
3001732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
3002d0ea5cbdSJesse Brandeburg  * @t: data pointer
30039125cdd1SGiuseppe CAVALLARO  * Description:
30049125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
30059125cdd1SGiuseppe CAVALLARO  */
stmmac_tx_timer(struct hrtimer * t)3006d5a05e69SVincent Whitchurch static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
30079125cdd1SGiuseppe CAVALLARO {
3008d5a05e69SVincent Whitchurch 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
30098fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
30108fce3331SJose Abreu 	struct stmmac_channel *ch;
3011132c32eeSOng Boon Leong 	struct napi_struct *napi;
30129125cdd1SGiuseppe CAVALLARO 
30138fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
3014132c32eeSOng Boon Leong 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
30158fce3331SJose Abreu 
3016132c32eeSOng Boon Leong 	if (likely(napi_schedule_prep(napi))) {
3017021bd5e3SJose Abreu 		unsigned long flags;
3018021bd5e3SJose Abreu 
3019021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
3020021bd5e3SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3021021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
3022132c32eeSOng Boon Leong 		__napi_schedule(napi);
3023021bd5e3SJose Abreu 	}
3024d5a05e69SVincent Whitchurch 
3025d5a05e69SVincent Whitchurch 	return HRTIMER_NORESTART;
30269125cdd1SGiuseppe CAVALLARO }
30279125cdd1SGiuseppe CAVALLARO 
30289125cdd1SGiuseppe CAVALLARO /**
3029d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
303032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
30319125cdd1SGiuseppe CAVALLARO  * Description:
3032d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
30339125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
30349125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
30359125cdd1SGiuseppe CAVALLARO  */
stmmac_init_coalesce(struct stmmac_priv * priv)3036d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
30379125cdd1SGiuseppe CAVALLARO {
30388fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3039db2f2842SOng Boon Leong 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
30408fce3331SJose Abreu 	u32 chan;
30418fce3331SJose Abreu 
30428fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
30438531c808SChristian Marangi 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
30448fce3331SJose Abreu 
3045db2f2842SOng Boon Leong 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3046db2f2842SOng Boon Leong 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3047db2f2842SOng Boon Leong 
3048d5a05e69SVincent Whitchurch 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3049d5a05e69SVincent Whitchurch 		tx_q->txtimer.function = stmmac_tx_timer;
30508fce3331SJose Abreu 	}
3051db2f2842SOng Boon Leong 
3052db2f2842SOng Boon Leong 	for (chan = 0; chan < rx_channel_count; chan++)
3053db2f2842SOng Boon Leong 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
30549125cdd1SGiuseppe CAVALLARO }
30559125cdd1SGiuseppe CAVALLARO 
stmmac_set_rings_length(struct stmmac_priv * priv)30564854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
30574854ab99SJoao Pinto {
30584854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
30594854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
30604854ab99SJoao Pinto 	u32 chan;
30614854ab99SJoao Pinto 
30624854ab99SJoao Pinto 	/* set TX ring length */
30634854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
3064a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
30658531c808SChristian Marangi 				       (priv->dma_conf.dma_tx_size - 1), chan);
30664854ab99SJoao Pinto 
30674854ab99SJoao Pinto 	/* set RX ring length */
30684854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
3069a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
30708531c808SChristian Marangi 				       (priv->dma_conf.dma_rx_size - 1), chan);
30714854ab99SJoao Pinto }
30724854ab99SJoao Pinto 
30739125cdd1SGiuseppe CAVALLARO /**
30746a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
30756a3a7193SJoao Pinto  *  @priv: driver private structure
30766a3a7193SJoao Pinto  *  Description: It is used for setting TX queues weight
30776a3a7193SJoao Pinto  */
stmmac_set_tx_queue_weight(struct stmmac_priv * priv)30786a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
30796a3a7193SJoao Pinto {
30806a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
30816a3a7193SJoao Pinto 	u32 weight;
30826a3a7193SJoao Pinto 	u32 queue;
30836a3a7193SJoao Pinto 
30846a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
30856a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
3086c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
30876a3a7193SJoao Pinto 	}
30886a3a7193SJoao Pinto }
30896a3a7193SJoao Pinto 
30906a3a7193SJoao Pinto /**
309119d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
309219d91873SJoao Pinto  *  @priv: driver private structure
309319d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
309419d91873SJoao Pinto  */
stmmac_configure_cbs(struct stmmac_priv * priv)309519d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
309619d91873SJoao Pinto {
309719d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
309819d91873SJoao Pinto 	u32 mode_to_use;
309919d91873SJoao Pinto 	u32 queue;
310019d91873SJoao Pinto 
310144781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
310244781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
310319d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
310419d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
310519d91873SJoao Pinto 			continue;
310619d91873SJoao Pinto 
3107c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
310819d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
310919d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
311019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
311119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
311219d91873SJoao Pinto 				queue);
311319d91873SJoao Pinto 	}
311419d91873SJoao Pinto }
311519d91873SJoao Pinto 
311619d91873SJoao Pinto /**
3117d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3118d43042f4SJoao Pinto  *  @priv: driver private structure
3119d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
3120d43042f4SJoao Pinto  */
stmmac_rx_queue_dma_chan_map(struct stmmac_priv * priv)3121d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3122d43042f4SJoao Pinto {
3123d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3124d43042f4SJoao Pinto 	u32 queue;
3125d43042f4SJoao Pinto 	u32 chan;
3126d43042f4SJoao Pinto 
3127d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3128d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
3129c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3130d43042f4SJoao Pinto 	}
3131d43042f4SJoao Pinto }
3132d43042f4SJoao Pinto 
3133d43042f4SJoao Pinto /**
3134a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3135a8f5102aSJoao Pinto  *  @priv: driver private structure
3136a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
3137a8f5102aSJoao Pinto  */
stmmac_mac_config_rx_queues_prio(struct stmmac_priv * priv)3138a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3139a8f5102aSJoao Pinto {
3140a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3141a8f5102aSJoao Pinto 	u32 queue;
3142a8f5102aSJoao Pinto 	u32 prio;
3143a8f5102aSJoao Pinto 
3144a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3145a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3146a8f5102aSJoao Pinto 			continue;
3147a8f5102aSJoao Pinto 
3148a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
3149c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3150a8f5102aSJoao Pinto 	}
3151a8f5102aSJoao Pinto }
3152a8f5102aSJoao Pinto 
3153a8f5102aSJoao Pinto /**
3154a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3155a8f5102aSJoao Pinto  *  @priv: driver private structure
3156a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
3157a8f5102aSJoao Pinto  */
stmmac_mac_config_tx_queues_prio(struct stmmac_priv * priv)3158a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3159a8f5102aSJoao Pinto {
3160a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3161a8f5102aSJoao Pinto 	u32 queue;
3162a8f5102aSJoao Pinto 	u32 prio;
3163a8f5102aSJoao Pinto 
3164a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
3165a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3166a8f5102aSJoao Pinto 			continue;
3167a8f5102aSJoao Pinto 
3168a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
3169c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3170a8f5102aSJoao Pinto 	}
3171a8f5102aSJoao Pinto }
3172a8f5102aSJoao Pinto 
3173a8f5102aSJoao Pinto /**
3174abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3175abe80fdcSJoao Pinto  *  @priv: driver private structure
3176abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
3177abe80fdcSJoao Pinto  */
stmmac_mac_config_rx_queues_routing(struct stmmac_priv * priv)3178abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3179abe80fdcSJoao Pinto {
3180abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3181abe80fdcSJoao Pinto 	u32 queue;
3182abe80fdcSJoao Pinto 	u8 packet;
3183abe80fdcSJoao Pinto 
3184abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3185abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
3186abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3187abe80fdcSJoao Pinto 			continue;
3188abe80fdcSJoao Pinto 
3189abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3190c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3191abe80fdcSJoao Pinto 	}
3192abe80fdcSJoao Pinto }
3193abe80fdcSJoao Pinto 
stmmac_mac_config_rss(struct stmmac_priv * priv)319476067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
319576067459SJose Abreu {
319676067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
319776067459SJose Abreu 		priv->rss.enable = false;
319876067459SJose Abreu 		return;
319976067459SJose Abreu 	}
320076067459SJose Abreu 
320176067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
320276067459SJose Abreu 		priv->rss.enable = true;
320376067459SJose Abreu 	else
320476067459SJose Abreu 		priv->rss.enable = false;
320576067459SJose Abreu 
320676067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
320776067459SJose Abreu 			     priv->plat->rx_queues_to_use);
320876067459SJose Abreu }
320976067459SJose Abreu 
3210abe80fdcSJoao Pinto /**
3211d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
3212d0a9c9f9SJoao Pinto  *  @priv: driver private structure
3213d0a9c9f9SJoao Pinto  *  Description: It is used for configuring MTL
3214d0a9c9f9SJoao Pinto  */
stmmac_mtl_configuration(struct stmmac_priv * priv)3215d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3216d0a9c9f9SJoao Pinto {
3217d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3218d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3219d0a9c9f9SJoao Pinto 
3220c10d4c82SJose Abreu 	if (tx_queues_count > 1)
32216a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
32226a3a7193SJoao Pinto 
3223d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
3224c10d4c82SJose Abreu 	if (rx_queues_count > 1)
3225c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3226d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
3227d0a9c9f9SJoao Pinto 
3228d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
3229c10d4c82SJose Abreu 	if (tx_queues_count > 1)
3230c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3231d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
3232d0a9c9f9SJoao Pinto 
323319d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
3234c10d4c82SJose Abreu 	if (tx_queues_count > 1)
323519d91873SJoao Pinto 		stmmac_configure_cbs(priv);
323619d91873SJoao Pinto 
3237d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
3238d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
3239d43042f4SJoao Pinto 
3240d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
3241d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
32426deee222SJoao Pinto 
3243a8f5102aSJoao Pinto 	/* Set RX priorities */
3244c10d4c82SJose Abreu 	if (rx_queues_count > 1)
3245a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
3246a8f5102aSJoao Pinto 
3247a8f5102aSJoao Pinto 	/* Set TX priorities */
3248c10d4c82SJose Abreu 	if (tx_queues_count > 1)
3249a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
3250abe80fdcSJoao Pinto 
3251abe80fdcSJoao Pinto 	/* Set RX routing */
3252c10d4c82SJose Abreu 	if (rx_queues_count > 1)
3253abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
325476067459SJose Abreu 
325576067459SJose Abreu 	/* Receive Side Scaling */
325676067459SJose Abreu 	if (rx_queues_count > 1)
325776067459SJose Abreu 		stmmac_mac_config_rss(priv);
3258d0a9c9f9SJoao Pinto }
3259d0a9c9f9SJoao Pinto 
stmmac_safety_feat_configuration(struct stmmac_priv * priv)32608bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
32618bf993a5SJose Abreu {
3262c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
32638bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
32645ac712dcSWong Vee Khee 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
32655ac712dcSWong Vee Khee 					  priv->plat->safety_feat_cfg);
32668bf993a5SJose Abreu 	} else {
32678bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
32688bf993a5SJose Abreu 	}
32698bf993a5SJose Abreu }
32708bf993a5SJose Abreu 
stmmac_fpe_start_wq(struct stmmac_priv * priv)32715a558611SOng Boon Leong static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
32725a558611SOng Boon Leong {
32735a558611SOng Boon Leong 	char *name;
32745a558611SOng Boon Leong 
32755a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3276db7c691dSMohammad Athari Bin Ismail 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
32775a558611SOng Boon Leong 
32785a558611SOng Boon Leong 	name = priv->wq_name;
32795a558611SOng Boon Leong 	sprintf(name, "%s-fpe", priv->dev->name);
32805a558611SOng Boon Leong 
32815a558611SOng Boon Leong 	priv->fpe_wq = create_singlethread_workqueue(name);
32825a558611SOng Boon Leong 	if (!priv->fpe_wq) {
32835a558611SOng Boon Leong 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
32845a558611SOng Boon Leong 
32855a558611SOng Boon Leong 		return -ENOMEM;
32865a558611SOng Boon Leong 	}
32875a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue start");
32885a558611SOng Boon Leong 
32895a558611SOng Boon Leong 	return 0;
32905a558611SOng Boon Leong }
32915a558611SOng Boon Leong 
3292d0a9c9f9SJoao Pinto /**
3293732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
3294523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
32950735e639SMohammad Athari Bin Ismail  *  @ptp_register: register PTP if set
3296523f11b5SSrinivas Kandagatla  *  Description:
3297732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state because the
3298732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
3299732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers). The DMA is ready to start receiving and
3300732fdf0eSGiuseppe CAVALLARO  *  transmitting.
3301523f11b5SSrinivas Kandagatla  *  Return value:
3302523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3303523f11b5SSrinivas Kandagatla  *  file on failure.
3304523f11b5SSrinivas Kandagatla  */
static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	bool sph_en;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			/* Unsupported platform speed: fall back to 0 so the
			 * PCS/AN path below programs a neutral value.
			 */
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* A zero return here means RX IPC checksum offload could not be
	 * enabled, so reflect that in both the plat config and hw state.
	 */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* The PTP reference clock is only taken on the ptp_register path;
	 * stmmac_hw_teardown() is the counterpart that releases it.
	 * A failure here is non-fatal: PTP just runs degraded/not at all.
	 */
	if (ptp_register) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev,
				    "failed to enable PTP reference clock: %pe\n",
				    ERR_PTR(ret));
	}

	ret = stmmac_init_ptp(priv);
	if (ret == -EOPNOTSUPP)
		netdev_info(priv->dev, "PTP not supported by HW\n");
	else if (ret)
		netdev_warn(priv->dev, "PTP init failed\n");
	else if (ptp_register)
		stmmac_ptp_register(priv);

	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;

	/* Convert the timer from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	/* Program the per-queue RX interrupt watchdog (coalescing),
	 * defaulting any queue the user has not configured.
	 */
	if (priv->use_riwt) {
		u32 queue;

		for (queue = 0; queue < rx_cnt; queue++) {
			if (!priv->rx_riwt[queue])
				priv->rx_riwt[queue] = DEF_DMA_RIWT;

			stmmac_rx_watchdog(priv, priv->ioaddr,
					   priv->rx_riwt[queue], queue);
		}
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++) {
			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

			/* TSO and TBS cannot co-exist */
			if (tx_q->tbs & STMMAC_TBS_AVAIL)
				continue;

			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
		}
	}

	/* Enable Split Header: requires RX checksum offload to be active,
	 * so honour the rx_csum state possibly cleared above.
	 */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);


	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS: enable/disable per TX queue according to its tbs flag */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	/* Frame Preemption: bring up the handshake workqueue and, if the
	 * platform asked for FPE, kick off the verification handshake.
	 */
	if (priv->dma_cap.fpesel) {
		stmmac_fpe_start_wq(priv);

		if (priv->plat->fpe_cfg->enable)
			stmmac_fpe_handshake(priv, true);
	}

	return 0;
}
3449523f11b5SSrinivas Kandagatla 
/* stmmac_hw_teardown - undo stmmac_hw_setup()
 * @dev: network device
 *
 * Counterpart of the clk_prepare_enable() of the PTP reference clock done
 * in stmmac_hw_setup() on the ptp_register path.
 */
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
3456c66f6c37SThierry Reding 
/* stmmac_free_irq - release IRQs acquired by stmmac_request_irq()
 * @dev: network device
 * @irq_err: which acquisition step failed (or REQ_IRQ_ERR_ALL to free
 *	     everything that was requested)
 * @irq_idx: for REQ_IRQ_ERR_TX/REQ_IRQ_ERR_RX, the index of the first
 *	     per-queue vector that was NOT successfully requested
 *
 * The switch intentionally falls through from each case to the next:
 * entering at REQ_IRQ_ERR_<X> frees, in reverse order of acquisition,
 * every IRQ that was requested *before* step <X> failed.  Note the case
 * labels are therefore offset by one step relative to the resource freed
 * directly under them (e.g. the sfty_ue line is freed before the
 * REQ_IRQ_ERR_SFTY_UE label, because a failure at SFTY_UE means only the
 * lines requested earlier are held).
 */
static void stmmac_free_irq(struct net_device *dev,
			    enum request_irq_err irq_err, int irq_idx)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int j;

	switch (irq_err) {
	case REQ_IRQ_ERR_ALL:
		irq_idx = priv->plat->tx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_TX:
		/* Free TX queue vectors [0, irq_idx), clearing any CPU
		 * affinity hint set at request time.
		 */
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->tx_irq[j] > 0) {
				irq_set_affinity_hint(priv->tx_irq[j], NULL);
				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
			}
		}
		irq_idx = priv->plat->rx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_RX:
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->rx_irq[j] > 0) {
				irq_set_affinity_hint(priv->rx_irq[j], NULL);
				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
			}
		}

		/* Dedicated lines are only requested when distinct from the
		 * main MAC IRQ; mirror that condition when freeing.
		 */
		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
			free_irq(priv->sfty_ue_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_UE:
		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
			free_irq(priv->sfty_ce_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_CE:
		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
			free_irq(priv->lpi_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_LPI:
		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
			free_irq(priv->wol_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_WOL:
		free_irq(dev->irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_MAC:
	case REQ_IRQ_ERR_NO:
		/* If MAC IRQ request error, no more IRQ to free */
		break;
	}
}
35088532f613SOng Boon Leong 
/* stmmac_request_irq_multi_msi - request IRQs in multi-MSI mode
 * @dev: network device
 *
 * Requests, in order: the common MAC vector, the optional dedicated
 * WoL/LPI/safety-CE/safety-UE vectors (only when distinct from the MAC
 * line), then one vector per RX queue and one per TX queue.  On any
 * failure it records how far it got in @irq_err and unwinds everything
 * already acquired via stmmac_free_irq().
 *
 * Returns 0 on success or the negative errno from the failing
 * request_irq() call.
 */
static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	enum request_irq_err irq_err;
	cpumask_t cpu_mask;
	int irq_idx = 0;
	char *int_name;
	int ret;
	int i;

	/* For common interrupt */
	int_name = priv->int_name_mac;
	sprintf(int_name, "%s:%s", dev->name, "mac");
	ret = request_irq(dev->irq, stmmac_mac_interrupt,
			  0, int_name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: alloc mac MSI %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	priv->wol_irq_disabled = true;
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		int_name = priv->int_name_wol;
		sprintf(int_name, "%s:%s", dev->name, "wol");
		ret = request_irq(priv->wol_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc wol MSI %d (error: %d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the LPI IRQ in case of another line
	 * is used for LPI
	 */
	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
		int_name = priv->int_name_lpi;
		sprintf(int_name, "%s:%s", dev->name, "lpi");
		ret = request_irq(priv->lpi_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc lpi MSI %d (error: %d)\n",
				   __func__, priv->lpi_irq, ret);
			irq_err = REQ_IRQ_ERR_LPI;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Correctible Error line in
	 * case of another line is used
	 */
	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
		int_name = priv->int_name_sfty_ce;
		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
		ret = request_irq(priv->sfty_ce_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ce MSI %d (error: %d)\n",
				   __func__, priv->sfty_ce_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_CE;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Uncorrectible Error line in
	 * case of another line is used
	 */
	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
		int_name = priv->int_name_sfty_ue;
		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
		ret = request_irq(priv->sfty_ue_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ue MSI %d (error: %d)\n",
				   __func__, priv->sfty_ue_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_UE;
			goto irq_error;
		}
	}

	/* Request Rx MSI irq */
	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
		/* Per-queue IRQ arrays are sized MTL_MAX_RX_QUEUES */
		if (i >= MTL_MAX_RX_QUEUES)
			break;
		if (priv->rx_irq[i] == 0)
			continue;

		int_name = priv->int_name_rx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
		ret = request_irq(priv->rx_irq[i],
				  stmmac_msi_intr_rx,
				  0, int_name, &priv->dma_conf.rx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
				   __func__, i, priv->rx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_RX;
			irq_idx = i;
			goto irq_error;
		}
		/* Spread queue vectors round-robin over online CPUs */
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
	}

	/* Request Tx MSI irq */
	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
		if (i >= MTL_MAX_TX_QUEUES)
			break;
		if (priv->tx_irq[i] == 0)
			continue;

		int_name = priv->int_name_tx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
		ret = request_irq(priv->tx_irq[i],
				  stmmac_msi_intr_tx,
				  0, int_name, &priv->dma_conf.tx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
				   __func__, i, priv->tx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_TX;
			irq_idx = i;
			goto irq_error;
		}
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, irq_idx);
	return ret;
}
36618532f613SOng Boon Leong 
/* stmmac_request_irq_single - request IRQs in single/shared-line mode
 * @dev: network device
 *
 * Requests the main shared interrupt line plus the optional dedicated
 * WoL and LPI lines (only when distinct from the main line).  On failure
 * the already-acquired lines are released via stmmac_free_irq().
 *
 * Returns 0 on success or the negative errno from the failing
 * request_irq() call.
 */
static int stmmac_request_irq_single(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	enum request_irq_err irq_err;
	int ret;

	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the IRQ lines */
	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			irq_err = REQ_IRQ_ERR_LPI;
			goto irq_error;
		}
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, 0);
	return ret;
}
37128532f613SOng Boon Leong 
stmmac_request_irq(struct net_device * dev)37138532f613SOng Boon Leong static int stmmac_request_irq(struct net_device *dev)
37148532f613SOng Boon Leong {
37158532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
37168532f613SOng Boon Leong 	int ret;
37178532f613SOng Boon Leong 
37188532f613SOng Boon Leong 	/* Request the IRQ lines */
3719956c3f09SBartosz Golaszewski 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
37208532f613SOng Boon Leong 		ret = stmmac_request_irq_multi_msi(dev);
37218532f613SOng Boon Leong 	else
37228532f613SOng Boon Leong 		ret = stmmac_request_irq_single(dev);
37238532f613SOng Boon Leong 
37248532f613SOng Boon Leong 	return ret;
37258532f613SOng Boon Leong }
37268532f613SOng Boon Leong 
3727523f11b5SSrinivas Kandagatla /**
3728ba39b344SChristian Marangi  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3729ba39b344SChristian Marangi  *  @priv: driver private structure
3730ba39b344SChristian Marangi  *  @mtu: MTU to setup the dma queue and buf with
3731ba39b344SChristian Marangi  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3732ba39b344SChristian Marangi  *  Allocate the Tx/Rx DMA queue and init them.
3733ba39b344SChristian Marangi  *  Return value:
3734ba39b344SChristian Marangi  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3735ba39b344SChristian Marangi  */
static struct stmmac_dma_conf *
stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
{
	struct stmmac_dma_conf *dma_conf;
	int chan, bfsize, ret;

	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
	if (!dma_conf) {
		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
			   __func__);
		return ERR_PTR(-ENOMEM);
	}

	/* Pick the RX buffer size for this MTU: try the 16KiB layout first,
	 * otherwise fall back to the generic sizing helper.
	 */
	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(mtu, 0);

	dma_conf->dma_buf_sz = bfsize;
	/* Chose the tx/rx size from the already defined one in the
	 * priv struct. (if defined)
	 */
	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;

	if (!dma_conf->dma_tx_size)
		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
	if (!dma_conf->dma_rx_size)
		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;

	/* Earlier check for TBS */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;

		/* Setup per-TXQ tbs flag before TX descriptor alloc */
		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
	}

	ret = alloc_dma_desc_resources(priv, dma_conf);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto alloc_error;
	}

	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	return dma_conf;

	/* goto-based unwind: free in reverse order of acquisition */
init_error:
	free_dma_desc_resources(priv, dma_conf);
alloc_error:
	kfree(dma_conf);
	return ERR_PTR(ret);
}
3799ba39b344SChristian Marangi 
3800ba39b344SChristian Marangi /**
3801ba39b344SChristian Marangi  *  __stmmac_open - open entry point of the driver
38027ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
3803ba39b344SChristian Marangi  *  @dma_conf :  structure to take the dma data
38047ac6653aSJeff Kirsher  *  Description:
38057ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
38067ac6653aSJeff Kirsher  *  Return value:
38077ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
38087ac6653aSJeff Kirsher  *  file on failure.
38097ac6653aSJeff Kirsher  */
static int __stmmac_open(struct net_device *dev,
			 struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int mode = priv->plat->phy_interface;
	u32 chan;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* Attach the PHY only when no PCS (TBI/RTBI, XPCS in C73 AN mode,
	 * or a Lynx PCS) is handling the link instead.
	 */
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI &&
	    (!priv->hw->xpcs ||
	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
	    !priv->hw->lynx_pcs) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			goto init_phy_error;
		}
	}

	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	/* Carry any per-queue TBS enable state from the live config into
	 * the new one, then make the new config current.
	 */
	buf_sz = dma_conf->dma_buf_sz;
	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));

	stmmac_reset_queues_param(priv);

	/* Power the SerDes now unless the platform defers it to PHY link-up */
	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
		if (ret < 0) {
			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
				   __func__);
			goto init_error;
		}
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);
	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

	/* Unwind labels, deepest failure first: cancel the per-queue TX
	 * timers and tear down the HW before releasing the PHY and the
	 * runtime PM reference.
	 */
irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	phylink_disconnect_phy(priv->phylink);
init_phy_error:
	pm_runtime_put(priv->device);
	return ret;
}
38917ac6653aSJeff Kirsher 
stmmac_open(struct net_device * dev)3892ba39b344SChristian Marangi static int stmmac_open(struct net_device *dev)
3893ba39b344SChristian Marangi {
3894ba39b344SChristian Marangi 	struct stmmac_priv *priv = netdev_priv(dev);
3895ba39b344SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
3896ba39b344SChristian Marangi 	int ret;
3897ba39b344SChristian Marangi 
3898ba39b344SChristian Marangi 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3899ba39b344SChristian Marangi 	if (IS_ERR(dma_conf))
3900ba39b344SChristian Marangi 		return PTR_ERR(dma_conf);
3901ba39b344SChristian Marangi 
3902ba39b344SChristian Marangi 	ret = __stmmac_open(dev, dma_conf);
390330134b7cSChristian Marangi 	if (ret)
390430134b7cSChristian Marangi 		free_dma_desc_resources(priv, dma_conf);
390530134b7cSChristian Marangi 
3906ba39b344SChristian Marangi 	kfree(dma_conf);
3907ba39b344SChristian Marangi 	return ret;
3908ba39b344SChristian Marangi }
3909ba39b344SChristian Marangi 
/**
 * stmmac_fpe_stop_wq - tear down the FPE (Frame Preemption) workqueue
 * @priv: driver private structure
 * Description: mark the FPE task state as removing, then destroy the
 * workqueue (if it was created) and clear the pointer so it cannot be
 * destroyed twice.
 */
stmmac_fpe_stop_wq(struct stmmac_priv * priv)39105a558611SOng Boon Leong static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
39115a558611SOng Boon Leong {
39125a558611SOng Boon Leong 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
39135a558611SOng Boon Leong 
39145a558611SOng Boon Leong 	if (priv->fpe_wq) {
39155a558611SOng Boon Leong 		destroy_workqueue(priv->fpe_wq);
3916699b103eSJakub Raczynski 		priv->fpe_wq = NULL;
3917699b103eSJakub Raczynski 	}
39185a558611SOng Boon Leong 
39195a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue stop");
39205a558611SOng Boon Leong }
39215a558611SOng Boon Leong 
39227ac6653aSJeff Kirsher /**
39237ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
39247ac6653aSJeff Kirsher  *  @dev : device pointer.
39257ac6653aSJeff Kirsher  *  Description:
39267ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
 *  Stops the PHY, disables the NAPI queues, cancels the per-channel TX
 *  coalescing timers, frees the IRQ lines, stops the DMA and releases
 *  the descriptor resources before switching the MAC (and an optional
 *  SerDes) off.
 *  Return: always 0.
39277ac6653aSJeff Kirsher  */
stmmac_release(struct net_device * dev)3928ac746c85SOng Boon Leong static int stmmac_release(struct net_device *dev)
39297ac6653aSJeff Kirsher {
39307ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
39318fce3331SJose Abreu 	u32 chan;
39327ac6653aSJeff Kirsher 
393377b28983SJisheng Zhang 	if (device_may_wakeup(priv->device))
393477b28983SJisheng Zhang 		phylink_speed_down(priv->phylink, false);
39357ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
393674371272SJose Abreu 	phylink_stop(priv->phylink);
393774371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
39387ac6653aSJeff Kirsher 
3939c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
39407ac6653aSJeff Kirsher 
	/* Cancel the TX coalescing timers before the queues are torn down */
39418fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
39428531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
39439125cdd1SGiuseppe CAVALLARO 
39447028471eSChristian Marangi 	netif_tx_disable(dev);
39457028471eSChristian Marangi 
39467ac6653aSJeff Kirsher 	/* Free the IRQ lines */
39478532f613SOng Boon Leong 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
39487ac6653aSJeff Kirsher 
	/* Leave LPI state and stop the EEE software timer */
39495f585913SFugang Duan 	if (priv->eee_enabled) {
39505f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
39515f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
39525f585913SFugang Duan 	}
39535f585913SFugang Duan 
39547ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
3955ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
39567ac6653aSJeff Kirsher 
39577ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
3958ba39b344SChristian Marangi 	free_dma_desc_resources(priv, &priv->dma_conf);
39597ac6653aSJeff Kirsher 
39607ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
3961c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
39627ac6653aSJeff Kirsher 
396349725ffcSJunxiao Chang 	/* Powerdown Serdes if there is */
396449725ffcSJunxiao Chang 	if (priv->plat->serdes_powerdown)
396549725ffcSJunxiao Chang 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
396649725ffcSJunxiao Chang 
39677ac6653aSJeff Kirsher 	netif_carrier_off(dev);
39687ac6653aSJeff Kirsher 
396992ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
397092ba6888SRayagond Kokatanur 
39715ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
39725ec55823SJoakim Zhang 
	/* Stop the FPE workqueue only if frame preemption is supported */
39735a558611SOng Boon Leong 	if (priv->dma_cap.fpesel)
39745a558611SOng Boon Leong 		stmmac_fpe_stop_wq(priv);
39755a558611SOng Boon Leong 
39767ac6653aSJeff Kirsher 	return 0;
39777ac6653aSJeff Kirsher }
39787ac6653aSJeff Kirsher 
/**
 * stmmac_vlan_insert - program HW VLAN tag insertion for an skb
 * @priv: driver private structure
 * @skb: socket buffer to transmit
 * @tx_q: TX queue the skb is mapped to
 * Description: if the hardware supports VLAN insertion (dma_cap.vlins)
 * and the skb carries a VLAN tag, fill a descriptor with the tag (for
 * 802.1ad the tag is placed in the inner-tag fields), hand its
 * ownership to the DMA and advance cur_tx.
 * Return: true if a descriptor was consumed for the tag, false
 * otherwise.
 */
stmmac_vlan_insert(struct stmmac_priv * priv,struct sk_buff * skb,struct stmmac_tx_queue * tx_q)397930d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
398030d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
398130d93227SJose Abreu {
398230d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
398330d93227SJose Abreu 	u32 inner_type = 0x0;
398430d93227SJose Abreu 	struct dma_desc *p;
398530d93227SJose Abreu 
398630d93227SJose Abreu 	if (!priv->dma_cap.vlins)
398730d93227SJose Abreu 		return false;
398830d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
398930d93227SJose Abreu 		return false;
399030d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
399130d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
399230d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
399330d93227SJose Abreu 	}
399430d93227SJose Abreu 
399530d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
399630d93227SJose Abreu 
	/* TBS-enabled queues use the enhanced descriptor layout */
3997579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3998579a25a8SJose Abreu 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3999579a25a8SJose Abreu 	else
4000579a25a8SJose Abreu 		p = &tx_q->dma_tx[tx_q->cur_tx];
4001579a25a8SJose Abreu 
400230d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
400330d93227SJose Abreu 		return false;
400430d93227SJose Abreu 
400530d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
40068531c808SChristian Marangi 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
400730d93227SJose Abreu 	return true;
400830d93227SJose Abreu }
400930d93227SJose Abreu 
40107ac6653aSJeff Kirsher /**
4011f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - Allocate TSO payload descriptors
4012f748be53SAlexandre TORGUE  *  @priv: driver private structure
4013f748be53SAlexandre TORGUE  *  @des: buffer start address
4014f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
4015d0ea5cbdSJesse Brandeburg  *  @last_segment: condition for the last descriptor
4016ce736788SJoao Pinto  *  @queue: TX queue index
4017f748be53SAlexandre TORGUE  *  Description:
4018f748be53SAlexandre TORGUE  *  This function fills descriptors in chunks of at most TSO_MAX_BUFF_SIZE,
4019f748be53SAlexandre TORGUE  *  taking new descriptors from the ring until @total_len bytes are mapped.
4020f748be53SAlexandre TORGUE  */
stmmac_tso_allocator(struct stmmac_priv * priv,dma_addr_t des,int total_len,bool last_segment,u32 queue)4021a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4022ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
4023f748be53SAlexandre TORGUE {
40248531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4025f748be53SAlexandre TORGUE 	struct dma_desc *desc;
40265bacd778SLABBE Corentin 	u32 buff_size;
4027ce736788SJoao Pinto 	int tmp_len;
4028f748be53SAlexandre TORGUE 
4029f748be53SAlexandre TORGUE 	tmp_len = total_len;
4030f748be53SAlexandre TORGUE 
4031f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
4032a993db88SJose Abreu 		dma_addr_t curr_addr;
4033a993db88SJose Abreu 
4034aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
40358531c808SChristian Marangi 						priv->dma_conf.dma_tx_size);
4036b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4037579a25a8SJose Abreu 
4038579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4039579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4040579a25a8SJose Abreu 		else
4041579a25a8SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4042f748be53SAlexandre TORGUE 
		/* Descriptors with <=32-bit addressing take the address in
		 * des0 directly; wider addressing goes through the helper.
		 */
4043a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
4044a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
4045a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
4046a993db88SJose Abreu 		else
4047a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
4048a993db88SJose Abreu 
4049f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4050f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
4051f748be53SAlexandre TORGUE 
405242de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4053f748be53SAlexandre TORGUE 				0, 1,
4054426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4055f748be53SAlexandre TORGUE 				0, 0);
4056f748be53SAlexandre TORGUE 
4057f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
4058f748be53SAlexandre TORGUE 	}
4059f748be53SAlexandre TORGUE }
4060f748be53SAlexandre TORGUE 
/**
 * stmmac_flush_tx_descriptors - hand prepared descriptors to the DMA
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: compute the descriptor size for the queue's layout
 * (extended, enhanced/TBS or basic), issue a write barrier so all
 * descriptor writes are visible, then advance the queue tail pointer to
 * notify the DMA engine of the new descriptors.
 */
stmmac_flush_tx_descriptors(struct stmmac_priv * priv,int queue)4061d96febedSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4062d96febedSOng Boon Leong {
40638531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4064d96febedSOng Boon Leong 	int desc_size;
4065d96febedSOng Boon Leong 
4066d96febedSOng Boon Leong 	if (likely(priv->extend_desc))
4067d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_extended_desc);
4068d96febedSOng Boon Leong 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4069d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_edesc);
4070d96febedSOng Boon Leong 	else
4071d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_desc);
4072d96febedSOng Boon Leong 
4073d96febedSOng Boon Leong 	/* The own bit must be the latest setting done when prepare the
4074d96febedSOng Boon Leong 	 * descriptor and then barrier is needed to make sure that
4075d96febedSOng Boon Leong 	 * all is coherent before granting the DMA engine.
4076d96febedSOng Boon Leong 	 */
4077d96febedSOng Boon Leong 	wmb();
4078d96febedSOng Boon Leong 
4079d96febedSOng Boon Leong 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4080d96febedSOng Boon Leong 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4081d96febedSOng Boon Leong }
4082d96febedSOng Boon Leong 
4083f748be53SAlexandre TORGUE /**
4084f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4085f748be53SAlexandre TORGUE  *  @skb : the socket buffer
4086f748be53SAlexandre TORGUE  *  @dev : device pointer
4087f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
4088f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
4089f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
4090f748be53SAlexandre TORGUE  *
4091f748be53SAlexandre TORGUE  *  First Descriptor
4092f748be53SAlexandre TORGUE  *   --------
4093f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
4094f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
4095f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
4096f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4097f748be53SAlexandre TORGUE  *   --------
4098f748be53SAlexandre TORGUE  *	|
4099f748be53SAlexandre TORGUE  *     ...
4100f748be53SAlexandre TORGUE  *	|
4101f748be53SAlexandre TORGUE  *   --------
4102f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4103f748be53SAlexandre TORGUE  *   | DES1 | --|
4104f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
4105f748be53SAlexandre TORGUE  *   | DES3 |
4106f748be53SAlexandre TORGUE  *   --------
4107f748be53SAlexandre TORGUE  *
4108f748be53SAlexandre TORGUE  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
 *
 * Return: NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full.
4109f748be53SAlexandre TORGUE  */
stmmac_tso_xmit(struct sk_buff * skb,struct net_device * dev)4110f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4111f748be53SAlexandre TORGUE {
4112ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
4113f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
4114f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
4115ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
4116c2837423SJose Abreu 	unsigned int first_entry, tx_packets;
41178070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats;
4118d96febedSOng Boon Leong 	int tmp_pay_len = 0, first_tx;
4119ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
4120c2837423SJose Abreu 	bool has_vlan, set_ic;
4121579a25a8SJose Abreu 	u8 proto_hdr_len, hdr;
4122ce736788SJoao Pinto 	u32 pay_len, mss;
4123a993db88SJose Abreu 	dma_addr_t des;
4124f748be53SAlexandre TORGUE 	int i;
4125f748be53SAlexandre TORGUE 
41268531c808SChristian Marangi 	tx_q = &priv->dma_conf.tx_queue[queue];
41278070274bSJisheng Zhang 	txq_stats = &priv->xstats.txq_stats[queue];
	/* first_tx marks where this frame starts in the ring; used below to
	 * count how many descriptors the frame consumed (IC coalescing).
	 */
4128c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
4129ce736788SJoao Pinto 
4130f748be53SAlexandre TORGUE 	/* Compute header lengths */
4131b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4132b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4133b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
4134b7766206SJose Abreu 	} else {
4135504148feSEric Dumazet 		proto_hdr_len = skb_tcp_all_headers(skb);
4136b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
4137b7766206SJose Abreu 	}
4138f748be53SAlexandre TORGUE 
4139f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
4140ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
4141f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4142c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4143c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4144c22a3f48SJoao Pinto 								queue));
4145f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
414638ddc59dSLABBE Corentin 			netdev_err(priv->dev,
414738ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
414838ddc59dSLABBE Corentin 				   __func__);
4149f748be53SAlexandre TORGUE 		}
4150f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
4151f748be53SAlexandre TORGUE 	}
4152f748be53SAlexandre TORGUE 
4153f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4154f748be53SAlexandre TORGUE 
4155f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
4156f748be53SAlexandre TORGUE 
4157f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
41588d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
4159579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4160579a25a8SJose Abreu 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4161579a25a8SJose Abreu 		else
4162579a25a8SJose Abreu 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4163579a25a8SJose Abreu 
416442de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
41658d212a9eSNiklas Cassel 		tx_q->mss = mss;
4166aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
41678531c808SChristian Marangi 						priv->dma_conf.dma_tx_size);
4168b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4169f748be53SAlexandre TORGUE 	}
4170f748be53SAlexandre TORGUE 
4171f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
4172b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4173b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
4174f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4175f748be53SAlexandre TORGUE 			skb->data_len);
4176f748be53SAlexandre TORGUE 	}
4177f748be53SAlexandre TORGUE 
417830d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
417930d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
418030d93227SJose Abreu 
4181ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
4182b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4183f748be53SAlexandre TORGUE 
4184579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4185579a25a8SJose Abreu 		desc = &tx_q->dma_entx[first_entry].basic;
4186579a25a8SJose Abreu 	else
4187579a25a8SJose Abreu 		desc = &tx_q->dma_tx[first_entry];
4188f748be53SAlexandre TORGUE 	first = desc;
4189f748be53SAlexandre TORGUE 
419030d93227SJose Abreu 	if (has_vlan)
419130d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
419230d93227SJose Abreu 
4193f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
4194f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4195f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
4196f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
4197f748be53SAlexandre TORGUE 		goto dma_map_err;
4198f748be53SAlexandre TORGUE 
4199ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4200ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4201be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4202be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4203f748be53SAlexandre TORGUE 
	/* With <=32-bit descriptor addressing, the header goes in des0 and
	 * the start of the payload in des1 of the first descriptor; wider
	 * addressing uses the generic helper and maps the whole payload
	 * through stmmac_tso_allocator() below.
	 */
4204a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
4205f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
4206f748be53SAlexandre TORGUE 
4207f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
4208f748be53SAlexandre TORGUE 		if (pay_len)
4209f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4210f748be53SAlexandre TORGUE 
4211f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
4212f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4213a993db88SJose Abreu 	} else {
4214a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
4215a993db88SJose Abreu 		tmp_pay_len = pay_len;
421634c15202Syuqi jin 		des += proto_hdr_len;
4217b2f07199SJose Abreu 		pay_len = 0;
4218a993db88SJose Abreu 	}
4219f748be53SAlexandre TORGUE 
4220ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4221f748be53SAlexandre TORGUE 
4222f748be53SAlexandre TORGUE 	/* Prepare fragments */
4223f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
4224f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4225f748be53SAlexandre TORGUE 
4226f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
4227f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
4228f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
4229937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
4230937071c1SThierry Reding 			goto dma_map_err;
4231f748be53SAlexandre TORGUE 
4232f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4233ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
4234f748be53SAlexandre TORGUE 
4235ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4236ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4237ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4238be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4239f748be53SAlexandre TORGUE 	}
4240f748be53SAlexandre TORGUE 
4241ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4242f748be53SAlexandre TORGUE 
424305cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
424405cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4245be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
424605cf0d1bSNiklas Cassel 
42477df4a3a7SJose Abreu 	/* Manage tx mitigation */
4248c2837423SJose Abreu 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4249c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
4250c2837423SJose Abreu 
	/* Decide whether this frame's last descriptor raises a TX-complete
	 * interrupt: always for HW-timestamped frames, otherwise according
	 * to the tx_coal_frames coalescing threshold.
	 */
4251c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4252c2837423SJose Abreu 		set_ic = true;
4253db2f2842SOng Boon Leong 	else if (!priv->tx_coal_frames[queue])
4254c2837423SJose Abreu 		set_ic = false;
4255db2f2842SOng Boon Leong 	else if (tx_packets > priv->tx_coal_frames[queue])
4256c2837423SJose Abreu 		set_ic = true;
4257db2f2842SOng Boon Leong 	else if ((tx_q->tx_count_frames %
4258db2f2842SOng Boon Leong 		  priv->tx_coal_frames[queue]) < tx_packets)
4259c2837423SJose Abreu 		set_ic = true;
4260c2837423SJose Abreu 	else
4261c2837423SJose Abreu 		set_ic = false;
4262c2837423SJose Abreu 
4263c2837423SJose Abreu 	if (set_ic) {
4264579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4265579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4266579a25a8SJose Abreu 		else
42677df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4268579a25a8SJose Abreu 
42697df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
42707df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
42717df4a3a7SJose Abreu 	}
42727df4a3a7SJose Abreu 
427305cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
427405cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
427505cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
427605cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
427705cf0d1bSNiklas Cassel 	 */
42788531c808SChristian Marangi 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4279f748be53SAlexandre TORGUE 
4280ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4281b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
428238ddc59dSLABBE Corentin 			  __func__);
4283c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4284f748be53SAlexandre TORGUE 	}
4285f748be53SAlexandre TORGUE 
42869680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->q_syncp);
42879680b2abSPetr Tesarik 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
42889680b2abSPetr Tesarik 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
42899680b2abSPetr Tesarik 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4290133466c3SJisheng Zhang 	if (set_ic)
42919680b2abSPetr Tesarik 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
42929680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->q_syncp);
4293f748be53SAlexandre TORGUE 
42948000ddc0SJose Abreu 	if (priv->sarc_type)
42958000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
42968000ddc0SJose Abreu 
4297f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
4298f748be53SAlexandre TORGUE 
4299f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4300f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
4301f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
4302f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
430342de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
4304f748be53SAlexandre TORGUE 	}
4305f748be53SAlexandre TORGUE 
4306f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
430742de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4308f748be53SAlexandre TORGUE 			proto_hdr_len,
4309f748be53SAlexandre TORGUE 			pay_len,
4310ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4311b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
4312f748be53SAlexandre TORGUE 
4313f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
431415d2ee42SNiklas Cassel 	if (mss_desc) {
431515d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
431615d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
431715d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
431815d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
431915d2ee42SNiklas Cassel 		 */
432015d2ee42SNiklas Cassel 		dma_wmb();
432142de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
432215d2ee42SNiklas Cassel 	}
4323f748be53SAlexandre TORGUE 
4324f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
4325f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4326ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4327ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
4328f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
4329f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
4330f748be53SAlexandre TORGUE 	}
4331f748be53SAlexandre TORGUE 
4332c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4333f748be53SAlexandre TORGUE 
	/* Publish the descriptors to the DMA and re-arm the coalesce timer */
4334d96febedSOng Boon Leong 	stmmac_flush_tx_descriptors(priv, queue);
43354772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
4336f748be53SAlexandre TORGUE 
4337f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4338f748be53SAlexandre TORGUE 
4339f748be53SAlexandre TORGUE dma_map_err:
4340f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
4341f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
4342133466c3SJisheng Zhang 	priv->xstats.tx_dropped++;
4343f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4344f748be53SAlexandre TORGUE }
4345f748be53SAlexandre TORGUE 
4346f748be53SAlexandre TORGUE /**
434797d574fcSRomain Gantois  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
434897d574fcSRomain Gantois  * @skb: socket buffer to check
434997d574fcSRomain Gantois  *
435097d574fcSRomain Gantois  * Check if a packet has an ethertype that will trigger the IP header checks
435197d574fcSRomain Gantois  * and IP/TCP checksum engine of the stmmac core.
435297d574fcSRomain Gantois  *
435397d574fcSRomain Gantois  * Return: true if the ethertype can trigger the checksum engine, false
435497d574fcSRomain Gantois  * otherwise
435597d574fcSRomain Gantois  */
stmmac_has_ip_ethertype(struct sk_buff * skb)435697d574fcSRomain Gantois static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
435797d574fcSRomain Gantois {
435897d574fcSRomain Gantois 	int depth = 0;
435997d574fcSRomain Gantois 	__be16 proto;
436097d574fcSRomain Gantois 
	/* Walk past any VLAN headers; depth reports how far the parser went */
436197d574fcSRomain Gantois 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
436297d574fcSRomain Gantois 				    &depth);
436397d574fcSRomain Gantois 
	/* Only an IPv4/IPv6 ethertype found within the Ethernet header
	 * area counts (e.g. DSA-tagged frames do not match).
	 */
436497d574fcSRomain Gantois 	return (depth <= ETH_HLEN) &&
436597d574fcSRomain Gantois 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
436697d574fcSRomain Gantois }
436797d574fcSRomain Gantois 
436897d574fcSRomain Gantois /**
4369732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
43707ac6653aSJeff Kirsher  *  @skb : the socket buffer
43717ac6653aSJeff Kirsher  *  @dev : device pointer
437232ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
437332ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
437432ceabcaSGiuseppe CAVALLARO  *  and SG feature.
43757ac6653aSJeff Kirsher  */
stmmac_xmit(struct sk_buff * skb,struct net_device * dev)43767ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
43777ac6653aSJeff Kirsher {
4378c2837423SJose Abreu 	unsigned int first_entry, tx_packets, enh_desc;
43797ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
43800e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
43814a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
4382ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
43837ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
4384b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
43858070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats;
4386579a25a8SJose Abreu 	struct dma_edesc *tbs_desc = NULL;
43877ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
4388ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
4389c2837423SJose Abreu 	bool has_vlan, set_ic;
4390d96febedSOng Boon Leong 	int entry, first_tx;
4391a993db88SJose Abreu 	dma_addr_t des;
4392f748be53SAlexandre TORGUE 
43938531c808SChristian Marangi 	tx_q = &priv->dma_conf.tx_queue[queue];
43948070274bSJisheng Zhang 	txq_stats = &priv->xstats.txq_stats[queue];
4395c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
4396ce736788SJoao Pinto 
4397be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4398e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
4399e2cd682dSJose Abreu 
4400f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
4401f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
4402b7766206SJose Abreu 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4403b7766206SJose Abreu 			return stmmac_tso_xmit(skb, dev);
4404b7766206SJose Abreu 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4405f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
4406f748be53SAlexandre TORGUE 	}
44077ac6653aSJeff Kirsher 
4408ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4409c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4410c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4411c22a3f48SJoao Pinto 								queue));
44127ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
441338ddc59dSLABBE Corentin 			netdev_err(priv->dev,
441438ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
441538ddc59dSLABBE Corentin 				   __func__);
44167ac6653aSJeff Kirsher 		}
44177ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
44187ac6653aSJeff Kirsher 	}
44197ac6653aSJeff Kirsher 
442030d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
442130d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
442230d93227SJose Abreu 
4423ce736788SJoao Pinto 	entry = tx_q->cur_tx;
44240e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
4425b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
44267ac6653aSJeff Kirsher 
44277ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4428b643b836SRohan G Thomas 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4429b643b836SRohan G Thomas 	 * queues. In that case, checksum offloading for those queues that don't
4430b643b836SRohan G Thomas 	 * support tx coe needs to fallback to software checksum calculation.
443197d574fcSRomain Gantois 	 *
443297d574fcSRomain Gantois 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
443397d574fcSRomain Gantois 	 * also have to be checksummed in software.
4434b643b836SRohan G Thomas 	 */
4435b643b836SRohan G Thomas 	if (csum_insertion &&
443697d574fcSRomain Gantois 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
443797d574fcSRomain Gantois 	     !stmmac_has_ip_ethertype(skb))) {
4438b643b836SRohan G Thomas 		if (unlikely(skb_checksum_help(skb)))
4439b643b836SRohan G Thomas 			goto dma_map_err;
4440b643b836SRohan G Thomas 		csum_insertion = !csum_insertion;
4441b643b836SRohan G Thomas 	}
44427ac6653aSJeff Kirsher 
44430e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
4444ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4445579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4446579a25a8SJose Abreu 		desc = &tx_q->dma_entx[entry].basic;
4447c24602efSGiuseppe CAVALLARO 	else
4448ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
4449c24602efSGiuseppe CAVALLARO 
44507ac6653aSJeff Kirsher 	first = desc;
44517ac6653aSJeff Kirsher 
445230d93227SJose Abreu 	if (has_vlan)
445330d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
445430d93227SJose Abreu 
44550e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
44564a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
445729896a67SGiuseppe CAVALLARO 	if (enh_desc)
44582c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
445929896a67SGiuseppe CAVALLARO 
446063a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
44612c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
446263a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
4463362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
446429896a67SGiuseppe CAVALLARO 	}
44657ac6653aSJeff Kirsher 
44667ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
44679e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
44689e903e08SEric Dumazet 		int len = skb_frag_size(frag);
4469be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
44707ac6653aSJeff Kirsher 
44718531c808SChristian Marangi 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4472b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
4473e3ad57c9SGiuseppe Cavallaro 
44740e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
4475ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4476579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4477579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
4478c24602efSGiuseppe CAVALLARO 		else
4479ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
44807ac6653aSJeff Kirsher 
4481f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4482f722380dSIan Campbell 				       DMA_TO_DEVICE);
4483f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
4484362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
4485362b37beSGiuseppe CAVALLARO 
4486ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
44876844171dSJose Abreu 
44886844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
4489f748be53SAlexandre TORGUE 
4490ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4491ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
4492ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4493be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
44940e80bdc9SGiuseppe Cavallaro 
44950e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
449642de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
449742de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
44987ac6653aSJeff Kirsher 	}
44997ac6653aSJeff Kirsher 
450005cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
450105cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
4502be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4503e3ad57c9SGiuseppe Cavallaro 
45047df4a3a7SJose Abreu 	/* According to the coalesce parameter the IC bit for the latest
45057df4a3a7SJose Abreu 	 * segment is reset and the timer re-started to clean the tx status.
45067df4a3a7SJose Abreu 	 * This approach takes care about the fragments: desc is the first
45077df4a3a7SJose Abreu 	 * element in case of no SG.
45087df4a3a7SJose Abreu 	 */
4509c2837423SJose Abreu 	tx_packets = (entry + 1) - first_tx;
4510c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
4511c2837423SJose Abreu 
4512c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4513c2837423SJose Abreu 		set_ic = true;
4514db2f2842SOng Boon Leong 	else if (!priv->tx_coal_frames[queue])
4515c2837423SJose Abreu 		set_ic = false;
4516db2f2842SOng Boon Leong 	else if (tx_packets > priv->tx_coal_frames[queue])
4517c2837423SJose Abreu 		set_ic = true;
4518db2f2842SOng Boon Leong 	else if ((tx_q->tx_count_frames %
4519db2f2842SOng Boon Leong 		  priv->tx_coal_frames[queue]) < tx_packets)
4520c2837423SJose Abreu 		set_ic = true;
4521c2837423SJose Abreu 	else
4522c2837423SJose Abreu 		set_ic = false;
4523c2837423SJose Abreu 
4524c2837423SJose Abreu 	if (set_ic) {
45257df4a3a7SJose Abreu 		if (likely(priv->extend_desc))
45267df4a3a7SJose Abreu 			desc = &tx_q->dma_etx[entry].basic;
4527579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4528579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
45297df4a3a7SJose Abreu 		else
45307df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[entry];
45317df4a3a7SJose Abreu 
45327df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
45337df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
45347df4a3a7SJose Abreu 	}
45357df4a3a7SJose Abreu 
453605cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
453705cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
453805cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
453905cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
454005cf0d1bSNiklas Cassel 	 */
45418531c808SChristian Marangi 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4542ce736788SJoao Pinto 	tx_q->cur_tx = entry;
45437ac6653aSJeff Kirsher 
45447ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
454538ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
454638ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4547ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
45480e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
454983d7af64SGiuseppe CAVALLARO 
455038ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
45517ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
45527ac6653aSJeff Kirsher 	}
45530e80bdc9SGiuseppe Cavallaro 
4554ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4555b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4556b3e51069SLABBE Corentin 			  __func__);
4557c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
45587ac6653aSJeff Kirsher 	}
45597ac6653aSJeff Kirsher 
45609680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->q_syncp);
45619680b2abSPetr Tesarik 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4562133466c3SJisheng Zhang 	if (set_ic)
45639680b2abSPetr Tesarik 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
45649680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->q_syncp);
45657ac6653aSJeff Kirsher 
45668000ddc0SJose Abreu 	if (priv->sarc_type)
45678000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
45688000ddc0SJose Abreu 
45690e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
45700e80bdc9SGiuseppe Cavallaro 
45710e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
45720e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
45730e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
45740e80bdc9SGiuseppe Cavallaro 	 */
45750e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
45760e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
45770e80bdc9SGiuseppe Cavallaro 
4578f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
45790e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
4580f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
45810e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
45820e80bdc9SGiuseppe Cavallaro 
4583ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4584be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4585be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
45866844171dSJose Abreu 
45876844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
4588f748be53SAlexandre TORGUE 
4589ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4590ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
45910e80bdc9SGiuseppe Cavallaro 
4592891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4593891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
4594891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
4595891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
459642de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
4597891434b1SRayagond Kokatanur 		}
4598891434b1SRayagond Kokatanur 
45990e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
460042de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4601579a25a8SJose Abreu 				csum_insertion, priv->mode, 0, last_segment,
460242de047dSJose Abreu 				skb->len);
460380acbed9SAaro Koskinen 	}
46040e80bdc9SGiuseppe Cavallaro 
4605579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_EN) {
4606579a25a8SJose Abreu 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4607579a25a8SJose Abreu 
4608579a25a8SJose Abreu 		tbs_desc = &tx_q->dma_entx[first_entry];
4609579a25a8SJose Abreu 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4610579a25a8SJose Abreu 	}
4611579a25a8SJose Abreu 
4612579a25a8SJose Abreu 	stmmac_set_tx_owner(priv, first);
4613579a25a8SJose Abreu 
4614c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4615f748be53SAlexandre TORGUE 
4616a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
46178fce3331SJose Abreu 
4618d96febedSOng Boon Leong 	stmmac_flush_tx_descriptors(priv, queue);
46194772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
46207ac6653aSJeff Kirsher 
4621362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
4622a9097a96SGiuseppe CAVALLARO 
4623362b37beSGiuseppe CAVALLARO dma_map_err:
462438ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
4625362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
4626133466c3SJisheng Zhang 	priv->xstats.tx_dropped++;
46277ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
46287ac6653aSJeff Kirsher }
46297ac6653aSJeff Kirsher 
stmmac_rx_vlan(struct net_device * dev,struct sk_buff * skb)4630b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4631b9381985SVince Bridgers {
46321f5020acSVladimir Oltean 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
46331f5020acSVladimir Oltean 	__be16 vlan_proto = veth->h_vlan_proto;
4634b9381985SVince Bridgers 	u16 vlanid;
4635b9381985SVince Bridgers 
4636ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4637ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4638ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
4639ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4640b9381985SVince Bridgers 		/* pop the vlan tag */
4641ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
4642ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4643b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
4644ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4645b9381985SVince Bridgers 	}
4646b9381985SVince Bridgers }
4647b9381985SVince Bridgers 
/**
 * stmmac_rx_refill - refill the RX queue with fresh buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : reallocate page-pool backed RX buffers for the descriptors
 * already processed by the reception path (zero-copy) and hand them back
 * to the DMA engine.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* On hosts with a narrow (<= 32 bit) DMA width allocate from the
	 * DMA32 zone so the device can address the buffers.
	 */
	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		/* Pick the descriptor layout in use for this ring */
		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* Replenish the primary buffer if it was consumed */
		if (!buf->page) {
			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->page)
				break;
		}

		/* Split-header mode also needs a secondary buffer */
		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		/* NOTE(review): the counter is bumped by one AND by the
		 * coalesce threshold before the comparison below — looks
		 * odd but mirrors stmmac_rx_refill_zc(); confirm intent
		 * before changing.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Arm the RX watchdog only when RIWT is in use and frame
		 * coalescing is either disabled or still counting.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* All descriptor fields must be visible to the device
		 * before ownership is handed back.
		 */
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
47187ac6653aSJeff Kirsher 
/* Return the number of valid payload bytes held in the first buffer of
 * RX descriptor @p, given the descriptor @status and the number of
 * bytes @len already accepted for the current frame.
 */
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_conf.dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}
474688ebe2cfSJose Abreu 
stmmac_rx_buf2_len(struct stmmac_priv * priv,struct dma_desc * p,int status,unsigned int len)474788ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
474888ebe2cfSJose Abreu 				       struct dma_desc *p,
474988ebe2cfSJose Abreu 				       int status, unsigned int len)
475088ebe2cfSJose Abreu {
475188ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
475288ebe2cfSJose Abreu 	unsigned int plen = 0;
475388ebe2cfSJose Abreu 
475488ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
475588ebe2cfSJose Abreu 	if (!priv->sph)
475688ebe2cfSJose Abreu 		return 0;
475788ebe2cfSJose Abreu 
475888ebe2cfSJose Abreu 	/* Not last descriptor */
475988ebe2cfSJose Abreu 	if (status & rx_not_ls)
47608531c808SChristian Marangi 		return priv->dma_conf.dma_buf_sz;
476188ebe2cfSJose Abreu 
476288ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
476388ebe2cfSJose Abreu 
476488ebe2cfSJose Abreu 	/* Last descriptor */
476588ebe2cfSJose Abreu 	return plen - len;
476688ebe2cfSJose Abreu }
476788ebe2cfSJose Abreu 
/* Queue a single XDP frame @xdpf for transmission on TX queue @queue.
 *
 * @dma_map: true for ndo_xdp_xmit() frames (foreign memory that must be
 *	     DMA-mapped here), false for XDP_TX frames (page-pool memory
 *	     already mapped; only a dma_sync for the device is needed).
 *
 * Returns STMMAC_XDP_TX on success, or STMMAC_XDP_CONSUMED when the ring
 * is too full or the DMA mapping failed.
 *
 * Fix vs. original: guard the IC-bit modulo against a zero per-queue
 * tx_coal_frames (settable via ethtool), which previously divided by
 * zero; with a zero coalesce count the IC bit is never set here, which
 * matches the stmmac_xmit() slow-path handling of the same case.
 */
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	/* Keep a slack of descriptors so the stack TX path is not starved */
	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	/* Pick the descriptor layout in use for this ring */
	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		/* Page-pool memory: locate the payload inside the page and
		 * sync it for device access.
		 */
		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       true, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	/* Request a TX completion interrupt every tx_coal_frames frames.
	 * A zero coalesce count would divide by zero, so never set the IC
	 * bit in that case (the coalesce timer still cleans the ring).
	 */
	if (priv->tx_coal_frames[queue] &&
	    tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		u64_stats_update_begin(&txq_stats->q_syncp);
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
		u64_stats_update_end(&txq_stats->q_syncp);
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
4842be8b38a7SOng Boon Leong 
stmmac_xdp_get_tx_queue(struct stmmac_priv * priv,int cpu)4843be8b38a7SOng Boon Leong static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4844be8b38a7SOng Boon Leong 				   int cpu)
4845be8b38a7SOng Boon Leong {
4846be8b38a7SOng Boon Leong 	int index = cpu;
4847be8b38a7SOng Boon Leong 
4848be8b38a7SOng Boon Leong 	if (unlikely(index < 0))
4849be8b38a7SOng Boon Leong 		index = 0;
4850be8b38a7SOng Boon Leong 
4851be8b38a7SOng Boon Leong 	while (index >= priv->plat->tx_queues_to_use)
4852be8b38a7SOng Boon Leong 		index -= priv->plat->tx_queues_to_use;
4853be8b38a7SOng Boon Leong 
4854be8b38a7SOng Boon Leong 	return index;
4855be8b38a7SOng Boon Leong }
4856be8b38a7SOng Boon Leong 
/* Transmit an XDP_TX buffer on the TX queue associated with the current
 * CPU. Returns STMMAC_XDP_TX on success, or STMMAC_XDP_CONSUMED when the
 * frame conversion or the transmit itself failed.
 */
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	/* The TX queue is shared with the regular stack transmit path */
	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv, queue);

	__netif_tx_unlock(nq);

	return res;
}
4884be8b38a7SOng Boon Leong 
/* Run XDP program @prog on @xdp and translate the XDP verdict into a
 * driver STMMAC_XDP_* action code. XDP_TX frames are transmitted right
 * away; XDP_REDIRECT is queued here and flushed later by
 * stmmac_finalize_xdp_rx(). Aborted, unknown and drop verdicts all end
 * up consuming the buffer.
 */
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
				 struct bpf_prog *prog,
				 struct xdp_buff *xdp)
{
	u32 act;
	int res;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		res = STMMAC_XDP_PASS;
		break;
	case XDP_TX:
		res = stmmac_xdp_xmit_back(priv, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
			res = STMMAC_XDP_CONSUMED;
		else
			res = STMMAC_XDP_REDIRECT;
		break;
	default:
		/* Unknown verdict: warn once, then treat as aborted */
		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		res = STMMAC_XDP_CONSUMED;
		break;
	}

	return res;
}
4919bba71cacSOng Boon Leong 
stmmac_xdp_run_prog(struct stmmac_priv * priv,struct xdp_buff * xdp)4920bba71cacSOng Boon Leong static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4921bba71cacSOng Boon Leong 					   struct xdp_buff *xdp)
4922bba71cacSOng Boon Leong {
4923bba71cacSOng Boon Leong 	struct bpf_prog *prog;
4924bba71cacSOng Boon Leong 	int res;
4925bba71cacSOng Boon Leong 
4926bba71cacSOng Boon Leong 	prog = READ_ONCE(priv->xdp_prog);
4927bba71cacSOng Boon Leong 	if (!prog) {
4928bba71cacSOng Boon Leong 		res = STMMAC_XDP_PASS;
49292f1e432dSToke Høiland-Jørgensen 		goto out;
4930bba71cacSOng Boon Leong 	}
4931bba71cacSOng Boon Leong 
4932bba71cacSOng Boon Leong 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
49332f1e432dSToke Høiland-Jørgensen out:
49345fabb012SOng Boon Leong 	return ERR_PTR(-res);
49355fabb012SOng Boon Leong }
49365fabb012SOng Boon Leong 
stmmac_finalize_xdp_rx(struct stmmac_priv * priv,int xdp_status)4937be8b38a7SOng Boon Leong static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4938be8b38a7SOng Boon Leong 				   int xdp_status)
4939be8b38a7SOng Boon Leong {
4940be8b38a7SOng Boon Leong 	int cpu = smp_processor_id();
4941be8b38a7SOng Boon Leong 	int queue;
4942be8b38a7SOng Boon Leong 
4943be8b38a7SOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4944be8b38a7SOng Boon Leong 
4945be8b38a7SOng Boon Leong 	if (xdp_status & STMMAC_XDP_TX)
4946be8b38a7SOng Boon Leong 		stmmac_tx_timer_arm(priv, queue);
49478b278a5bSOng Boon Leong 
49488b278a5bSOng Boon Leong 	if (xdp_status & STMMAC_XDP_REDIRECT)
49498b278a5bSOng Boon Leong 		xdp_do_flush();
4950be8b38a7SOng Boon Leong }
4951be8b38a7SOng Boon Leong 
stmmac_construct_skb_zc(struct stmmac_channel * ch,struct xdp_buff * xdp)4952bba2556eSOng Boon Leong static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4953bba2556eSOng Boon Leong 					       struct xdp_buff *xdp)
4954bba2556eSOng Boon Leong {
4955bba2556eSOng Boon Leong 	unsigned int metasize = xdp->data - xdp->data_meta;
4956bba2556eSOng Boon Leong 	unsigned int datasize = xdp->data_end - xdp->data;
4957bba2556eSOng Boon Leong 	struct sk_buff *skb;
4958bba2556eSOng Boon Leong 
4959132c32eeSOng Boon Leong 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4960bba2556eSOng Boon Leong 			       xdp->data_end - xdp->data_hard_start,
4961bba2556eSOng Boon Leong 			       GFP_ATOMIC | __GFP_NOWARN);
4962bba2556eSOng Boon Leong 	if (unlikely(!skb))
4963bba2556eSOng Boon Leong 		return NULL;
4964bba2556eSOng Boon Leong 
4965bba2556eSOng Boon Leong 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4966bba2556eSOng Boon Leong 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4967bba2556eSOng Boon Leong 	if (metasize)
4968bba2556eSOng Boon Leong 		skb_metadata_set(skb, metasize);
4969bba2556eSOng Boon Leong 
4970bba2556eSOng Boon Leong 	return skb;
4971bba2556eSOng Boon Leong }
4972bba2556eSOng Boon Leong 
/* Build an skb from a zero-copy XDP buffer that passed the XDP program,
 * restore offload metadata (RX timestamp, VLAN, checksum status, RSS
 * hash) and hand it to the stack through the channel's rxtx NAPI.
 */
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
				   struct dma_desc *p, struct dma_desc *np,
				   struct xdp_buff *xdp)
{
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int len = xdp->data_end - xdp->data;
	enum pkt_hash_types hash_type;
	int coe = priv->hw->rx_csum;
	struct sk_buff *skb;
	u32 hash;

	skb = stmmac_construct_skb_zc(ch, xdp);
	if (!skb) {
		priv->xstats.rx_dropped++;
		return;
	}

	stmmac_get_rx_hwtstamp(priv, p, np, skb);
	stmmac_rx_vlan(priv->dev, skb);
	skb->protocol = eth_type_trans(skb, priv->dev);

	/* Trust the HW checksum only when RX COE is enabled and the
	 * frame carries an IP ethertype.
	 */
	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
		skb_set_hash(skb, hash, hash_type);

	skb_record_rx_queue(skb, queue);
	napi_gro_receive(&ch->rxtx_napi, skb);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
	u64_stats_update_end(&rxq_stats->napi_syncp);
}
5011bba2556eSOng Boon Leong 
/* Refill up to @budget RX descriptors of @queue with fresh XSK buffers.
 * Returns false if the XSK pool ran dry before the budget was used up,
 * true otherwise.
 */
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	/* Never refill more than the descriptors already consumed */
	budget = min(budget, stmmac_rx_dirty(priv, queue));

	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		/* Replenish the XSK buffer if it was consumed */
		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				ret = false;
				break;
			}
		}

		/* Pick the descriptor layout in use for this ring */
		if (priv->extend_desc)
			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			rx_desc = rx_q->dma_rx + entry;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		/* NOTE(review): the counter is bumped by one AND by the
		 * coalesce threshold before the comparison below — looks
		 * odd but mirrors stmmac_rx_refill(); confirm intent
		 * before changing.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Arm the RX watchdog only when RIWT is in use and frame
		 * coalescing is either disabled or still counting.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Descriptor must be fully written before handing it back */
		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}

	/* Advance the tail pointer only if at least one descriptor was
	 * actually refilled.
	 */
	if (rx_desc) {
		rx_q->dirty_rx = entry;
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->dirty_rx * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
	}

	return ret;
}
5069bba2556eSOng Boon Leong 
/* Recover the driver-private RX context wrapped around an XSK buffer.
 *
 * In the XDP zero copy data path the xdp field in struct xdp_buff_xsk
 * represents the incoming packet while the cb field of the same structure
 * stores driver specific info. struct stmmac_xdp_buff is laid out on top
 * of those two fields, so a plain cast is all that is needed here.
 */
static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
{
	struct stmmac_xdp_buff *ctx = (struct stmmac_xdp_buff *)xdp;

	return ctx;
}
50799570df35SSong Yoong Siang 
/**
 * stmmac_rx_zc - receive frames in XDP zero-copy (AF_XDP) mode
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index
 * Description: zero-copy counterpart of stmmac_rx(). Frames land directly
 * in XSK pool buffers and are run through the XDP program; XDP_PASS frames
 * are dispatched to the stack via stmmac_dispatch_skb_zc().
 * Return value: number of frames processed (or the full budget on refill
 * failure so napi keeps polling).
 */
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int count = 0, error = 0, len = 0;
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int next_entry = rx_q->cur_rx;
	u32 rx_errors = 0, rx_dropped = 0;
	unsigned int desc_size;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
	int status = 0;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		struct stmmac_rx_buffer *buf;
		struct stmmac_xdp_buff *ctx;
		unsigned int buf1_len = 0;
		struct dma_desc *np, *p;
		int entry;
		int res;

		/* Resume a frame left incomplete by the previous napi run */
		if (!count && rx_q->state_saved) {
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
		buf1_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		/* Batch the refill once enough descriptors were consumed */
		if (dirty >= STMMAC_RX_FILL_BATCH) {
			failure = failure ||
				  !stmmac_rx_refill_zc(priv, queue, dirty);
			dirty = 0;
		}

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		/* Prefetch the next RX descriptor */
		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_conf.dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		/* Ensure a valid XSK buffer before proceed */
		if (!buf->xdp)
			break;

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->xstats,
						  rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			error = 1;
			if (!priv->hwts_rx_en)
				rx_errors++;
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			count++;
			continue;
		}

		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
		if (likely(status & rx_not_ls)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			count++;
			goto read_again;
		}

		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
		ctx->priv = priv;
		ctx->desc = p;
		ctx->ndesc = np;

		/* XDP ZC Frame only support primary buffers for now */
		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;

		/* ACS is disabled; strip manually. */
		if (likely(!(status & rx_not_ls))) {
			buf1_len -= ETH_FCS_LEN;
			len -= ETH_FCS_LEN;
		}

		/* RX buffer is good and fit into a XSK pool buffer */
		buf->xdp->data_end = buf->xdp->data + buf1_len;
		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);

		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

		switch (res) {
		case STMMAC_XDP_PASS:
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
			rx_dropped++;
			break;
		case STMMAC_XDP_TX:
		case STMMAC_XDP_REDIRECT:
			/* Buffer is not freed here; completion is handled
			 * by stmmac_finalize_xdp_rx() below.
			 */
			xdp_status |= res;
			break;
		}

		buf->xdp = NULL;
		dirty++;
		count++;
	}

	/* Frame continues beyond the budget: stash progress for next poll */
	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	priv->xstats.rx_dropped += rx_dropped;
	priv->xstats.rx_errors += rx_errors;

	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);

		return (int)count;
	}

	/* On refill failure claim the full budget so napi polls again and
	 * the refill is retried.
	 */
	return failure ? limit : (int)count;
}
5263bba2556eSOng Boon Leong 
526432ceabcaSGiuseppe CAVALLARO /**
5265732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
526632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
526754139cf3SJoao Pinto  * @limit: napi budget
526854139cf3SJoao Pinto  * @queue: RX queue index.
526932ceabcaSGiuseppe CAVALLARO  * Description : this is the function called by the napi poll method.
527032ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
527132ceabcaSGiuseppe CAVALLARO  */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int count = 0, error = 0, len = 0;
	int status = 0, coe = priv->hw->rx_csum;
	unsigned int next_entry = rx_q->cur_rx;
	enum dma_data_direction dma_dir;
	unsigned int desc_size;
	struct sk_buff *skb = NULL;
	struct stmmac_xdp_buff ctx;
	int xdp_status = 0;
	int buf_sz;

	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
	/* Cap the budget at one full ring's worth of descriptors */
	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		unsigned int buf1_len = 0, buf2_len = 0;
		enum pkt_hash_types hash_type;
		struct stmmac_rx_buffer *buf;
		struct dma_desc *np, *p;
		int entry;
		u32 hash;

		/* Resume a frame left incomplete by the previous napi run */
		if (!count && rx_q->state_saved) {
			skb = rx_q->state.skb;
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		if (count >= limit)
			break;

		buf1_len = 0;
		buf2_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_conf.dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
			error = 1;
			if (!priv->hwts_rx_en)
				rx_errors++;
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			dev_kfree_skb(skb);
			skb = NULL;
			count++;
			continue;
		}

		/* Buffer is good. Go on. */

		prefetch(page_address(buf->page) + buf->page_offset);
		if (buf->sec_page)
			prefetch(page_address(buf->sec_page));

		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;
		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
		len += buf2_len;

		/* ACS is disabled; strip manually. */
		if (likely(!(status & rx_not_ls))) {
			if (buf2_len) {
				buf2_len -= ETH_FCS_LEN;
				len -= ETH_FCS_LEN;
			} else if (buf1_len) {
				buf1_len -= ETH_FCS_LEN;
				len -= ETH_FCS_LEN;
			}
		}

		/* First segment of a frame: run it through the XDP program */
		if (!skb) {
			unsigned int pre_len, sync_len;

			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, dma_dir);

			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
					 buf->page_offset, buf1_len, true);

			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
				  buf->page_offset;

			ctx.priv = priv;
			ctx.desc = p;
			ctx.ndesc = np;

			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
			/* Due xdp_adjust_tail: DMA sync for_device
			 * cover max len CPU touch
			 */
			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
				   buf->page_offset;
			sync_len = max(sync_len, pre_len);

			/* For Not XDP_PASS verdict */
			if (IS_ERR(skb)) {
				unsigned int xdp_res = -PTR_ERR(skb);

				if (xdp_res & STMMAC_XDP_CONSUMED) {
					page_pool_put_page(rx_q->page_pool,
							   virt_to_head_page(ctx.xdp.data),
							   sync_len, true);
					buf->page = NULL;
					rx_dropped++;

					/* Clear skb as it was set as
					 * status by XDP program.
					 */
					skb = NULL;

					if (unlikely((status & rx_not_ls)))
						goto read_again;

					count++;
					continue;
				} else if (xdp_res & (STMMAC_XDP_TX |
						      STMMAC_XDP_REDIRECT)) {
					xdp_status |= xdp_res;
					buf->page = NULL;
					skb = NULL;
					count++;
					continue;
				}
			}
		}

		if (!skb) {
			/* XDP program may expand or reduce tail */
			buf1_len = ctx.xdp.data_end - ctx.xdp.data;

			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
			if (!skb) {
				rx_dropped++;
				count++;
				goto drain_data;
			}

			/* XDP program may adjust header */
			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
			skb_put(skb, buf1_len);

			/* Data payload copied into SKB, page ready for recycle */
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
		} else if (buf1_len) {
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, dma_dir);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, buf->page_offset, buf1_len,
					priv->dma_conf.dma_buf_sz);

			/* Data payload appended into SKB */
			skb_mark_for_recycle(skb);
			buf->page = NULL;
		}

		if (buf2_len) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
						buf2_len, dma_dir);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, buf2_len,
					priv->dma_conf.dma_buf_sz);

			/* Data payload appended into SKB */
			skb_mark_for_recycle(skb);
			buf->sec_page = NULL;
		}

drain_data:
		if (likely(status & rx_not_ls))
			goto read_again;
		if (!skb)
			continue;

		/* Got entire packet into SKB. Finish it. */

		stmmac_get_rx_hwtstamp(priv, p, np, skb);
		stmmac_rx_vlan(priv->dev, skb);
		skb->protocol = eth_type_trans(skb, priv->dev);

		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
			skb_set_hash(skb, hash, hash_type);

		skb_record_rx_queue(skb, queue);
		napi_gro_receive(&ch->rx_napi, skb);
		skb = NULL;

		rx_packets++;
		rx_bytes += len;
		count++;
	}

	/* Frame continues beyond the budget: stash progress for next poll */
	if (status & rx_not_ls || skb) {
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	stmmac_rx_refill(priv, queue);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	priv->xstats.rx_dropped += rx_dropped;
	priv->xstats.rx_errors += rx_errors;

	return count;
}
55487ac6653aSJeff Kirsher 
stmmac_napi_poll_rx(struct napi_struct * napi,int budget)55494ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
55507ac6653aSJeff Kirsher {
55518fce3331SJose Abreu 	struct stmmac_channel *ch =
55524ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
55538fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
55548070274bSJisheng Zhang 	struct stmmac_rxq_stats *rxq_stats;
55558fce3331SJose Abreu 	u32 chan = ch->index;
55564ccb4585SJose Abreu 	int work_done;
55577ac6653aSJeff Kirsher 
55588070274bSJisheng Zhang 	rxq_stats = &priv->xstats.rxq_stats[chan];
55599680b2abSPetr Tesarik 	u64_stats_update_begin(&rxq_stats->napi_syncp);
55609680b2abSPetr Tesarik 	u64_stats_inc(&rxq_stats->napi.poll);
55619680b2abSPetr Tesarik 	u64_stats_update_end(&rxq_stats->napi_syncp);
5562ce736788SJoao Pinto 
5563132c32eeSOng Boon Leong 	work_done = stmmac_rx(priv, budget, chan);
5564021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5565021bd5e3SJose Abreu 		unsigned long flags;
5566021bd5e3SJose Abreu 
5567021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5568021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5569021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5570021bd5e3SJose Abreu 	}
5571021bd5e3SJose Abreu 
55724ccb4585SJose Abreu 	return work_done;
55734ccb4585SJose Abreu }
5574ce736788SJoao Pinto 
stmmac_napi_poll_tx(struct napi_struct * napi,int budget)55754ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
55764ccb4585SJose Abreu {
55774ccb4585SJose Abreu 	struct stmmac_channel *ch =
55784ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
55794ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
55808070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats;
55814ccb4585SJose Abreu 	u32 chan = ch->index;
55824ccb4585SJose Abreu 	int work_done;
55834ccb4585SJose Abreu 
55848070274bSJisheng Zhang 	txq_stats = &priv->xstats.txq_stats[chan];
55859680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->napi_syncp);
55869680b2abSPetr Tesarik 	u64_stats_inc(&txq_stats->napi.poll);
55879680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->napi_syncp);
55884ccb4585SJose Abreu 
5589132c32eeSOng Boon Leong 	work_done = stmmac_tx_clean(priv, budget, chan);
5590fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
55918fce3331SJose Abreu 
5592021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5593021bd5e3SJose Abreu 		unsigned long flags;
55944ccb4585SJose Abreu 
5595021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5596021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5597021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5598fa0be0a4SJose Abreu 	}
55998fce3331SJose Abreu 
56007ac6653aSJeff Kirsher 	return work_done;
56017ac6653aSJeff Kirsher }
56027ac6653aSJeff Kirsher 
stmmac_napi_poll_rxtx(struct napi_struct * napi,int budget)5603132c32eeSOng Boon Leong static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5604132c32eeSOng Boon Leong {
5605132c32eeSOng Boon Leong 	struct stmmac_channel *ch =
5606132c32eeSOng Boon Leong 		container_of(napi, struct stmmac_channel, rxtx_napi);
5607132c32eeSOng Boon Leong 	struct stmmac_priv *priv = ch->priv_data;
560881d0885dSSong Yoong Siang 	int rx_done, tx_done, rxtx_done;
56098070274bSJisheng Zhang 	struct stmmac_rxq_stats *rxq_stats;
56108070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats;
5611132c32eeSOng Boon Leong 	u32 chan = ch->index;
5612132c32eeSOng Boon Leong 
56138070274bSJisheng Zhang 	rxq_stats = &priv->xstats.rxq_stats[chan];
56149680b2abSPetr Tesarik 	u64_stats_update_begin(&rxq_stats->napi_syncp);
56159680b2abSPetr Tesarik 	u64_stats_inc(&rxq_stats->napi.poll);
56169680b2abSPetr Tesarik 	u64_stats_update_end(&rxq_stats->napi_syncp);
5617133466c3SJisheng Zhang 
56188070274bSJisheng Zhang 	txq_stats = &priv->xstats.txq_stats[chan];
56199680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->napi_syncp);
56209680b2abSPetr Tesarik 	u64_stats_inc(&txq_stats->napi.poll);
56219680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->napi_syncp);
5622132c32eeSOng Boon Leong 
5623132c32eeSOng Boon Leong 	tx_done = stmmac_tx_clean(priv, budget, chan);
5624132c32eeSOng Boon Leong 	tx_done = min(tx_done, budget);
5625132c32eeSOng Boon Leong 
5626132c32eeSOng Boon Leong 	rx_done = stmmac_rx_zc(priv, budget, chan);
5627132c32eeSOng Boon Leong 
562881d0885dSSong Yoong Siang 	rxtx_done = max(tx_done, rx_done);
562981d0885dSSong Yoong Siang 
5630132c32eeSOng Boon Leong 	/* If either TX or RX work is not complete, return budget
5631132c32eeSOng Boon Leong 	 * and keep pooling
5632132c32eeSOng Boon Leong 	 */
563381d0885dSSong Yoong Siang 	if (rxtx_done >= budget)
5634132c32eeSOng Boon Leong 		return budget;
5635132c32eeSOng Boon Leong 
5636132c32eeSOng Boon Leong 	/* all work done, exit the polling mode */
563781d0885dSSong Yoong Siang 	if (napi_complete_done(napi, rxtx_done)) {
5638132c32eeSOng Boon Leong 		unsigned long flags;
5639132c32eeSOng Boon Leong 
5640132c32eeSOng Boon Leong 		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work are complete,
		 * so enable both RX & TX IRQs.
		 */
5644132c32eeSOng Boon Leong 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5645132c32eeSOng Boon Leong 		spin_unlock_irqrestore(&ch->lock, flags);
5646132c32eeSOng Boon Leong 	}
5647132c32eeSOng Boon Leong 
564881d0885dSSong Yoong Siang 	return min(rxtx_done, budget - 1);
5649132c32eeSOng Boon Leong }
5650132c32eeSOng Boon Leong 
/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: the index of the hanging transmit queue
 *  Description: invoked by the networking watchdog when a transmit queue
 *   has been stuck beyond the timeout. Trigger the driver's global error
 *   recovery so the device is reset to a sane state and can transmit again.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	stmmac_global_err(netdev_priv(dev));
}
56667ac6653aSJeff Kirsher 
56677ac6653aSJeff Kirsher /**
566801789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
56697ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
56707ac6653aSJeff Kirsher  *  Description:
56717ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
56727ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
56737ac6653aSJeff Kirsher  *  Return value:
56747ac6653aSJeff Kirsher  *  void.
56757ac6653aSJeff Kirsher  */
stmmac_set_rx_mode(struct net_device * dev)567601789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
56777ac6653aSJeff Kirsher {
56787ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
56797ac6653aSJeff Kirsher 
5680c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
56817ac6653aSJeff Kirsher }
56827ac6653aSJeff Kirsher 
/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  If the interface is running, the DMA descriptor rings are re-allocated
 *  for the new size and the interface is restarted.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	struct stmmac_dma_conf *dma_conf;
	/* Keep the caller's unaligned MTU: this is the value that ends up in
	 * dev->mtu; new_mtu gets rounded up below only for the FIFO check.
	 */
	const int mtu = new_mtu;
	int ret;

	/* Fall back to the HW-reported TX FIFO size if the platform data
	 * does not provide one.
	 */
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* The TX FIFO is shared; validate against the per-queue share */
	txfifosz /= priv->plat->tx_queues_to_use;

	/* Jumbo frames are not supported while an XDP program is attached */
	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
		return -EINVAL;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	if (netif_running(dev)) {
		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
		/* Try to allocate the new DMA conf with the new mtu */
		dma_conf = stmmac_setup_dma_desc(priv, mtu);
		if (IS_ERR(dma_conf)) {
			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
				   mtu);
			return PTR_ERR(dma_conf);
		}

		stmmac_release(dev);

		ret = __stmmac_open(dev, dma_conf);
		if (ret) {
			free_dma_desc_resources(priv, dma_conf);
			kfree(dma_conf);
			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
			return ret;
		}

		/* NOTE(review): __stmmac_open() appears to take over the
		 * descriptor resources held inside dma_conf, so only the
		 * container struct is freed here - confirm against
		 * __stmmac_open()'s contract.
		 */
		kfree(dma_conf);

		stmmac_set_rx_mode(dev);
	}

	dev->mtu = mtu;
	netdev_update_features(dev);

	return 0;
}
57487ac6653aSJeff Kirsher 
/* Adjust the requested netdev features to what platform/HW can actually do:
 * strip checksum offloads the IP cannot provide and record the TSO request.
 */
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* RX checksum offload needs the RX COE engine */
	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	/* No TX COE means no checksum insertion of any kind */
	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && dev->mtu > ETH_DATA_LEN)
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && priv->dma_cap.tsoen)
		priv->tso = !!(features & NETIF_F_TSO);

	return features;
}
57787ac6653aSJeff Kirsher 
/* Apply a new netdev feature set: update the RX checksum engine state and,
 * when the HW has the Split Header capability, re-program SPH per RX channel.
 */
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case of csum is supporting */
	priv->hw->rx_csum = (features & NETIF_F_RXCSUM) ?
			    priv->plat->rx_coe : 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	if (priv->sph_cap) {
		bool sph_en = priv->sph && priv->hw->rx_csum > 0;
		u32 ch;

		for (ch = 0; ch < priv->plat->rx_queues_to_use; ch++)
			stmmac_enable_sph(priv, priv->ioaddr, sph_en, ch);
	}

	return 0;
}
5804d2afb5bdSGiuseppe CAVALLARO 
/* Advance the Frame Preemption (FPE) handshake state machine from the IRQ
 * status bits: track the local and link-partner verify/response mPacket
 * events and schedule the FPE workqueue task to continue the handshake.
 */
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	/* Nothing to do without a decoded event or with handshake disabled */
	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
		return;

	/* If LP has sent verify mPacket, LP is FPE capable */
	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
		/* Never regress a state that already advanced past CAPABLE */
		if (*lp_state < FPE_STATE_CAPABLE)
			*lp_state = FPE_STATE_CAPABLE;

		/* If user has requested FPE enable, quickly response */
		if (*hs_enable)
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						fpe_cfg,
						MPACKET_RESPONSE);
	}

	/* If Local has sent verify mPacket, Local is FPE capable */
	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
		if (*lo_state < FPE_STATE_CAPABLE)
			*lo_state = FPE_STATE_CAPABLE;
	}

	/* If LP has sent response mPacket, LP is entering FPE ON */
	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
		*lp_state = FPE_STATE_ENTERING_ON;

	/* If Local has sent response mPacket, Local is entering FPE ON */
	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
		*lo_state = FPE_STATE_ENTERING_ON;

	/* Queue the handshake task unless teardown is in progress or the
	 * task is already scheduled (__FPE_TASK_SCHED acts as the latch).
	 */
	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
	    priv->fpe_wq) {
		queue_work(priv->fpe_wq, &priv->fpe_task);
	}
}
58475a558611SOng Boon Leong 
/* Service the interrupt causes shared by all IRQ lines: wake-up events,
 * EST and FPE status, core (GMAC/XGMAC) events including the LPI TX path
 * state, per-queue MTL status, PCS link changes and timestamp interrupts.
 */
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	/* Iterate over whichever direction has more queues configured */
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	/* Report a wake-up event when this IRQ is a wake source */
	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* EST (Enhanced Scheduled Traffic) status, when the HW has it */
	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
				      &priv->xstats, tx_cnt);

	/* Frame Preemption events feed the FPE handshake state machine */
	if (priv->dma_cap.fpesel) {
		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
						   priv->dev);

		stmmac_fpe_event_status(priv, status);
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		/* Read the per-queue MTL interrupt status */
		for (queue = 0; queue < queues_count; queue++) {
			status = stmmac_host_mtl_irq_status(priv, priv->hw,
							    queue);
		}

		/* PCS link status */
		if (priv->hw->pcs &&
		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(priv->dev);
			else
				netif_carrier_off(priv->dev);
		}

		stmmac_timestamp_interrupt(priv, priv);
	}
}
590229e6573cSOng Boon Leong 
590329e6573cSOng Boon Leong /**
590429e6573cSOng Boon Leong  *  stmmac_interrupt - main ISR
590529e6573cSOng Boon Leong  *  @irq: interrupt number.
590629e6573cSOng Boon Leong  *  @dev_id: to pass the net device pointer.
590729e6573cSOng Boon Leong  *  Description: this is the main driver interrupt service routine.
590829e6573cSOng Boon Leong  *  It can call:
590929e6573cSOng Boon Leong  *  o DMA service routine (to manage incoming frame reception and transmission
591029e6573cSOng Boon Leong  *    status)
591129e6573cSOng Boon Leong  *  o Core interrupts to manage: remote wake-up, management counter, LPI
591229e6573cSOng Boon Leong  *    interrupts.
591329e6573cSOng Boon Leong  */
stmmac_interrupt(int irq,void * dev_id)591429e6573cSOng Boon Leong static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
591529e6573cSOng Boon Leong {
591629e6573cSOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
591729e6573cSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
591829e6573cSOng Boon Leong 
591929e6573cSOng Boon Leong 	/* Check if adapter is up */
592029e6573cSOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
592129e6573cSOng Boon Leong 		return IRQ_HANDLED;
592229e6573cSOng Boon Leong 
592329e6573cSOng Boon Leong 	/* Check if a fatal error happened */
592429e6573cSOng Boon Leong 	if (stmmac_safety_feat_interrupt(priv))
592529e6573cSOng Boon Leong 		return IRQ_HANDLED;
592629e6573cSOng Boon Leong 
592729e6573cSOng Boon Leong 	/* To handle Common interrupts */
592829e6573cSOng Boon Leong 	stmmac_common_interrupt(priv);
5929d765955dSGiuseppe CAVALLARO 
5930d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
59317ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
59327ac6653aSJeff Kirsher 
59337ac6653aSJeff Kirsher 	return IRQ_HANDLED;
59347ac6653aSJeff Kirsher }
59357ac6653aSJeff Kirsher 
stmmac_mac_interrupt(int irq,void * dev_id)59368532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
59378532f613SOng Boon Leong {
59388532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
59398532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
59408532f613SOng Boon Leong 
59418532f613SOng Boon Leong 	/* Check if adapter is up */
59428532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
59438532f613SOng Boon Leong 		return IRQ_HANDLED;
59448532f613SOng Boon Leong 
59458532f613SOng Boon Leong 	/* To handle Common interrupts */
59468532f613SOng Boon Leong 	stmmac_common_interrupt(priv);
59478532f613SOng Boon Leong 
59488532f613SOng Boon Leong 	return IRQ_HANDLED;
59498532f613SOng Boon Leong }
59508532f613SOng Boon Leong 
stmmac_safety_interrupt(int irq,void * dev_id)59518532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
59528532f613SOng Boon Leong {
59538532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
59548532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
59558532f613SOng Boon Leong 
59568532f613SOng Boon Leong 	/* Check if adapter is up */
59578532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
59588532f613SOng Boon Leong 		return IRQ_HANDLED;
59598532f613SOng Boon Leong 
59608532f613SOng Boon Leong 	/* Check if a fatal error happened */
59618532f613SOng Boon Leong 	stmmac_safety_feat_interrupt(priv);
59628532f613SOng Boon Leong 
59638532f613SOng Boon Leong 	return IRQ_HANDLED;
59648532f613SOng Boon Leong }
59658532f613SOng Boon Leong 
stmmac_msi_intr_tx(int irq,void * data)59668532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
59678532f613SOng Boon Leong {
59688532f613SOng Boon Leong 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
59698531c808SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
59708532f613SOng Boon Leong 	int chan = tx_q->queue_index;
59718532f613SOng Boon Leong 	struct stmmac_priv *priv;
59728532f613SOng Boon Leong 	int status;
59738532f613SOng Boon Leong 
59748531c808SChristian Marangi 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
59758531c808SChristian Marangi 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
59768532f613SOng Boon Leong 
59778532f613SOng Boon Leong 	/* Check if adapter is up */
59788532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
59798532f613SOng Boon Leong 		return IRQ_HANDLED;
59808532f613SOng Boon Leong 
59818532f613SOng Boon Leong 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
59828532f613SOng Boon Leong 
59838532f613SOng Boon Leong 	if (unlikely(status & tx_hard_error_bump_tc)) {
59848532f613SOng Boon Leong 		/* Try to bump up the dma threshold on this failure */
59853a6c12a0SXiaoliang Yang 		stmmac_bump_dma_threshold(priv, chan);
59868532f613SOng Boon Leong 	} else if (unlikely(status == tx_hard_error)) {
59878532f613SOng Boon Leong 		stmmac_tx_err(priv, chan);
59888532f613SOng Boon Leong 	}
59898532f613SOng Boon Leong 
59908532f613SOng Boon Leong 	return IRQ_HANDLED;
59918532f613SOng Boon Leong }
59928532f613SOng Boon Leong 
stmmac_msi_intr_rx(int irq,void * data)59938532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
59948532f613SOng Boon Leong {
59958532f613SOng Boon Leong 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
59968531c808SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
59978532f613SOng Boon Leong 	int chan = rx_q->queue_index;
59988532f613SOng Boon Leong 	struct stmmac_priv *priv;
59998532f613SOng Boon Leong 
60008531c808SChristian Marangi 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
60018531c808SChristian Marangi 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
60028532f613SOng Boon Leong 
60038532f613SOng Boon Leong 	/* Check if adapter is up */
60048532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
60058532f613SOng Boon Leong 		return IRQ_HANDLED;
60068532f613SOng Boon Leong 
60078532f613SOng Boon Leong 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
60088532f613SOng Boon Leong 
60098532f613SOng Boon Leong 	return IRQ_HANDLED;
60108532f613SOng Boon Leong }
60118532f613SOng Boon Leong 
60127ac6653aSJeff Kirsher /**
60137ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
60147ac6653aSJeff Kirsher  *  @dev: Device pointer.
60157ac6653aSJeff Kirsher  *  @rq: An IOCTL specefic structure, that can contain a pointer to
60167ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
60177ac6653aSJeff Kirsher  *  @cmd: IOCTL command
60187ac6653aSJeff Kirsher  *  Description:
601932ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
60207ac6653aSJeff Kirsher  */
stmmac_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)60217ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
60227ac6653aSJeff Kirsher {
602374371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv (dev);
6024891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
60257ac6653aSJeff Kirsher 
60267ac6653aSJeff Kirsher 	if (!netif_running(dev))
60277ac6653aSJeff Kirsher 		return -EINVAL;
60287ac6653aSJeff Kirsher 
6029891434b1SRayagond Kokatanur 	switch (cmd) {
6030891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
6031891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
6032891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
603374371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6034891434b1SRayagond Kokatanur 		break;
6035891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
6036d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
6037d6228b7cSArtem Panfilov 		break;
6038d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
6039d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
6040891434b1SRayagond Kokatanur 		break;
6041891434b1SRayagond Kokatanur 	default:
6042891434b1SRayagond Kokatanur 		break;
6043891434b1SRayagond Kokatanur 	}
60447ac6653aSJeff Kirsher 
60457ac6653aSJeff Kirsher 	return ret;
60467ac6653aSJeff Kirsher }
60477ac6653aSJeff Kirsher 
stmmac_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)60484dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
60494dbbe8ddSJose Abreu 				    void *cb_priv)
60504dbbe8ddSJose Abreu {
60514dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
60524dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
60534dbbe8ddSJose Abreu 
6054425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6055425eabddSJose Abreu 		return ret;
6056425eabddSJose Abreu 
6057bba2556eSOng Boon Leong 	__stmmac_disable_all_queues(priv);
60584dbbe8ddSJose Abreu 
60594dbbe8ddSJose Abreu 	switch (type) {
60604dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
60614dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
60624dbbe8ddSJose Abreu 		break;
6063425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
6064425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6065425eabddSJose Abreu 		break;
60664dbbe8ddSJose Abreu 	default:
60674dbbe8ddSJose Abreu 		break;
60684dbbe8ddSJose Abreu 	}
60694dbbe8ddSJose Abreu 
60704dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
60714dbbe8ddSJose Abreu 	return ret;
60724dbbe8ddSJose Abreu }
60734dbbe8ddSJose Abreu 
/* List of flow-block callbacks, passed to flow_block_cb_setup_simple() */
static LIST_HEAD(stmmac_block_cb_list);
/**
 *  stmmac_setup_tc - dispatch a tc (traffic control) offload request
 *  @ndev: net device pointer
 *  @type: kind of tc setup being requested
 *  @type_data: type-specific configuration payload
 *  Description: routes each supported offload (capability query, flow
 *  blocks, CBS, taprio, ETF) to the matching stmmac_tc_* handler;
 *  anything else is rejected with -EOPNOTSUPP.
 */
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_QUERY_CAPS:
		return stmmac_tc_query_caps(priv, priv, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
60994dbbe8ddSJose Abreu 
stmmac_select_queue(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)61004993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
61014993e5b3SJose Abreu 			       struct net_device *sb_dev)
61024993e5b3SJose Abreu {
6103b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
6104b7766206SJose Abreu 
6105b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
61064993e5b3SJose Abreu 		/*
6107b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
61084993e5b3SJose Abreu 		 * capable Queues. Let's use always the Queue 0
6109b7766206SJose Abreu 		 * because if TSO/USO is supported then at least this
61104993e5b3SJose Abreu 		 * one will be capable.
61114993e5b3SJose Abreu 		 */
61124993e5b3SJose Abreu 		return 0;
61134993e5b3SJose Abreu 	}
61144993e5b3SJose Abreu 
61154993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
61164993e5b3SJose Abreu }
61174993e5b3SJose Abreu 
stmmac_set_mac_address(struct net_device * ndev,void * addr)6118a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6119a830405eSBhadram Varka {
6120a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
6121a830405eSBhadram Varka 	int ret = 0;
6122a830405eSBhadram Varka 
612385648865SMinghao Chi 	ret = pm_runtime_resume_and_get(priv->device);
612485648865SMinghao Chi 	if (ret < 0)
61254691ffb1SJoakim Zhang 		return ret;
61264691ffb1SJoakim Zhang 
6127a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
6128a830405eSBhadram Varka 	if (ret)
61294691ffb1SJoakim Zhang 		goto set_mac_error;
6130a830405eSBhadram Varka 
6131c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6132a830405eSBhadram Varka 
61334691ffb1SJoakim Zhang set_mac_error:
61344691ffb1SJoakim Zhang 	pm_runtime_put(priv->device);
61354691ffb1SJoakim Zhang 
6136a830405eSBhadram Varka 	return ret;
6137a830405eSBhadram Varka }
6138a830405eSBhadram Varka 
#ifdef CONFIG_DEBUG_FS
/* debugfs parent directory for the stmmac entries */
static struct dentry *stmmac_fs_dir;
/* Dump a descriptor ring to a seq_file, one descriptor per line with its
 * DMA address and the four raw (le32) descriptor words. @extend_desc picks
 * the extended-descriptor layout over the basic one.
 */
static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	struct dma_extended_desc *ep = head;
	struct dma_desc *p = head;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			dma_addr = dma_phy_addr + i * sizeof(*ep);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			dma_addr = dma_phy_addr + i * sizeof(*p);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}
61717ac29055SGiuseppe CAVALLARO 
/* debugfs show handler: dump every RX and TX descriptor ring of the
 * interface. Nothing is printed while the interface is down.
 */
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		/* Note: TX rings with TBS available are not dumped in the
		 * basic-descriptor case.
		 */
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
62187ac29055SGiuseppe CAVALLARO 
/* seq_file show handler for the "dma_cap" debugfs entry: pretty-print
 * the DMA HW capability bits cached in priv->dma_cap (read from the
 * controller's HW feature registers at probe time).
 */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	/* Decoding table for the XGMAC timestamp source field (tssrc) */
	static const char * const dwxgmac_timestamp_source[] = {
		"None",
		"Internal",
		"External",
		"Both",
	};
	/* Decoding table for the 3-bit safety-features field (asp) */
	static const char * const dwxgmac_safety_feature_desc[] = {
		"No",
		"All Safety Features with ECC and Parity",
		"All Safety Features without ECC or Parity",
		"All Safety Features with Parity Only",
		"ECC Only",
		"UNDEFINED",
		"UNDEFINED",
		"UNDEFINED",
	};
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	/* XGMAC reports multi_addr as a count of extra MAC address
	 * registers; other cores expose it as a flag plus hash filtering.
	 */
	if (priv->plat->has_xgmac) {
		seq_printf(seq,
			   "\tNumber of Additional MAC address registers: %d\n",
			   priv->dma_cap.multi_addr);
	} else {
		seq_printf(seq, "\tHash Filter: %s\n",
			   (priv->dma_cap.hash_filter) ? "Y" : "N");
		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
			   (priv->dma_cap.multi_addr) ? "Y" : "N");
	}
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	if (priv->plat->has_xgmac)
		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* Newer cores (>= 4.00, and XGMAC) have a single RX COE bit;
	 * older ones distinguish type1/type2 and the FIFO-size bit.
	 */
	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
	    priv->plat->has_xgmac) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	}
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	/* Several fields below are log2-encoded in HW; decode with BIT() */
	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.host_dma_width);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
		   priv->dma_cap.tbs_ch_num);
	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
		   priv->dma_cap.sgfsel ? "Y" : "N");
	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
		   BIT(priv->dma_cap.ttsfd) >> 1);
	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
		   priv->dma_cap.numtc);
	seq_printf(seq, "\tDCB Feature: %s\n",
		   priv->dma_cap.dcben ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
		   priv->dma_cap.advthword ? "Y" : "N");
	seq_printf(seq, "\tPTP Offload: %s\n",
		   priv->dma_cap.ptoen ? "Y" : "N");
	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
		   priv->dma_cap.osten ? "Y" : "N");
	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
		   priv->dma_cap.pfcen ? "Y" : "N");
	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
		   BIT(priv->dma_cap.frpes) << 6);
	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
		   BIT(priv->dma_cap.frpbs) << 6);
	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
		   priv->dma_cap.frppipe_num);
	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
		   priv->dma_cap.nrvf_num ?
		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
	seq_printf(seq, "\tDepth of GCL: %lu\n",
		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
		   priv->dma_cap.cbtisel ? "Y" : "N");
	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
		   priv->dma_cap.aux_snapshot_n);
	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
		   priv->dma_cap.pou_ost_en ? "Y" : "N");
	seq_printf(seq, "\tEnhanced DMA: %s\n",
		   priv->dma_cap.edma ? "Y" : "N");
	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
		   priv->dma_cap.ediffc ? "Y" : "N");
	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
		   priv->dma_cap.vxn ? "Y" : "N");
	seq_printf(seq, "\tDebug Memory Interface: %s\n",
		   priv->dma_cap.dbgmem ? "Y" : "N");
	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6392e7434821SGiuseppe CAVALLARO 
6393481a7d15SJiping Ma /* Use network device events to rename debugfs file entries.
6394481a7d15SJiping Ma  */
stmmac_device_event(struct notifier_block * unused,unsigned long event,void * ptr)6395481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused,
6396481a7d15SJiping Ma 			       unsigned long event, void *ptr)
6397481a7d15SJiping Ma {
6398481a7d15SJiping Ma 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6399481a7d15SJiping Ma 	struct stmmac_priv *priv = netdev_priv(dev);
6400481a7d15SJiping Ma 
6401481a7d15SJiping Ma 	if (dev->netdev_ops != &stmmac_netdev_ops)
6402481a7d15SJiping Ma 		goto done;
6403481a7d15SJiping Ma 
6404481a7d15SJiping Ma 	switch (event) {
6405481a7d15SJiping Ma 	case NETDEV_CHANGENAME:
6406481a7d15SJiping Ma 		if (priv->dbgfs_dir)
6407481a7d15SJiping Ma 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6408481a7d15SJiping Ma 							 priv->dbgfs_dir,
6409481a7d15SJiping Ma 							 stmmac_fs_dir,
6410481a7d15SJiping Ma 							 dev->name);
6411481a7d15SJiping Ma 		break;
6412481a7d15SJiping Ma 	}
6413481a7d15SJiping Ma done:
6414481a7d15SJiping Ma 	return NOTIFY_DONE;
6415481a7d15SJiping Ma }
6416481a7d15SJiping Ma 
/* Netdevice notifier that keeps the per-netdev debugfs directory name
 * in sync with the interface name (see stmmac_device_event()).
 */
static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};
6420481a7d15SJiping Ma 
/* Create the per-netdev debugfs directory (named after the interface)
 * and its "descriptors_status" and "dma_cap" read-only entries.
 */
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* NOTE(review): rtnl presumably serializes against the
	 * NETDEV_CHANGENAME notifier renaming dbgfs_dir — confirm.
	 */
	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}
64407ac29055SGiuseppe CAVALLARO 
stmmac_exit_fs(struct net_device * dev)6441466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
64427ac29055SGiuseppe CAVALLARO {
6443466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
6444466c5ac8SMathieu Olivari 
6445466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
64467ac29055SGiuseppe CAVALLARO }
644750fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
64487ac29055SGiuseppe CAVALLARO 
stmmac_vid_crc32_le(__le16 vid_le)64493cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
64503cd1cfcbSJose Abreu {
64513cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
64523cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
64533cd1cfcbSJose Abreu 	u32 crc = ~0x0;
64543cd1cfcbSJose Abreu 	u32 temp = 0;
64553cd1cfcbSJose Abreu 	int i, bits;
64563cd1cfcbSJose Abreu 
64573cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
64583cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
64593cd1cfcbSJose Abreu 		if ((i % 8) == 0)
64603cd1cfcbSJose Abreu 			data_byte = data[i / 8];
64613cd1cfcbSJose Abreu 
64623cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
64633cd1cfcbSJose Abreu 		crc >>= 1;
64643cd1cfcbSJose Abreu 		data_byte >>= 1;
64653cd1cfcbSJose Abreu 
64663cd1cfcbSJose Abreu 		if (temp)
64673cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
64683cd1cfcbSJose Abreu 	}
64693cd1cfcbSJose Abreu 
64703cd1cfcbSJose Abreu 	return crc;
64713cd1cfcbSJose Abreu }
64723cd1cfcbSJose Abreu 
/* Reprogram the HW VLAN RX filter from the priv->active_vlans bitmap.
 * Builds a 16-bin hash (top 4 bits of the bit-reversed CRC-32 of each
 * VID); without HW VLAN hash support it falls back to matching a single
 * VID (pmatch). Returns 0 or a negative errno.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		/* vid still holds the last VID visited by the loop above */
		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
64973cd1cfcbSJose Abreu 
/* .ndo_vlan_rx_add_vid: record a VLAN ID and reprogram the RX filter.
 * Runtime-resumes the device first; on filter-update failure the bitmap
 * change is rolled back so it keeps mirroring the HW state.
 */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* 802.1AD protocol means an outer (S-)tag: double VLAN */
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		goto err_pm_put;
	}

	/* Cores with dedicated VLAN filter registers also get a HW entry */
	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto err_pm_put;
	}
err_pm_put:
	pm_runtime_put(priv->device);

	return ret;
}
6528dd6a4998SJose Abreu 
/* .ndo_vlan_rx_kill_vid: drop a VLAN ID from the bitmap, remove its HW
 * filter entry (when the core has dedicated VLAN registers) and
 * reprogram the hash/perfect filter from the remaining VIDs.
 */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* 802.1AD protocol means an outer (S-)tag: double VLAN */
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto del_vlan_error;
	}

	ret = stmmac_vlan_update(priv, is_double);

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}
65573cd1cfcbSJose Abreu 
stmmac_bpf(struct net_device * dev,struct netdev_bpf * bpf)65585fabb012SOng Boon Leong static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
65595fabb012SOng Boon Leong {
65605fabb012SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
65615fabb012SOng Boon Leong 
65625fabb012SOng Boon Leong 	switch (bpf->command) {
65635fabb012SOng Boon Leong 	case XDP_SETUP_PROG:
65645fabb012SOng Boon Leong 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6565bba2556eSOng Boon Leong 	case XDP_SETUP_XSK_POOL:
6566bba2556eSOng Boon Leong 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6567bba2556eSOng Boon Leong 					     bpf->xsk.queue_id);
65685fabb012SOng Boon Leong 	default:
65695fabb012SOng Boon Leong 		return -EOPNOTSUPP;
65705fabb012SOng Boon Leong 	}
65715fabb012SOng Boon Leong }
65725fabb012SOng Boon Leong 
/* .ndo_xdp_xmit: transmit a batch of XDP frames on the TX queue mapped
 * to the current CPU. Returns the number of frames queued (stops early
 * at the first frame the driver consumes), -ENETDOWN while the device
 * is going down, or -EINVAL on unsupported flags.
 */
static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	for (i = 0; i < num_frames; i++) {
		int res;

		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	/* Kick the DMA and arm the coalescing timer once per batch */
	if (flags & XDP_XMIT_FLUSH) {
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}
66148b278a5bSOng Boon Leong 
/* Quiesce one RX queue: mask its DMA RX interrupt (under the channel
 * lock), stop the RX DMA channel and free its descriptor resources.
 */
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
6627bba2556eSOng Boon Leong 
/* Re-create and restart one RX queue: allocate and initialize its
 * descriptor ring, program the DMA channel (buffer size comes from the
 * XSK pool when one is attached), start RX DMA and unmask its
 * interrupt. On allocation/init failure the queue is left disabled
 * (only an error is logged).
 */
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	u32 buf_size;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_reset_rx_queue(priv, queue);
	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, rx_q->queue_index);

	/* Tail pointer placed after the buf_alloc_num descriptors that
	 * already have buffers attached.
	 */
	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
			     sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
			       rx_q->rx_tail_addr, rx_q->queue_index);

	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
		/* XSK path: DMA buffer size is dictated by the pool */
		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      buf_size,
				      rx_q->queue_index);
	} else {
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      priv->dma_conf.dma_buf_sz,
				      rx_q->queue_index);
	}

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
6677bba2556eSOng Boon Leong 
/* Quiesce one TX queue: mask its DMA TX interrupt (under the channel
 * lock), stop the TX DMA channel and free its descriptor resources.
 */
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
6690132c32eeSOng Boon Leong 
/* Re-create and restart one TX queue: allocate and initialize its
 * descriptor ring, program the DMA channel (enabling TBS when the
 * queue has it available), set the tail pointer, start TX DMA and
 * unmask its interrupt. On allocation/init failure the queue is left
 * disabled (only an error is logged).
 */
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_reset_tx_queue(priv, queue);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, tx_q->queue_index);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);

	/* Empty ring: tail starts at the ring base */
	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
			       tx_q->tx_tail_addr, tx_q->queue_index);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
6730132c32eeSOng Boon Leong 
/* Stop the whole data path for an XDP reconfiguration: quiesce TX and
 * NAPI, cancel the per-queue TX coalescing timers, release IRQs, stop
 * all DMA, free every descriptor ring and disable the MAC.
 */
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Ensure tx function is not running */
	netif_tx_disable(dev);

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}
6763ac746c85SOng Boon Leong 
stmmac_xdp_open(struct net_device * dev)6764ac746c85SOng Boon Leong int stmmac_xdp_open(struct net_device *dev)
6765ac746c85SOng Boon Leong {
6766ac746c85SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6767ac746c85SOng Boon Leong 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6768ac746c85SOng Boon Leong 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6769ac746c85SOng Boon Leong 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6770ac746c85SOng Boon Leong 	struct stmmac_rx_queue *rx_q;
6771ac746c85SOng Boon Leong 	struct stmmac_tx_queue *tx_q;
6772ac746c85SOng Boon Leong 	u32 buf_size;
6773ac746c85SOng Boon Leong 	bool sph_en;
6774ac746c85SOng Boon Leong 	u32 chan;
6775ac746c85SOng Boon Leong 	int ret;
6776ac746c85SOng Boon Leong 
6777ba39b344SChristian Marangi 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6778ac746c85SOng Boon Leong 	if (ret < 0) {
6779ac746c85SOng Boon Leong 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6780ac746c85SOng Boon Leong 			   __func__);
6781ac746c85SOng Boon Leong 		goto dma_desc_error;
6782ac746c85SOng Boon Leong 	}
6783ac746c85SOng Boon Leong 
6784ba39b344SChristian Marangi 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6785ac746c85SOng Boon Leong 	if (ret < 0) {
6786ac746c85SOng Boon Leong 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6787ac746c85SOng Boon Leong 			   __func__);
6788ac746c85SOng Boon Leong 		goto init_error;
6789ac746c85SOng Boon Leong 	}
6790ac746c85SOng Boon Leong 
679124e3fce0SSong Yoong Siang 	stmmac_reset_queues_param(priv);
679224e3fce0SSong Yoong Siang 
6793ac746c85SOng Boon Leong 	/* DMA CSR Channel configuration */
6794087a7b94SVincent Whitchurch 	for (chan = 0; chan < dma_csr_ch; chan++) {
6795ac746c85SOng Boon Leong 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6796087a7b94SVincent Whitchurch 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6797087a7b94SVincent Whitchurch 	}
6798ac746c85SOng Boon Leong 
6799ac746c85SOng Boon Leong 	/* Adjust Split header */
6800ac746c85SOng Boon Leong 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6801ac746c85SOng Boon Leong 
6802ac746c85SOng Boon Leong 	/* DMA RX Channel Configuration */
6803ac746c85SOng Boon Leong 	for (chan = 0; chan < rx_cnt; chan++) {
68048531c808SChristian Marangi 		rx_q = &priv->dma_conf.rx_queue[chan];
6805ac746c85SOng Boon Leong 
6806ac746c85SOng Boon Leong 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6807ac746c85SOng Boon Leong 				    rx_q->dma_rx_phy, chan);
6808ac746c85SOng Boon Leong 
6809ac746c85SOng Boon Leong 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6810ac746c85SOng Boon Leong 				     (rx_q->buf_alloc_num *
6811ac746c85SOng Boon Leong 				      sizeof(struct dma_desc));
6812ac746c85SOng Boon Leong 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6813ac746c85SOng Boon Leong 				       rx_q->rx_tail_addr, chan);
6814ac746c85SOng Boon Leong 
6815ac746c85SOng Boon Leong 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6816ac746c85SOng Boon Leong 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6817ac746c85SOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6818ac746c85SOng Boon Leong 					      buf_size,
6819ac746c85SOng Boon Leong 					      rx_q->queue_index);
6820ac746c85SOng Boon Leong 		} else {
6821ac746c85SOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
68228531c808SChristian Marangi 					      priv->dma_conf.dma_buf_sz,
6823ac746c85SOng Boon Leong 					      rx_q->queue_index);
6824ac746c85SOng Boon Leong 		}
6825ac746c85SOng Boon Leong 
6826ac746c85SOng Boon Leong 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6827ac746c85SOng Boon Leong 	}
6828ac746c85SOng Boon Leong 
6829ac746c85SOng Boon Leong 	/* DMA TX Channel Configuration */
6830ac746c85SOng Boon Leong 	for (chan = 0; chan < tx_cnt; chan++) {
68318531c808SChristian Marangi 		tx_q = &priv->dma_conf.tx_queue[chan];
6832ac746c85SOng Boon Leong 
6833ac746c85SOng Boon Leong 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6834ac746c85SOng Boon Leong 				    tx_q->dma_tx_phy, chan);
6835ac746c85SOng Boon Leong 
6836ac746c85SOng Boon Leong 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6837ac746c85SOng Boon Leong 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6838ac746c85SOng Boon Leong 				       tx_q->tx_tail_addr, chan);
683961da6ac7SOng Boon Leong 
684061da6ac7SOng Boon Leong 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
684161da6ac7SOng Boon Leong 		tx_q->txtimer.function = stmmac_tx_timer;
6842ac746c85SOng Boon Leong 	}
6843ac746c85SOng Boon Leong 
6844ac746c85SOng Boon Leong 	/* Enable the MAC Rx/Tx */
6845ac746c85SOng Boon Leong 	stmmac_mac_set(priv, priv->ioaddr, true);
6846ac746c85SOng Boon Leong 
6847ac746c85SOng Boon Leong 	/* Start Rx & Tx DMA Channels */
6848ac746c85SOng Boon Leong 	stmmac_start_all_dma(priv);
6849ac746c85SOng Boon Leong 
6850ac746c85SOng Boon Leong 	ret = stmmac_request_irq(dev);
6851ac746c85SOng Boon Leong 	if (ret)
6852ac746c85SOng Boon Leong 		goto irq_error;
6853ac746c85SOng Boon Leong 
6854ac746c85SOng Boon Leong 	/* Enable NAPI process*/
6855ac746c85SOng Boon Leong 	stmmac_enable_all_queues(priv);
6856ac746c85SOng Boon Leong 	netif_carrier_on(dev);
6857ac746c85SOng Boon Leong 	netif_tx_start_all_queues(dev);
6858087a7b94SVincent Whitchurch 	stmmac_enable_all_dma_irq(priv);
6859ac746c85SOng Boon Leong 
6860ac746c85SOng Boon Leong 	return 0;
6861ac746c85SOng Boon Leong 
6862ac746c85SOng Boon Leong irq_error:
6863ac746c85SOng Boon Leong 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
68648531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6865ac746c85SOng Boon Leong 
6866ac746c85SOng Boon Leong 	stmmac_hw_teardown(dev);
6867ac746c85SOng Boon Leong init_error:
6868ba39b344SChristian Marangi 	free_dma_desc_resources(priv, &priv->dma_conf);
6869ac746c85SOng Boon Leong dma_desc_error:
6870ac746c85SOng Boon Leong 	return ret;
6871ac746c85SOng Boon Leong }
6872ac746c85SOng Boon Leong 
/**
 * stmmac_xsk_wakeup - AF_XDP wakeup callback (.ndo_xsk_wakeup)
 * @dev: network device
 * @queue: queue index to wake
 * @flags: XDP_WAKEUP_* flags (unused here)
 *
 * Validates that the interface is up, XDP is enabled, the queue index is
 * valid for both RX and TX, and an XSK pool is bound; then kicks the
 * combined rxtx NAPI context if it is not already scheduled.
 *
 * Return: 0 on success, -ENETDOWN or -EINVAL on failed validation.
 */
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_channel *ch;
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -EINVAL;

	/* The channel must exist on both the RX and the TX side */
	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->dma_conf.rx_queue[queue];
	tx_q = &priv->dma_conf.tx_queue[queue];
	ch = &priv->channel[queue];

	/* Nothing to wake if no XSK pool is bound in either direction */
	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}
6908bba2556eSOng Boon Leong 
stmmac_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats)6909133466c3SJisheng Zhang static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6910133466c3SJisheng Zhang {
6911133466c3SJisheng Zhang 	struct stmmac_priv *priv = netdev_priv(dev);
6912133466c3SJisheng Zhang 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6913133466c3SJisheng Zhang 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6914133466c3SJisheng Zhang 	unsigned int start;
6915133466c3SJisheng Zhang 	int q;
6916133466c3SJisheng Zhang 
6917133466c3SJisheng Zhang 	for (q = 0; q < tx_cnt; q++) {
69188070274bSJisheng Zhang 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6919133466c3SJisheng Zhang 		u64 tx_packets;
6920133466c3SJisheng Zhang 		u64 tx_bytes;
6921133466c3SJisheng Zhang 
6922133466c3SJisheng Zhang 		do {
69239680b2abSPetr Tesarik 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
69249680b2abSPetr Tesarik 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
69259680b2abSPetr Tesarik 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
69269680b2abSPetr Tesarik 		do {
69279680b2abSPetr Tesarik 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
69289680b2abSPetr Tesarik 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
69299680b2abSPetr Tesarik 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
6930133466c3SJisheng Zhang 
6931133466c3SJisheng Zhang 		stats->tx_packets += tx_packets;
6932133466c3SJisheng Zhang 		stats->tx_bytes += tx_bytes;
6933133466c3SJisheng Zhang 	}
6934133466c3SJisheng Zhang 
6935133466c3SJisheng Zhang 	for (q = 0; q < rx_cnt; q++) {
69368070274bSJisheng Zhang 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6937133466c3SJisheng Zhang 		u64 rx_packets;
6938133466c3SJisheng Zhang 		u64 rx_bytes;
6939133466c3SJisheng Zhang 
6940133466c3SJisheng Zhang 		do {
69419680b2abSPetr Tesarik 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
69429680b2abSPetr Tesarik 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
69439680b2abSPetr Tesarik 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
69449680b2abSPetr Tesarik 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
6945133466c3SJisheng Zhang 
6946133466c3SJisheng Zhang 		stats->rx_packets += rx_packets;
6947133466c3SJisheng Zhang 		stats->rx_bytes += rx_bytes;
6948133466c3SJisheng Zhang 	}
6949133466c3SJisheng Zhang 
6950133466c3SJisheng Zhang 	stats->rx_dropped = priv->xstats.rx_dropped;
6951133466c3SJisheng Zhang 	stats->rx_errors = priv->xstats.rx_errors;
6952133466c3SJisheng Zhang 	stats->tx_dropped = priv->xstats.tx_dropped;
6953133466c3SJisheng Zhang 	stats->tx_errors = priv->xstats.tx_errors;
6954133466c3SJisheng Zhang 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6955133466c3SJisheng Zhang 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6956133466c3SJisheng Zhang 	stats->rx_length_errors = priv->xstats.rx_length;
6957133466c3SJisheng Zhang 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6958133466c3SJisheng Zhang 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6959133466c3SJisheng Zhang 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6960133466c3SJisheng Zhang }
6961133466c3SJisheng Zhang 
69627ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
69637ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
69647ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
69657ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
69667ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
69677ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
6968d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
696901789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
69707ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
6971a7605370SArnd Bergmann 	.ndo_eth_ioctl = stmmac_ioctl,
6972133466c3SJisheng Zhang 	.ndo_get_stats64 = stmmac_get_stats64,
69734dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
69744993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
6975a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
69763cd1cfcbSJose Abreu 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
69773cd1cfcbSJose Abreu 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
69785fabb012SOng Boon Leong 	.ndo_bpf = stmmac_bpf,
69798b278a5bSOng Boon Leong 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6980bba2556eSOng Boon Leong 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
69817ac6653aSJeff Kirsher };
69827ac6653aSJeff Kirsher 
stmmac_reset_subtask(struct stmmac_priv * priv)698334877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
698434877a15SJose Abreu {
698534877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
698634877a15SJose Abreu 		return;
698734877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
698834877a15SJose Abreu 		return;
698934877a15SJose Abreu 
699034877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
699134877a15SJose Abreu 
699234877a15SJose Abreu 	rtnl_lock();
699334877a15SJose Abreu 	netif_trans_update(priv->dev);
699434877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
699534877a15SJose Abreu 		usleep_range(1000, 2000);
699634877a15SJose Abreu 
699734877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
699834877a15SJose Abreu 	dev_close(priv->dev);
699900f54e68SPetr Machata 	dev_open(priv->dev, NULL);
700034877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
700134877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
700234877a15SJose Abreu 	rtnl_unlock();
700334877a15SJose Abreu }
700434877a15SJose Abreu 
stmmac_service_task(struct work_struct * work)700534877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
700634877a15SJose Abreu {
700734877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
700834877a15SJose Abreu 			service_task);
700934877a15SJose Abreu 
701034877a15SJose Abreu 	stmmac_reset_subtask(priv);
701134877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
701234877a15SJose Abreu }
701334877a15SJose Abreu 
70147ac6653aSJeff Kirsher /**
7015cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
701632ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
7017732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
7018732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
7019732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to setup either enhanced or
7020732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
7021cf3f047bSGiuseppe CAVALLARO  */
stmmac_hw_init(struct stmmac_priv * priv)7022cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
7023cf3f047bSGiuseppe CAVALLARO {
70245f0456b4SJose Abreu 	int ret;
7025cf3f047bSGiuseppe CAVALLARO 
70269f93ac8dSLABBE Corentin 	/* dwmac-sun8i only work in chain mode */
7027d8daff28SBartosz Golaszewski 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
70289f93ac8dSLABBE Corentin 		chain_mode = 1;
70295f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
70309f93ac8dSLABBE Corentin 
70315f0456b4SJose Abreu 	/* Initialize HW Interface */
70325f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
70335f0456b4SJose Abreu 	if (ret)
70345f0456b4SJose Abreu 		return ret;
70354a7d666aSGiuseppe CAVALLARO 
7036cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
7037cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7038cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
703938ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
7040cf3f047bSGiuseppe CAVALLARO 
7041cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields: e.g.
7042cf3f047bSGiuseppe CAVALLARO 		 * enh_desc, tx_coe (e.g. that are passed through the
7043cf3f047bSGiuseppe CAVALLARO 		 * platform) with the values from the HW capability
7044cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
7045cf3f047bSGiuseppe CAVALLARO 		 */
7046cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
70475a9b876eSLing Pei Lee 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7048fd1d62d8SBartosz Golaszewski 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
70493fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
7050b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
7051b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
7052b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7053b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
7054b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
7055b8ef7020SBiao Huang 		}
705638912bdbSDeepak SIKRI 
7057a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
7058a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
7059a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
7060a8df35d4SEzequiel Garcia 		else
706138912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7062a8df35d4SEzequiel Garcia 
7063f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
7064f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
706538912bdbSDeepak SIKRI 
706638912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
706738912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
706838912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
706938912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
707038912bdbSDeepak SIKRI 
707138ddc59dSLABBE Corentin 	} else {
707238ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
707338ddc59dSLABBE Corentin 	}
7074cf3f047bSGiuseppe CAVALLARO 
7075d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
7076d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
707738ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7078f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
707938ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7080d2afb5bdSGiuseppe CAVALLARO 	}
7081cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
708238ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
7083cf3f047bSGiuseppe CAVALLARO 
7084cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
708538ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7086cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
7087cf3f047bSGiuseppe CAVALLARO 	}
7088cf3f047bSGiuseppe CAVALLARO 
7089f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
709038ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
7091f748be53SAlexandre TORGUE 
7092fc02152bSBartosz Golaszewski 	priv->hw->vlan_fail_q_en =
7093fc02152bSBartosz Golaszewski 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7094e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7095e0f9956aSChuah, Kim Tatt 
70967cfde0afSJose Abreu 	/* Run HW quirks, if any */
70977cfde0afSJose Abreu 	if (priv->hwif_quirks) {
70987cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
70997cfde0afSJose Abreu 		if (ret)
71007cfde0afSJose Abreu 			return ret;
71017cfde0afSJose Abreu 	}
71027cfde0afSJose Abreu 
71033b509466SJose Abreu 	/* Rx Watchdog is available in the COREs newer than the 3.40.
71043b509466SJose Abreu 	 * In some case, for example on bugged HW this feature
71053b509466SJose Abreu 	 * has to be disable and this can be done by passing the
71063b509466SJose Abreu 	 * riwt_off field from the platform.
71073b509466SJose Abreu 	 */
71083b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
71093b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
71103b509466SJose Abreu 		priv->use_riwt = 1;
71113b509466SJose Abreu 		dev_info(priv->device,
71123b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
71133b509466SJose Abreu 	}
71143b509466SJose Abreu 
7115c24602efSGiuseppe CAVALLARO 	return 0;
7116cf3f047bSGiuseppe CAVALLARO }
7117cf3f047bSGiuseppe CAVALLARO 
stmmac_napi_add(struct net_device * dev)71180366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev)
71190366f7e0SOng Boon Leong {
71200366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
71210366f7e0SOng Boon Leong 	u32 queue, maxq;
71220366f7e0SOng Boon Leong 
71230366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
71240366f7e0SOng Boon Leong 
71250366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
71260366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
71270366f7e0SOng Boon Leong 
71280366f7e0SOng Boon Leong 		ch->priv_data = priv;
71290366f7e0SOng Boon Leong 		ch->index = queue;
71302b94f526SMarek Szyprowski 		spin_lock_init(&ch->lock);
71310366f7e0SOng Boon Leong 
71320366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use) {
7133b48b89f9SJakub Kicinski 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
71340366f7e0SOng Boon Leong 		}
71350366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use) {
713616d083e2SJakub Kicinski 			netif_napi_add_tx(dev, &ch->tx_napi,
713716d083e2SJakub Kicinski 					  stmmac_napi_poll_tx);
71380366f7e0SOng Boon Leong 		}
7139132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
7140132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
7141132c32eeSOng Boon Leong 			netif_napi_add(dev, &ch->rxtx_napi,
7142b48b89f9SJakub Kicinski 				       stmmac_napi_poll_rxtx);
7143132c32eeSOng Boon Leong 		}
71440366f7e0SOng Boon Leong 	}
71450366f7e0SOng Boon Leong }
71460366f7e0SOng Boon Leong 
stmmac_napi_del(struct net_device * dev)71470366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev)
71480366f7e0SOng Boon Leong {
71490366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
71500366f7e0SOng Boon Leong 	u32 queue, maxq;
71510366f7e0SOng Boon Leong 
71520366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
71530366f7e0SOng Boon Leong 
71540366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
71550366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
71560366f7e0SOng Boon Leong 
71570366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use)
71580366f7e0SOng Boon Leong 			netif_napi_del(&ch->rx_napi);
71590366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use)
71600366f7e0SOng Boon Leong 			netif_napi_del(&ch->tx_napi);
7161132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
7162132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
7163132c32eeSOng Boon Leong 			netif_napi_del(&ch->rxtx_napi);
7164132c32eeSOng Boon Leong 		}
71650366f7e0SOng Boon Leong 	}
71660366f7e0SOng Boon Leong }
71670366f7e0SOng Boon Leong 
/**
 * stmmac_reinit_queues - change the number of RX/TX queues at runtime
 * @dev: network device
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 *
 * Closes the interface if it is running, re-registers the NAPI contexts for
 * the new queue counts, refreshes the default RSS table (unless user
 * configured), updates phylink capabilities and reopens the interface.
 *
 * Return: 0 on success or the error from stmmac_open().
 */
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	bool was_running = netif_running(dev);
	int ret = 0;
	int max_speed;
	int i;

	if (was_running)
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;
	/* Rebuild the default RSS indirection table for the new RX count,
	 * but never clobber a user-supplied table.
	 */
	if (!netif_is_rxfh_configured(dev))
		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
									rx_cnt);

	stmmac_mac_phylink_get_caps(priv);

	priv->phylink_config.mac_capabilities = priv->hw->link.caps;

	max_speed = priv->plat->max_speed;
	if (max_speed)
		phylink_limit_mac_speed(&priv->phylink_config, max_speed);

	stmmac_napi_add(dev);

	if (was_running)
		ret = stmmac_open(dev);

	return ret;
}
72010366f7e0SOng Boon Leong 
/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: network device
 * @rx_size: new number of RX descriptors per ring
 * @tx_size: new number of TX descriptors per ring
 *
 * Closes the interface if running, records the new ring sizes (picked up
 * on the next descriptor allocation) and reopens the interface.
 *
 * Return: 0 on success or the error from stmmac_open().
 */
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	bool was_running = netif_running(dev);
	int ret = 0;

	if (was_running)
		stmmac_release(dev);

	priv->dma_conf.dma_rx_size = rx_size;
	priv->dma_conf.dma_tx_size = tx_size;

	if (was_running)
		ret = stmmac_open(dev);

	return ret;
}
7218aa042f60SSong, Yoong Siang 
72195a558611SOng Boon Leong #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
stmmac_fpe_lp_task(struct work_struct * work)72205a558611SOng Boon Leong static void stmmac_fpe_lp_task(struct work_struct *work)
72215a558611SOng Boon Leong {
72225a558611SOng Boon Leong 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
72235a558611SOng Boon Leong 						fpe_task);
72245a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
72255a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
72265a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
72275a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
72285a558611SOng Boon Leong 	bool *enable = &fpe_cfg->enable;
72295a558611SOng Boon Leong 	int retries = 20;
72305a558611SOng Boon Leong 
72315a558611SOng Boon Leong 	while (retries-- > 0) {
72325a558611SOng Boon Leong 		/* Bail out immediately if FPE handshake is OFF */
72335a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
72345a558611SOng Boon Leong 			break;
72355a558611SOng Boon Leong 
72365a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_ENTERING_ON &&
72375a558611SOng Boon Leong 		    *lp_state == FPE_STATE_ENTERING_ON) {
72385a558611SOng Boon Leong 			stmmac_fpe_configure(priv, priv->ioaddr,
7239e1fbdef9SJianheng Zhang 					     fpe_cfg,
72405a558611SOng Boon Leong 					     priv->plat->tx_queues_to_use,
72415a558611SOng Boon Leong 					     priv->plat->rx_queues_to_use,
72425a558611SOng Boon Leong 					     *enable);
72435a558611SOng Boon Leong 
72445a558611SOng Boon Leong 			netdev_info(priv->dev, "configured FPE\n");
72455a558611SOng Boon Leong 
72465a558611SOng Boon Leong 			*lo_state = FPE_STATE_ON;
72475a558611SOng Boon Leong 			*lp_state = FPE_STATE_ON;
72485a558611SOng Boon Leong 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
72495a558611SOng Boon Leong 			break;
72505a558611SOng Boon Leong 		}
72515a558611SOng Boon Leong 
72525a558611SOng Boon Leong 		if ((*lo_state == FPE_STATE_CAPABLE ||
72535a558611SOng Boon Leong 		     *lo_state == FPE_STATE_ENTERING_ON) &&
72545a558611SOng Boon Leong 		     *lp_state != FPE_STATE_ON) {
72555a558611SOng Boon Leong 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
72565a558611SOng Boon Leong 				    *lo_state, *lp_state);
72575a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7258e1fbdef9SJianheng Zhang 						fpe_cfg,
72595a558611SOng Boon Leong 						MPACKET_VERIFY);
72605a558611SOng Boon Leong 		}
72615a558611SOng Boon Leong 		/* Sleep then retry */
72625a558611SOng Boon Leong 		msleep(500);
72635a558611SOng Boon Leong 	}
72645a558611SOng Boon Leong 
72655a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
72665a558611SOng Boon Leong }
72675a558611SOng Boon Leong 
/**
 * stmmac_fpe_handshake - enable or disable the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to stop it
 *
 * On enable, sends an initial verify mPacket to the link partner; on
 * disable, resets both local and link-partner FPE states to OFF.  Does
 * nothing if the requested state matches the current one.
 */
void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;

	if (fpe_cfg->hs_enable == enable)
		return;

	if (enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr,
					fpe_cfg,
					MPACKET_VERIFY);
	} else {
		fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
		fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
	}

	fpe_cfg->hs_enable = enable;
}
72835a558611SOng Boon Leong 
/**
 * stmmac_xdp_rx_timestamp - XDP metadata kfunc: fetch the RX HW timestamp
 * @_ctx: XDP metadata context (really a struct stmmac_xdp_buff)
 * @timestamp: where to store the timestamp on success
 *
 * Reads the hardware RX timestamp for the frame described by the context's
 * descriptors, applying the CDC error adjustment.
 *
 * Return: 0 with *@timestamp filled in, or -ENODATA if RX timestamping is
 * disabled or no valid timestamp is present.
 */
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
	struct stmmac_priv *priv = ctx->priv;
	struct dma_desc *desc = ctx->desc;
	struct dma_desc *ndesc = ctx->ndesc;
	struct dma_desc *desc_contains_ts = desc;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return -ENODATA;

	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc_contains_ts = ndesc;

	/* Check if timestamp is available */
	if (!stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts))
		return -ENODATA;

	stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
	ns -= priv->plat->cdc_error_adj;
	*timestamp = ns_to_ktime(ns);
	return 0;
}
7310e3f9c3e3SSong Yoong Siang 
/* XDP metadata ops: lets XDP programs read the RX hardware timestamp */
7311e3f9c3e3SSong Yoong Siang static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7312e3f9c3e3SSong Yoong Siang 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7313e3f9c3e3SSong Yoong Siang };
7314e3f9c3e3SSong Yoong Siang 
7315cf3f047bSGiuseppe CAVALLARO /**
7316bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
7317bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
7318ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
7319e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
 * Description: this is the main probe function: it allocates the
 * net_device, sets up the private structure and registers the interface.
73229afec6efSAndy Shevchenko  * Return:
732315ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
73247ac6653aSJeff Kirsher  */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	/* devm-managed: the netdev is freed automatically on probe failure
	 * or driver detach, so no explicit free appears in the unwind path.
	 */
	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	/* Seed the u64_stats sync points before any statistics update */
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
	}

	priv->xstats.pcpu_stats =
		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
	if (!priv->xstats.pcpu_stats)
		return -ENOMEM;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en =
		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);

	/* Copy all IRQ lines handed over by the bus glue (platform/PCI) */
	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	/* Use the MAC address from resources only if one was provided */
	if (!is_zero_ether_addr(res->mac))
		eth_hw_addr_set(priv->dev, res->mac);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq_init;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Initialize Link Partner FPE workqueue */
	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Wait a bit for the reset to take effect */
	udelay(10);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
	 */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_XSK_ZEROCOPY;

	/* Advertise TC offload only if the core accepted the TC setup */
	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen &&
	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_cap = true;
		priv->sph = priv->sph_cap;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* Ideally our host DMA address width is the same as for the
	 * device. However, it may differ and then we have to use our
	 * host DMA width for allocation and the device DMA width for
	 * register handling.
	 */
	if (priv->plat->host_dma_width)
		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
	else
		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;

	if (priv->dma_cap.host_dma_width) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
		if (!ret) {
			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			/* Fall back to a 32-bit DMA mask before giving up */
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.host_dma_width = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	priv->xstats.threshold = tc;

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	ndev->vlan_features |= ndev->features;
	/* TSO doesn't work on VLANs yet */
	ndev->vlan_features &= ~NETIF_F_TSO;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the csr actual
	 * clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	/* Take a usage count so the device stays active during the rest of
	 * probe; dropped by pm_runtime_put() just before returning.
	 */
	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	if (!pm_runtime_enabled(device))
		pm_runtime_enable(device);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err_probe(priv->device, ret,
				      "%s: MDIO bus (id: %d) registration failed\n",
				      __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	if (priv->plat->speed_mode_2500)
		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);

	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
		ret = stmmac_xpcs_setup(priv->mii);
		if (ret)
			goto error_xpcs_setup;
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	if (priv->plat->dump_debug_regs)
		priv->plat->dump_debug_regs(priv->plat->bsp_priv);

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

	/* Unwind in strict reverse order of the setup above */
error_netdev_register:
	phylink_destroy(priv->phylink);
error_xpcs_setup:
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
error_wq_init:
	bitmap_free(priv->af_xdp_zc_qps);

	return ret;
}
7643b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
76447ac6653aSJeff Kirsher 
76457ac6653aSJeff Kirsher /**
76467ac6653aSJeff Kirsher  * stmmac_dvr_remove
7647f4e7bd81SJoachim Eastwood  * @dev: device pointer
76487ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7649bfab27a1SGiuseppe CAVALLARO  * changes the link status, releases the DMA descriptor rings.
76507ac6653aSJeff Kirsher  */
void stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	/* Resume the device so registers stay accessible during teardown;
	 * balanced by pm_runtime_disable()/pm_runtime_put_noidle() below.
	 */
	pm_runtime_get_sync(dev);

	/* Quiesce the hardware before unregistering the interface */
	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
}
7682b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
76837ac6653aSJeff Kirsher 
7684732fdf0eSGiuseppe CAVALLARO /**
7685732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
7686f4e7bd81SJoachim Eastwood  * @dev: device pointer
7687732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device and it is called
7688732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queue, release the resources,
7689732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL), clean and release driver resources.
7690732fdf0eSGiuseppe CAVALLARO  */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	/* Stop the per-queue TX coalescing timers */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Leave LPI state and stop the EEE software timer */
	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	/* phylink calls require rtnl; in the WoL case the link is kept up
	 * (mac_wol=true), otherwise optionally slow the link down first.
	 */
	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->fpe_cfg,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
7755b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
77567ac6653aSJeff Kirsher 
/* Rewind the software bookkeeping of one RX ring to the first descriptor */
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	priv->dma_conf.rx_queue[queue].cur_rx = 0;
	priv->dma_conf.rx_queue[queue].dirty_rx = 0;
}
7764f9ec5723SChristian Marangi 
/* Rewind one TX ring's software state and its BQL/netdev queue counters */
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *txq = &priv->dma_conf.tx_queue[queue];

	/* Forget ring positions and any cached TSO MSS value */
	txq->cur_tx = 0;
	txq->dirty_tx = 0;
	txq->mss = 0;

	/* Reset the byte-queue-limit state of the matching stack queue */
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
7775f9ec5723SChristian Marangi 
7776732fdf0eSGiuseppe CAVALLARO /**
777754139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
7778d0ea5cbdSJesse Brandeburg  * @priv: device pointer
777954139cf3SJoao Pinto  */
stmmac_reset_queues_param(struct stmmac_priv * priv)778054139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
778154139cf3SJoao Pinto {
778254139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7783ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
778454139cf3SJoao Pinto 	u32 queue;
778554139cf3SJoao Pinto 
7786f9ec5723SChristian Marangi 	for (queue = 0; queue < rx_cnt; queue++)
7787f9ec5723SChristian Marangi 		stmmac_reset_rx_queue(priv, queue);
778854139cf3SJoao Pinto 
7789f9ec5723SChristian Marangi 	for (queue = 0; queue < tx_cnt; queue++)
7790f9ec5723SChristian Marangi 		stmmac_reset_tx_queue(priv, queue);
779154139cf3SJoao Pinto }
779254139cf3SJoao Pinto 
779354139cf3SJoao Pinto /**
7794732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
7795f4e7bd81SJoachim Eastwood  * @dev: device pointer
7796732fdf0eSGiuseppe CAVALLARO  * Description: when resume this function is invoked to setup the DMA and CORE
7797732fdf0eSGiuseppe CAVALLARO  * in a usable state.
7798732fdf0eSGiuseppe CAVALLARO  */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	/* Power the SERDES back up now, unless the platform wants it done
	 * only after the PHY reports link up.
	 */
	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	/* phylink calls require rtnl; undo what stmmac_suspend() did */
	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	/* Rebuild rings from scratch: DMA state was lost across suspend */
	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
7869b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
7870ba27ec66SGiuseppe CAVALLARO 
78717ac6653aSJeff Kirsher #ifndef MODULE
stmmac_cmdline_opt(char * str)78727ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
78737ac6653aSJeff Kirsher {
78747ac6653aSJeff Kirsher 	char *opt;
78757ac6653aSJeff Kirsher 
78767ac6653aSJeff Kirsher 	if (!str || !*str)
7877e01b042eSRandy Dunlap 		return 1;
78787ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
7879469d258dSVladimir Oltean 		if (!strncmp(opt, "debug:", 6)) {
7880ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
78817ac6653aSJeff Kirsher 				goto err;
7882469d258dSVladimir Oltean 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7883ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
78847ac6653aSJeff Kirsher 				goto err;
7885469d258dSVladimir Oltean 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7886ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
78877ac6653aSJeff Kirsher 				goto err;
7888469d258dSVladimir Oltean 		} else if (!strncmp(opt, "tc:", 3)) {
7889ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
78907ac6653aSJeff Kirsher 				goto err;
7891469d258dSVladimir Oltean 		} else if (!strncmp(opt, "watchdog:", 9)) {
7892ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
78937ac6653aSJeff Kirsher 				goto err;
7894469d258dSVladimir Oltean 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7895ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
78967ac6653aSJeff Kirsher 				goto err;
7897469d258dSVladimir Oltean 		} else if (!strncmp(opt, "pause:", 6)) {
7898ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
78997ac6653aSJeff Kirsher 				goto err;
7900469d258dSVladimir Oltean 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7901d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
7902d765955dSGiuseppe CAVALLARO 				goto err;
7903469d258dSVladimir Oltean 		} else if (!strncmp(opt, "chain_mode:", 11)) {
79044a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
79054a7d666aSGiuseppe CAVALLARO 				goto err;
79067ac6653aSJeff Kirsher 		}
79077ac6653aSJeff Kirsher 	}
7908e01b042eSRandy Dunlap 	return 1;
79097ac6653aSJeff Kirsher 
79107ac6653aSJeff Kirsher err:
79117ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7912e01b042eSRandy Dunlap 	return 1;
79137ac6653aSJeff Kirsher }
79147ac6653aSJeff Kirsher 
79157ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
7916ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
79176fc0d0f2SGiuseppe Cavallaro 
stmmac_init(void)7918466c5ac8SMathieu Olivari static int __init stmmac_init(void)
7919466c5ac8SMathieu Olivari {
7920466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
7921466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
79228d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
7923466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7924474a31e1SAaro Koskinen 	register_netdevice_notifier(&stmmac_notifier);
7925466c5ac8SMathieu Olivari #endif
7926466c5ac8SMathieu Olivari 
7927466c5ac8SMathieu Olivari 	return 0;
7928466c5ac8SMathieu Olivari }
7929466c5ac8SMathieu Olivari 
/* Module exit: undo the debugfs setup done in stmmac_init(). */
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Unregister the notifier registered in stmmac_init() before
	 * tearing down the debugfs tree it may act on.
	 */
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}
7937466c5ac8SMathieu Olivari 
/* Hook the init/exit routines for both modular and built-in builds. */
module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
7944