14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27ac6653aSJeff Kirsher /*******************************************************************************
37ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
47ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
57ac6653aSJeff Kirsher 
6286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
77ac6653aSJeff Kirsher 
87ac6653aSJeff Kirsher 
97ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   Documentation available at:
127ac6653aSJeff Kirsher 	http://www.stlinux.com
137ac6653aSJeff Kirsher   Support available at:
147ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
157ac6653aSJeff Kirsher *******************************************************************************/
167ac6653aSJeff Kirsher 
176a81c26fSViresh Kumar #include <linux/clk.h>
187ac6653aSJeff Kirsher #include <linux/kernel.h>
197ac6653aSJeff Kirsher #include <linux/interrupt.h>
207ac6653aSJeff Kirsher #include <linux/ip.h>
217ac6653aSJeff Kirsher #include <linux/tcp.h>
227ac6653aSJeff Kirsher #include <linux/skbuff.h>
237ac6653aSJeff Kirsher #include <linux/ethtool.h>
247ac6653aSJeff Kirsher #include <linux/if_ether.h>
257ac6653aSJeff Kirsher #include <linux/crc32.h>
267ac6653aSJeff Kirsher #include <linux/mii.h>
2701789349SJiri Pirko #include <linux/if.h>
287ac6653aSJeff Kirsher #include <linux/if_vlan.h>
297ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
307ac6653aSJeff Kirsher #include <linux/slab.h>
315ec55823SJoakim Zhang #include <linux/pm_runtime.h>
327ac6653aSJeff Kirsher #include <linux/prefetch.h>
33db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
3450fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
357ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
367ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
3750fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
38891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
39eeef2f6bSJose Abreu #include <linux/phylink.h>
40b7766206SJose Abreu #include <linux/udp.h>
415fabb012SOng Boon Leong #include <linux/bpf_trace.h>
424dbbe8ddSJose Abreu #include <net/pkt_cls.h>
43bba2556eSOng Boon Leong #include <net/xdp_sock_drv.h>
44891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
45286a8372SGiuseppe CAVALLARO #include "stmmac.h"
465fabb012SOng Boon Leong #include "stmmac_xdp.h"
47c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
485790cf3cSMathieu Olivari #include <linux/of_mdio.h>
4919d857c9SPhil Reid #include "dwmac1000.h"
507d9e6c5aSJose Abreu #include "dwxgmac2.h"
5142de047dSJose Abreu #include "hwif.h"
527ac6653aSJeff Kirsher 
53a6da2bbbSHolger Assmann /* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
55a6da2bbbSHolger Assmann  * (clock jumps) when changing timestamping settings at runtime.
56a6da2bbbSHolger Assmann  */
57a6da2bbbSHolger Assmann #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58a6da2bbbSHolger Assmann 				 PTP_TCR_TSCTRLSSR)
59a6da2bbbSHolger Assmann 
608d558f02SJose Abreu #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61f748be53SAlexandre TORGUE #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
627ac6653aSJeff Kirsher 
/* Module parameters */
#define TX_TIMEO	5000
/* Transmit watchdog timeout, in milliseconds */
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

/* Message level bitmap; -1 selects the driver default (default_msg_level) */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

/* PHY address on the MDIO bus; -1 means auto-detect */
static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

/* TX/RX wake-up thresholds: a quarter of the configured ring size */
#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

/* XDP verdict flags returned by the RX-side XDP handlers */
#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

/* Pause frame time; clamped to 0..0xffff by stmmac_verify_args() */
static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
/* DMA buffer size; clamped to DEFAULT_BUFSIZE..BUF_SIZE_16KiB by
 * stmmac_verify_args()
 */
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
/* NOTE(review): the parameter description says msec, but STMMAC_LPI_T
 * converts through usecs_to_jiffies() — the units look inconsistent and
 * should be confirmed against how tx_lpi_timer is consumed.
 */
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
1277ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
1288532f613SOng Boon Leong /* For MSI interrupts handling */
1298532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
1308532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
1318532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
1328532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133f9ec5723SChristian Marangi static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134f9ec5723SChristian Marangi static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135f9ec5723SChristian Marangi static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136132c32eeSOng Boon Leong static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137132c32eeSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
1383a6c12a0SXiaoliang Yang static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1393a6c12a0SXiaoliang Yang 					  u32 rxmode, u32 chan);
1407ac6653aSJeff Kirsher 
14150fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
142481a7d15SJiping Ma static const struct net_device_ops stmmac_netdev_ops;
1438d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev);
144466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev);
145bfab27a1SGiuseppe CAVALLARO #endif
146bfab27a1SGiuseppe CAVALLARO 
147d5a05e69SVincent Whitchurch #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
1489125cdd1SGiuseppe CAVALLARO 
1495ec55823SJoakim Zhang int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
1505ec55823SJoakim Zhang {
1515ec55823SJoakim Zhang 	int ret = 0;
1525ec55823SJoakim Zhang 
1535ec55823SJoakim Zhang 	if (enabled) {
1545ec55823SJoakim Zhang 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
1555ec55823SJoakim Zhang 		if (ret)
1565ec55823SJoakim Zhang 			return ret;
1575ec55823SJoakim Zhang 		ret = clk_prepare_enable(priv->plat->pclk);
1585ec55823SJoakim Zhang 		if (ret) {
1595ec55823SJoakim Zhang 			clk_disable_unprepare(priv->plat->stmmac_clk);
1605ec55823SJoakim Zhang 			return ret;
1615ec55823SJoakim Zhang 		}
162b4d45aeeSJoakim Zhang 		if (priv->plat->clks_config) {
163b4d45aeeSJoakim Zhang 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164b4d45aeeSJoakim Zhang 			if (ret) {
165b4d45aeeSJoakim Zhang 				clk_disable_unprepare(priv->plat->stmmac_clk);
166b4d45aeeSJoakim Zhang 				clk_disable_unprepare(priv->plat->pclk);
167b4d45aeeSJoakim Zhang 				return ret;
168b4d45aeeSJoakim Zhang 			}
169b4d45aeeSJoakim Zhang 		}
1705ec55823SJoakim Zhang 	} else {
1715ec55823SJoakim Zhang 		clk_disable_unprepare(priv->plat->stmmac_clk);
1725ec55823SJoakim Zhang 		clk_disable_unprepare(priv->plat->pclk);
173b4d45aeeSJoakim Zhang 		if (priv->plat->clks_config)
174b4d45aeeSJoakim Zhang 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
1755ec55823SJoakim Zhang 	}
1765ec55823SJoakim Zhang 
1775ec55823SJoakim Zhang 	return ret;
1785ec55823SJoakim Zhang }
1795ec55823SJoakim Zhang EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
1805ec55823SJoakim Zhang 
1817ac6653aSJeff Kirsher /**
1827ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
183732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and set a default in case of
184732fdf0eSGiuseppe CAVALLARO  * errors.
1857ac6653aSJeff Kirsher  */
1867ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1877ac6653aSJeff Kirsher {
1887ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1897ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
190d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
191d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1927ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1937ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1947ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1957ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1967ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1977ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
198d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
199d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
2007ac6653aSJeff Kirsher }
2017ac6653aSJeff Kirsher 
202bba2556eSOng Boon Leong static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
203c22a3f48SJoao Pinto {
204c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2058fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2068fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
207c22a3f48SJoao Pinto 	u32 queue;
208c22a3f48SJoao Pinto 
2098fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
2108fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
211c22a3f48SJoao Pinto 
212132c32eeSOng Boon Leong 		if (stmmac_xdp_is_enabled(priv) &&
213132c32eeSOng Boon Leong 		    test_bit(queue, priv->af_xdp_zc_qps)) {
214132c32eeSOng Boon Leong 			napi_disable(&ch->rxtx_napi);
215132c32eeSOng Boon Leong 			continue;
216132c32eeSOng Boon Leong 		}
217132c32eeSOng Boon Leong 
2184ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
2194ccb4585SJose Abreu 			napi_disable(&ch->rx_napi);
2204ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
2214ccb4585SJose Abreu 			napi_disable(&ch->tx_napi);
222c22a3f48SJoao Pinto 	}
223c22a3f48SJoao Pinto }
224c22a3f48SJoao Pinto 
225c22a3f48SJoao Pinto /**
226bba2556eSOng Boon Leong  * stmmac_disable_all_queues - Disable all queues
227bba2556eSOng Boon Leong  * @priv: driver private structure
228bba2556eSOng Boon Leong  */
229bba2556eSOng Boon Leong static void stmmac_disable_all_queues(struct stmmac_priv *priv)
230bba2556eSOng Boon Leong {
231bba2556eSOng Boon Leong 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q;
233bba2556eSOng Boon Leong 	u32 queue;
234bba2556eSOng Boon Leong 
235bba2556eSOng Boon Leong 	/* synchronize_rcu() needed for pending XDP buffers to drain */
236bba2556eSOng Boon Leong 	for (queue = 0; queue < rx_queues_cnt; queue++) {
2378531c808SChristian Marangi 		rx_q = &priv->dma_conf.rx_queue[queue];
238bba2556eSOng Boon Leong 		if (rx_q->xsk_pool) {
239bba2556eSOng Boon Leong 			synchronize_rcu();
240bba2556eSOng Boon Leong 			break;
241bba2556eSOng Boon Leong 		}
242bba2556eSOng Boon Leong 	}
243bba2556eSOng Boon Leong 
244bba2556eSOng Boon Leong 	__stmmac_disable_all_queues(priv);
245bba2556eSOng Boon Leong }
246bba2556eSOng Boon Leong 
247bba2556eSOng Boon Leong /**
248c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
249c22a3f48SJoao Pinto  * @priv: driver private structure
250c22a3f48SJoao Pinto  */
251c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
252c22a3f48SJoao Pinto {
253c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2548fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2558fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
256c22a3f48SJoao Pinto 	u32 queue;
257c22a3f48SJoao Pinto 
2588fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
2598fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
260c22a3f48SJoao Pinto 
261132c32eeSOng Boon Leong 		if (stmmac_xdp_is_enabled(priv) &&
262132c32eeSOng Boon Leong 		    test_bit(queue, priv->af_xdp_zc_qps)) {
263132c32eeSOng Boon Leong 			napi_enable(&ch->rxtx_napi);
264132c32eeSOng Boon Leong 			continue;
265132c32eeSOng Boon Leong 		}
266132c32eeSOng Boon Leong 
2674ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
2684ccb4585SJose Abreu 			napi_enable(&ch->rx_napi);
2694ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
2704ccb4585SJose Abreu 			napi_enable(&ch->tx_napi);
271c22a3f48SJoao Pinto 	}
272c22a3f48SJoao Pinto }
273c22a3f48SJoao Pinto 
27434877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
27534877a15SJose Abreu {
27634877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
27734877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
27834877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
27934877a15SJose Abreu }
28034877a15SJose Abreu 
/* Handle a fatal (global) error: drop the carrier, flag that a full reset
 * is required and kick the service task to perform it.
 */
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
28734877a15SJose Abreu 
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we can not estimate the proper divider as it is not known
	 * the frequency of clk_csr_i. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
		/* Rates above CSR_F_300M keep the platform default, per the
		 * comment above.
		 */
	}

	/* has_sun8i platforms override clk_csr with their own raw encoding */
	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	/* XGMAC cores likewise use their own encoding */
	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
354cd7201f4SGiuseppe CAVALLARO 
/* Dump @len bytes of packet data at @buf to the kernel log (debug level) */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
3607ac6653aSJeff Kirsher 
/**
 * stmmac_tx_avail - Get number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: computes the free slots in the TX ring, handling ring
 * wrap-around; one descriptor is always kept unused (the "- 1") so a full
 * ring can be distinguished from an empty one.
 */
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
373e3ad57c9SGiuseppe Cavallaro 
37454139cf3SJoao Pinto /**
37554139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
37654139cf3SJoao Pinto  * @priv: driver private structure
37754139cf3SJoao Pinto  * @queue: RX queue index
37854139cf3SJoao Pinto  */
37954139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
380e3ad57c9SGiuseppe Cavallaro {
3818531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
382a6a3e026SLABBE Corentin 	u32 dirty;
383e3ad57c9SGiuseppe Cavallaro 
38454139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
38554139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
386e3ad57c9SGiuseppe Cavallaro 	else
3878531c808SChristian Marangi 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
388e3ad57c9SGiuseppe Cavallaro 
389e3ad57c9SGiuseppe Cavallaro 	return dirty;
3907ac6653aSJeff Kirsher }
3917ac6653aSJeff Kirsher 
392be1c7eaeSVineetha G. Jaya Kumaran static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
393be1c7eaeSVineetha G. Jaya Kumaran {
394be1c7eaeSVineetha G. Jaya Kumaran 	int tx_lpi_timer;
395be1c7eaeSVineetha G. Jaya Kumaran 
396be1c7eaeSVineetha G. Jaya Kumaran 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
397be1c7eaeSVineetha G. Jaya Kumaran 	priv->eee_sw_timer_en = en ? 0 : 1;
398be1c7eaeSVineetha G. Jaya Kumaran 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
399be1c7eaeSVineetha G. Jaya Kumaran 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
400be1c7eaeSVineetha G. Jaya Kumaran }
401be1c7eaeSVineetha G. Jaya Kumaran 
40232ceabcaSGiuseppe CAVALLARO /**
403732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter in LPI mode
40432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
405732fdf0eSGiuseppe CAVALLARO  * Description: this function is to verify and enter in LPI mode in case of
406732fdf0eSGiuseppe CAVALLARO  * EEE.
40732ceabcaSGiuseppe CAVALLARO  */
408c74ead22SJisheng Zhang static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
409d765955dSGiuseppe CAVALLARO {
410ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
411ce736788SJoao Pinto 	u32 queue;
412ce736788SJoao Pinto 
413ce736788SJoao Pinto 	/* check if all TX queues have the work finished */
414ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4158531c808SChristian Marangi 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
416ce736788SJoao Pinto 
417ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
418c74ead22SJisheng Zhang 			return -EBUSY; /* still unfinished work */
419ce736788SJoao Pinto 	}
420ce736788SJoao Pinto 
421d765955dSGiuseppe CAVALLARO 	/* Check and enter in LPI mode */
422ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
423c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
424b4b7b772Sjpinto 				priv->plat->en_tx_lpi_clockgating);
425c74ead22SJisheng Zhang 	return 0;
426d765955dSGiuseppe CAVALLARO }
427d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	/* HW LPI entry timer in use: turning it off is sufficient, there is
	 * no SW timer to stop.
	 */
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
445d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	/* TX queues still busy: re-arm the timer and retry later */
	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
460d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enable the LPI state and start related
 *  timer.
 * Return: true if EEE ends up enabled, false otherwise.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot dial with the phy registers at this stage
	 * so we do not support extra feature like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			/* Stop both the SW timer and the HW timers */
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	/* First activation: set up the SW timer and program LS/TW times */
	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	/* Prefer the HW LPI entry timer on GMAC4 when the requested timer
	 * fits in the ET field; otherwise fall back to the SW timer.
	 */
	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}
526d765955dSGiuseppe CAVALLARO 
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read timestamp from the descriptor & pass it to stack.
 * and also perform some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status; fall back to the MAC-held timestamp when
	 * the descriptor does not carry one
	 */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		/* Apply the platform's CDC error adjustment to the raw ns */
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}
568891434b1SRayagond Kokatanur 
569732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
57032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
571ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
572ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
573891434b1SRayagond Kokatanur  * @skb : the socket buffer
574891434b1SRayagond Kokatanur  * Description :
575891434b1SRayagond Kokatanur  * This function will read received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
577891434b1SRayagond Kokatanur  */
578ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
579ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
580891434b1SRayagond Kokatanur {
581891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
58298870943SJose Abreu 	struct dma_desc *desc = p;
583df103170SNathan Chancellor 	u64 ns = 0;
584891434b1SRayagond Kokatanur 
585891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
586891434b1SRayagond Kokatanur 		return;
587ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
5887d9e6c5aSJose Abreu 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
58998870943SJose Abreu 		desc = np;
590891434b1SRayagond Kokatanur 
59198870943SJose Abreu 	/* Check if timestamp is available */
59242de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
59342de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
5943600be5fSVoon Weifeng 
595c6d5f193SKurt Kanzenbach 		ns -= priv->plat->cdc_error_adj;
5963600be5fSVoon Weifeng 
59733d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
598891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
599891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
600891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
601ba1ffd74SGiuseppe CAVALLARO 	} else  {
60233d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
603ba1ffd74SGiuseppe CAVALLARO 	}
604891434b1SRayagond Kokatanur }
605891434b1SRayagond Kokatanur 
606891434b1SRayagond Kokatanur /**
607d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_set - control hardware timestamping.
608891434b1SRayagond Kokatanur  *  @dev: device pointer.
6098d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
610891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
611891434b1SRayagond Kokatanur  *  Description:
612891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing(TX)
613891434b1SRayagond Kokatanur  *  and incoming(RX) packets time stamping based on user input.
614891434b1SRayagond Kokatanur  *  Return Value:
615891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
616891434b1SRayagond Kokatanur  */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	/* Timestamp Control Register (TCR) bit groups, accumulated per the
	 * requested filter and OR-ed together at the end.
	 */
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	/* Reject the request (and make the disabled state explicit) when the
	 * hardware advertises neither basic nor advanced timestamping.
	 */
	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* Advanced (IEEE 1588-2008) timestamping: map each supported RX
	 * filter onto its TCR bit combination.  Unsupported filters are
	 * rejected rather than silently widened.
	 */
	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			/* NOTE(review): cores older than 4.10 appear to also
			 * need TSEVNTENA for this filter — confirm against
			 * the core databook.
			 */
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* Basic timestamping only: any non-NONE request is widened to
		 * the one filter the hardware can do (PTP v1 over UDP).
		 */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	/* Keep the resulting TCR value cached so it can be re-applied on
	 * resume (see stmmac_init_tstamp_counter()).
	 */
	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* Remember the (possibly adjusted) config for SIOCGHWTSTAMP */
	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
802d6228b7cSArtem Panfilov 
803d6228b7cSArtem Panfilov /**
804d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_get - read hardware timestamping.
805d6228b7cSArtem Panfilov  *  @dev: device pointer.
806d6228b7cSArtem Panfilov  *  @ifr: An IOCTL specific structure, that can contain a pointer to
807d6228b7cSArtem Panfilov  *  a proprietary structure used to pass information to the driver.
808d6228b7cSArtem Panfilov  *  Description:
809d6228b7cSArtem Panfilov  *  This function obtain the current hardware timestamping settings
810d0ea5cbdSJesse Brandeburg  *  as requested.
811d6228b7cSArtem Panfilov  */
812d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
813d6228b7cSArtem Panfilov {
814d6228b7cSArtem Panfilov 	struct stmmac_priv *priv = netdev_priv(dev);
815d6228b7cSArtem Panfilov 	struct hwtstamp_config *config = &priv->tstamp_config;
816d6228b7cSArtem Panfilov 
817d6228b7cSArtem Panfilov 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
818d6228b7cSArtem Panfilov 		return -EOPNOTSUPP;
819d6228b7cSArtem Panfilov 
820d6228b7cSArtem Panfilov 	return copy_to_user(ifr->ifr_data, config,
821d6228b7cSArtem Panfilov 			    sizeof(*config)) ? -EFAULT : 0;
822891434b1SRayagond Kokatanur }
823891434b1SRayagond Kokatanur 
82432ceabcaSGiuseppe CAVALLARO /**
825a6da2bbbSHolger Assmann  * stmmac_init_tstamp_counter - init hardware timestamping counter
826a6da2bbbSHolger Assmann  * @priv: driver private structure
827a6da2bbbSHolger Assmann  * @systime_flags: timestamping flags
828a6da2bbbSHolger Assmann  * Description:
829a6da2bbbSHolger Assmann  * Initialize hardware counter for packet timestamping.
830a6da2bbbSHolger Assmann  * This is valid as long as the interface is open and not suspended.
831a6da2bbbSHolger Assmann  * Will be rerun after resuming from suspend, case in which the timestamping
832a6da2bbbSHolger Assmann  * flags updated by stmmac_hwtstamp_set() also need to be restored.
833a6da2bbbSHolger Assmann  */
834a6da2bbbSHolger Assmann int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
835a6da2bbbSHolger Assmann {
836a6da2bbbSHolger Assmann 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
837a6da2bbbSHolger Assmann 	struct timespec64 now;
838a6da2bbbSHolger Assmann 	u32 sec_inc = 0;
839a6da2bbbSHolger Assmann 	u64 temp = 0;
840a6da2bbbSHolger Assmann 
841a6da2bbbSHolger Assmann 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
842a6da2bbbSHolger Assmann 		return -EOPNOTSUPP;
843a6da2bbbSHolger Assmann 
844a6da2bbbSHolger Assmann 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
845a6da2bbbSHolger Assmann 	priv->systime_flags = systime_flags;
846a6da2bbbSHolger Assmann 
847a6da2bbbSHolger Assmann 	/* program Sub Second Increment reg */
848a6da2bbbSHolger Assmann 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
849a6da2bbbSHolger Assmann 					   priv->plat->clk_ptp_rate,
850a6da2bbbSHolger Assmann 					   xmac, &sec_inc);
851a6da2bbbSHolger Assmann 	temp = div_u64(1000000000ULL, sec_inc);
852a6da2bbbSHolger Assmann 
853a6da2bbbSHolger Assmann 	/* Store sub second increment for later use */
854a6da2bbbSHolger Assmann 	priv->sub_second_inc = sec_inc;
855a6da2bbbSHolger Assmann 
856a6da2bbbSHolger Assmann 	/* calculate default added value:
857a6da2bbbSHolger Assmann 	 * formula is :
858a6da2bbbSHolger Assmann 	 * addend = (2^32)/freq_div_ratio;
859a6da2bbbSHolger Assmann 	 * where, freq_div_ratio = 1e9ns/sec_inc
860a6da2bbbSHolger Assmann 	 */
861a6da2bbbSHolger Assmann 	temp = (u64)(temp << 32);
862a6da2bbbSHolger Assmann 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
863a6da2bbbSHolger Assmann 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
864a6da2bbbSHolger Assmann 
865a6da2bbbSHolger Assmann 	/* initialize system time */
866a6da2bbbSHolger Assmann 	ktime_get_real_ts64(&now);
867a6da2bbbSHolger Assmann 
868a6da2bbbSHolger Assmann 	/* lower 32 bits of tv_sec are safe until y2106 */
869a6da2bbbSHolger Assmann 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
870a6da2bbbSHolger Assmann 
871a6da2bbbSHolger Assmann 	return 0;
872a6da2bbbSHolger Assmann }
873a6da2bbbSHolger Assmann EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
874a6da2bbbSHolger Assmann 
875a6da2bbbSHolger Assmann /**
876732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
87732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
878732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
87932ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
880732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
88132ceabcaSGiuseppe CAVALLARO  */
88292ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
883891434b1SRayagond Kokatanur {
8847d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
885a6da2bbbSHolger Assmann 	int ret;
8867d9e6c5aSJose Abreu 
88794c82de4SMohammad Athari Bin Ismail 	if (priv->plat->ptp_clk_freq_config)
88894c82de4SMohammad Athari Bin Ismail 		priv->plat->ptp_clk_freq_config(priv);
88994c82de4SMohammad Athari Bin Ismail 
890a6da2bbbSHolger Assmann 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
891a6da2bbbSHolger Assmann 	if (ret)
892a6da2bbbSHolger Assmann 		return ret;
89392ba6888SRayagond Kokatanur 
894891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
8957d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
8967d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
897be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
898be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
899be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
900891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
9017cd01399SVince Bridgers 
902be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
903be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
9047cd01399SVince Bridgers 
905be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
906be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
907be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
908891434b1SRayagond Kokatanur 
909891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
910891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
91192ba6888SRayagond Kokatanur 
912c30a70d3SGiuseppe CAVALLARO 	return 0;
91392ba6888SRayagond Kokatanur }
91492ba6888SRayagond Kokatanur 
/* Undo PTP setup: gate the PTP reference clock and unregister the PTP
 * clock device.
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
920891434b1SRayagond Kokatanur 
9217ac6653aSJeff Kirsher /**
92229feff39SJoao Pinto  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
92329feff39SJoao Pinto  *  @priv: driver private structure
924d0ea5cbdSJesse Brandeburg  *  @duplex: duplex passed to the next function
92529feff39SJoao Pinto  *  Description: It is used for configuring the flow control in all queues
92629feff39SJoao Pinto  */
92729feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
92829feff39SJoao Pinto {
92929feff39SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
93029feff39SJoao Pinto 
931c10d4c82SJose Abreu 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
93229feff39SJoao Pinto 			priv->pause, tx_cnt);
93329feff39SJoao Pinto }
93429feff39SJoao Pinto 
93572e94511SRussell King (Oracle) static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
93672e94511SRussell King (Oracle) 						 phy_interface_t interface)
93772e94511SRussell King (Oracle) {
93872e94511SRussell King (Oracle) 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
93972e94511SRussell King (Oracle) 
94072e94511SRussell King (Oracle) 	if (!priv->hw->xpcs)
94172e94511SRussell King (Oracle) 		return NULL;
94272e94511SRussell King (Oracle) 
94372e94511SRussell King (Oracle) 	return &priv->hw->xpcs->pcs;
94472e94511SRussell King (Oracle) }
94572e94511SRussell King (Oracle) 
/* phylink .mac_config callback: intentionally empty. */
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}
951eeef2f6bSJose Abreu 
9525a558611SOng Boon Leong static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
9535a558611SOng Boon Leong {
9545a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
9555a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
9565a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
9575a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
9585a558611SOng Boon Leong 
9595a558611SOng Boon Leong 	if (is_up && *hs_enable) {
9605a558611SOng Boon Leong 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
9615a558611SOng Boon Leong 	} else {
9621f7096f0SWong Vee Khee 		*lo_state = FPE_STATE_OFF;
9631f7096f0SWong Vee Khee 		*lp_state = FPE_STATE_OFF;
9645a558611SOng Boon Leong 	}
9655a558611SOng Boon Leong }
9665a558611SOng Boon Leong 
/* phylink .mac_link_down callback: disable the MAC, re-evaluate EEE state
 * for the link-down case, and notify the FPE state machine when the
 * hardware supports frame preemption.
 */
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	/* Clear the EEE/LPI flags before stmmac_eee_init() so it sees the
	 * link-down state when recomputing eee_enabled.
	 */
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}
9819ad372fcSJose Abreu 
/* phylink .mac_link_up callback: program MAC_CTRL_REG speed/duplex bits for
 * the resolved link, apply flow control, enable the MAC, and (re)start EEE
 * and FPE handling.  An unsupported speed for the given interface returns
 * early without touching the hardware.
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 old_ctrl, ctrl;

	/* Some platforms can only power up the SerDes once the PHY link is up */
	if (priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup)
		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	/* Select the speed bits; which set applies depends on the interface */
	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	/* Platform hook for glue logic that must track the MAC speed */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		priv->flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_TX;
	else
		priv->flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex);

	/* Avoid a redundant register write when nothing changed */
	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active =
			phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}
10939ad372fcSJose Abreu 
/* phylink MAC operations wired up in the driver's phylink_create() call */
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};
1100eeef2f6bSJose Abreu 
110129feff39SJoao Pinto /**
1102732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
110332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
110432ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
110532ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
110632ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
110732ceabcaSGiuseppe CAVALLARO  */
1108e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1109e58bb43fSGiuseppe CAVALLARO {
1110e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
1111e58bb43fSGiuseppe CAVALLARO 
1112e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
11130d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
11140d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
11150d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
11160d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
111738ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
11183fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
11190d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
112038ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
11213fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1122e58bb43fSGiuseppe CAVALLARO 		}
1123e58bb43fSGiuseppe CAVALLARO 	}
1124e58bb43fSGiuseppe CAVALLARO }
1125e58bb43fSGiuseppe CAVALLARO 
11267ac6653aSJeff Kirsher /**
11277ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
11287ac6653aSJeff Kirsher  * @dev: net device structure
11297ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
11307ac6653aSJeff Kirsher  * to the mac driver.
11317ac6653aSJeff Kirsher  *  Return value:
11327ac6653aSJeff Kirsher  *  0 on success
11337ac6653aSJeff Kirsher  */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	int ret;

	/* phylink may already have a PHY attached (e.g. an in-band-only
	 * setup); in that case there is nothing to connect here.
	 */
	if (!phylink_expects_phy(priv->phylink))
		return 0;

	/* Prefer the DT phylink node; fall back to the device's fwnode */
	fwnode = of_fwnode_handle(priv->plat->phylink_node);
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		/* No PHY node in firmware: look the PHY up on the MDIO bus
		 * at the platform-provided address.
		 */
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		/* Drop the reference taken by fwnode_get_phy_node(); the
		 * connect call below is passed the parent fwnode, not the
		 * PHY node itself.
		 */
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
	}

	/* Without MAC-level PMT, Wake-on-LAN capability comes from the PHY */
	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
		device_set_wakeup_enable(priv->device, !!wol.wolopts);
	}

	return ret;
}
118774371272SJose Abreu 
/**
 * stmmac_phy_setup - set up phylink for this device
 * @priv: driver private structure
 * Description: populate the phylink configuration (supported interfaces
 * and MAC capabilities derived from the platform's max_speed and the MAC
 * variant) and create the phylink instance.
 * Return: 0 on success, a negative errno from phylink_create() otherwise.
 */
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int max_speed = priv->plat->max_speed;
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	/* Set the platform/firmware specified interface mode */
	__set_bit(mode, priv->phylink_config.supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		xpcs_get_interfaces(priv->hw->xpcs,
				    priv->phylink_config.supported_interfaces);

	/* Pause and 10/100 are always supported; faster link modes are
	 * gated by the platform's max_speed (0 means "no limit").
	 */
	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
		MAC_10 | MAC_100;

	if (!max_speed || max_speed >= 1000)
		priv->phylink_config.mac_capabilities |= MAC_1000;

	if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500)
			priv->phylink_config.mac_capabilities |= MAC_2500FD;
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || max_speed >= 2500)
			priv->phylink_config.mac_capabilities |= MAC_2500FD;
		if (!max_speed || max_speed >= 5000)
			priv->phylink_config.mac_capabilities |= MAC_5000FD;
		if (!max_speed || max_speed >= 10000)
			priv->phylink_config.mac_capabilities |= MAC_10000FD;
		if (!max_speed || max_speed >= 25000)
			priv->phylink_config.mac_capabilities |= MAC_25000FD;
		if (!max_speed || max_speed >= 40000)
			priv->phylink_config.mac_capabilities |= MAC_40000FD;
		if (!max_speed || max_speed >= 50000)
			priv->phylink_config.mac_capabilities |= MAC_50000FD;
		if (!max_speed || max_speed >= 100000)
			priv->phylink_config.mac_capabilities |= MAC_100000FD;
	}

	/* Half-Duplex can only work with single queue */
	if (priv->plat->tx_queues_to_use > 1)
		priv->phylink_config.mac_capabilities &=
			~(MAC_10HD | MAC_100HD | MAC_1000HD);
	priv->phylink_config.mac_managed_pm = true;

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}
12537ac6653aSJeff Kirsher 
1254ba39b344SChristian Marangi static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1255ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf)
1256c24602efSGiuseppe CAVALLARO {
125754139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1258bfaf91caSJoakim Zhang 	unsigned int desc_size;
125971fedb01SJoao Pinto 	void *head_rx;
126054139cf3SJoao Pinto 	u32 queue;
126154139cf3SJoao Pinto 
126254139cf3SJoao Pinto 	/* Display RX rings */
126354139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
1264ba39b344SChristian Marangi 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
126554139cf3SJoao Pinto 
126654139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1267d0225e7dSAlexandre TORGUE 
1268bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
126954139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
1270bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1271bfaf91caSJoakim Zhang 		} else {
127254139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
1273bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1274bfaf91caSJoakim Zhang 		}
127571fedb01SJoao Pinto 
127671fedb01SJoao Pinto 		/* Display RX ring */
1277ba39b344SChristian Marangi 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1278bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
12795bacd778SLABBE Corentin 	}
128054139cf3SJoao Pinto }
1281d0225e7dSAlexandre TORGUE 
1282ba39b344SChristian Marangi static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1283ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf)
128471fedb01SJoao Pinto {
1285ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1286bfaf91caSJoakim Zhang 	unsigned int desc_size;
128771fedb01SJoao Pinto 	void *head_tx;
1288ce736788SJoao Pinto 	u32 queue;
1289ce736788SJoao Pinto 
1290ce736788SJoao Pinto 	/* Display TX rings */
1291ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1292ba39b344SChristian Marangi 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1293ce736788SJoao Pinto 
1294ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
129571fedb01SJoao Pinto 
1296bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
1297ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
1298bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1299bfaf91caSJoakim Zhang 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1300579a25a8SJose Abreu 			head_tx = (void *)tx_q->dma_entx;
1301bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_edesc);
1302bfaf91caSJoakim Zhang 		} else {
1303ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
1304bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1305bfaf91caSJoakim Zhang 		}
130671fedb01SJoao Pinto 
1307ba39b344SChristian Marangi 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1308bfaf91caSJoakim Zhang 				    tx_q->dma_tx_phy, desc_size);
1309c24602efSGiuseppe CAVALLARO 	}
1310ce736788SJoao Pinto }
1311c24602efSGiuseppe CAVALLARO 
/**
 * stmmac_display_rings - display all RX and TX descriptor rings
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * Description: debug helper dumping the RX rings then the TX rings.
 */
static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv, dma_conf);

	/* Display TX ring */
	stmmac_display_tx_rings(priv, dma_conf);
}
132171fedb01SJoao Pinto 
1322286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1323286a8372SGiuseppe CAVALLARO {
1324286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1325286a8372SGiuseppe CAVALLARO 
1326b2f3a481SJose Abreu 	if (mtu >= BUF_SIZE_8KiB)
1327b2f3a481SJose Abreu 		ret = BUF_SIZE_16KiB;
1328b2f3a481SJose Abreu 	else if (mtu >= BUF_SIZE_4KiB)
1329286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1330286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1331286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1332d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1333286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1334286a8372SGiuseppe CAVALLARO 	else
1335d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1336286a8372SGiuseppe CAVALLARO 
1337286a8372SGiuseppe CAVALLARO 	return ret;
1338286a8372SGiuseppe CAVALLARO }
1339286a8372SGiuseppe CAVALLARO 
134032ceabcaSGiuseppe CAVALLARO /**
134171fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
134232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
1343ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
134454139cf3SJoao Pinto  * @queue: RX queue index
134571fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
134632ceabcaSGiuseppe CAVALLARO  * in case of both basic and extended descriptors are used.
134732ceabcaSGiuseppe CAVALLARO  */
1348ba39b344SChristian Marangi static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1349ba39b344SChristian Marangi 					struct stmmac_dma_conf *dma_conf,
1350ba39b344SChristian Marangi 					u32 queue)
1351c24602efSGiuseppe CAVALLARO {
1352ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
13535bacd778SLABBE Corentin 	int i;
1354c24602efSGiuseppe CAVALLARO 
135571fedb01SJoao Pinto 	/* Clear the RX descriptors */
1356ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++)
13575bacd778SLABBE Corentin 		if (priv->extend_desc)
135842de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
13595bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1360ba39b344SChristian Marangi 					(i == dma_conf->dma_rx_size - 1),
1361ba39b344SChristian Marangi 					dma_conf->dma_buf_sz);
13625bacd778SLABBE Corentin 		else
136342de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
13645bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1365ba39b344SChristian Marangi 					(i == dma_conf->dma_rx_size - 1),
1366ba39b344SChristian Marangi 					dma_conf->dma_buf_sz);
136771fedb01SJoao Pinto }
136871fedb01SJoao Pinto 
136971fedb01SJoao Pinto /**
137071fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
137171fedb01SJoao Pinto  * @priv: driver private structure
1372ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1373ce736788SJoao Pinto  * @queue: TX queue index.
137471fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
137571fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
137671fedb01SJoao Pinto  */
1377ba39b344SChristian Marangi static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1378ba39b344SChristian Marangi 					struct stmmac_dma_conf *dma_conf,
1379ba39b344SChristian Marangi 					u32 queue)
138071fedb01SJoao Pinto {
1381ba39b344SChristian Marangi 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
138271fedb01SJoao Pinto 	int i;
138371fedb01SJoao Pinto 
138471fedb01SJoao Pinto 	/* Clear the TX descriptors */
1385ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1386ba39b344SChristian Marangi 		int last = (i == (dma_conf->dma_tx_size - 1));
1387579a25a8SJose Abreu 		struct dma_desc *p;
1388579a25a8SJose Abreu 
13895bacd778SLABBE Corentin 		if (priv->extend_desc)
1390579a25a8SJose Abreu 			p = &tx_q->dma_etx[i].basic;
1391579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1392579a25a8SJose Abreu 			p = &tx_q->dma_entx[i].basic;
13935bacd778SLABBE Corentin 		else
1394579a25a8SJose Abreu 			p = &tx_q->dma_tx[i];
1395579a25a8SJose Abreu 
1396579a25a8SJose Abreu 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1397579a25a8SJose Abreu 	}
1398c24602efSGiuseppe CAVALLARO }
1399c24602efSGiuseppe CAVALLARO 
1400732fdf0eSGiuseppe CAVALLARO /**
140171fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
140271fedb01SJoao Pinto  * @priv: driver private structure
1403ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
140471fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
140571fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
140671fedb01SJoao Pinto  */
1407ba39b344SChristian Marangi static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1408ba39b344SChristian Marangi 				     struct stmmac_dma_conf *dma_conf)
140971fedb01SJoao Pinto {
141054139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1411ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
141254139cf3SJoao Pinto 	u32 queue;
141354139cf3SJoao Pinto 
141471fedb01SJoao Pinto 	/* Clear the RX descriptors */
141554139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
1416ba39b344SChristian Marangi 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
141771fedb01SJoao Pinto 
141871fedb01SJoao Pinto 	/* Clear the TX descriptors */
1419ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1420ba39b344SChristian Marangi 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
142171fedb01SJoao Pinto }
142271fedb01SJoao Pinto 
142371fedb01SJoao Pinto /**
1424732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1425732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1426ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1427732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1428732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
142954139cf3SJoao Pinto  * @flags: gfp flag
143054139cf3SJoao Pinto  * @queue: RX queue index
1431732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1432732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1433732fdf0eSGiuseppe CAVALLARO  */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* Keep pages in the 32-bit zone when the host DMA cannot address
	 * more than 32 bits.
	 */
	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	/* Allocate the primary page only if one is not already attached
	 * (buffers may be reused across re-initialization).
	 */
	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	/* With split-header (sph) enabled, attach a secondary page and
	 * program it into the descriptor.
	 * NOTE(review): when sph is set and sec_page is already non-NULL,
	 * the else branch clears sec_page without returning it to the
	 * page pool — confirm this path is unreachable or fix the leak.
	 */
	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	/* Program the primary buffer DMA address into the descriptor */
	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
1473c24602efSGiuseppe CAVALLARO 
147471fedb01SJoao Pinto /**
147571fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
147671fedb01SJoao Pinto  * @priv: private structure
1477ba39b344SChristian Marangi  * @rx_q: RX queue
147871fedb01SJoao Pinto  * @i: buffer index.
147971fedb01SJoao Pinto  */
1480ba39b344SChristian Marangi static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1481ba39b344SChristian Marangi 				  struct stmmac_rx_queue *rx_q,
1482ba39b344SChristian Marangi 				  int i)
148356329137SBartlomiej Zolnierkiewicz {
14842af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
148554139cf3SJoao Pinto 
14862af6106aSJose Abreu 	if (buf->page)
1487458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
14882af6106aSJose Abreu 	buf->page = NULL;
148967afd6d1SJose Abreu 
149067afd6d1SJose Abreu 	if (buf->sec_page)
1491458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
149267afd6d1SJose Abreu 	buf->sec_page = NULL;
149356329137SBartlomiej Zolnierkiewicz }
149456329137SBartlomiej Zolnierkiewicz 
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	/* Unmap the DMA buffer, except for XDP_TX frames whose pages are
	 * owned by the page pool and must not be unmapped here.
	 */
	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Return XDP frames (both locally transmitted and redirected) */
	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	/* XSK TX frames are only counted here; presumably completion is
	 * signalled to the pool by the consumer of xsk_frames_done.
	 */
	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	/* Regular skb frames are freed directly */
	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
154171fedb01SJoao Pinto 
154271fedb01SJoao Pinto /**
15434298255fSOng Boon Leong  * dma_free_rx_skbufs - free RX dma buffers
15444298255fSOng Boon Leong  * @priv: private structure
1545ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
15464298255fSOng Boon Leong  * @queue: RX queue index
15474298255fSOng Boon Leong  */
1548ba39b344SChristian Marangi static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1549ba39b344SChristian Marangi 			       struct stmmac_dma_conf *dma_conf,
1550ba39b344SChristian Marangi 			       u32 queue)
15514298255fSOng Boon Leong {
1552ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
15534298255fSOng Boon Leong 	int i;
15544298255fSOng Boon Leong 
1555ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1556ba39b344SChristian Marangi 		stmmac_free_rx_buffer(priv, rx_q, i);
15574298255fSOng Boon Leong }
15584298255fSOng Boon Leong 
1559ba39b344SChristian Marangi static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1560ba39b344SChristian Marangi 				   struct stmmac_dma_conf *dma_conf,
1561ba39b344SChristian Marangi 				   u32 queue, gfp_t flags)
15624298255fSOng Boon Leong {
1563ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
15644298255fSOng Boon Leong 	int i;
15654298255fSOng Boon Leong 
1566ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
15674298255fSOng Boon Leong 		struct dma_desc *p;
15684298255fSOng Boon Leong 		int ret;
15694298255fSOng Boon Leong 
15704298255fSOng Boon Leong 		if (priv->extend_desc)
15714298255fSOng Boon Leong 			p = &((rx_q->dma_erx + i)->basic);
15724298255fSOng Boon Leong 		else
15734298255fSOng Boon Leong 			p = rx_q->dma_rx + i;
15744298255fSOng Boon Leong 
1575ba39b344SChristian Marangi 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
15764298255fSOng Boon Leong 					     queue);
15774298255fSOng Boon Leong 		if (ret)
15784298255fSOng Boon Leong 			return ret;
1579bba2556eSOng Boon Leong 
1580bba2556eSOng Boon Leong 		rx_q->buf_alloc_num++;
15814298255fSOng Boon Leong 	}
15824298255fSOng Boon Leong 
15834298255fSOng Boon Leong 	return 0;
15844298255fSOng Boon Leong }
15854298255fSOng Boon Leong 
15864298255fSOng Boon Leong /**
1587bba2556eSOng Boon Leong  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1588bba2556eSOng Boon Leong  * @priv: private structure
1589ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1590bba2556eSOng Boon Leong  * @queue: RX queue index
1591bba2556eSOng Boon Leong  */
1592ba39b344SChristian Marangi static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1593ba39b344SChristian Marangi 				struct stmmac_dma_conf *dma_conf,
1594ba39b344SChristian Marangi 				u32 queue)
1595bba2556eSOng Boon Leong {
1596ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1597bba2556eSOng Boon Leong 	int i;
1598bba2556eSOng Boon Leong 
1599ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1600bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1601bba2556eSOng Boon Leong 
1602bba2556eSOng Boon Leong 		if (!buf->xdp)
1603bba2556eSOng Boon Leong 			continue;
1604bba2556eSOng Boon Leong 
1605bba2556eSOng Boon Leong 		xsk_buff_free(buf->xdp);
1606bba2556eSOng Boon Leong 		buf->xdp = NULL;
1607bba2556eSOng Boon Leong 	}
1608bba2556eSOng Boon Leong }
1609bba2556eSOng Boon Leong 
/* Fill the RX ring of @queue with buffers taken from its XSK pool and
 * program their DMA addresses into the descriptors.
 * Returns 0 on success or -ENOMEM once the pool runs out of buffers.
 */
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure no size violations.
	 */
	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		/* Pick the descriptor matching the active layout */
		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}
1646bba2556eSOng Boon Leong 
1647bba2556eSOng Boon Leong static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1648bba2556eSOng Boon Leong {
1649bba2556eSOng Boon Leong 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1650bba2556eSOng Boon Leong 		return NULL;
1651bba2556eSOng Boon Leong 
1652bba2556eSOng Boon Leong 	return xsk_get_pool_from_qid(priv->dev, queue);
1653bba2556eSOng Boon Leong }
1654bba2556eSOng Boon Leong 
16559c63faaaSJoakim Zhang /**
1656de0b90e5SOng Boon Leong  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1657de0b90e5SOng Boon Leong  * @priv: driver private structure
1658ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1659de0b90e5SOng Boon Leong  * @queue: RX queue index
16605bacd778SLABBE Corentin  * @flags: gfp flag.
166171fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
16625bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1663286a8372SGiuseppe CAVALLARO  * modes.
16647ac6653aSJeff Kirsher  */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	/* Start from a clean descriptor ring */
	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Drop any previously registered memory model before picking the
	 * one that matches the current XDP/XSK configuration.
	 */
	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 0);
	}

	return 0;
}
1724de0b90e5SOng Boon Leong 
static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int queue;	/* must be signed: the unwind loop counts down past 0 */
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	/* Unwind in reverse: the failing queue may hold a partial buffer
	 * allocation, so it is freed together with all the queues that
	 * were fully initialized before it.
	 */
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}
176371fedb01SJoao Pinto 
/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		/* Enhanced descriptors (TBS) are not chained here */
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	/* Clear every descriptor and reset the per-entry bookkeeping so
	 * no stale buffer state survives a re-initialization.
	 */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		/* Pick the descriptor flavor matching how the ring was
		 * allocated: extended, enhanced (TBS) or basic.
		 */
		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}
18197ac6653aSJeff Kirsher 
1820ba39b344SChristian Marangi static int init_dma_tx_desc_rings(struct net_device *dev,
1821ba39b344SChristian Marangi 				  struct stmmac_dma_conf *dma_conf)
1822de0b90e5SOng Boon Leong {
1823de0b90e5SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
1824de0b90e5SOng Boon Leong 	u32 tx_queue_cnt;
1825de0b90e5SOng Boon Leong 	u32 queue;
1826de0b90e5SOng Boon Leong 
1827de0b90e5SOng Boon Leong 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1828de0b90e5SOng Boon Leong 
1829de0b90e5SOng Boon Leong 	for (queue = 0; queue < tx_queue_cnt; queue++)
1830ba39b344SChristian Marangi 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1831de0b90e5SOng Boon Leong 
183271fedb01SJoao Pinto 	return 0;
183371fedb01SJoao Pinto }
183471fedb01SJoao Pinto 
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev,
			       struct stmmac_dma_conf *dma_conf,
			       gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev, dma_conf);

	/* NOTE(review): descriptors are (re)cleared after ring init —
	 * presumably to put them into a known reset state before the DMA
	 * engine is started; confirm against stmmac_clear_descriptors().
	 */
	stmmac_clear_descriptors(priv, dma_conf);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv, dma_conf);

	return ret;
}
18647ac6653aSJeff Kirsher 
/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	tx_q->xsk_frames_done = 0;

	for (i = 0; i < dma_conf->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, dma_conf, queue, i);

	/* xsk_frames_done is presumably accumulated by
	 * stmmac_free_tx_buffer() above — report those completions to the
	 * XSK pool and detach it from this queue.
	 */
	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
		tx_q->xsk_frames_done = 0;
		tx_q->xsk_pool = NULL;
	}
}
18897ac6653aSJeff Kirsher 
1890732fdf0eSGiuseppe CAVALLARO /**
18914ec236c7SFugang Duan  * stmmac_free_tx_skbufs - free TX skb buffers
18924ec236c7SFugang Duan  * @priv: private structure
18934ec236c7SFugang Duan  */
18944ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
18954ec236c7SFugang Duan {
18964ec236c7SFugang Duan 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
18974ec236c7SFugang Duan 	u32 queue;
18984ec236c7SFugang Duan 
18994ec236c7SFugang Duan 	for (queue = 0; queue < tx_queue_cnt; queue++)
1900ba39b344SChristian Marangi 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
19014ec236c7SFugang Duan }
19024ec236c7SFugang Duan 
/**
 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

	/* Release the DMA RX socket buffers */
	if (rx_q->xsk_pool)
		dma_free_rx_xskbufs(priv, dma_conf, queue);
	else
		dma_free_rx_skbufs(priv, dma_conf, queue);

	rx_q->buf_alloc_num = 0;
	rx_q->xsk_pool = NULL;

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc)
		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
				  sizeof(struct dma_desc),
				  rx_q->dma_rx, rx_q->dma_rx_phy);
	else
		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  rx_q->dma_erx, rx_q->dma_rx_phy);

	/* Only unregister if the alloc path got far enough to register;
	 * this function is also used to unwind partial allocations.
	 */
	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
		xdp_rxq_info_unreg(&rx_q->xdp_rxq);

	kfree(rx_q->buf_pool);
	if (rx_q->page_pool)
		page_pool_destroy(rx_q->page_pool);
}
1941da5ec7f2SOng Boon Leong 
1942ba39b344SChristian Marangi static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1943ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
1944da5ec7f2SOng Boon Leong {
1945da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
1946da5ec7f2SOng Boon Leong 	u32 queue;
1947da5ec7f2SOng Boon Leong 
1948da5ec7f2SOng Boon Leong 	/* Free RX queue resources */
1949da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++)
1950ba39b344SChristian Marangi 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
195154139cf3SJoao Pinto }
195254139cf3SJoao Pinto 
/**
 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 */
static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	size_t size;
	void *addr;

	/* Release the DMA TX socket buffers */
	dma_free_tx_skbufs(priv, dma_conf, queue);

	/* Pick the element size and ring base matching how the ring was
	 * allocated: extended, enhanced (TBS) or basic descriptors. This
	 * must mirror the selection in __alloc_dma_tx_desc_resources().
	 */
	if (priv->extend_desc) {
		size = sizeof(struct dma_extended_desc);
		addr = tx_q->dma_etx;
	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
		size = sizeof(struct dma_edesc);
		addr = tx_q->dma_entx;
	} else {
		size = sizeof(struct dma_desc);
		addr = tx_q->dma_tx;
	}

	size *= dma_conf->dma_tx_size;

	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

	kfree(tx_q->tx_skbuff_dma);
	kfree(tx_q->tx_skbuff);
}
1988da5ec7f2SOng Boon Leong 
1989ba39b344SChristian Marangi static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1990ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
1991da5ec7f2SOng Boon Leong {
1992da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
1993da5ec7f2SOng Boon Leong 	u32 queue;
1994da5ec7f2SOng Boon Leong 
1995da5ec7f2SOng Boon Leong 	/* Free TX queue resources */
1996da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++)
1997ba39b344SChristian Marangi 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1998ce736788SJoao Pinto }
1999ce736788SJoao Pinto 
/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: allocates the page pool, the buffer bookkeeping array and
 * the coherent descriptor ring for one RX queue, and registers the queue
 * with the XDP core. On failure, partial allocations are NOT rolled back
 * here; the caller (alloc_dma_rx_desc_resources) frees all queues.
 */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int num_pages;
	unsigned int napi_id;
	int ret;

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;

	/* One pool entry per descriptor; page order is sized so a single
	 * pool allocation can hold one RX buffer of dma_buf_sz bytes.
	 */
	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = dma_conf->dma_rx_size;
	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
	pp_params.order = ilog2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	/* XDP may write into the buffer (e.g. XDP_TX), so map
	 * bidirectionally when a program is attached.
	 */
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	pp_params.offset = stmmac_rx_offset(priv);
	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);

	rx_q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_q->page_pool)) {
		ret = PTR_ERR(rx_q->page_pool);
		/* NULL the pointer so the unwind path can test it safely */
		rx_q->page_pool = NULL;
		return ret;
	}

	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
				 sizeof(*rx_q->buf_pool),
				 GFP_KERNEL);
	if (!rx_q->buf_pool)
		return -ENOMEM;

	if (priv->extend_desc) {
		rx_q->dma_erx = dma_alloc_coherent(priv->device,
						   dma_conf->dma_rx_size *
						   sizeof(struct dma_extended_desc),
						   &rx_q->dma_rx_phy,
						   GFP_KERNEL);
		if (!rx_q->dma_erx)
			return -ENOMEM;

	} else {
		rx_q->dma_rx = dma_alloc_coherent(priv->device,
						  dma_conf->dma_rx_size *
						  sizeof(struct dma_desc),
						  &rx_q->dma_rx_phy,
						  GFP_KERNEL);
		if (!rx_q->dma_rx)
			return -ENOMEM;
	}

	/* AF_XDP zero-copy queues are serviced by the combined rx/tx NAPI
	 * context; everything else uses the plain RX NAPI.
	 */
	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		napi_id = ch->rxtx_napi.napi_id;
	else
		napi_id = ch->rx_napi.napi_id;

	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
			       rx_q->queue_index,
			       napi_id);
	if (ret) {
		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		return -EINVAL;
	}

	return 0;
}
2083da5ec7f2SOng Boon Leong 
2084ba39b344SChristian Marangi static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2085ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
2086da5ec7f2SOng Boon Leong {
2087da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
2088da5ec7f2SOng Boon Leong 	u32 queue;
2089da5ec7f2SOng Boon Leong 	int ret;
2090da5ec7f2SOng Boon Leong 
2091da5ec7f2SOng Boon Leong 	/* RX queues buffers and DMA */
2092da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++) {
2093ba39b344SChristian Marangi 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2094da5ec7f2SOng Boon Leong 		if (ret)
2095da5ec7f2SOng Boon Leong 			goto err_dma;
209654139cf3SJoao Pinto 	}
209771fedb01SJoao Pinto 
209871fedb01SJoao Pinto 	return 0;
209971fedb01SJoao Pinto 
210071fedb01SJoao Pinto err_dma:
2101ba39b344SChristian Marangi 	free_dma_rx_desc_resources(priv, dma_conf);
210254139cf3SJoao Pinto 
210371fedb01SJoao Pinto 	return ret;
210471fedb01SJoao Pinto }
210571fedb01SJoao Pinto 
210671fedb01SJoao Pinto /**
2107da5ec7f2SOng Boon Leong  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
210871fedb01SJoao Pinto  * @priv: private structure
2109ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
2110da5ec7f2SOng Boon Leong  * @queue: TX queue index
211171fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
211271fedb01SJoao Pinto  * this function allocates the resources for TX and RX paths. In case of
211371fedb01SJoao Pinto  * reception, for example, it pre-allocated the RX socket buffer in order to
211471fedb01SJoao Pinto  * allow zero-copy mechanism.
211571fedb01SJoao Pinto  */
2116ba39b344SChristian Marangi static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2117ba39b344SChristian Marangi 					 struct stmmac_dma_conf *dma_conf,
2118ba39b344SChristian Marangi 					 u32 queue)
211971fedb01SJoao Pinto {
2120ba39b344SChristian Marangi 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2121579a25a8SJose Abreu 	size_t size;
2122579a25a8SJose Abreu 	void *addr;
2123ce736788SJoao Pinto 
2124ce736788SJoao Pinto 	tx_q->queue_index = queue;
2125ce736788SJoao Pinto 	tx_q->priv_data = priv;
2126ce736788SJoao Pinto 
2127ba39b344SChristian Marangi 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2128ce736788SJoao Pinto 				      sizeof(*tx_q->tx_skbuff_dma),
212971fedb01SJoao Pinto 				      GFP_KERNEL);
2130ce736788SJoao Pinto 	if (!tx_q->tx_skbuff_dma)
2131da5ec7f2SOng Boon Leong 		return -ENOMEM;
213271fedb01SJoao Pinto 
2133ba39b344SChristian Marangi 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2134ce736788SJoao Pinto 				  sizeof(struct sk_buff *),
213571fedb01SJoao Pinto 				  GFP_KERNEL);
2136ce736788SJoao Pinto 	if (!tx_q->tx_skbuff)
2137da5ec7f2SOng Boon Leong 		return -ENOMEM;
213871fedb01SJoao Pinto 
2139579a25a8SJose Abreu 	if (priv->extend_desc)
2140579a25a8SJose Abreu 		size = sizeof(struct dma_extended_desc);
2141579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2142579a25a8SJose Abreu 		size = sizeof(struct dma_edesc);
2143579a25a8SJose Abreu 	else
2144579a25a8SJose Abreu 		size = sizeof(struct dma_desc);
2145579a25a8SJose Abreu 
2146ba39b344SChristian Marangi 	size *= dma_conf->dma_tx_size;
2147579a25a8SJose Abreu 
2148579a25a8SJose Abreu 	addr = dma_alloc_coherent(priv->device, size,
2149579a25a8SJose Abreu 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2150579a25a8SJose Abreu 	if (!addr)
2151da5ec7f2SOng Boon Leong 		return -ENOMEM;
2152579a25a8SJose Abreu 
2153579a25a8SJose Abreu 	if (priv->extend_desc)
2154579a25a8SJose Abreu 		tx_q->dma_etx = addr;
2155579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2156579a25a8SJose Abreu 		tx_q->dma_entx = addr;
2157579a25a8SJose Abreu 	else
2158579a25a8SJose Abreu 		tx_q->dma_tx = addr;
2159da5ec7f2SOng Boon Leong 
2160da5ec7f2SOng Boon Leong 	return 0;
2161da5ec7f2SOng Boon Leong }
2162da5ec7f2SOng Boon Leong 
2163ba39b344SChristian Marangi static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2164ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
2165da5ec7f2SOng Boon Leong {
2166da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
2167da5ec7f2SOng Boon Leong 	u32 queue;
2168da5ec7f2SOng Boon Leong 	int ret;
2169da5ec7f2SOng Boon Leong 
2170da5ec7f2SOng Boon Leong 	/* TX queues buffers and DMA */
2171da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++) {
2172ba39b344SChristian Marangi 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2173da5ec7f2SOng Boon Leong 		if (ret)
2174da5ec7f2SOng Boon Leong 			goto err_dma;
21755bacd778SLABBE Corentin 	}
21765bacd778SLABBE Corentin 
21775bacd778SLABBE Corentin 	return 0;
21785bacd778SLABBE Corentin 
217962242260SChristophe Jaillet err_dma:
2180ba39b344SChristian Marangi 	free_dma_tx_desc_resources(priv, dma_conf);
218109f8d696SSrinivas Kandagatla 	return ret;
21825bacd778SLABBE Corentin }
218309f8d696SSrinivas Kandagatla 
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocates the RX descriptor resources first and, only if
 * that succeeds, the TX ones. The error code of the failing stage is
 * returned to the caller.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	int ret;

	/* RX Allocation */
	ret = alloc_dma_rx_desc_resources(priv, dma_conf);
	if (ret)
		return ret;

	/* TX Allocation */
	return alloc_dma_tx_desc_resources(priv, dma_conf);
}
220671fedb01SJoao Pinto 
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: releases all TX then all RX DMA resources. The order is
 * intentional and must not be swapped (see comment below).
 */
static void free_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv, dma_conf);

	/* Release the DMA RX socket buffers later
	 * to ensure all pending XDP_TX buffers are returned.
	 */
	free_dma_rx_desc_resources(priv, dma_conf);
}
222371fedb01SJoao Pinto 
222471fedb01SJoao Pinto /**
22259eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
22269eb12474Sjpinto  *  @priv: driver private structure
22279eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
22289eb12474Sjpinto  */
22299eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
22309eb12474Sjpinto {
22314f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
22324f6046f5SJoao Pinto 	int queue;
22334f6046f5SJoao Pinto 	u8 mode;
22349eb12474Sjpinto 
22354f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
22364f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2237c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
22384f6046f5SJoao Pinto 	}
22399eb12474Sjpinto }
22409eb12474Sjpinto 
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel by delegating to stmmac_start_rx(),
 * after logging the event at debug level.
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}
2253ae4f0d46SJoao Pinto 
/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel by delegating to stmmac_start_tx(),
 * after logging the event at debug level.
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}
2266ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel by delegating to stmmac_stop_rx(),
 * after logging the event at debug level.
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}
2279ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel by delegating to stmmac_stop_tx(),
 * after logging the event at debug level.
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
2292ae4f0d46SJoao Pinto 
/**
 * stmmac_enable_all_dma_irq - enable DMA IRQs for all channels
 * @priv: driver private structure
 * Description:
 * Iterates over every DMA channel in use (the larger of the RX and TX
 * channel counts) and enables its DMA interrupt under the channel lock.
 * The trailing (1, 1) arguments presumably select both RX and TX IRQs —
 * confirm against stmmac_enable_dma_irq().
 */
static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	u32 chan;

	for (chan = 0; chan < dma_csr_ch; chan++) {
		struct stmmac_channel *ch = &priv->channel[chan];
		unsigned long flags;

		/* Lock serializes the IRQ-enable register update with other
		 * users of this channel's lock.
		 */
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}
}
2309087a7b94SVincent Whitchurch 
2310ae4f0d46SJoao Pinto /**
2311ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
2312ae4f0d46SJoao Pinto  * @priv: driver private structure
2313ae4f0d46SJoao Pinto  * Description:
2314ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
2315ae4f0d46SJoao Pinto  */
2316ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
2317ae4f0d46SJoao Pinto {
2318ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2319ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2320ae4f0d46SJoao Pinto 	u32 chan = 0;
2321ae4f0d46SJoao Pinto 
2322ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2323ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
2324ae4f0d46SJoao Pinto 
2325ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2326ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
2327ae4f0d46SJoao Pinto }
2328ae4f0d46SJoao Pinto 
2329ae4f0d46SJoao Pinto /**
2330ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2331ae4f0d46SJoao Pinto  * @priv: driver private structure
2332ae4f0d46SJoao Pinto  * Description:
2333ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
2334ae4f0d46SJoao Pinto  */
2335ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2336ae4f0d46SJoao Pinto {
2337ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2338ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2339ae4f0d46SJoao Pinto 	u32 chan = 0;
2340ae4f0d46SJoao Pinto 
2341ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2342ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
2343ae4f0d46SJoao Pinto 
2344ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2345ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
2346ae4f0d46SJoao Pinto }
2347ae4f0d46SJoao Pinto 
2348ae4f0d46SJoao Pinto /**
23497ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
235032ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
2351732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
2352732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
23537ac6653aSJeff Kirsher  */
23547ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
23557ac6653aSJeff Kirsher {
23566deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
23576deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2358f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
235952a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
23606deee222SJoao Pinto 	u32 txmode = 0;
23616deee222SJoao Pinto 	u32 rxmode = 0;
23626deee222SJoao Pinto 	u32 chan = 0;
2363a0daae13SJose Abreu 	u8 qmode = 0;
2364f88203a2SVince Bridgers 
236511fbf811SThierry Reding 	if (rxfifosz == 0)
236611fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
236752a76235SJose Abreu 	if (txfifosz == 0)
236852a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
236952a76235SJose Abreu 
237052a76235SJose Abreu 	/* Adjust for real per queue fifo size */
237152a76235SJose Abreu 	rxfifosz /= rx_channels_count;
237252a76235SJose Abreu 	txfifosz /= tx_channels_count;
237311fbf811SThierry Reding 
23746deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
23756deee222SJoao Pinto 		txmode = tc;
23766deee222SJoao Pinto 		rxmode = tc;
23776deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
23787ac6653aSJeff Kirsher 		/*
23797ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
23807ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
23817ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
23827ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
23837ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
23847ac6653aSJeff Kirsher 		 */
23856deee222SJoao Pinto 		txmode = SF_DMA_MODE;
23866deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
2387b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
23886deee222SJoao Pinto 	} else {
23896deee222SJoao Pinto 		txmode = tc;
23906deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
23916deee222SJoao Pinto 	}
23926deee222SJoao Pinto 
23936deee222SJoao Pinto 	/* configure all channels */
2394a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
23958531c808SChristian Marangi 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2396bba2556eSOng Boon Leong 		u32 buf_size;
2397bba2556eSOng Boon Leong 
2398a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
23996deee222SJoao Pinto 
2400a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2401a0daae13SJose Abreu 				rxfifosz, qmode);
2402bba2556eSOng Boon Leong 
2403bba2556eSOng Boon Leong 		if (rx_q->xsk_pool) {
2404bba2556eSOng Boon Leong 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2405bba2556eSOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2406bba2556eSOng Boon Leong 					      buf_size,
24074205c88eSJose Abreu 					      chan);
2408bba2556eSOng Boon Leong 		} else {
2409bba2556eSOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
24108531c808SChristian Marangi 					      priv->dma_conf.dma_buf_sz,
2411bba2556eSOng Boon Leong 					      chan);
2412bba2556eSOng Boon Leong 		}
2413a0daae13SJose Abreu 	}
2414a0daae13SJose Abreu 
2415a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
2416a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2417a0daae13SJose Abreu 
2418a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2419a0daae13SJose Abreu 				txfifosz, qmode);
2420a0daae13SJose Abreu 	}
24217ac6653aSJeff Kirsher }
24227ac6653aSJeff Kirsher 
2423132c32eeSOng Boon Leong static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2424132c32eeSOng Boon Leong {
2425132c32eeSOng Boon Leong 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
24268531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2427132c32eeSOng Boon Leong 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2428132c32eeSOng Boon Leong 	unsigned int entry = tx_q->cur_tx;
2429132c32eeSOng Boon Leong 	struct dma_desc *tx_desc = NULL;
2430132c32eeSOng Boon Leong 	struct xdp_desc xdp_desc;
2431132c32eeSOng Boon Leong 	bool work_done = true;
2432132c32eeSOng Boon Leong 
2433132c32eeSOng Boon Leong 	/* Avoids TX time-out as we are sharing with slow path */
2434e92af33eSAlexander Lobakin 	txq_trans_cond_update(nq);
2435132c32eeSOng Boon Leong 
2436132c32eeSOng Boon Leong 	budget = min(budget, stmmac_tx_avail(priv, queue));
2437132c32eeSOng Boon Leong 
2438132c32eeSOng Boon Leong 	while (budget-- > 0) {
2439132c32eeSOng Boon Leong 		dma_addr_t dma_addr;
2440132c32eeSOng Boon Leong 		bool set_ic;
2441132c32eeSOng Boon Leong 
2442132c32eeSOng Boon Leong 		/* We are sharing with slow path and stop XSK TX desc submission when
2443132c32eeSOng Boon Leong 		 * available TX ring is less than threshold.
2444132c32eeSOng Boon Leong 		 */
2445132c32eeSOng Boon Leong 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2446132c32eeSOng Boon Leong 		    !netif_carrier_ok(priv->dev)) {
2447132c32eeSOng Boon Leong 			work_done = false;
2448132c32eeSOng Boon Leong 			break;
2449132c32eeSOng Boon Leong 		}
2450132c32eeSOng Boon Leong 
2451132c32eeSOng Boon Leong 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2452132c32eeSOng Boon Leong 			break;
2453132c32eeSOng Boon Leong 
2454132c32eeSOng Boon Leong 		if (likely(priv->extend_desc))
2455132c32eeSOng Boon Leong 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2456132c32eeSOng Boon Leong 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2457132c32eeSOng Boon Leong 			tx_desc = &tx_q->dma_entx[entry].basic;
2458132c32eeSOng Boon Leong 		else
2459132c32eeSOng Boon Leong 			tx_desc = tx_q->dma_tx + entry;
2460132c32eeSOng Boon Leong 
2461132c32eeSOng Boon Leong 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2462132c32eeSOng Boon Leong 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2463132c32eeSOng Boon Leong 
2464132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2465132c32eeSOng Boon Leong 
2466132c32eeSOng Boon Leong 		/* To return XDP buffer to XSK pool, we simple call
2467132c32eeSOng Boon Leong 		 * xsk_tx_completed(), so we don't need to fill up
2468132c32eeSOng Boon Leong 		 * 'buf' and 'xdpf'.
2469132c32eeSOng Boon Leong 		 */
2470132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf = 0;
2471132c32eeSOng Boon Leong 		tx_q->xdpf[entry] = NULL;
2472132c32eeSOng Boon Leong 
2473132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2474132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2475132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2476132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2477132c32eeSOng Boon Leong 
2478132c32eeSOng Boon Leong 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2479132c32eeSOng Boon Leong 
2480132c32eeSOng Boon Leong 		tx_q->tx_count_frames++;
2481132c32eeSOng Boon Leong 
2482132c32eeSOng Boon Leong 		if (!priv->tx_coal_frames[queue])
2483132c32eeSOng Boon Leong 			set_ic = false;
2484132c32eeSOng Boon Leong 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2485132c32eeSOng Boon Leong 			set_ic = true;
2486132c32eeSOng Boon Leong 		else
2487132c32eeSOng Boon Leong 			set_ic = false;
2488132c32eeSOng Boon Leong 
2489132c32eeSOng Boon Leong 		if (set_ic) {
2490132c32eeSOng Boon Leong 			tx_q->tx_count_frames = 0;
2491132c32eeSOng Boon Leong 			stmmac_set_tx_ic(priv, tx_desc);
2492132c32eeSOng Boon Leong 			priv->xstats.tx_set_ic_bit++;
2493132c32eeSOng Boon Leong 		}
2494132c32eeSOng Boon Leong 
2495132c32eeSOng Boon Leong 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2496132c32eeSOng Boon Leong 				       true, priv->mode, true, true,
2497132c32eeSOng Boon Leong 				       xdp_desc.len);
2498132c32eeSOng Boon Leong 
2499132c32eeSOng Boon Leong 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2500132c32eeSOng Boon Leong 
25018531c808SChristian Marangi 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2502132c32eeSOng Boon Leong 		entry = tx_q->cur_tx;
2503132c32eeSOng Boon Leong 	}
2504132c32eeSOng Boon Leong 
2505132c32eeSOng Boon Leong 	if (tx_desc) {
2506132c32eeSOng Boon Leong 		stmmac_flush_tx_descriptors(priv, queue);
2507132c32eeSOng Boon Leong 		xsk_tx_release(pool);
2508132c32eeSOng Boon Leong 	}
2509132c32eeSOng Boon Leong 
2510132c32eeSOng Boon Leong 	/* Return true if all of the 3 conditions are met
2511132c32eeSOng Boon Leong 	 *  a) TX Budget is still available
2512132c32eeSOng Boon Leong 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2513132c32eeSOng Boon Leong 	 *     pending XSK TX for transmission)
2514132c32eeSOng Boon Leong 	 */
2515132c32eeSOng Boon Leong 	return !!budget && work_done;
2516132c32eeSOng Boon Leong }
2517132c32eeSOng Boon Leong 
25183a6c12a0SXiaoliang Yang static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
25193a6c12a0SXiaoliang Yang {
25203a6c12a0SXiaoliang Yang 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
25213a6c12a0SXiaoliang Yang 		tc += 64;
25223a6c12a0SXiaoliang Yang 
25233a6c12a0SXiaoliang Yang 		if (priv->plat->force_thresh_dma_mode)
25243a6c12a0SXiaoliang Yang 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
25253a6c12a0SXiaoliang Yang 		else
25263a6c12a0SXiaoliang Yang 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
25273a6c12a0SXiaoliang Yang 						      chan);
25283a6c12a0SXiaoliang Yang 
25293a6c12a0SXiaoliang Yang 		priv->xstats.threshold = tc;
25303a6c12a0SXiaoliang Yang 	}
25313a6c12a0SXiaoliang Yang }
25323a6c12a0SXiaoliang Yang 
25337ac6653aSJeff Kirsher /**
2534732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
253532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
2536d0ea5cbdSJesse Brandeburg  * @budget: napi budget limiting this functions packet handling
2537ce736788SJoao Pinto  * @queue: TX queue index
2538732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
25397ac6653aSJeff Kirsher  */
25408fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
25417ac6653aSJeff Kirsher {
25428531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
254338979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
2544132c32eeSOng Boon Leong 	unsigned int entry, xmits = 0, count = 0;
25457ac6653aSJeff Kirsher 
25468fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2547a9097a96SGiuseppe CAVALLARO 
25489125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
25499125cdd1SGiuseppe CAVALLARO 
2550132c32eeSOng Boon Leong 	tx_q->xsk_frames_done = 0;
2551132c32eeSOng Boon Leong 
25528d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
2553132c32eeSOng Boon Leong 
2554132c32eeSOng Boon Leong 	/* Try to clean all TX complete frame in 1 shot */
25558531c808SChristian Marangi 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2556be8b38a7SOng Boon Leong 		struct xdp_frame *xdpf;
2557be8b38a7SOng Boon Leong 		struct sk_buff *skb;
2558c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
2559c363b658SFabrice Gasnier 		int status;
2560c24602efSGiuseppe CAVALLARO 
25618b278a5bSOng Boon Leong 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
25628b278a5bSOng Boon Leong 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2563be8b38a7SOng Boon Leong 			xdpf = tx_q->xdpf[entry];
2564be8b38a7SOng Boon Leong 			skb = NULL;
2565be8b38a7SOng Boon Leong 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2566be8b38a7SOng Boon Leong 			xdpf = NULL;
2567be8b38a7SOng Boon Leong 			skb = tx_q->tx_skbuff[entry];
2568be8b38a7SOng Boon Leong 		} else {
2569be8b38a7SOng Boon Leong 			xdpf = NULL;
2570be8b38a7SOng Boon Leong 			skb = NULL;
2571be8b38a7SOng Boon Leong 		}
2572be8b38a7SOng Boon Leong 
2573c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
2574ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2575579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2576579a25a8SJose Abreu 			p = &tx_q->dma_entx[entry].basic;
2577c24602efSGiuseppe CAVALLARO 		else
2578ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
25797ac6653aSJeff Kirsher 
258042de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
258142de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
2582c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
2583c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
2584c363b658SFabrice Gasnier 			break;
2585c363b658SFabrice Gasnier 
25868fce3331SJose Abreu 		count++;
25878fce3331SJose Abreu 
2588a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
2589a6b25da5SNiklas Cassel 		 * the own bit.
2590a6b25da5SNiklas Cassel 		 */
2591a6b25da5SNiklas Cassel 		dma_rmb();
2592a6b25da5SNiklas Cassel 
2593c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
2594c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
2595c363b658SFabrice Gasnier 			/* ... verify the status error condition */
2596c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
2597c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
25983a6c12a0SXiaoliang Yang 				if (unlikely(status & tx_err_bump_tc))
25993a6c12a0SXiaoliang Yang 					stmmac_bump_dma_threshold(priv, queue);
2600c363b658SFabrice Gasnier 			} else {
26017ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
26027ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
260368e9c5deSVijayakannan Ayyathurai 				priv->xstats.txq_stats[queue].tx_pkt_n++;
2604c363b658SFabrice Gasnier 			}
2605be8b38a7SOng Boon Leong 			if (skb)
2606ba1ffd74SGiuseppe CAVALLARO 				stmmac_get_tx_hwtstamp(priv, p, skb);
26077ac6653aSJeff Kirsher 		}
26087ac6653aSJeff Kirsher 
2609be8b38a7SOng Boon Leong 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2610be8b38a7SOng Boon Leong 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2611ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2612362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
2613ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
2614ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
26157ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
2616362b37beSGiuseppe CAVALLARO 			else
2617362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
2618ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
2619ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
2620362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
2621ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
2622ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
2623ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2624cf32deecSRayagond Kokatanur 		}
2625f748be53SAlexandre TORGUE 
26262c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
2627f748be53SAlexandre TORGUE 
2628ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2629ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
26307ac6653aSJeff Kirsher 
2631be8b38a7SOng Boon Leong 		if (xdpf &&
2632be8b38a7SOng Boon Leong 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2633be8b38a7SOng Boon Leong 			xdp_return_frame_rx_napi(xdpf);
2634be8b38a7SOng Boon Leong 			tx_q->xdpf[entry] = NULL;
2635be8b38a7SOng Boon Leong 		}
2636be8b38a7SOng Boon Leong 
26378b278a5bSOng Boon Leong 		if (xdpf &&
26388b278a5bSOng Boon Leong 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
26398b278a5bSOng Boon Leong 			xdp_return_frame(xdpf);
26408b278a5bSOng Boon Leong 			tx_q->xdpf[entry] = NULL;
26418b278a5bSOng Boon Leong 		}
26428b278a5bSOng Boon Leong 
2643132c32eeSOng Boon Leong 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2644132c32eeSOng Boon Leong 			tx_q->xsk_frames_done++;
2645132c32eeSOng Boon Leong 
2646be8b38a7SOng Boon Leong 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2647be8b38a7SOng Boon Leong 			if (likely(skb)) {
264838979574SBeniamino Galvani 				pkts_compl++;
264938979574SBeniamino Galvani 				bytes_compl += skb->len;
26507c565c33SEric W. Biederman 				dev_consume_skb_any(skb);
2651ce736788SJoao Pinto 				tx_q->tx_skbuff[entry] = NULL;
26527ac6653aSJeff Kirsher 			}
2653be8b38a7SOng Boon Leong 		}
26547ac6653aSJeff Kirsher 
265542de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
26567ac6653aSJeff Kirsher 
26578531c808SChristian Marangi 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
26587ac6653aSJeff Kirsher 	}
2659ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
266038979574SBeniamino Galvani 
2661c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2662c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
266338979574SBeniamino Galvani 
2664c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2665c22a3f48SJoao Pinto 								queue))) &&
2666aa042f60SSong, Yoong Siang 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2667c22a3f48SJoao Pinto 
2668b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
2669b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
2670c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
26717ac6653aSJeff Kirsher 	}
2672d765955dSGiuseppe CAVALLARO 
2673132c32eeSOng Boon Leong 	if (tx_q->xsk_pool) {
2674132c32eeSOng Boon Leong 		bool work_done;
2675132c32eeSOng Boon Leong 
2676132c32eeSOng Boon Leong 		if (tx_q->xsk_frames_done)
2677132c32eeSOng Boon Leong 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2678132c32eeSOng Boon Leong 
2679132c32eeSOng Boon Leong 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2680132c32eeSOng Boon Leong 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2681132c32eeSOng Boon Leong 
2682132c32eeSOng Boon Leong 		/* For XSK TX, we try to send as many as possible.
2683132c32eeSOng Boon Leong 		 * If XSK work done (XSK TX desc empty and budget still
2684132c32eeSOng Boon Leong 		 * available), return "budget - 1" to reenable TX IRQ.
2685132c32eeSOng Boon Leong 		 * Else, return "budget" to make NAPI continue polling.
2686132c32eeSOng Boon Leong 		 */
2687132c32eeSOng Boon Leong 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2688132c32eeSOng Boon Leong 					       STMMAC_XSK_TX_BUDGET_MAX);
2689132c32eeSOng Boon Leong 		if (work_done)
2690132c32eeSOng Boon Leong 			xmits = budget - 1;
2691132c32eeSOng Boon Leong 		else
2692132c32eeSOng Boon Leong 			xmits = budget;
2693132c32eeSOng Boon Leong 	}
2694132c32eeSOng Boon Leong 
2695be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2696be1c7eaeSVineetha G. Jaya Kumaran 	    priv->eee_sw_timer_en) {
2697c74ead22SJisheng Zhang 		if (stmmac_enable_eee_mode(priv))
2698388e201dSVineetha G. Jaya Kumaran 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2699d765955dSGiuseppe CAVALLARO 	}
27008fce3331SJose Abreu 
27014ccb4585SJose Abreu 	/* We still have pending packets, let's call for a new scheduling */
27024ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
2703db2f2842SOng Boon Leong 		hrtimer_start(&tx_q->txtimer,
2704db2f2842SOng Boon Leong 			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2705d5a05e69SVincent Whitchurch 			      HRTIMER_MODE_REL);
27064ccb4585SJose Abreu 
27078fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
27088fce3331SJose Abreu 
2709132c32eeSOng Boon Leong 	/* Combine decisions from TX clean and XSK TX */
2710132c32eeSOng Boon Leong 	return max(count, xmits);
27117ac6653aSJeff Kirsher }
27127ac6653aSJeff Kirsher 
27137ac6653aSJeff Kirsher /**
2714732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
271532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
27165bacd778SLABBE Corentin  * @chan: channel index
27177ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2718732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
27197ac6653aSJeff Kirsher  */
27205bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
27217ac6653aSJeff Kirsher {
27228531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2723ce736788SJoao Pinto 
2724c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
27257ac6653aSJeff Kirsher 
2726ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
2727ba39b344SChristian Marangi 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2728ba39b344SChristian Marangi 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2729f9ec5723SChristian Marangi 	stmmac_reset_tx_queue(priv, chan);
2730f421031eSJongsung Kim 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2731f421031eSJongsung Kim 			    tx_q->dma_tx_phy, chan);
2732ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
27337ac6653aSJeff Kirsher 
27347ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
2735c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
27367ac6653aSJeff Kirsher }
27377ac6653aSJeff Kirsher 
273832ceabcaSGiuseppe CAVALLARO /**
27396deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
27406deee222SJoao Pinto  *  @priv: driver private structure
27416deee222SJoao Pinto  *  @txmode: TX operating mode
27426deee222SJoao Pinto  *  @rxmode: RX operating mode
27436deee222SJoao Pinto  *  @chan: channel index
27446deee222SJoao Pinto  *  Description: it is used for configuring of the DMA operation mode in
27456deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
27466deee222SJoao Pinto  *  mode.
27476deee222SJoao Pinto  */
27486deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
27496deee222SJoao Pinto 					  u32 rxmode, u32 chan)
27506deee222SJoao Pinto {
2751a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2752a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
275352a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
275452a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
27556deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
275652a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
27576deee222SJoao Pinto 
27586deee222SJoao Pinto 	if (rxfifosz == 0)
27596deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
276052a76235SJose Abreu 	if (txfifosz == 0)
276152a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
276252a76235SJose Abreu 
276352a76235SJose Abreu 	/* Adjust for real per queue fifo size */
276452a76235SJose Abreu 	rxfifosz /= rx_channels_count;
276552a76235SJose Abreu 	txfifosz /= tx_channels_count;
27666deee222SJoao Pinto 
2767ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2768ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
27696deee222SJoao Pinto }
27706deee222SJoao Pinto 
27718bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
27728bf993a5SJose Abreu {
277363a550fcSJose Abreu 	int ret;
27748bf993a5SJose Abreu 
2775c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
27768bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2777c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
27788bf993a5SJose Abreu 		stmmac_global_err(priv);
2779c10d4c82SJose Abreu 		return true;
2780c10d4c82SJose Abreu 	}
2781c10d4c82SJose Abreu 
2782c10d4c82SJose Abreu 	return false;
27838bf993a5SJose Abreu }
27848bf993a5SJose Abreu 
27857e1c520cSOng Boon Leong static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
27868fce3331SJose Abreu {
27878fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
27887e1c520cSOng Boon Leong 						 &priv->xstats, chan, dir);
27898531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
27908531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
27918fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
2792132c32eeSOng Boon Leong 	struct napi_struct *rx_napi;
2793132c32eeSOng Boon Leong 	struct napi_struct *tx_napi;
2794021bd5e3SJose Abreu 	unsigned long flags;
27958fce3331SJose Abreu 
2796132c32eeSOng Boon Leong 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2797132c32eeSOng Boon Leong 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2798132c32eeSOng Boon Leong 
27994ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2800132c32eeSOng Boon Leong 		if (napi_schedule_prep(rx_napi)) {
2801021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2802021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2803021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2804132c32eeSOng Boon Leong 			__napi_schedule(rx_napi);
28053ba07debSJose Abreu 		}
28064ccb4585SJose Abreu 	}
28074ccb4585SJose Abreu 
2808021bd5e3SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2809132c32eeSOng Boon Leong 		if (napi_schedule_prep(tx_napi)) {
2810021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2811021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2812021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2813132c32eeSOng Boon Leong 			__napi_schedule(tx_napi);
2814021bd5e3SJose Abreu 		}
2815021bd5e3SJose Abreu 	}
28168fce3331SJose Abreu 
28178fce3331SJose Abreu 	return status;
28188fce3331SJose Abreu }
28198fce3331SJose Abreu 
28206deee222SJoao Pinto /**
2821732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
282232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
282332ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2824732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedule poll method in case of some
2825732fdf0eSGiuseppe CAVALLARO  * work can be done.
282632ceabcaSGiuseppe CAVALLARO  */
28277ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
28287ac6653aSJeff Kirsher {
2829d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
28305a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
28315a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
28325a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2833d62a107aSJoao Pinto 	u32 chan;
28348ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
28358ac60ffbSKees Cook 
28368ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
28378ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
28388ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
283968e5cfafSJoao Pinto 
28405a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
28417e1c520cSOng Boon Leong 		status[chan] = stmmac_napi_check(priv, chan,
28427e1c520cSOng Boon Leong 						 DMA_DIR_RXTX);
2843d62a107aSJoao Pinto 
28445a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
28455a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
28467ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
28473a6c12a0SXiaoliang Yang 			stmmac_bump_dma_threshold(priv, chan);
28485a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
28494e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
28507ac6653aSJeff Kirsher 		}
2851d62a107aSJoao Pinto 	}
2852d62a107aSJoao Pinto }
28537ac6653aSJeff Kirsher 
285432ceabcaSGiuseppe CAVALLARO /**
285532ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
285632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
285732ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
285832ceabcaSGiuseppe CAVALLARO  */
28591c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
28601c901a46SGiuseppe CAVALLARO {
28611c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
28621c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
28631c901a46SGiuseppe CAVALLARO 
28643b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
28654f795b25SGiuseppe CAVALLARO 
28664f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
28673b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
28681c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
28694f795b25SGiuseppe CAVALLARO 	} else
287038ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
28711c901a46SGiuseppe CAVALLARO }
28721c901a46SGiuseppe CAVALLARO 
2873732fdf0eSGiuseppe CAVALLARO /**
2874732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
287532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
287619e30c14SGiuseppe CAVALLARO  * Description:
287719e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2878e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
287919e30c14SGiuseppe CAVALLARO  *  This can be also used to override the value passed through the
288019e30c14SGiuseppe CAVALLARO  *  platform and necessary for old MAC10/100 and GMAC chips.
2881e7434821SGiuseppe CAVALLARO  */
2882e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2883e7434821SGiuseppe CAVALLARO {
2884a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2885e7434821SGiuseppe CAVALLARO }
2886e7434821SGiuseppe CAVALLARO 
288732ceabcaSGiuseppe CAVALLARO /**
2888732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
288932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
289032ceabcaSGiuseppe CAVALLARO  * Description:
289132ceabcaSGiuseppe CAVALLARO  * it is to verify if the MAC address is valid, in case of failures it
289232ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
289332ceabcaSGiuseppe CAVALLARO  */
2894bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2895bfab27a1SGiuseppe CAVALLARO {
28967f9b8fe5SJakub Kicinski 	u8 addr[ETH_ALEN];
28977f9b8fe5SJakub Kicinski 
2898bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
28997f9b8fe5SJakub Kicinski 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
29007f9b8fe5SJakub Kicinski 		if (is_valid_ether_addr(addr))
29017f9b8fe5SJakub Kicinski 			eth_hw_addr_set(priv->dev, addr);
29027f9b8fe5SJakub Kicinski 		else
2903f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2904af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2905bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2906bfab27a1SGiuseppe CAVALLARO 	}
2907c88460b7SHans de Goede }
2908bfab27a1SGiuseppe CAVALLARO 
290932ceabcaSGiuseppe CAVALLARO /**
2910732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
291132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
291232ceabcaSGiuseppe CAVALLARO  * Description:
291332ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
291432ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
291532ceabcaSGiuseppe CAVALLARO  * in case of these are not passed a default is kept for the MAC or GMAC.
291632ceabcaSGiuseppe CAVALLARO  */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	/* CSR init must cover every channel used by either direction */
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	/* A DMA configuration with a valid burst length (pbl) is mandatory */
	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	/* Alternate descriptor size bit: needed when extended descriptors
	 * are used together with ring mode.
	 */
	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	/* Software-reset the DMA before programming anything else */
	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		/* Keep DMA irqs masked until NAPI is ready to service them */
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* RX tail pointer starts one past the last allocated buffer */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		/* TX tail starts at the ring base: nothing queued yet */
		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}
29820f1f88a8SGiuseppe CAVALLARO 
29838fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
29848fce3331SJose Abreu {
29858531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
29868fce3331SJose Abreu 
2987db2f2842SOng Boon Leong 	hrtimer_start(&tx_q->txtimer,
2988db2f2842SOng Boon Leong 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2989d5a05e69SVincent Whitchurch 		      HRTIMER_MODE_REL);
29908fce3331SJose Abreu }
29918fce3331SJose Abreu 
2992bfab27a1SGiuseppe CAVALLARO /**
2993732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
2994d0ea5cbdSJesse Brandeburg  * @t: data pointer
29959125cdd1SGiuseppe CAVALLARO  * Description:
29969125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
29979125cdd1SGiuseppe CAVALLARO  */
2998d5a05e69SVincent Whitchurch static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
29999125cdd1SGiuseppe CAVALLARO {
3000d5a05e69SVincent Whitchurch 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
30018fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
30028fce3331SJose Abreu 	struct stmmac_channel *ch;
3003132c32eeSOng Boon Leong 	struct napi_struct *napi;
30049125cdd1SGiuseppe CAVALLARO 
30058fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
3006132c32eeSOng Boon Leong 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
30078fce3331SJose Abreu 
3008132c32eeSOng Boon Leong 	if (likely(napi_schedule_prep(napi))) {
3009021bd5e3SJose Abreu 		unsigned long flags;
3010021bd5e3SJose Abreu 
3011021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
3012021bd5e3SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3013021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
3014132c32eeSOng Boon Leong 		__napi_schedule(napi);
3015021bd5e3SJose Abreu 	}
3016d5a05e69SVincent Whitchurch 
3017d5a05e69SVincent Whitchurch 	return HRTIMER_NORESTART;
30189125cdd1SGiuseppe CAVALLARO }
30199125cdd1SGiuseppe CAVALLARO 
30209125cdd1SGiuseppe CAVALLARO /**
3021d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
302232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
30239125cdd1SGiuseppe CAVALLARO  * Description:
3024d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
30259125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
30269125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
30279125cdd1SGiuseppe CAVALLARO  */
3028d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
30299125cdd1SGiuseppe CAVALLARO {
30308fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3031db2f2842SOng Boon Leong 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
30328fce3331SJose Abreu 	u32 chan;
30338fce3331SJose Abreu 
30348fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
30358531c808SChristian Marangi 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
30368fce3331SJose Abreu 
3037db2f2842SOng Boon Leong 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3038db2f2842SOng Boon Leong 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3039db2f2842SOng Boon Leong 
3040d5a05e69SVincent Whitchurch 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3041d5a05e69SVincent Whitchurch 		tx_q->txtimer.function = stmmac_tx_timer;
30428fce3331SJose Abreu 	}
3043db2f2842SOng Boon Leong 
3044db2f2842SOng Boon Leong 	for (chan = 0; chan < rx_channel_count; chan++)
3045db2f2842SOng Boon Leong 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
30469125cdd1SGiuseppe CAVALLARO }
30479125cdd1SGiuseppe CAVALLARO 
30484854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
30494854ab99SJoao Pinto {
30504854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
30514854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
30524854ab99SJoao Pinto 	u32 chan;
30534854ab99SJoao Pinto 
30544854ab99SJoao Pinto 	/* set TX ring length */
30554854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
3056a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
30578531c808SChristian Marangi 				       (priv->dma_conf.dma_tx_size - 1), chan);
30584854ab99SJoao Pinto 
30594854ab99SJoao Pinto 	/* set RX ring length */
30604854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
3061a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
30628531c808SChristian Marangi 				       (priv->dma_conf.dma_rx_size - 1), chan);
30634854ab99SJoao Pinto }
30644854ab99SJoao Pinto 
30659125cdd1SGiuseppe CAVALLARO /**
30666a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
30676a3a7193SJoao Pinto  *  @priv: driver private structure
30686a3a7193SJoao Pinto  *  Description: It is used for setting TX queues weight
30696a3a7193SJoao Pinto  */
30706a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
30716a3a7193SJoao Pinto {
30726a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
30736a3a7193SJoao Pinto 	u32 weight;
30746a3a7193SJoao Pinto 	u32 queue;
30756a3a7193SJoao Pinto 
30766a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
30776a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
3078c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
30796a3a7193SJoao Pinto 	}
30806a3a7193SJoao Pinto }
30816a3a7193SJoao Pinto 
30826a3a7193SJoao Pinto /**
308319d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
308419d91873SJoao Pinto  *  @priv: driver private structure
308519d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
308619d91873SJoao Pinto  */
308719d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
308819d91873SJoao Pinto {
308919d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
309019d91873SJoao Pinto 	u32 mode_to_use;
309119d91873SJoao Pinto 	u32 queue;
309219d91873SJoao Pinto 
309344781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
309444781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
309519d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
309619d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
309719d91873SJoao Pinto 			continue;
309819d91873SJoao Pinto 
3099c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
310019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
310119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
310219d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
310319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
310419d91873SJoao Pinto 				queue);
310519d91873SJoao Pinto 	}
310619d91873SJoao Pinto }
310719d91873SJoao Pinto 
310819d91873SJoao Pinto /**
3109d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3110d43042f4SJoao Pinto  *  @priv: driver private structure
3111d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
3112d43042f4SJoao Pinto  */
3113d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3114d43042f4SJoao Pinto {
3115d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3116d43042f4SJoao Pinto 	u32 queue;
3117d43042f4SJoao Pinto 	u32 chan;
3118d43042f4SJoao Pinto 
3119d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3120d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
3121c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3122d43042f4SJoao Pinto 	}
3123d43042f4SJoao Pinto }
3124d43042f4SJoao Pinto 
3125d43042f4SJoao Pinto /**
3126a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3127a8f5102aSJoao Pinto  *  @priv: driver private structure
3128a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
3129a8f5102aSJoao Pinto  */
3130a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3131a8f5102aSJoao Pinto {
3132a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3133a8f5102aSJoao Pinto 	u32 queue;
3134a8f5102aSJoao Pinto 	u32 prio;
3135a8f5102aSJoao Pinto 
3136a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3137a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3138a8f5102aSJoao Pinto 			continue;
3139a8f5102aSJoao Pinto 
3140a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
3141c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3142a8f5102aSJoao Pinto 	}
3143a8f5102aSJoao Pinto }
3144a8f5102aSJoao Pinto 
3145a8f5102aSJoao Pinto /**
3146a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3147a8f5102aSJoao Pinto  *  @priv: driver private structure
3148a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
3149a8f5102aSJoao Pinto  */
3150a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3151a8f5102aSJoao Pinto {
3152a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3153a8f5102aSJoao Pinto 	u32 queue;
3154a8f5102aSJoao Pinto 	u32 prio;
3155a8f5102aSJoao Pinto 
3156a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
3157a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3158a8f5102aSJoao Pinto 			continue;
3159a8f5102aSJoao Pinto 
3160a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
3161c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3162a8f5102aSJoao Pinto 	}
3163a8f5102aSJoao Pinto }
3164a8f5102aSJoao Pinto 
3165a8f5102aSJoao Pinto /**
3166abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3167abe80fdcSJoao Pinto  *  @priv: driver private structure
3168abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
3169abe80fdcSJoao Pinto  */
3170abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3171abe80fdcSJoao Pinto {
3172abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3173abe80fdcSJoao Pinto 	u32 queue;
3174abe80fdcSJoao Pinto 	u8 packet;
3175abe80fdcSJoao Pinto 
3176abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3177abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
3178abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3179abe80fdcSJoao Pinto 			continue;
3180abe80fdcSJoao Pinto 
3181abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3182c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3183abe80fdcSJoao Pinto 	}
3184abe80fdcSJoao Pinto }
3185abe80fdcSJoao Pinto 
318676067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
318776067459SJose Abreu {
318876067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
318976067459SJose Abreu 		priv->rss.enable = false;
319076067459SJose Abreu 		return;
319176067459SJose Abreu 	}
319276067459SJose Abreu 
319376067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
319476067459SJose Abreu 		priv->rss.enable = true;
319576067459SJose Abreu 	else
319676067459SJose Abreu 		priv->rss.enable = false;
319776067459SJose Abreu 
319876067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
319976067459SJose Abreu 			     priv->plat->rx_queues_to_use);
320076067459SJose Abreu }
320176067459SJose Abreu 
3202abe80fdcSJoao Pinto /**
3203d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
3204d0a9c9f9SJoao Pinto  *  @priv: driver private structure
 *  Description: It is used for configuring MTL
3206d0a9c9f9SJoao Pinto  */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	/* Per-queue weights only matter with more than one TX queue */
	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
				priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
				priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}
3251d0a9c9f9SJoao Pinto 
32528bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
32538bf993a5SJose Abreu {
3254c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
32558bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
32565ac712dcSWong Vee Khee 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
32575ac712dcSWong Vee Khee 					  priv->plat->safety_feat_cfg);
32588bf993a5SJose Abreu 	} else {
32598bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
32608bf993a5SJose Abreu 	}
32618bf993a5SJose Abreu }
32628bf993a5SJose Abreu 
32635a558611SOng Boon Leong static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
32645a558611SOng Boon Leong {
32655a558611SOng Boon Leong 	char *name;
32665a558611SOng Boon Leong 
32675a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3268db7c691dSMohammad Athari Bin Ismail 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
32695a558611SOng Boon Leong 
32705a558611SOng Boon Leong 	name = priv->wq_name;
32715a558611SOng Boon Leong 	sprintf(name, "%s-fpe", priv->dev->name);
32725a558611SOng Boon Leong 
32735a558611SOng Boon Leong 	priv->fpe_wq = create_singlethread_workqueue(name);
32745a558611SOng Boon Leong 	if (!priv->fpe_wq) {
32755a558611SOng Boon Leong 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
32765a558611SOng Boon Leong 
32775a558611SOng Boon Leong 		return -ENOMEM;
32785a558611SOng Boon Leong 	}
32795a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue start");
32805a558611SOng Boon Leong 
32815a558611SOng Boon Leong 	return 0;
32825a558611SOng Boon Leong }
32835a558611SOng Boon Leong 
3284d0a9c9f9SJoao Pinto /**
3285732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
3286523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
32870735e639SMohammad Athari Bin Ismail  *  @ptp_register: register PTP if set
3288523f11b5SSrinivas Kandagatla  *  Description:
3289732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state because the
3290732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
3291732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers). The DMA is ready to start receiving and
3292732fdf0eSGiuseppe CAVALLARO  *  transmitting.
3293523f11b5SSrinivas Kandagatla  *  Return value:
3294523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3295523f11b5SSrinivas Kandagatla  *  file on failure.
3296523f11b5SSrinivas Kandagatla  */
static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	bool sph_en;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		/* Only 10/100/1000 are meaningful; anything else clears ps */
		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* If HW checksum offload cannot be enabled, fall back to no COE */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* The PTP reference clock is only needed when PTP is registered;
	 * failure to enable it is reported but not fatal.
	 */
	if (ptp_register) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev,
				    "failed to enable PTP reference clock: %pe\n",
				    ERR_PTR(ret));
	}

	ret = stmmac_init_ptp(priv);
	if (ret == -EOPNOTSUPP)
		netdev_info(priv->dev, "PTP not supported by HW\n");
	else if (ret)
		netdev_warn(priv->dev, "PTP init failed\n");
	else if (ptp_register)
		stmmac_ptp_register(priv);

	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;

	/* Convert the timer from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	/* RX interrupt watchdog: program a default if none was set */
	if (priv->use_riwt) {
		u32 queue;

		for (queue = 0; queue < rx_cnt; queue++) {
			if (!priv->rx_riwt[queue])
				priv->rx_riwt[queue] = DEF_DMA_RIWT;

			stmmac_rx_watchdog(priv, priv->ioaddr,
					   priv->rx_riwt[queue], queue);
		}
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++) {
			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

			/* TSO and TBS cannot co-exist */
			if (tx_q->tbs & STMMAC_TBS_AVAIL)
				continue;

			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
		}
	}

	/* Enable Split Header only when RX checksum offload is active */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);


	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS: enable/disable per queue according to availability */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	/* Frame Preemption: start its workqueue and, if requested by the
	 * platform, kick off the handshake with the link partner.
	 */
	if (priv->dma_cap.fpesel) {
		stmmac_fpe_start_wq(priv);

		if (priv->plat->fpe_cfg->enable)
			stmmac_fpe_handshake(priv, true);
	}

	return 0;
}
3441523f11b5SSrinivas Kandagatla 
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Balance the clk_prepare_enable() of the PTP reference clock
	 * done in stmmac_hw_setup().
	 */
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
3448c66f6c37SThierry Reding 
/**
 * stmmac_free_irq - release IRQ lines requested so far
 * @dev: net device pointer
 * @irq_err: request stage that failed (REQ_IRQ_ERR_ALL frees everything)
 * @irq_idx: for the RX/TX stages, number of per-queue vectors already
 *	     requested and therefore to be freed
 *
 * The switch intentionally falls through: each label releases the
 * resources acquired *before* that request stage, walking the
 * acquisition order (mac, wol, lpi, sfty_ce, sfty_ue, rx[], tx[]) in
 * reverse.  Lines equal to dev->irq are shared with the MAC interrupt
 * and are only freed once, in the REQ_IRQ_ERR_WOL case.
 */
34498532f613SOng Boon Leong static void stmmac_free_irq(struct net_device *dev,
34508532f613SOng Boon Leong 			    enum request_irq_err irq_err, int irq_idx)
34518532f613SOng Boon Leong {
34528532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
34538532f613SOng Boon Leong 	int j;
34548532f613SOng Boon Leong 
34558532f613SOng Boon Leong 	switch (irq_err) {
34568532f613SOng Boon Leong 	case REQ_IRQ_ERR_ALL:
34578532f613SOng Boon Leong 		irq_idx = priv->plat->tx_queues_to_use;
34588532f613SOng Boon Leong 		fallthrough;
34598532f613SOng Boon Leong 	case REQ_IRQ_ERR_TX:
34608532f613SOng Boon Leong 		for (j = irq_idx - 1; j >= 0; j--) {
34618deec94cSOng Boon Leong 			if (priv->tx_irq[j] > 0) {
				/* drop the CPU affinity hint set at request time */
34628deec94cSOng Boon Leong 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
34638531c808SChristian Marangi 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
34648532f613SOng Boon Leong 			}
34658deec94cSOng Boon Leong 		}
34668532f613SOng Boon Leong 		irq_idx = priv->plat->rx_queues_to_use;
34678532f613SOng Boon Leong 		fallthrough;
34688532f613SOng Boon Leong 	case REQ_IRQ_ERR_RX:
34698532f613SOng Boon Leong 		for (j = irq_idx - 1; j >= 0; j--) {
34708deec94cSOng Boon Leong 			if (priv->rx_irq[j] > 0) {
34718deec94cSOng Boon Leong 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
34728531c808SChristian Marangi 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
34738532f613SOng Boon Leong 			}
34748deec94cSOng Boon Leong 		}
34758532f613SOng Boon Leong 
34768532f613SOng Boon Leong 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
34778532f613SOng Boon Leong 			free_irq(priv->sfty_ue_irq, dev);
34788532f613SOng Boon Leong 		fallthrough;
34798532f613SOng Boon Leong 	case REQ_IRQ_ERR_SFTY_UE:
34808532f613SOng Boon Leong 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
34818532f613SOng Boon Leong 			free_irq(priv->sfty_ce_irq, dev);
34828532f613SOng Boon Leong 		fallthrough;
34838532f613SOng Boon Leong 	case REQ_IRQ_ERR_SFTY_CE:
34848532f613SOng Boon Leong 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
34858532f613SOng Boon Leong 			free_irq(priv->lpi_irq, dev);
34868532f613SOng Boon Leong 		fallthrough;
34878532f613SOng Boon Leong 	case REQ_IRQ_ERR_LPI:
34888532f613SOng Boon Leong 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
34898532f613SOng Boon Leong 			free_irq(priv->wol_irq, dev);
34908532f613SOng Boon Leong 		fallthrough;
34918532f613SOng Boon Leong 	case REQ_IRQ_ERR_WOL:
34928532f613SOng Boon Leong 		free_irq(dev->irq, dev);
34938532f613SOng Boon Leong 		fallthrough;
34948532f613SOng Boon Leong 	case REQ_IRQ_ERR_MAC:
34958532f613SOng Boon Leong 	case REQ_IRQ_ERR_NO:
34968532f613SOng Boon Leong 		/* If MAC IRQ request error, no more IRQ to free */
34978532f613SOng Boon Leong 		break;
34988532f613SOng Boon Leong 	}
34998532f613SOng Boon Leong }
35008532f613SOng Boon Leong 
/**
 * stmmac_request_irq_multi_msi - request all MSI vectors used by the driver
 * @dev: net device pointer
 *
 * Acquisition order: mac, wol, lpi, sfty_ce, sfty_ue, then one vector per
 * RX queue and one per TX queue.  On any failure the stage is recorded in
 * irq_err (plus the queue index in irq_idx for RX/TX) and stmmac_free_irq()
 * unwinds exactly the vectors requested so far.  Optional lines are only
 * requested when they differ from dev->irq.  Per-queue vectors get an
 * affinity hint spreading queues across online CPUs.
 *
 * Return: 0 on success, negative errno from request_irq() on failure.
 */
35018532f613SOng Boon Leong static int stmmac_request_irq_multi_msi(struct net_device *dev)
35028532f613SOng Boon Leong {
35038532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
35043e6dc7b6SWong Vee Khee 	enum request_irq_err irq_err;
35058deec94cSOng Boon Leong 	cpumask_t cpu_mask;
35068532f613SOng Boon Leong 	int irq_idx = 0;
35078532f613SOng Boon Leong 	char *int_name;
35088532f613SOng Boon Leong 	int ret;
35098532f613SOng Boon Leong 	int i;
35108532f613SOng Boon Leong 
35118532f613SOng Boon Leong 	/* For common interrupt */
35128532f613SOng Boon Leong 	int_name = priv->int_name_mac;
35138532f613SOng Boon Leong 	sprintf(int_name, "%s:%s", dev->name, "mac");
35148532f613SOng Boon Leong 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
35158532f613SOng Boon Leong 			  0, int_name, dev);
35168532f613SOng Boon Leong 	if (unlikely(ret < 0)) {
35178532f613SOng Boon Leong 		netdev_err(priv->dev,
35188532f613SOng Boon Leong 			   "%s: alloc mac MSI %d (error: %d)\n",
35198532f613SOng Boon Leong 			   __func__, dev->irq, ret);
35208532f613SOng Boon Leong 		irq_err = REQ_IRQ_ERR_MAC;
35218532f613SOng Boon Leong 		goto irq_error;
35228532f613SOng Boon Leong 	}
35238532f613SOng Boon Leong 
35248532f613SOng Boon Leong 	/* Request the Wake IRQ in case of another line
35258532f613SOng Boon Leong 	 * is used for WoL
35268532f613SOng Boon Leong 	 */
35278532f613SOng Boon Leong 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
35288532f613SOng Boon Leong 		int_name = priv->int_name_wol;
35298532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "wol");
35308532f613SOng Boon Leong 		ret = request_irq(priv->wol_irq,
35318532f613SOng Boon Leong 				  stmmac_mac_interrupt,
35328532f613SOng Boon Leong 				  0, int_name, dev);
35338532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35348532f613SOng Boon Leong 			netdev_err(priv->dev,
35358532f613SOng Boon Leong 				   "%s: alloc wol MSI %d (error: %d)\n",
35368532f613SOng Boon Leong 				   __func__, priv->wol_irq, ret);
35378532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_WOL;
35388532f613SOng Boon Leong 			goto irq_error;
35398532f613SOng Boon Leong 		}
35408532f613SOng Boon Leong 	}
35418532f613SOng Boon Leong 
35428532f613SOng Boon Leong 	/* Request the LPI IRQ in case of another line
35438532f613SOng Boon Leong 	 * is used for LPI
35448532f613SOng Boon Leong 	 */
35458532f613SOng Boon Leong 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
35468532f613SOng Boon Leong 		int_name = priv->int_name_lpi;
35478532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "lpi");
35488532f613SOng Boon Leong 		ret = request_irq(priv->lpi_irq,
35498532f613SOng Boon Leong 				  stmmac_mac_interrupt,
35508532f613SOng Boon Leong 				  0, int_name, dev);
35518532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35528532f613SOng Boon Leong 			netdev_err(priv->dev,
35538532f613SOng Boon Leong 				   "%s: alloc lpi MSI %d (error: %d)\n",
35548532f613SOng Boon Leong 				   __func__, priv->lpi_irq, ret);
35558532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_LPI;
35568532f613SOng Boon Leong 			goto irq_error;
35578532f613SOng Boon Leong 		}
35588532f613SOng Boon Leong 	}
35598532f613SOng Boon Leong 
35608532f613SOng Boon Leong 	/* Request the Safety Feature Correctible Error line in
35618532f613SOng Boon Leong 	 * case of another line is used
35628532f613SOng Boon Leong 	 */
35638532f613SOng Boon Leong 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
35648532f613SOng Boon Leong 		int_name = priv->int_name_sfty_ce;
35658532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
35668532f613SOng Boon Leong 		ret = request_irq(priv->sfty_ce_irq,
35678532f613SOng Boon Leong 				  stmmac_safety_interrupt,
35688532f613SOng Boon Leong 				  0, int_name, dev);
35698532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35708532f613SOng Boon Leong 			netdev_err(priv->dev,
35718532f613SOng Boon Leong 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
35728532f613SOng Boon Leong 				   __func__, priv->sfty_ce_irq, ret);
35738532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_SFTY_CE;
35748532f613SOng Boon Leong 			goto irq_error;
35758532f613SOng Boon Leong 		}
35768532f613SOng Boon Leong 	}
35778532f613SOng Boon Leong 
35788532f613SOng Boon Leong 	/* Request the Safety Feature Uncorrectible Error line in
35798532f613SOng Boon Leong 	 * case of another line is used
35808532f613SOng Boon Leong 	 */
35818532f613SOng Boon Leong 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
35828532f613SOng Boon Leong 		int_name = priv->int_name_sfty_ue;
35838532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
35848532f613SOng Boon Leong 		ret = request_irq(priv->sfty_ue_irq,
35858532f613SOng Boon Leong 				  stmmac_safety_interrupt,
35868532f613SOng Boon Leong 				  0, int_name, dev);
35878532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35888532f613SOng Boon Leong 			netdev_err(priv->dev,
35898532f613SOng Boon Leong 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
35908532f613SOng Boon Leong 				   __func__, priv->sfty_ue_irq, ret);
35918532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_SFTY_UE;
35928532f613SOng Boon Leong 			goto irq_error;
35938532f613SOng Boon Leong 		}
35948532f613SOng Boon Leong 	}
35958532f613SOng Boon Leong 
35968532f613SOng Boon Leong 	/* Request Rx MSI irq */
35978532f613SOng Boon Leong 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
		/* irq/name arrays are sized MTL_MAX_RX_QUEUES; don't overrun */
3598d68c2e1dSArnd Bergmann 		if (i >= MTL_MAX_RX_QUEUES)
35993e0d5699SArnd Bergmann 			break;
36008532f613SOng Boon Leong 		if (priv->rx_irq[i] == 0)
36018532f613SOng Boon Leong 			continue;
36028532f613SOng Boon Leong 
36038532f613SOng Boon Leong 		int_name = priv->int_name_rx_irq[i];
36048532f613SOng Boon Leong 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
36058532f613SOng Boon Leong 		ret = request_irq(priv->rx_irq[i],
36068532f613SOng Boon Leong 				  stmmac_msi_intr_rx,
36078531c808SChristian Marangi 				  0, int_name, &priv->dma_conf.rx_queue[i]);
36088532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36098532f613SOng Boon Leong 			netdev_err(priv->dev,
36108532f613SOng Boon Leong 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
36118532f613SOng Boon Leong 				   __func__, i, priv->rx_irq[i], ret);
36128532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_RX;
36138532f613SOng Boon Leong 			irq_idx = i;
36148532f613SOng Boon Leong 			goto irq_error;
36158532f613SOng Boon Leong 		}
36168deec94cSOng Boon Leong 		cpumask_clear(&cpu_mask);
36178deec94cSOng Boon Leong 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
36188deec94cSOng Boon Leong 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
36198532f613SOng Boon Leong 	}
36208532f613SOng Boon Leong 
36218532f613SOng Boon Leong 	/* Request Tx MSI irq */
36228532f613SOng Boon Leong 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3623d68c2e1dSArnd Bergmann 		if (i >= MTL_MAX_TX_QUEUES)
36243e0d5699SArnd Bergmann 			break;
36258532f613SOng Boon Leong 		if (priv->tx_irq[i] == 0)
36268532f613SOng Boon Leong 			continue;
36278532f613SOng Boon Leong 
36288532f613SOng Boon Leong 		int_name = priv->int_name_tx_irq[i];
36298532f613SOng Boon Leong 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
36308532f613SOng Boon Leong 		ret = request_irq(priv->tx_irq[i],
36318532f613SOng Boon Leong 				  stmmac_msi_intr_tx,
36328531c808SChristian Marangi 				  0, int_name, &priv->dma_conf.tx_queue[i]);
36338532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36348532f613SOng Boon Leong 			netdev_err(priv->dev,
36358532f613SOng Boon Leong 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
36368532f613SOng Boon Leong 				   __func__, i, priv->tx_irq[i], ret);
36378532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_TX;
36388532f613SOng Boon Leong 			irq_idx = i;
36398532f613SOng Boon Leong 			goto irq_error;
36408532f613SOng Boon Leong 		}
36418deec94cSOng Boon Leong 		cpumask_clear(&cpu_mask);
36428deec94cSOng Boon Leong 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
36438deec94cSOng Boon Leong 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
36448532f613SOng Boon Leong 	}
36458532f613SOng Boon Leong 
36468532f613SOng Boon Leong 	return 0;
36478532f613SOng Boon Leong 
36488532f613SOng Boon Leong irq_error:
36498532f613SOng Boon Leong 	stmmac_free_irq(dev, irq_err, irq_idx);
36508532f613SOng Boon Leong 	return ret;
36518532f613SOng Boon Leong }
36528532f613SOng Boon Leong 
/**
 * stmmac_request_irq_single - request the shared interrupt line(s)
 * @dev: net device pointer
 *
 * Non-MSI path: one IRQF_SHARED line services the whole device, plus
 * optional separate WoL and LPI lines when the platform wires them to
 * interrupts different from dev->irq.  On failure the stage reached is
 * passed to stmmac_free_irq() so only what was requested is released.
 *
 * Return: 0 on success, negative errno from request_irq() on failure.
 */
36538532f613SOng Boon Leong static int stmmac_request_irq_single(struct net_device *dev)
36548532f613SOng Boon Leong {
36558532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
36563e6dc7b6SWong Vee Khee 	enum request_irq_err irq_err;
36578532f613SOng Boon Leong 	int ret;
36588532f613SOng Boon Leong 
36598532f613SOng Boon Leong 	ret = request_irq(dev->irq, stmmac_interrupt,
36608532f613SOng Boon Leong 			  IRQF_SHARED, dev->name, dev);
36618532f613SOng Boon Leong 	if (unlikely(ret < 0)) {
36628532f613SOng Boon Leong 		netdev_err(priv->dev,
36638532f613SOng Boon Leong 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
36648532f613SOng Boon Leong 			   __func__, dev->irq, ret);
36658532f613SOng Boon Leong 		irq_err = REQ_IRQ_ERR_MAC;
36663e6dc7b6SWong Vee Khee 		goto irq_error;
36678532f613SOng Boon Leong 	}
36688532f613SOng Boon Leong 
36698532f613SOng Boon Leong 	/* Request the Wake IRQ in case of another line
36708532f613SOng Boon Leong 	 * is used for WoL
36718532f613SOng Boon Leong 	 */
36728532f613SOng Boon Leong 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
36738532f613SOng Boon Leong 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
36748532f613SOng Boon Leong 				  IRQF_SHARED, dev->name, dev);
36758532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36768532f613SOng Boon Leong 			netdev_err(priv->dev,
36778532f613SOng Boon Leong 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
36788532f613SOng Boon Leong 				   __func__, priv->wol_irq, ret);
36798532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_WOL;
36803e6dc7b6SWong Vee Khee 			goto irq_error;
36818532f613SOng Boon Leong 		}
36828532f613SOng Boon Leong 	}
36838532f613SOng Boon Leong 
36848532f613SOng Boon Leong 	/* Request the IRQ lines */
36858532f613SOng Boon Leong 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
36868532f613SOng Boon Leong 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
36878532f613SOng Boon Leong 				  IRQF_SHARED, dev->name, dev);
36888532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36898532f613SOng Boon Leong 			netdev_err(priv->dev,
36908532f613SOng Boon Leong 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
36918532f613SOng Boon Leong 				   __func__, priv->lpi_irq, ret);
36928532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_LPI;
36938532f613SOng Boon Leong 			goto irq_error;
36948532f613SOng Boon Leong 		}
36958532f613SOng Boon Leong 	}
36968532f613SOng Boon Leong 
36978532f613SOng Boon Leong 	return 0;
36988532f613SOng Boon Leong 
36998532f613SOng Boon Leong irq_error:
37008532f613SOng Boon Leong 	stmmac_free_irq(dev, irq_err, 0);
37018532f613SOng Boon Leong 	return ret;
37028532f613SOng Boon Leong }
37038532f613SOng Boon Leong 
37048532f613SOng Boon Leong static int stmmac_request_irq(struct net_device *dev)
37058532f613SOng Boon Leong {
37068532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
37078532f613SOng Boon Leong 	int ret;
37088532f613SOng Boon Leong 
37098532f613SOng Boon Leong 	/* Request the IRQ lines */
37108532f613SOng Boon Leong 	if (priv->plat->multi_msi_en)
37118532f613SOng Boon Leong 		ret = stmmac_request_irq_multi_msi(dev);
37128532f613SOng Boon Leong 	else
37138532f613SOng Boon Leong 		ret = stmmac_request_irq_single(dev);
37148532f613SOng Boon Leong 
37158532f613SOng Boon Leong 	return ret;
37168532f613SOng Boon Leong }
37178532f613SOng Boon Leong 
3718523f11b5SSrinivas Kandagatla /**
3719ba39b344SChristian Marangi  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3720ba39b344SChristian Marangi  *  @priv: driver private structure
3721ba39b344SChristian Marangi  *  @mtu: MTU to setup the dma queue and buf with
3722ba39b344SChristian Marangi  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3723ba39b344SChristian Marangi  *  Allocate the Tx/Rx DMA queue and init them.
 *  The returned struct is owned by the caller, which must release it with
 *  free_dma_desc_resources() + kfree() when no longer needed.
3724ba39b344SChristian Marangi  *  Return value:
3725ba39b344SChristian Marangi  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3726ba39b344SChristian Marangi  */
3727ba39b344SChristian Marangi static struct stmmac_dma_conf *
3728ba39b344SChristian Marangi stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3729ba39b344SChristian Marangi {
3730ba39b344SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
3731ba39b344SChristian Marangi 	int chan, bfsize, ret;
3732ba39b344SChristian Marangi 
3733ba39b344SChristian Marangi 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3734ba39b344SChristian Marangi 	if (!dma_conf) {
3735ba39b344SChristian Marangi 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3736ba39b344SChristian Marangi 			   __func__);
3737ba39b344SChristian Marangi 		return ERR_PTR(-ENOMEM);
3738ba39b344SChristian Marangi 	}
3739ba39b344SChristian Marangi 
	/* Pick the RX buffer size: try the 16KiB layout first, otherwise
	 * fall back to the standard size derived from the MTU.
	 */
3740ba39b344SChristian Marangi 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3741ba39b344SChristian Marangi 	if (bfsize < 0)
3742ba39b344SChristian Marangi 		bfsize = 0;
3743ba39b344SChristian Marangi 
3744ba39b344SChristian Marangi 	if (bfsize < BUF_SIZE_16KiB)
3745ba39b344SChristian Marangi 		bfsize = stmmac_set_bfsize(mtu, 0);
3746ba39b344SChristian Marangi 
3747ba39b344SChristian Marangi 	dma_conf->dma_buf_sz = bfsize;
3748ba39b344SChristian Marangi 	/* Chose the tx/rx size from the already defined one in the
3749ba39b344SChristian Marangi 	 * priv struct. (if defined)
3750ba39b344SChristian Marangi 	 */
3751ba39b344SChristian Marangi 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3752ba39b344SChristian Marangi 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3753ba39b344SChristian Marangi 
3754ba39b344SChristian Marangi 	if (!dma_conf->dma_tx_size)
3755ba39b344SChristian Marangi 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3756ba39b344SChristian Marangi 	if (!dma_conf->dma_rx_size)
3757ba39b344SChristian Marangi 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3758ba39b344SChristian Marangi 
3759ba39b344SChristian Marangi 	/* Earlier check for TBS */
3760ba39b344SChristian Marangi 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3761ba39b344SChristian Marangi 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3762ba39b344SChristian Marangi 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3763ba39b344SChristian Marangi 
3764ba39b344SChristian Marangi 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3765ba39b344SChristian Marangi 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3766ba39b344SChristian Marangi 	}
3767ba39b344SChristian Marangi 
3768ba39b344SChristian Marangi 	ret = alloc_dma_desc_resources(priv, dma_conf);
3769ba39b344SChristian Marangi 	if (ret < 0) {
3770ba39b344SChristian Marangi 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3771ba39b344SChristian Marangi 			   __func__);
3772ba39b344SChristian Marangi 		goto alloc_error;
3773ba39b344SChristian Marangi 	}
3774ba39b344SChristian Marangi 
3775ba39b344SChristian Marangi 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3776ba39b344SChristian Marangi 	if (ret < 0) {
3777ba39b344SChristian Marangi 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3778ba39b344SChristian Marangi 			   __func__);
3779ba39b344SChristian Marangi 		goto init_error;
3780ba39b344SChristian Marangi 	}
3781ba39b344SChristian Marangi 
3782ba39b344SChristian Marangi 	return dma_conf;
3783ba39b344SChristian Marangi 
	/* goto-based unwind: release in reverse order of acquisition */
3784ba39b344SChristian Marangi init_error:
3785ba39b344SChristian Marangi 	free_dma_desc_resources(priv, dma_conf);
3786ba39b344SChristian Marangi alloc_error:
3787ba39b344SChristian Marangi 	kfree(dma_conf);
3788ba39b344SChristian Marangi 	return ERR_PTR(ret);
3789ba39b344SChristian Marangi }
3790ba39b344SChristian Marangi 
3791ba39b344SChristian Marangi /**
3792ba39b344SChristian Marangi  *  __stmmac_open - open entry point of the driver
37937ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
3794ba39b344SChristian Marangi  *  @dma_conf :  structure to take the dma data
37957ac6653aSJeff Kirsher  *  Description:
37967ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
 *  Bring-up order matters: PM resume, PHY attach, DMA config adoption,
 *  optional serdes powerup, HW setup, phylink start, IRQ request, then
 *  queue/DMA-interrupt enable.  The error labels unwind in reverse.
37977ac6653aSJeff Kirsher  *  Return value:
37987ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
37997ac6653aSJeff Kirsher  *  file on failure.
38007ac6653aSJeff Kirsher  */
3801ba39b344SChristian Marangi static int __stmmac_open(struct net_device *dev,
3802ba39b344SChristian Marangi 			 struct stmmac_dma_conf *dma_conf)
38037ac6653aSJeff Kirsher {
38047ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
38059900074eSVladimir Oltean 	int mode = priv->plat->phy_interface;
38068fce3331SJose Abreu 	u32 chan;
38077ac6653aSJeff Kirsher 	int ret;
38087ac6653aSJeff Kirsher 
380985648865SMinghao Chi 	ret = pm_runtime_resume_and_get(priv->device);
381085648865SMinghao Chi 	if (ret < 0)
38115ec55823SJoakim Zhang 		return ret;
38125ec55823SJoakim Zhang 
	/* Attach the PHY unless the PCS (TBI/RTBI) or a C73-autoneg XPCS
	 * handles the link without a conventional PHY.
	 */
3813a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3814f213bbe8SJose Abreu 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
38159900074eSVladimir Oltean 	    (!priv->hw->xpcs ||
381611059740SVladimir Oltean 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
38177ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
3818e58bb43fSGiuseppe CAVALLARO 		if (ret) {
381938ddc59dSLABBE Corentin 			netdev_err(priv->dev,
382038ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
3821e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
38225ec55823SJoakim Zhang 			goto init_phy_error;
38237ac6653aSJeff Kirsher 		}
3824e58bb43fSGiuseppe CAVALLARO 	}
38257ac6653aSJeff Kirsher 
3826523f11b5SSrinivas Kandagatla 	/* Extra statistics */
3827523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3828523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
3829523f11b5SSrinivas Kandagatla 
383022ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
383156329137SBartlomiej Zolnierkiewicz 
	/* Adopt the caller-provided DMA config: priv->dma_conf becomes a
	 * copy, so the caller's dma_conf can be freed afterwards.
	 */
3832ba39b344SChristian Marangi 	buf_sz = dma_conf->dma_buf_sz;
3833ba39b344SChristian Marangi 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
38345bacd778SLABBE Corentin 
3835f9ec5723SChristian Marangi 	stmmac_reset_queues_param(priv);
3836f9ec5723SChristian Marangi 
3837a46e9010SRevanth Kumar Uppala 	if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
383849725ffcSJunxiao Chang 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
383949725ffcSJunxiao Chang 		if (ret < 0) {
384049725ffcSJunxiao Chang 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
384149725ffcSJunxiao Chang 				   __func__);
384249725ffcSJunxiao Chang 			goto init_error;
384349725ffcSJunxiao Chang 		}
384449725ffcSJunxiao Chang 	}
384549725ffcSJunxiao Chang 
3846fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
384756329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
384838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3849c9324d18SGiuseppe CAVALLARO 		goto init_error;
38507ac6653aSJeff Kirsher 	}
38517ac6653aSJeff Kirsher 
3852d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
3853777da230SGiuseppe CAVALLARO 
385474371272SJose Abreu 	phylink_start(priv->phylink);
385577b28983SJisheng Zhang 	/* We may have called phylink_speed_down before */
385677b28983SJisheng Zhang 	phylink_speed_up(priv->phylink);
38577ac6653aSJeff Kirsher 
38588532f613SOng Boon Leong 	ret = stmmac_request_irq(dev);
38598532f613SOng Boon Leong 	if (ret)
38606c1e5abeSThierry Reding 		goto irq_error;
3861d765955dSGiuseppe CAVALLARO 
3862c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
38639f19306dSOng Boon Leong 	netif_tx_start_all_queues(priv->dev);
3864087a7b94SVincent Whitchurch 	stmmac_enable_all_dma_irq(priv);
38657ac6653aSJeff Kirsher 
38667ac6653aSJeff Kirsher 	return 0;
38677ac6653aSJeff Kirsher 
	/* Error unwind: each label undoes everything done after the one below */
38686c1e5abeSThierry Reding irq_error:
386974371272SJose Abreu 	phylink_stop(priv->phylink);
38707a13f8f5SFrancesco Virlinzi 
38718fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
38728531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
38738fce3331SJose Abreu 
3874c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
3875c9324d18SGiuseppe CAVALLARO init_error:
387674371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
38775ec55823SJoakim Zhang init_phy_error:
38785ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
38797ac6653aSJeff Kirsher 	return ret;
38807ac6653aSJeff Kirsher }
38817ac6653aSJeff Kirsher 
3882ba39b344SChristian Marangi static int stmmac_open(struct net_device *dev)
3883ba39b344SChristian Marangi {
3884ba39b344SChristian Marangi 	struct stmmac_priv *priv = netdev_priv(dev);
3885ba39b344SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
3886ba39b344SChristian Marangi 	int ret;
3887ba39b344SChristian Marangi 
3888ba39b344SChristian Marangi 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3889ba39b344SChristian Marangi 	if (IS_ERR(dma_conf))
3890ba39b344SChristian Marangi 		return PTR_ERR(dma_conf);
3891ba39b344SChristian Marangi 
3892ba39b344SChristian Marangi 	ret = __stmmac_open(dev, dma_conf);
3893*30134b7cSChristian Marangi 	if (ret)
3894*30134b7cSChristian Marangi 		free_dma_desc_resources(priv, dma_conf);
3895*30134b7cSChristian Marangi 
3896ba39b344SChristian Marangi 	kfree(dma_conf);
3897ba39b344SChristian Marangi 	return ret;
3898ba39b344SChristian Marangi }
3899ba39b344SChristian Marangi 
39005a558611SOng Boon Leong static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
39015a558611SOng Boon Leong {
39025a558611SOng Boon Leong 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
39035a558611SOng Boon Leong 
39045a558611SOng Boon Leong 	if (priv->fpe_wq)
39055a558611SOng Boon Leong 		destroy_workqueue(priv->fpe_wq);
39065a558611SOng Boon Leong 
39075a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue stop");
39085a558611SOng Boon Leong }
39095a558611SOng Boon Leong 
39107ac6653aSJeff Kirsher /**
39117ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
39127ac6653aSJeff Kirsher  *  @dev : device pointer.
39137ac6653aSJeff Kirsher  *  Description:
39147ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
 *  Teardown mirrors __stmmac_open() in reverse: phylink stop/disconnect,
 *  queue/NAPI disable, IRQ release, DMA stop, ring free, MAC disable.
39157ac6653aSJeff Kirsher  */
3916ac746c85SOng Boon Leong static int stmmac_release(struct net_device *dev)
39177ac6653aSJeff Kirsher {
39187ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
39198fce3331SJose Abreu 	u32 chan;
39207ac6653aSJeff Kirsher 
392177b28983SJisheng Zhang 	if (device_may_wakeup(priv->device))
392277b28983SJisheng Zhang 		phylink_speed_down(priv->phylink, false);
39237ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
392474371272SJose Abreu 	phylink_stop(priv->phylink);
392574371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
39267ac6653aSJeff Kirsher 
3927c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
39287ac6653aSJeff Kirsher 
	/* Cancel the TX coalescing timers before freeing their IRQs/rings */
39298fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
39308531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
39319125cdd1SGiuseppe CAVALLARO 
39327028471eSChristian Marangi 	netif_tx_disable(dev);
39337028471eSChristian Marangi 
39347ac6653aSJeff Kirsher 	/* Free the IRQ lines */
39358532f613SOng Boon Leong 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
39367ac6653aSJeff Kirsher 
39375f585913SFugang Duan 	if (priv->eee_enabled) {
39385f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
39395f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
39405f585913SFugang Duan 	}
39415f585913SFugang Duan 
39427ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
3943ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
39447ac6653aSJeff Kirsher 
39457ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
3946ba39b344SChristian Marangi 	free_dma_desc_resources(priv, &priv->dma_conf);
39477ac6653aSJeff Kirsher 
39487ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
3949c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
39507ac6653aSJeff Kirsher 
395149725ffcSJunxiao Chang 	/* Powerdown Serdes if there is */
395249725ffcSJunxiao Chang 	if (priv->plat->serdes_powerdown)
395349725ffcSJunxiao Chang 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
395449725ffcSJunxiao Chang 
39557ac6653aSJeff Kirsher 	netif_carrier_off(dev);
39567ac6653aSJeff Kirsher 
395792ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
395892ba6888SRayagond Kokatanur 
39595ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
39605ec55823SJoakim Zhang 
39615a558611SOng Boon Leong 	if (priv->dma_cap.fpesel)
39625a558611SOng Boon Leong 		stmmac_fpe_stop_wq(priv);
39635a558611SOng Boon Leong 
39647ac6653aSJeff Kirsher 	return 0;
39657ac6653aSJeff Kirsher }
39667ac6653aSJeff Kirsher 
/**
 * stmmac_vlan_insert - offload VLAN tag insertion through a TX descriptor
 * @priv: driver private structure
 * @skb: socket buffer carrying the VLAN tag
 * @tx_q: TX queue whose next descriptor is consumed for the tag
 *
 * Returns true when a context descriptor was programmed with the tag
 * (cur_tx is then advanced), false when HW insertion does not apply to
 * this skb or the descriptor could not take the tag.
 */
396730d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
396830d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
396930d93227SJose Abreu {
397030d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
397130d93227SJose Abreu 	u32 inner_type = 0x0;
397230d93227SJose Abreu 	struct dma_desc *p;
397330d93227SJose Abreu 
397430d93227SJose Abreu 	if (!priv->dma_cap.vlins)
397530d93227SJose Abreu 		return false;
397630d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
397730d93227SJose Abreu 		return false;
	/* Double-tagged frame: the 802.1ad tag becomes the inner tag */
397830d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
397930d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
398030d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
398130d93227SJose Abreu 	}
398230d93227SJose Abreu 
398330d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
398430d93227SJose Abreu 
	/* TBS queues use the enhanced descriptor layout */
3985579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3986579a25a8SJose Abreu 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3987579a25a8SJose Abreu 	else
3988579a25a8SJose Abreu 		p = &tx_q->dma_tx[tx_q->cur_tx];
3989579a25a8SJose Abreu 
399030d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
399130d93227SJose Abreu 		return false;
399230d93227SJose Abreu 
	/* Hand the descriptor to the DMA only after the tag is written */
399330d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
39948531c808SChristian Marangi 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
399530d93227SJose Abreu 	return true;
399630d93227SJose Abreu }
39987ac6653aSJeff Kirsher /**
3999f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload chunk
4000f748be53SAlexandre TORGUE  *  @priv: driver private structure
4001f748be53SAlexandre TORGUE  *  @des: buffer start address
4002f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
4003d0ea5cbdSJesse Brandeburg  *  @last_segment: condition for the last descriptor
4004ce736788SJoao Pinto  *  @queue: TX queue index
4005f748be53SAlexandre TORGUE  *  Description:
4006f748be53SAlexandre TORGUE  *  This function fills descriptor and request new descriptors according to
4007f748be53SAlexandre TORGUE  *  buffer length to fill
 *  Each descriptor covers at most TSO_MAX_BUFF_SIZE bytes; the last one
 *  of the run carries the last-segment flag when @last_segment is set.
4008f748be53SAlexandre TORGUE  */
4009a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4010ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
4011f748be53SAlexandre TORGUE {
40128531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4013f748be53SAlexandre TORGUE 	struct dma_desc *desc;
40145bacd778SLABBE Corentin 	u32 buff_size;
4015ce736788SJoao Pinto 	int tmp_len;
4016f748be53SAlexandre TORGUE 
4017f748be53SAlexandre TORGUE 	tmp_len = total_len;
4018f748be53SAlexandre TORGUE 
4019f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
4020a993db88SJose Abreu 		dma_addr_t curr_addr;
4021a993db88SJose Abreu 
4022aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
40238531c808SChristian Marangi 						priv->dma_conf.dma_tx_size);
4024b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4025579a25a8SJose Abreu 
4026579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4027579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4028579a25a8SJose Abreu 		else
4029579a25a8SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4030f748be53SAlexandre TORGUE 
4031a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
		/* Narrow DMA mask: the address fits entirely in des0 */
4032a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
4033a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
4034a993db88SJose Abreu 		else
4035a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
4036a993db88SJose Abreu 
4037f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4038f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
4039f748be53SAlexandre TORGUE 
404042de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4041f748be53SAlexandre TORGUE 				0, 1,
4042426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4043f748be53SAlexandre TORGUE 				0, 0);
4044f748be53SAlexandre TORGUE 
4045f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
4046f748be53SAlexandre TORGUE 	}
4047f748be53SAlexandre TORGUE }
4048f748be53SAlexandre TORGUE 
/**
 * stmmac_flush_tx_descriptors - publish prepared TX descriptors to the DMA
 * @priv: driver private structure
 * @queue: TX queue index
 *
 * Computes the tail address from cur_tx and the descriptor size in use
 * (extended, enhanced/TBS or basic) and writes it to the tail pointer
 * register so the engine starts fetching the new descriptors.
 */
4049d96febedSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4050d96febedSOng Boon Leong {
40518531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4052d96febedSOng Boon Leong 	int desc_size;
4053d96febedSOng Boon Leong 
4054d96febedSOng Boon Leong 	if (likely(priv->extend_desc))
4055d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_extended_desc);
4056d96febedSOng Boon Leong 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4057d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_edesc);
4058d96febedSOng Boon Leong 	else
4059d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_desc);
4060d96febedSOng Boon Leong 
4061d96febedSOng Boon Leong 	/* The own bit must be the latest setting done when prepare the
4062d96febedSOng Boon Leong 	 * descriptor and then barrier is needed to make sure that
4063d96febedSOng Boon Leong 	 * all is coherent before granting the DMA engine.
4064d96febedSOng Boon Leong 	 */
4065d96febedSOng Boon Leong 	wmb();
4066d96febedSOng Boon Leong 
4067d96febedSOng Boon Leong 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4068d96febedSOng Boon Leong 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4069d96febedSOng Boon Leong }
4070d96febedSOng Boon Leong 
4071f748be53SAlexandre TORGUE /**
4072f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4073f748be53SAlexandre TORGUE  *  @skb : the socket buffer
4074f748be53SAlexandre TORGUE  *  @dev : device pointer
4075f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
4076f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
4077f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
4078f748be53SAlexandre TORGUE  *
4079f748be53SAlexandre TORGUE  *  First Descriptor
4080f748be53SAlexandre TORGUE  *   --------
4081f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
4082f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
4083f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
4084f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4085f748be53SAlexandre TORGUE  *   --------
4086f748be53SAlexandre TORGUE  *	|
4087f748be53SAlexandre TORGUE  *     ...
4088f748be53SAlexandre TORGUE  *	|
4089f748be53SAlexandre TORGUE  *   --------
4090f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4091f748be53SAlexandre TORGUE  *   | DES1 | --|
4092f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
4093f748be53SAlexandre TORGUE  *   | DES3 |
4094f748be53SAlexandre TORGUE  *   --------
4095f748be53SAlexandre TORGUE  *
4096f748be53SAlexandre TORGUE  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4097f748be53SAlexandre TORGUE  */
4098f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4099f748be53SAlexandre TORGUE {
4100ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
4101f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
4102f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
4103ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
4104c2837423SJose Abreu 	unsigned int first_entry, tx_packets;
4105d96febedSOng Boon Leong 	int tmp_pay_len = 0, first_tx;
4106ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
4107c2837423SJose Abreu 	bool has_vlan, set_ic;
4108579a25a8SJose Abreu 	u8 proto_hdr_len, hdr;
4109ce736788SJoao Pinto 	u32 pay_len, mss;
4110a993db88SJose Abreu 	dma_addr_t des;
4111f748be53SAlexandre TORGUE 	int i;
4112f748be53SAlexandre TORGUE 
41138531c808SChristian Marangi 	tx_q = &priv->dma_conf.tx_queue[queue];
4114c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
4115ce736788SJoao Pinto 
4116f748be53SAlexandre TORGUE 	/* Compute header lengths */
4117b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4118b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4119b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
4120b7766206SJose Abreu 	} else {
4121504148feSEric Dumazet 		proto_hdr_len = skb_tcp_all_headers(skb);
4122b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
4123b7766206SJose Abreu 	}
4124f748be53SAlexandre TORGUE 
4125f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
4126ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
4127f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4128c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4129c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4130c22a3f48SJoao Pinto 								queue));
4131f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
413238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
413338ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
413438ddc59dSLABBE Corentin 				   __func__);
4135f748be53SAlexandre TORGUE 		}
4136f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
4137f748be53SAlexandre TORGUE 	}
4138f748be53SAlexandre TORGUE 
4139f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4140f748be53SAlexandre TORGUE 
4141f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
4142f748be53SAlexandre TORGUE 
4143f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
41448d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
4145579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4146579a25a8SJose Abreu 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4147579a25a8SJose Abreu 		else
4148579a25a8SJose Abreu 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4149579a25a8SJose Abreu 
415042de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
41518d212a9eSNiklas Cassel 		tx_q->mss = mss;
4152aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
41538531c808SChristian Marangi 						priv->dma_conf.dma_tx_size);
4154b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4155f748be53SAlexandre TORGUE 	}
4156f748be53SAlexandre TORGUE 
4157f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
4158b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4159b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
4160f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4161f748be53SAlexandre TORGUE 			skb->data_len);
4162f748be53SAlexandre TORGUE 	}
4163f748be53SAlexandre TORGUE 
416430d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
416530d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
416630d93227SJose Abreu 
4167ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
4168b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4169f748be53SAlexandre TORGUE 
4170579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4171579a25a8SJose Abreu 		desc = &tx_q->dma_entx[first_entry].basic;
4172579a25a8SJose Abreu 	else
4173579a25a8SJose Abreu 		desc = &tx_q->dma_tx[first_entry];
4174f748be53SAlexandre TORGUE 	first = desc;
4175f748be53SAlexandre TORGUE 
417630d93227SJose Abreu 	if (has_vlan)
417730d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
417830d93227SJose Abreu 
4179f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
4180f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4181f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
4182f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
4183f748be53SAlexandre TORGUE 		goto dma_map_err;
4184f748be53SAlexandre TORGUE 
4185ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4186ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4187be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4188be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4189f748be53SAlexandre TORGUE 
4190a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
4191f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
4192f748be53SAlexandre TORGUE 
4193f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
4194f748be53SAlexandre TORGUE 		if (pay_len)
4195f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4196f748be53SAlexandre TORGUE 
4197f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
4198f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4199a993db88SJose Abreu 	} else {
4200a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
4201a993db88SJose Abreu 		tmp_pay_len = pay_len;
420234c15202Syuqi jin 		des += proto_hdr_len;
4203b2f07199SJose Abreu 		pay_len = 0;
4204a993db88SJose Abreu 	}
4205f748be53SAlexandre TORGUE 
4206ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4207f748be53SAlexandre TORGUE 
4208f748be53SAlexandre TORGUE 	/* Prepare fragments */
4209f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
4210f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4211f748be53SAlexandre TORGUE 
4212f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
4213f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
4214f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
4215937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
4216937071c1SThierry Reding 			goto dma_map_err;
4217f748be53SAlexandre TORGUE 
4218f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4219ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
4220f748be53SAlexandre TORGUE 
4221ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4222ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4223ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4224be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4225f748be53SAlexandre TORGUE 	}
4226f748be53SAlexandre TORGUE 
4227ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4228f748be53SAlexandre TORGUE 
422905cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
423005cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4231be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
423205cf0d1bSNiklas Cassel 
42337df4a3a7SJose Abreu 	/* Manage tx mitigation */
4234c2837423SJose Abreu 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4235c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
4236c2837423SJose Abreu 
4237c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4238c2837423SJose Abreu 		set_ic = true;
4239db2f2842SOng Boon Leong 	else if (!priv->tx_coal_frames[queue])
4240c2837423SJose Abreu 		set_ic = false;
4241db2f2842SOng Boon Leong 	else if (tx_packets > priv->tx_coal_frames[queue])
4242c2837423SJose Abreu 		set_ic = true;
4243db2f2842SOng Boon Leong 	else if ((tx_q->tx_count_frames %
4244db2f2842SOng Boon Leong 		  priv->tx_coal_frames[queue]) < tx_packets)
4245c2837423SJose Abreu 		set_ic = true;
4246c2837423SJose Abreu 	else
4247c2837423SJose Abreu 		set_ic = false;
4248c2837423SJose Abreu 
4249c2837423SJose Abreu 	if (set_ic) {
4250579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4251579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4252579a25a8SJose Abreu 		else
42537df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4254579a25a8SJose Abreu 
42557df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
42567df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
42577df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
42587df4a3a7SJose Abreu 	}
42597df4a3a7SJose Abreu 
426005cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
426105cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
426205cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
426305cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
426405cf0d1bSNiklas Cassel 	 */
42658531c808SChristian Marangi 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4266f748be53SAlexandre TORGUE 
4267ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4268b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
426938ddc59dSLABBE Corentin 			  __func__);
4270c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4271f748be53SAlexandre TORGUE 	}
4272f748be53SAlexandre TORGUE 
4273f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
4274f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
4275f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
4276f748be53SAlexandre TORGUE 
42778000ddc0SJose Abreu 	if (priv->sarc_type)
42788000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
42798000ddc0SJose Abreu 
4280f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
4281f748be53SAlexandre TORGUE 
4282f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4283f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
4284f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
4285f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
428642de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
4287f748be53SAlexandre TORGUE 	}
4288f748be53SAlexandre TORGUE 
4289f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
429042de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4291f748be53SAlexandre TORGUE 			proto_hdr_len,
4292f748be53SAlexandre TORGUE 			pay_len,
4293ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4294b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
4295f748be53SAlexandre TORGUE 
4296f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
429715d2ee42SNiklas Cassel 	if (mss_desc) {
429815d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
429915d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
430015d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
430115d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
430215d2ee42SNiklas Cassel 		 */
430315d2ee42SNiklas Cassel 		dma_wmb();
430442de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
430515d2ee42SNiklas Cassel 	}
4306f748be53SAlexandre TORGUE 
4307f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
4308f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4309ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4310ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
4311f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
4312f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
4313f748be53SAlexandre TORGUE 	}
4314f748be53SAlexandre TORGUE 
4315c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4316f748be53SAlexandre TORGUE 
4317d96febedSOng Boon Leong 	stmmac_flush_tx_descriptors(priv, queue);
43184772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
4319f748be53SAlexandre TORGUE 
4320f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4321f748be53SAlexandre TORGUE 
4322f748be53SAlexandre TORGUE dma_map_err:
4323f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
4324f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
4325f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
4326f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4327f748be53SAlexandre TORGUE }
4328f748be53SAlexandre TORGUE 
4329f748be53SAlexandre TORGUE /**
4330732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
43317ac6653aSJeff Kirsher  *  @skb : the socket buffer
43327ac6653aSJeff Kirsher  *  @dev : device pointer
433332ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
433432ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
433532ceabcaSGiuseppe CAVALLARO  *  and SG feature.
43367ac6653aSJeff Kirsher  */
43377ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
43387ac6653aSJeff Kirsher {
4339c2837423SJose Abreu 	unsigned int first_entry, tx_packets, enh_desc;
43407ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
43410e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
43424a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
4343ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
43447ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
4345b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
4346579a25a8SJose Abreu 	struct dma_edesc *tbs_desc = NULL;
43477ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
4348ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
4349c2837423SJose Abreu 	bool has_vlan, set_ic;
4350d96febedSOng Boon Leong 	int entry, first_tx;
4351a993db88SJose Abreu 	dma_addr_t des;
4352f748be53SAlexandre TORGUE 
43538531c808SChristian Marangi 	tx_q = &priv->dma_conf.tx_queue[queue];
4354c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
4355ce736788SJoao Pinto 
4356be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4357e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
4358e2cd682dSJose Abreu 
4359f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
4360f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
4361b7766206SJose Abreu 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4362b7766206SJose Abreu 			return stmmac_tso_xmit(skb, dev);
4363b7766206SJose Abreu 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4364f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
4365f748be53SAlexandre TORGUE 	}
43667ac6653aSJeff Kirsher 
4367ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4368c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4369c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4370c22a3f48SJoao Pinto 								queue));
43717ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
437238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
437338ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
437438ddc59dSLABBE Corentin 				   __func__);
43757ac6653aSJeff Kirsher 		}
43767ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
43777ac6653aSJeff Kirsher 	}
43787ac6653aSJeff Kirsher 
437930d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
438030d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
438130d93227SJose Abreu 
4382ce736788SJoao Pinto 	entry = tx_q->cur_tx;
43830e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
4384b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
43857ac6653aSJeff Kirsher 
43867ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
43877ac6653aSJeff Kirsher 
43880e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
4389ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4390579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4391579a25a8SJose Abreu 		desc = &tx_q->dma_entx[entry].basic;
4392c24602efSGiuseppe CAVALLARO 	else
4393ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
4394c24602efSGiuseppe CAVALLARO 
43957ac6653aSJeff Kirsher 	first = desc;
43967ac6653aSJeff Kirsher 
439730d93227SJose Abreu 	if (has_vlan)
439830d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
439930d93227SJose Abreu 
44000e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
44014a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
440229896a67SGiuseppe CAVALLARO 	if (enh_desc)
44032c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
440429896a67SGiuseppe CAVALLARO 
440563a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
44062c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
440763a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
4408362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
440929896a67SGiuseppe CAVALLARO 	}
44107ac6653aSJeff Kirsher 
44117ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
44129e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
44139e903e08SEric Dumazet 		int len = skb_frag_size(frag);
4414be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
44157ac6653aSJeff Kirsher 
44168531c808SChristian Marangi 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4417b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
4418e3ad57c9SGiuseppe Cavallaro 
44190e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
4420ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4421579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4422579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
4423c24602efSGiuseppe CAVALLARO 		else
4424ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
44257ac6653aSJeff Kirsher 
4426f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4427f722380dSIan Campbell 				       DMA_TO_DEVICE);
4428f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
4429362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
4430362b37beSGiuseppe CAVALLARO 
4431ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
44326844171dSJose Abreu 
44336844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
4434f748be53SAlexandre TORGUE 
4435ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4436ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
4437ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4438be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
44390e80bdc9SGiuseppe Cavallaro 
44400e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
444142de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
444242de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
44437ac6653aSJeff Kirsher 	}
44447ac6653aSJeff Kirsher 
444505cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
444605cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
4447be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4448e3ad57c9SGiuseppe Cavallaro 
44497df4a3a7SJose Abreu 	/* According to the coalesce parameter the IC bit for the latest
44507df4a3a7SJose Abreu 	 * segment is reset and the timer re-started to clean the tx status.
44517df4a3a7SJose Abreu 	 * This approach takes care about the fragments: desc is the first
44527df4a3a7SJose Abreu 	 * element in case of no SG.
44537df4a3a7SJose Abreu 	 */
4454c2837423SJose Abreu 	tx_packets = (entry + 1) - first_tx;
4455c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
4456c2837423SJose Abreu 
4457c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4458c2837423SJose Abreu 		set_ic = true;
4459db2f2842SOng Boon Leong 	else if (!priv->tx_coal_frames[queue])
4460c2837423SJose Abreu 		set_ic = false;
4461db2f2842SOng Boon Leong 	else if (tx_packets > priv->tx_coal_frames[queue])
4462c2837423SJose Abreu 		set_ic = true;
4463db2f2842SOng Boon Leong 	else if ((tx_q->tx_count_frames %
4464db2f2842SOng Boon Leong 		  priv->tx_coal_frames[queue]) < tx_packets)
4465c2837423SJose Abreu 		set_ic = true;
4466c2837423SJose Abreu 	else
4467c2837423SJose Abreu 		set_ic = false;
4468c2837423SJose Abreu 
4469c2837423SJose Abreu 	if (set_ic) {
44707df4a3a7SJose Abreu 		if (likely(priv->extend_desc))
44717df4a3a7SJose Abreu 			desc = &tx_q->dma_etx[entry].basic;
4472579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4473579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
44747df4a3a7SJose Abreu 		else
44757df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[entry];
44767df4a3a7SJose Abreu 
44777df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
44787df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
44797df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
44807df4a3a7SJose Abreu 	}
44817df4a3a7SJose Abreu 
448205cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
448305cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
448405cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
448505cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
448605cf0d1bSNiklas Cassel 	 */
44878531c808SChristian Marangi 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4488ce736788SJoao Pinto 	tx_q->cur_tx = entry;
44897ac6653aSJeff Kirsher 
44907ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
449138ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
449238ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4493ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
44940e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
449583d7af64SGiuseppe CAVALLARO 
449638ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
44977ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
44987ac6653aSJeff Kirsher 	}
44990e80bdc9SGiuseppe Cavallaro 
4500ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4501b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4502b3e51069SLABBE Corentin 			  __func__);
4503c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
45047ac6653aSJeff Kirsher 	}
45057ac6653aSJeff Kirsher 
45067ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
45077ac6653aSJeff Kirsher 
45088000ddc0SJose Abreu 	if (priv->sarc_type)
45098000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
45108000ddc0SJose Abreu 
45110e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
45120e80bdc9SGiuseppe Cavallaro 
45130e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
45140e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
45150e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
45160e80bdc9SGiuseppe Cavallaro 	 */
45170e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
45180e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
45190e80bdc9SGiuseppe Cavallaro 
4520f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
45210e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
4522f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
45230e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
45240e80bdc9SGiuseppe Cavallaro 
4525ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4526be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4527be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
45286844171dSJose Abreu 
45296844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
4530f748be53SAlexandre TORGUE 
4531ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4532ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
45330e80bdc9SGiuseppe Cavallaro 
4534891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4535891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
4536891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
4537891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
453842de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
4539891434b1SRayagond Kokatanur 		}
4540891434b1SRayagond Kokatanur 
45410e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
454242de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4543579a25a8SJose Abreu 				csum_insertion, priv->mode, 0, last_segment,
454442de047dSJose Abreu 				skb->len);
454580acbed9SAaro Koskinen 	}
45460e80bdc9SGiuseppe Cavallaro 
4547579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_EN) {
4548579a25a8SJose Abreu 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4549579a25a8SJose Abreu 
4550579a25a8SJose Abreu 		tbs_desc = &tx_q->dma_entx[first_entry];
4551579a25a8SJose Abreu 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4552579a25a8SJose Abreu 	}
4553579a25a8SJose Abreu 
4554579a25a8SJose Abreu 	stmmac_set_tx_owner(priv, first);
4555579a25a8SJose Abreu 
4556c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4557f748be53SAlexandre TORGUE 
4558a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
45598fce3331SJose Abreu 
4560d96febedSOng Boon Leong 	stmmac_flush_tx_descriptors(priv, queue);
45614772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
45627ac6653aSJeff Kirsher 
4563362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
4564a9097a96SGiuseppe CAVALLARO 
4565362b37beSGiuseppe CAVALLARO dma_map_err:
456638ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
4567362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
4568362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
45697ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
45707ac6653aSJeff Kirsher }
45717ac6653aSJeff Kirsher 
4572b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4573b9381985SVince Bridgers {
45741f5020acSVladimir Oltean 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
45751f5020acSVladimir Oltean 	__be16 vlan_proto = veth->h_vlan_proto;
4576b9381985SVince Bridgers 	u16 vlanid;
4577b9381985SVince Bridgers 
4578ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4579ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4580ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
4581ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4582b9381985SVince Bridgers 		/* pop the vlan tag */
4583ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
4584ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4585b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
4586ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4587b9381985SVince Bridgers 	}
4588b9381985SVince Bridgers }
4589b9381985SVince Bridgers 
459032ceabcaSGiuseppe CAVALLARO /**
4591732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill used skb preallocated buffers
459232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
459354139cf3SJoao Pinto  * @queue: RX queue index
459432ceabcaSGiuseppe CAVALLARO  * Description : this is to reallocate the skb for the reception process
459532ceabcaSGiuseppe CAVALLARO  * that is based on zero-copy.
459632ceabcaSGiuseppe CAVALLARO  */
459754139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
45987ac6653aSJeff Kirsher {
45998531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
46005fabb012SOng Boon Leong 	int dirty = stmmac_rx_dirty(priv, queue);
460154139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
4602884d2b84SDavid Wu 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4603884d2b84SDavid Wu 
4604070246e4SJochen Henneberg 	if (priv->dma_cap.host_dma_width <= 32)
4605884d2b84SDavid Wu 		gfp |= GFP_DMA32;
460654139cf3SJoao Pinto 
4607e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
46082af6106aSJose Abreu 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4609c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
4610d429b66eSJose Abreu 		bool use_rx_wd;
4611c24602efSGiuseppe CAVALLARO 
4612c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
461354139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4614c24602efSGiuseppe CAVALLARO 		else
461554139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
4616c24602efSGiuseppe CAVALLARO 
46172af6106aSJose Abreu 		if (!buf->page) {
4618884d2b84SDavid Wu 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
46192af6106aSJose Abreu 			if (!buf->page)
46207ac6653aSJeff Kirsher 				break;
4621120e87f9SGiuseppe Cavallaro 		}
46227ac6653aSJeff Kirsher 
462367afd6d1SJose Abreu 		if (priv->sph && !buf->sec_page) {
4624884d2b84SDavid Wu 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
462567afd6d1SJose Abreu 			if (!buf->sec_page)
462667afd6d1SJose Abreu 				break;
462767afd6d1SJose Abreu 
462867afd6d1SJose Abreu 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
462967afd6d1SJose Abreu 		}
463067afd6d1SJose Abreu 
46315fabb012SOng Boon Leong 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
46323caa61c2SJose Abreu 
46332af6106aSJose Abreu 		stmmac_set_desc_addr(priv, p, buf->addr);
4634396e13e1SJoakim Zhang 		if (priv->sph)
4635396e13e1SJoakim Zhang 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4636396e13e1SJoakim Zhang 		else
4637396e13e1SJoakim Zhang 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
46382c520b1cSJose Abreu 		stmmac_refill_desc3(priv, rx_q, p);
4639286a8372SGiuseppe CAVALLARO 
4640d429b66eSJose Abreu 		rx_q->rx_count_frames++;
4641db2f2842SOng Boon Leong 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4642db2f2842SOng Boon Leong 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
46436fa9d691SJose Abreu 			rx_q->rx_count_frames = 0;
464409146abeSJose Abreu 
4645db2f2842SOng Boon Leong 		use_rx_wd = !priv->rx_coal_frames[queue];
464609146abeSJose Abreu 		use_rx_wd |= rx_q->rx_count_frames > 0;
464709146abeSJose Abreu 		if (!priv->use_riwt)
464809146abeSJose Abreu 			use_rx_wd = false;
4649d429b66eSJose Abreu 
4650ad688cdbSPavel Machek 		dma_wmb();
46512af6106aSJose Abreu 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4652e3ad57c9SGiuseppe Cavallaro 
46538531c808SChristian Marangi 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
46547ac6653aSJeff Kirsher 	}
465554139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
4656858a31ffSJose Abreu 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4657858a31ffSJose Abreu 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
46584523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
46597ac6653aSJeff Kirsher }
46607ac6653aSJeff Kirsher 
466188ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
466288ebe2cfSJose Abreu 				       struct dma_desc *p,
466388ebe2cfSJose Abreu 				       int status, unsigned int len)
466488ebe2cfSJose Abreu {
466588ebe2cfSJose Abreu 	unsigned int plen = 0, hlen = 0;
466631f2760eSLuo Jiaxing 	int coe = priv->hw->rx_csum;
466788ebe2cfSJose Abreu 
466888ebe2cfSJose Abreu 	/* Not first descriptor, buffer is always zero */
466988ebe2cfSJose Abreu 	if (priv->sph && len)
467088ebe2cfSJose Abreu 		return 0;
467188ebe2cfSJose Abreu 
467288ebe2cfSJose Abreu 	/* First descriptor, get split header length */
467331f2760eSLuo Jiaxing 	stmmac_get_rx_header_len(priv, p, &hlen);
467488ebe2cfSJose Abreu 	if (priv->sph && hlen) {
467588ebe2cfSJose Abreu 		priv->xstats.rx_split_hdr_pkt_n++;
467688ebe2cfSJose Abreu 		return hlen;
467788ebe2cfSJose Abreu 	}
467888ebe2cfSJose Abreu 
467988ebe2cfSJose Abreu 	/* First descriptor, not last descriptor and not split header */
468088ebe2cfSJose Abreu 	if (status & rx_not_ls)
46818531c808SChristian Marangi 		return priv->dma_conf.dma_buf_sz;
468288ebe2cfSJose Abreu 
468388ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
468488ebe2cfSJose Abreu 
468588ebe2cfSJose Abreu 	/* First descriptor and last descriptor and not split header */
46868531c808SChristian Marangi 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
468788ebe2cfSJose Abreu }
468888ebe2cfSJose Abreu 
468988ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
469088ebe2cfSJose Abreu 				       struct dma_desc *p,
469188ebe2cfSJose Abreu 				       int status, unsigned int len)
469288ebe2cfSJose Abreu {
469388ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
469488ebe2cfSJose Abreu 	unsigned int plen = 0;
469588ebe2cfSJose Abreu 
469688ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
469788ebe2cfSJose Abreu 	if (!priv->sph)
469888ebe2cfSJose Abreu 		return 0;
469988ebe2cfSJose Abreu 
470088ebe2cfSJose Abreu 	/* Not last descriptor */
470188ebe2cfSJose Abreu 	if (status & rx_not_ls)
47028531c808SChristian Marangi 		return priv->dma_conf.dma_buf_sz;
470388ebe2cfSJose Abreu 
470488ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
470588ebe2cfSJose Abreu 
470688ebe2cfSJose Abreu 	/* Last descriptor */
470788ebe2cfSJose Abreu 	return plen - len;
470888ebe2cfSJose Abreu }
470988ebe2cfSJose Abreu 
4710be8b38a7SOng Boon Leong static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
47118b278a5bSOng Boon Leong 				struct xdp_frame *xdpf, bool dma_map)
4712be8b38a7SOng Boon Leong {
47138531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4714be8b38a7SOng Boon Leong 	unsigned int entry = tx_q->cur_tx;
4715be8b38a7SOng Boon Leong 	struct dma_desc *tx_desc;
4716be8b38a7SOng Boon Leong 	dma_addr_t dma_addr;
4717be8b38a7SOng Boon Leong 	bool set_ic;
4718be8b38a7SOng Boon Leong 
4719be8b38a7SOng Boon Leong 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4720be8b38a7SOng Boon Leong 		return STMMAC_XDP_CONSUMED;
4721be8b38a7SOng Boon Leong 
4722be8b38a7SOng Boon Leong 	if (likely(priv->extend_desc))
4723be8b38a7SOng Boon Leong 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4724be8b38a7SOng Boon Leong 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4725be8b38a7SOng Boon Leong 		tx_desc = &tx_q->dma_entx[entry].basic;
4726be8b38a7SOng Boon Leong 	else
4727be8b38a7SOng Boon Leong 		tx_desc = tx_q->dma_tx + entry;
4728be8b38a7SOng Boon Leong 
47298b278a5bSOng Boon Leong 	if (dma_map) {
47308b278a5bSOng Boon Leong 		dma_addr = dma_map_single(priv->device, xdpf->data,
47318b278a5bSOng Boon Leong 					  xdpf->len, DMA_TO_DEVICE);
47328b278a5bSOng Boon Leong 		if (dma_mapping_error(priv->device, dma_addr))
47338b278a5bSOng Boon Leong 			return STMMAC_XDP_CONSUMED;
47348b278a5bSOng Boon Leong 
47358b278a5bSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
47368b278a5bSOng Boon Leong 	} else {
47378b278a5bSOng Boon Leong 		struct page *page = virt_to_page(xdpf->data);
47388b278a5bSOng Boon Leong 
4739be8b38a7SOng Boon Leong 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4740be8b38a7SOng Boon Leong 			   xdpf->headroom;
4741be8b38a7SOng Boon Leong 		dma_sync_single_for_device(priv->device, dma_addr,
4742be8b38a7SOng Boon Leong 					   xdpf->len, DMA_BIDIRECTIONAL);
4743be8b38a7SOng Boon Leong 
4744be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
47458b278a5bSOng Boon Leong 	}
4746be8b38a7SOng Boon Leong 
4747be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4748be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4749be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4750be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4751be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4752be8b38a7SOng Boon Leong 
4753be8b38a7SOng Boon Leong 	tx_q->xdpf[entry] = xdpf;
4754be8b38a7SOng Boon Leong 
4755be8b38a7SOng Boon Leong 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4756be8b38a7SOng Boon Leong 
4757be8b38a7SOng Boon Leong 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4758be8b38a7SOng Boon Leong 			       true, priv->mode, true, true,
4759be8b38a7SOng Boon Leong 			       xdpf->len);
4760be8b38a7SOng Boon Leong 
4761be8b38a7SOng Boon Leong 	tx_q->tx_count_frames++;
4762be8b38a7SOng Boon Leong 
4763be8b38a7SOng Boon Leong 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4764be8b38a7SOng Boon Leong 		set_ic = true;
4765be8b38a7SOng Boon Leong 	else
4766be8b38a7SOng Boon Leong 		set_ic = false;
4767be8b38a7SOng Boon Leong 
4768be8b38a7SOng Boon Leong 	if (set_ic) {
4769be8b38a7SOng Boon Leong 		tx_q->tx_count_frames = 0;
4770be8b38a7SOng Boon Leong 		stmmac_set_tx_ic(priv, tx_desc);
4771be8b38a7SOng Boon Leong 		priv->xstats.tx_set_ic_bit++;
4772be8b38a7SOng Boon Leong 	}
4773be8b38a7SOng Boon Leong 
4774be8b38a7SOng Boon Leong 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4775be8b38a7SOng Boon Leong 
47768531c808SChristian Marangi 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4777be8b38a7SOng Boon Leong 	tx_q->cur_tx = entry;
4778be8b38a7SOng Boon Leong 
4779be8b38a7SOng Boon Leong 	return STMMAC_XDP_TX;
4780be8b38a7SOng Boon Leong }
4781be8b38a7SOng Boon Leong 
4782be8b38a7SOng Boon Leong static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4783be8b38a7SOng Boon Leong 				   int cpu)
4784be8b38a7SOng Boon Leong {
4785be8b38a7SOng Boon Leong 	int index = cpu;
4786be8b38a7SOng Boon Leong 
4787be8b38a7SOng Boon Leong 	if (unlikely(index < 0))
4788be8b38a7SOng Boon Leong 		index = 0;
4789be8b38a7SOng Boon Leong 
4790be8b38a7SOng Boon Leong 	while (index >= priv->plat->tx_queues_to_use)
4791be8b38a7SOng Boon Leong 		index -= priv->plat->tx_queues_to_use;
4792be8b38a7SOng Boon Leong 
4793be8b38a7SOng Boon Leong 	return index;
4794be8b38a7SOng Boon Leong }
4795be8b38a7SOng Boon Leong 
4796be8b38a7SOng Boon Leong static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4797be8b38a7SOng Boon Leong 				struct xdp_buff *xdp)
4798be8b38a7SOng Boon Leong {
4799be8b38a7SOng Boon Leong 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4800be8b38a7SOng Boon Leong 	int cpu = smp_processor_id();
4801be8b38a7SOng Boon Leong 	struct netdev_queue *nq;
4802be8b38a7SOng Boon Leong 	int queue;
4803be8b38a7SOng Boon Leong 	int res;
4804be8b38a7SOng Boon Leong 
4805be8b38a7SOng Boon Leong 	if (unlikely(!xdpf))
4806be8b38a7SOng Boon Leong 		return STMMAC_XDP_CONSUMED;
4807be8b38a7SOng Boon Leong 
4808be8b38a7SOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4809be8b38a7SOng Boon Leong 	nq = netdev_get_tx_queue(priv->dev, queue);
4810be8b38a7SOng Boon Leong 
4811be8b38a7SOng Boon Leong 	__netif_tx_lock(nq, cpu);
4812be8b38a7SOng Boon Leong 	/* Avoids TX time-out as we are sharing with slow path */
4813e92af33eSAlexander Lobakin 	txq_trans_cond_update(nq);
4814be8b38a7SOng Boon Leong 
48158b278a5bSOng Boon Leong 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4816be8b38a7SOng Boon Leong 	if (res == STMMAC_XDP_TX)
4817be8b38a7SOng Boon Leong 		stmmac_flush_tx_descriptors(priv, queue);
4818be8b38a7SOng Boon Leong 
4819be8b38a7SOng Boon Leong 	__netif_tx_unlock(nq);
4820be8b38a7SOng Boon Leong 
4821be8b38a7SOng Boon Leong 	return res;
4822be8b38a7SOng Boon Leong }
4823be8b38a7SOng Boon Leong 
4824bba71cacSOng Boon Leong static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4825bba71cacSOng Boon Leong 				 struct bpf_prog *prog,
48265fabb012SOng Boon Leong 				 struct xdp_buff *xdp)
48275fabb012SOng Boon Leong {
48285fabb012SOng Boon Leong 	u32 act;
4829bba71cacSOng Boon Leong 	int res;
48305fabb012SOng Boon Leong 
48315fabb012SOng Boon Leong 	act = bpf_prog_run_xdp(prog, xdp);
48325fabb012SOng Boon Leong 	switch (act) {
48335fabb012SOng Boon Leong 	case XDP_PASS:
48345fabb012SOng Boon Leong 		res = STMMAC_XDP_PASS;
48355fabb012SOng Boon Leong 		break;
4836be8b38a7SOng Boon Leong 	case XDP_TX:
4837be8b38a7SOng Boon Leong 		res = stmmac_xdp_xmit_back(priv, xdp);
4838be8b38a7SOng Boon Leong 		break;
48398b278a5bSOng Boon Leong 	case XDP_REDIRECT:
48408b278a5bSOng Boon Leong 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
48418b278a5bSOng Boon Leong 			res = STMMAC_XDP_CONSUMED;
48428b278a5bSOng Boon Leong 		else
48438b278a5bSOng Boon Leong 			res = STMMAC_XDP_REDIRECT;
48448b278a5bSOng Boon Leong 		break;
48455fabb012SOng Boon Leong 	default:
4846c8064e5bSPaolo Abeni 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
48475fabb012SOng Boon Leong 		fallthrough;
48485fabb012SOng Boon Leong 	case XDP_ABORTED:
48495fabb012SOng Boon Leong 		trace_xdp_exception(priv->dev, prog, act);
48505fabb012SOng Boon Leong 		fallthrough;
48515fabb012SOng Boon Leong 	case XDP_DROP:
48525fabb012SOng Boon Leong 		res = STMMAC_XDP_CONSUMED;
48535fabb012SOng Boon Leong 		break;
48545fabb012SOng Boon Leong 	}
48555fabb012SOng Boon Leong 
4856bba71cacSOng Boon Leong 	return res;
4857bba71cacSOng Boon Leong }
4858bba71cacSOng Boon Leong 
4859bba71cacSOng Boon Leong static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4860bba71cacSOng Boon Leong 					   struct xdp_buff *xdp)
4861bba71cacSOng Boon Leong {
4862bba71cacSOng Boon Leong 	struct bpf_prog *prog;
4863bba71cacSOng Boon Leong 	int res;
4864bba71cacSOng Boon Leong 
4865bba71cacSOng Boon Leong 	prog = READ_ONCE(priv->xdp_prog);
4866bba71cacSOng Boon Leong 	if (!prog) {
4867bba71cacSOng Boon Leong 		res = STMMAC_XDP_PASS;
48682f1e432dSToke Høiland-Jørgensen 		goto out;
4869bba71cacSOng Boon Leong 	}
4870bba71cacSOng Boon Leong 
4871bba71cacSOng Boon Leong 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
48722f1e432dSToke Høiland-Jørgensen out:
48735fabb012SOng Boon Leong 	return ERR_PTR(-res);
48745fabb012SOng Boon Leong }
48755fabb012SOng Boon Leong 
4876be8b38a7SOng Boon Leong static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4877be8b38a7SOng Boon Leong 				   int xdp_status)
4878be8b38a7SOng Boon Leong {
4879be8b38a7SOng Boon Leong 	int cpu = smp_processor_id();
4880be8b38a7SOng Boon Leong 	int queue;
4881be8b38a7SOng Boon Leong 
4882be8b38a7SOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4883be8b38a7SOng Boon Leong 
4884be8b38a7SOng Boon Leong 	if (xdp_status & STMMAC_XDP_TX)
4885be8b38a7SOng Boon Leong 		stmmac_tx_timer_arm(priv, queue);
48868b278a5bSOng Boon Leong 
48878b278a5bSOng Boon Leong 	if (xdp_status & STMMAC_XDP_REDIRECT)
48888b278a5bSOng Boon Leong 		xdp_do_flush();
4889be8b38a7SOng Boon Leong }
4890be8b38a7SOng Boon Leong 
4891bba2556eSOng Boon Leong static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4892bba2556eSOng Boon Leong 					       struct xdp_buff *xdp)
4893bba2556eSOng Boon Leong {
4894bba2556eSOng Boon Leong 	unsigned int metasize = xdp->data - xdp->data_meta;
4895bba2556eSOng Boon Leong 	unsigned int datasize = xdp->data_end - xdp->data;
4896bba2556eSOng Boon Leong 	struct sk_buff *skb;
4897bba2556eSOng Boon Leong 
4898132c32eeSOng Boon Leong 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4899bba2556eSOng Boon Leong 			       xdp->data_end - xdp->data_hard_start,
4900bba2556eSOng Boon Leong 			       GFP_ATOMIC | __GFP_NOWARN);
4901bba2556eSOng Boon Leong 	if (unlikely(!skb))
4902bba2556eSOng Boon Leong 		return NULL;
4903bba2556eSOng Boon Leong 
4904bba2556eSOng Boon Leong 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4905bba2556eSOng Boon Leong 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4906bba2556eSOng Boon Leong 	if (metasize)
4907bba2556eSOng Boon Leong 		skb_metadata_set(skb, metasize);
4908bba2556eSOng Boon Leong 
4909bba2556eSOng Boon Leong 	return skb;
4910bba2556eSOng Boon Leong }
4911bba2556eSOng Boon Leong 
4912bba2556eSOng Boon Leong static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4913bba2556eSOng Boon Leong 				   struct dma_desc *p, struct dma_desc *np,
4914bba2556eSOng Boon Leong 				   struct xdp_buff *xdp)
4915bba2556eSOng Boon Leong {
4916bba2556eSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
4917bba2556eSOng Boon Leong 	unsigned int len = xdp->data_end - xdp->data;
4918bba2556eSOng Boon Leong 	enum pkt_hash_types hash_type;
4919bba2556eSOng Boon Leong 	int coe = priv->hw->rx_csum;
4920bba2556eSOng Boon Leong 	struct sk_buff *skb;
4921bba2556eSOng Boon Leong 	u32 hash;
4922bba2556eSOng Boon Leong 
4923bba2556eSOng Boon Leong 	skb = stmmac_construct_skb_zc(ch, xdp);
4924bba2556eSOng Boon Leong 	if (!skb) {
4925bba2556eSOng Boon Leong 		priv->dev->stats.rx_dropped++;
4926bba2556eSOng Boon Leong 		return;
4927bba2556eSOng Boon Leong 	}
4928bba2556eSOng Boon Leong 
4929bba2556eSOng Boon Leong 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4930bba2556eSOng Boon Leong 	stmmac_rx_vlan(priv->dev, skb);
4931bba2556eSOng Boon Leong 	skb->protocol = eth_type_trans(skb, priv->dev);
4932bba2556eSOng Boon Leong 
4933bba2556eSOng Boon Leong 	if (unlikely(!coe))
4934bba2556eSOng Boon Leong 		skb_checksum_none_assert(skb);
4935bba2556eSOng Boon Leong 	else
4936bba2556eSOng Boon Leong 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4937bba2556eSOng Boon Leong 
4938bba2556eSOng Boon Leong 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4939bba2556eSOng Boon Leong 		skb_set_hash(skb, hash, hash_type);
4940bba2556eSOng Boon Leong 
4941bba2556eSOng Boon Leong 	skb_record_rx_queue(skb, queue);
4942132c32eeSOng Boon Leong 	napi_gro_receive(&ch->rxtx_napi, skb);
4943bba2556eSOng Boon Leong 
4944bba2556eSOng Boon Leong 	priv->dev->stats.rx_packets++;
4945bba2556eSOng Boon Leong 	priv->dev->stats.rx_bytes += len;
4946bba2556eSOng Boon Leong }
4947bba2556eSOng Boon Leong 
4948bba2556eSOng Boon Leong static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4949bba2556eSOng Boon Leong {
49508531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4951bba2556eSOng Boon Leong 	unsigned int entry = rx_q->dirty_rx;
4952bba2556eSOng Boon Leong 	struct dma_desc *rx_desc = NULL;
4953bba2556eSOng Boon Leong 	bool ret = true;
4954bba2556eSOng Boon Leong 
4955bba2556eSOng Boon Leong 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4956bba2556eSOng Boon Leong 
4957bba2556eSOng Boon Leong 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4958bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4959bba2556eSOng Boon Leong 		dma_addr_t dma_addr;
4960bba2556eSOng Boon Leong 		bool use_rx_wd;
4961bba2556eSOng Boon Leong 
4962bba2556eSOng Boon Leong 		if (!buf->xdp) {
4963bba2556eSOng Boon Leong 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4964bba2556eSOng Boon Leong 			if (!buf->xdp) {
4965bba2556eSOng Boon Leong 				ret = false;
4966bba2556eSOng Boon Leong 				break;
4967bba2556eSOng Boon Leong 			}
4968bba2556eSOng Boon Leong 		}
4969bba2556eSOng Boon Leong 
4970bba2556eSOng Boon Leong 		if (priv->extend_desc)
4971bba2556eSOng Boon Leong 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4972bba2556eSOng Boon Leong 		else
4973bba2556eSOng Boon Leong 			rx_desc = rx_q->dma_rx + entry;
4974bba2556eSOng Boon Leong 
4975bba2556eSOng Boon Leong 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4976bba2556eSOng Boon Leong 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4977bba2556eSOng Boon Leong 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4978bba2556eSOng Boon Leong 		stmmac_refill_desc3(priv, rx_q, rx_desc);
4979bba2556eSOng Boon Leong 
4980bba2556eSOng Boon Leong 		rx_q->rx_count_frames++;
4981bba2556eSOng Boon Leong 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4982bba2556eSOng Boon Leong 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4983bba2556eSOng Boon Leong 			rx_q->rx_count_frames = 0;
4984bba2556eSOng Boon Leong 
4985bba2556eSOng Boon Leong 		use_rx_wd = !priv->rx_coal_frames[queue];
4986bba2556eSOng Boon Leong 		use_rx_wd |= rx_q->rx_count_frames > 0;
4987bba2556eSOng Boon Leong 		if (!priv->use_riwt)
4988bba2556eSOng Boon Leong 			use_rx_wd = false;
4989bba2556eSOng Boon Leong 
4990bba2556eSOng Boon Leong 		dma_wmb();
4991bba2556eSOng Boon Leong 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4992bba2556eSOng Boon Leong 
49938531c808SChristian Marangi 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4994bba2556eSOng Boon Leong 	}
4995bba2556eSOng Boon Leong 
4996bba2556eSOng Boon Leong 	if (rx_desc) {
4997bba2556eSOng Boon Leong 		rx_q->dirty_rx = entry;
4998bba2556eSOng Boon Leong 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4999bba2556eSOng Boon Leong 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5000bba2556eSOng Boon Leong 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5001bba2556eSOng Boon Leong 	}
5002bba2556eSOng Boon Leong 
5003bba2556eSOng Boon Leong 	return ret;
5004bba2556eSOng Boon Leong }
5005bba2556eSOng Boon Leong 
50069570df35SSong Yoong Siang static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
50079570df35SSong Yoong Siang {
50089570df35SSong Yoong Siang 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
50099570df35SSong Yoong Siang 	 * to represent incoming packet, whereas cb field in the same structure
50109570df35SSong Yoong Siang 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
50119570df35SSong Yoong Siang 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
50129570df35SSong Yoong Siang 	 */
50139570df35SSong Yoong Siang 	return (struct stmmac_xdp_buff *)xdp;
50149570df35SSong Yoong Siang }
50159570df35SSong Yoong Siang 
5016bba2556eSOng Boon Leong static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5017bba2556eSOng Boon Leong {
50188531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5019bba2556eSOng Boon Leong 	unsigned int count = 0, error = 0, len = 0;
5020bba2556eSOng Boon Leong 	int dirty = stmmac_rx_dirty(priv, queue);
5021bba2556eSOng Boon Leong 	unsigned int next_entry = rx_q->cur_rx;
5022bba2556eSOng Boon Leong 	unsigned int desc_size;
5023bba2556eSOng Boon Leong 	struct bpf_prog *prog;
5024bba2556eSOng Boon Leong 	bool failure = false;
5025bba2556eSOng Boon Leong 	int xdp_status = 0;
5026bba2556eSOng Boon Leong 	int status = 0;
5027bba2556eSOng Boon Leong 
5028bba2556eSOng Boon Leong 	if (netif_msg_rx_status(priv)) {
5029bba2556eSOng Boon Leong 		void *rx_head;
5030bba2556eSOng Boon Leong 
5031bba2556eSOng Boon Leong 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5032bba2556eSOng Boon Leong 		if (priv->extend_desc) {
5033bba2556eSOng Boon Leong 			rx_head = (void *)rx_q->dma_erx;
5034bba2556eSOng Boon Leong 			desc_size = sizeof(struct dma_extended_desc);
5035bba2556eSOng Boon Leong 		} else {
5036bba2556eSOng Boon Leong 			rx_head = (void *)rx_q->dma_rx;
5037bba2556eSOng Boon Leong 			desc_size = sizeof(struct dma_desc);
5038bba2556eSOng Boon Leong 		}
5039bba2556eSOng Boon Leong 
50408531c808SChristian Marangi 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5041bba2556eSOng Boon Leong 				    rx_q->dma_rx_phy, desc_size);
5042bba2556eSOng Boon Leong 	}
5043bba2556eSOng Boon Leong 	while (count < limit) {
5044bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf;
50459570df35SSong Yoong Siang 		struct stmmac_xdp_buff *ctx;
5046bba2556eSOng Boon Leong 		unsigned int buf1_len = 0;
5047bba2556eSOng Boon Leong 		struct dma_desc *np, *p;
5048bba2556eSOng Boon Leong 		int entry;
5049bba2556eSOng Boon Leong 		int res;
5050bba2556eSOng Boon Leong 
5051bba2556eSOng Boon Leong 		if (!count && rx_q->state_saved) {
5052bba2556eSOng Boon Leong 			error = rx_q->state.error;
5053bba2556eSOng Boon Leong 			len = rx_q->state.len;
5054bba2556eSOng Boon Leong 		} else {
5055bba2556eSOng Boon Leong 			rx_q->state_saved = false;
5056bba2556eSOng Boon Leong 			error = 0;
5057bba2556eSOng Boon Leong 			len = 0;
5058bba2556eSOng Boon Leong 		}
5059bba2556eSOng Boon Leong 
5060bba2556eSOng Boon Leong 		if (count >= limit)
5061bba2556eSOng Boon Leong 			break;
5062bba2556eSOng Boon Leong 
5063bba2556eSOng Boon Leong read_again:
5064bba2556eSOng Boon Leong 		buf1_len = 0;
5065bba2556eSOng Boon Leong 		entry = next_entry;
5066bba2556eSOng Boon Leong 		buf = &rx_q->buf_pool[entry];
5067bba2556eSOng Boon Leong 
5068bba2556eSOng Boon Leong 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5069bba2556eSOng Boon Leong 			failure = failure ||
5070bba2556eSOng Boon Leong 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5071bba2556eSOng Boon Leong 			dirty = 0;
5072bba2556eSOng Boon Leong 		}
5073bba2556eSOng Boon Leong 
5074bba2556eSOng Boon Leong 		if (priv->extend_desc)
5075bba2556eSOng Boon Leong 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5076bba2556eSOng Boon Leong 		else
5077bba2556eSOng Boon Leong 			p = rx_q->dma_rx + entry;
5078bba2556eSOng Boon Leong 
5079bba2556eSOng Boon Leong 		/* read the status of the incoming frame */
5080bba2556eSOng Boon Leong 		status = stmmac_rx_status(priv, &priv->dev->stats,
5081bba2556eSOng Boon Leong 					  &priv->xstats, p);
5082bba2556eSOng Boon Leong 		/* check if managed by the DMA otherwise go ahead */
5083bba2556eSOng Boon Leong 		if (unlikely(status & dma_own))
5084bba2556eSOng Boon Leong 			break;
5085bba2556eSOng Boon Leong 
5086bba2556eSOng Boon Leong 		/* Prefetch the next RX descriptor */
5087bba2556eSOng Boon Leong 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
50888531c808SChristian Marangi 						priv->dma_conf.dma_rx_size);
5089bba2556eSOng Boon Leong 		next_entry = rx_q->cur_rx;
5090bba2556eSOng Boon Leong 
5091bba2556eSOng Boon Leong 		if (priv->extend_desc)
5092bba2556eSOng Boon Leong 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5093bba2556eSOng Boon Leong 		else
5094bba2556eSOng Boon Leong 			np = rx_q->dma_rx + next_entry;
5095bba2556eSOng Boon Leong 
5096bba2556eSOng Boon Leong 		prefetch(np);
5097bba2556eSOng Boon Leong 
50982b9fff64SSong Yoong Siang 		/* Ensure a valid XSK buffer before proceed */
50992b9fff64SSong Yoong Siang 		if (!buf->xdp)
51002b9fff64SSong Yoong Siang 			break;
51012b9fff64SSong Yoong Siang 
5102bba2556eSOng Boon Leong 		if (priv->extend_desc)
5103bba2556eSOng Boon Leong 			stmmac_rx_extended_status(priv, &priv->dev->stats,
5104bba2556eSOng Boon Leong 						  &priv->xstats,
5105bba2556eSOng Boon Leong 						  rx_q->dma_erx + entry);
5106bba2556eSOng Boon Leong 		if (unlikely(status == discard_frame)) {
5107bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
5108bba2556eSOng Boon Leong 			buf->xdp = NULL;
5109bba2556eSOng Boon Leong 			dirty++;
5110bba2556eSOng Boon Leong 			error = 1;
5111bba2556eSOng Boon Leong 			if (!priv->hwts_rx_en)
5112bba2556eSOng Boon Leong 				priv->dev->stats.rx_errors++;
5113bba2556eSOng Boon Leong 		}
5114bba2556eSOng Boon Leong 
5115bba2556eSOng Boon Leong 		if (unlikely(error && (status & rx_not_ls)))
5116bba2556eSOng Boon Leong 			goto read_again;
5117bba2556eSOng Boon Leong 		if (unlikely(error)) {
5118bba2556eSOng Boon Leong 			count++;
5119bba2556eSOng Boon Leong 			continue;
5120bba2556eSOng Boon Leong 		}
5121bba2556eSOng Boon Leong 
5122bba2556eSOng Boon Leong 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5123bba2556eSOng Boon Leong 		if (likely(status & rx_not_ls)) {
5124bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
5125bba2556eSOng Boon Leong 			buf->xdp = NULL;
5126bba2556eSOng Boon Leong 			dirty++;
5127bba2556eSOng Boon Leong 			count++;
5128bba2556eSOng Boon Leong 			goto read_again;
5129bba2556eSOng Boon Leong 		}
5130bba2556eSOng Boon Leong 
51319570df35SSong Yoong Siang 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
51329570df35SSong Yoong Siang 		ctx->priv = priv;
51339570df35SSong Yoong Siang 		ctx->desc = p;
51349570df35SSong Yoong Siang 		ctx->ndesc = np;
51359570df35SSong Yoong Siang 
5136bba2556eSOng Boon Leong 		/* XDP ZC Frame only support primary buffers for now */
5137bba2556eSOng Boon Leong 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5138bba2556eSOng Boon Leong 		len += buf1_len;
5139bba2556eSOng Boon Leong 
5140929d4342SKurt Kanzenbach 		/* ACS is disabled; strip manually. */
5141929d4342SKurt Kanzenbach 		if (likely(!(status & rx_not_ls))) {
5142bba2556eSOng Boon Leong 			buf1_len -= ETH_FCS_LEN;
5143bba2556eSOng Boon Leong 			len -= ETH_FCS_LEN;
5144bba2556eSOng Boon Leong 		}
5145bba2556eSOng Boon Leong 
5146bba2556eSOng Boon Leong 		/* RX buffer is good and fit into a XSK pool buffer */
5147bba2556eSOng Boon Leong 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5148bba2556eSOng Boon Leong 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5149bba2556eSOng Boon Leong 
5150bba2556eSOng Boon Leong 		prog = READ_ONCE(priv->xdp_prog);
5151bba2556eSOng Boon Leong 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5152bba2556eSOng Boon Leong 
5153bba2556eSOng Boon Leong 		switch (res) {
5154bba2556eSOng Boon Leong 		case STMMAC_XDP_PASS:
5155bba2556eSOng Boon Leong 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5156bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
5157bba2556eSOng Boon Leong 			break;
5158bba2556eSOng Boon Leong 		case STMMAC_XDP_CONSUMED:
5159bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
5160bba2556eSOng Boon Leong 			priv->dev->stats.rx_dropped++;
5161bba2556eSOng Boon Leong 			break;
5162bba2556eSOng Boon Leong 		case STMMAC_XDP_TX:
5163bba2556eSOng Boon Leong 		case STMMAC_XDP_REDIRECT:
5164bba2556eSOng Boon Leong 			xdp_status |= res;
5165bba2556eSOng Boon Leong 			break;
5166bba2556eSOng Boon Leong 		}
5167bba2556eSOng Boon Leong 
5168bba2556eSOng Boon Leong 		buf->xdp = NULL;
5169bba2556eSOng Boon Leong 		dirty++;
5170bba2556eSOng Boon Leong 		count++;
5171bba2556eSOng Boon Leong 	}
5172bba2556eSOng Boon Leong 
5173bba2556eSOng Boon Leong 	if (status & rx_not_ls) {
5174bba2556eSOng Boon Leong 		rx_q->state_saved = true;
5175bba2556eSOng Boon Leong 		rx_q->state.error = error;
5176bba2556eSOng Boon Leong 		rx_q->state.len = len;
5177bba2556eSOng Boon Leong 	}
5178bba2556eSOng Boon Leong 
5179bba2556eSOng Boon Leong 	stmmac_finalize_xdp_rx(priv, xdp_status);
5180bba2556eSOng Boon Leong 
518168e9c5deSVijayakannan Ayyathurai 	priv->xstats.rx_pkt_n += count;
518268e9c5deSVijayakannan Ayyathurai 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
518368e9c5deSVijayakannan Ayyathurai 
5184bba2556eSOng Boon Leong 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5185bba2556eSOng Boon Leong 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5186bba2556eSOng Boon Leong 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5187bba2556eSOng Boon Leong 		else
5188bba2556eSOng Boon Leong 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5189bba2556eSOng Boon Leong 
5190bba2556eSOng Boon Leong 		return (int)count;
5191bba2556eSOng Boon Leong 	}
5192bba2556eSOng Boon Leong 
5193bba2556eSOng Boon Leong 	return failure ? limit : (int)count;
5194bba2556eSOng Boon Leong }
5195bba2556eSOng Boon Leong 
519632ceabcaSGiuseppe CAVALLARO /**
5197732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
519832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
519954139cf3SJoao Pinto  * @limit: napi bugget
520054139cf3SJoao Pinto  * @queue: RX queue index.
520132ceabcaSGiuseppe CAVALLARO  * Description :  this the function called by the napi poll method.
520232ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
520332ceabcaSGiuseppe CAVALLARO  */
520454139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
52057ac6653aSJeff Kirsher {
52068531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
52078fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
5208ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
5209ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
521007b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
52115fabb012SOng Boon Leong 	enum dma_data_direction dma_dir;
5212bfaf91caSJoakim Zhang 	unsigned int desc_size;
5213ec222003SJose Abreu 	struct sk_buff *skb = NULL;
52145b24324aSSong Yoong Siang 	struct stmmac_xdp_buff ctx;
5215be8b38a7SOng Boon Leong 	int xdp_status = 0;
52165fabb012SOng Boon Leong 	int buf_sz;
52175fabb012SOng Boon Leong 
52185fabb012SOng Boon Leong 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
52198531c808SChristian Marangi 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
52207ac6653aSJeff Kirsher 
522183d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
5222d0225e7dSAlexandre TORGUE 		void *rx_head;
5223d0225e7dSAlexandre TORGUE 
522438ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5225bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
522654139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
5227bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
5228bfaf91caSJoakim Zhang 		} else {
522954139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
5230bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
5231bfaf91caSJoakim Zhang 		}
5232d0225e7dSAlexandre TORGUE 
52338531c808SChristian Marangi 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5234bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
52357ac6653aSJeff Kirsher 	}
5236c24602efSGiuseppe CAVALLARO 	while (count < limit) {
523788ebe2cfSJose Abreu 		unsigned int buf1_len = 0, buf2_len = 0;
5238ec222003SJose Abreu 		enum pkt_hash_types hash_type;
52392af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
52402af6106aSJose Abreu 		struct dma_desc *np, *p;
5241ec222003SJose Abreu 		int entry;
5242ec222003SJose Abreu 		u32 hash;
52437ac6653aSJeff Kirsher 
5244ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
5245ec222003SJose Abreu 			skb = rx_q->state.skb;
5246ec222003SJose Abreu 			error = rx_q->state.error;
5247ec222003SJose Abreu 			len = rx_q->state.len;
5248ec222003SJose Abreu 		} else {
5249ec222003SJose Abreu 			rx_q->state_saved = false;
5250ec222003SJose Abreu 			skb = NULL;
5251ec222003SJose Abreu 			error = 0;
5252ec222003SJose Abreu 			len = 0;
5253ec222003SJose Abreu 		}
5254ec222003SJose Abreu 
5255ec222003SJose Abreu 		if (count >= limit)
5256ec222003SJose Abreu 			break;
5257ec222003SJose Abreu 
5258ec222003SJose Abreu read_again:
525988ebe2cfSJose Abreu 		buf1_len = 0;
526088ebe2cfSJose Abreu 		buf2_len = 0;
526107b39753SAaro Koskinen 		entry = next_entry;
52622af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
526307b39753SAaro Koskinen 
5264c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
526554139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5266c24602efSGiuseppe CAVALLARO 		else
526754139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
5268c24602efSGiuseppe CAVALLARO 
5269c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
527042de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
5271c1fa3212SFabrice Gasnier 				&priv->xstats, p);
5272c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
5273c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
52747ac6653aSJeff Kirsher 			break;
52757ac6653aSJeff Kirsher 
5276aa042f60SSong, Yoong Siang 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
52778531c808SChristian Marangi 						priv->dma_conf.dma_rx_size);
527854139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
5279e3ad57c9SGiuseppe Cavallaro 
5280c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
528154139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5282c24602efSGiuseppe CAVALLARO 		else
528354139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
5284ba1ffd74SGiuseppe CAVALLARO 
5285ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
52867ac6653aSJeff Kirsher 
528742de047dSJose Abreu 		if (priv->extend_desc)
528842de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
528942de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
5290891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
52912af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
52922af6106aSJose Abreu 			buf->page = NULL;
5293ec222003SJose Abreu 			error = 1;
52940b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
52950b273ca4SJose Abreu 				priv->dev->stats.rx_errors++;
5296ec222003SJose Abreu 		}
5297f748be53SAlexandre TORGUE 
5298ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
5299ec222003SJose Abreu 			goto read_again;
5300ec222003SJose Abreu 		if (unlikely(error)) {
5301ec222003SJose Abreu 			dev_kfree_skb(skb);
530288ebe2cfSJose Abreu 			skb = NULL;
5303cda4985aSJose Abreu 			count++;
530407b39753SAaro Koskinen 			continue;
5305e527c4a7SGiuseppe CAVALLARO 		}
5306e527c4a7SGiuseppe CAVALLARO 
5307ec222003SJose Abreu 		/* Buffer is good. Go on. */
5308ec222003SJose Abreu 
53094744bf07SMatteo Croce 		prefetch(page_address(buf->page) + buf->page_offset);
531088ebe2cfSJose Abreu 		if (buf->sec_page)
531188ebe2cfSJose Abreu 			prefetch(page_address(buf->sec_page));
531288ebe2cfSJose Abreu 
531388ebe2cfSJose Abreu 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
531488ebe2cfSJose Abreu 		len += buf1_len;
531588ebe2cfSJose Abreu 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
531688ebe2cfSJose Abreu 		len += buf2_len;
5317ec222003SJose Abreu 
5318929d4342SKurt Kanzenbach 		/* ACS is disabled; strip manually. */
5319929d4342SKurt Kanzenbach 		if (likely(!(status & rx_not_ls))) {
53200f296e78SZekun Shen 			if (buf2_len) {
532188ebe2cfSJose Abreu 				buf2_len -= ETH_FCS_LEN;
5322ec222003SJose Abreu 				len -= ETH_FCS_LEN;
53230f296e78SZekun Shen 			} else if (buf1_len) {
53240f296e78SZekun Shen 				buf1_len -= ETH_FCS_LEN;
53250f296e78SZekun Shen 				len -= ETH_FCS_LEN;
53260f296e78SZekun Shen 			}
532783d7af64SGiuseppe CAVALLARO 		}
532822ad3838SGiuseppe Cavallaro 
5329ec222003SJose Abreu 		if (!skb) {
5330be8b38a7SOng Boon Leong 			unsigned int pre_len, sync_len;
5331be8b38a7SOng Boon Leong 
53325fabb012SOng Boon Leong 			dma_sync_single_for_cpu(priv->device, buf->addr,
53335fabb012SOng Boon Leong 						buf1_len, dma_dir);
53345fabb012SOng Boon Leong 
53355b24324aSSong Yoong Siang 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
53365b24324aSSong Yoong Siang 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5337e3f9c3e3SSong Yoong Siang 					 buf->page_offset, buf1_len, true);
53385fabb012SOng Boon Leong 
53395b24324aSSong Yoong Siang 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5340be8b38a7SOng Boon Leong 				  buf->page_offset;
5341e3f9c3e3SSong Yoong Siang 
5342e3f9c3e3SSong Yoong Siang 			ctx.priv = priv;
5343e3f9c3e3SSong Yoong Siang 			ctx.desc = p;
5344e3f9c3e3SSong Yoong Siang 			ctx.ndesc = np;
5345e3f9c3e3SSong Yoong Siang 
53465b24324aSSong Yoong Siang 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5347be8b38a7SOng Boon Leong 			/* Due xdp_adjust_tail: DMA sync for_device
5348be8b38a7SOng Boon Leong 			 * cover max len CPU touch
5349be8b38a7SOng Boon Leong 			 */
53505b24324aSSong Yoong Siang 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5351be8b38a7SOng Boon Leong 				   buf->page_offset;
5352be8b38a7SOng Boon Leong 			sync_len = max(sync_len, pre_len);
53535fabb012SOng Boon Leong 
53545fabb012SOng Boon Leong 			/* For Not XDP_PASS verdict */
53555fabb012SOng Boon Leong 			if (IS_ERR(skb)) {
53565fabb012SOng Boon Leong 				unsigned int xdp_res = -PTR_ERR(skb);
53575fabb012SOng Boon Leong 
53585fabb012SOng Boon Leong 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5359be8b38a7SOng Boon Leong 					page_pool_put_page(rx_q->page_pool,
53605b24324aSSong Yoong Siang 							   virt_to_head_page(ctx.xdp.data),
5361be8b38a7SOng Boon Leong 							   sync_len, true);
53625fabb012SOng Boon Leong 					buf->page = NULL;
53635fabb012SOng Boon Leong 					priv->dev->stats.rx_dropped++;
53645fabb012SOng Boon Leong 
53655fabb012SOng Boon Leong 					/* Clear skb as it was set as
53665fabb012SOng Boon Leong 					 * status by XDP program.
53675fabb012SOng Boon Leong 					 */
53685fabb012SOng Boon Leong 					skb = NULL;
53695fabb012SOng Boon Leong 
53705fabb012SOng Boon Leong 					if (unlikely((status & rx_not_ls)))
53715fabb012SOng Boon Leong 						goto read_again;
53725fabb012SOng Boon Leong 
53735fabb012SOng Boon Leong 					count++;
53745fabb012SOng Boon Leong 					continue;
53758b278a5bSOng Boon Leong 				} else if (xdp_res & (STMMAC_XDP_TX |
53768b278a5bSOng Boon Leong 						      STMMAC_XDP_REDIRECT)) {
5377be8b38a7SOng Boon Leong 					xdp_status |= xdp_res;
5378be8b38a7SOng Boon Leong 					buf->page = NULL;
5379be8b38a7SOng Boon Leong 					skb = NULL;
5380be8b38a7SOng Boon Leong 					count++;
5381be8b38a7SOng Boon Leong 					continue;
53825fabb012SOng Boon Leong 				}
53835fabb012SOng Boon Leong 			}
53845fabb012SOng Boon Leong 		}
53855fabb012SOng Boon Leong 
53865fabb012SOng Boon Leong 		if (!skb) {
53875fabb012SOng Boon Leong 			/* XDP program may expand or reduce tail */
53885b24324aSSong Yoong Siang 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
53895fabb012SOng Boon Leong 
539088ebe2cfSJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5391ec222003SJose Abreu 			if (!skb) {
539222ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
5393cda4985aSJose Abreu 				count++;
539488ebe2cfSJose Abreu 				goto drain_data;
539522ad3838SGiuseppe Cavallaro 			}
539622ad3838SGiuseppe Cavallaro 
53975fabb012SOng Boon Leong 			/* XDP program may adjust header */
53985b24324aSSong Yoong Siang 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
539988ebe2cfSJose Abreu 			skb_put(skb, buf1_len);
540022ad3838SGiuseppe Cavallaro 
5401ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
5402ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5403ec222003SJose Abreu 			buf->page = NULL;
540488ebe2cfSJose Abreu 		} else if (buf1_len) {
5405ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
54065fabb012SOng Boon Leong 						buf1_len, dma_dir);
5407ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
54085fabb012SOng Boon Leong 					buf->page, buf->page_offset, buf1_len,
54098531c808SChristian Marangi 					priv->dma_conf.dma_buf_sz);
5410ec222003SJose Abreu 
5411ec222003SJose Abreu 			/* Data payload appended into SKB */
5412ec222003SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->page);
5413ec222003SJose Abreu 			buf->page = NULL;
54147ac6653aSJeff Kirsher 		}
541583d7af64SGiuseppe CAVALLARO 
541688ebe2cfSJose Abreu 		if (buf2_len) {
541767afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
54185fabb012SOng Boon Leong 						buf2_len, dma_dir);
541967afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
542088ebe2cfSJose Abreu 					buf->sec_page, 0, buf2_len,
54218531c808SChristian Marangi 					priv->dma_conf.dma_buf_sz);
542267afd6d1SJose Abreu 
542367afd6d1SJose Abreu 			/* Data payload appended into SKB */
542467afd6d1SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
542567afd6d1SJose Abreu 			buf->sec_page = NULL;
542667afd6d1SJose Abreu 		}
542767afd6d1SJose Abreu 
542888ebe2cfSJose Abreu drain_data:
5429ec222003SJose Abreu 		if (likely(status & rx_not_ls))
5430ec222003SJose Abreu 			goto read_again;
543188ebe2cfSJose Abreu 		if (!skb)
543288ebe2cfSJose Abreu 			continue;
5433ec222003SJose Abreu 
5434ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
5435ec222003SJose Abreu 
5436ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5437b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
54387ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
54397ac6653aSJeff Kirsher 
5440ceb69499SGiuseppe CAVALLARO 		if (unlikely(!coe))
54417ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
544262a2ab93SGiuseppe CAVALLARO 		else
54437ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
544462a2ab93SGiuseppe CAVALLARO 
544576067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
544676067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
544776067459SJose Abreu 
544876067459SJose Abreu 		skb_record_rx_queue(skb, queue);
54494ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
545088ebe2cfSJose Abreu 		skb = NULL;
54517ac6653aSJeff Kirsher 
54527ac6653aSJeff Kirsher 		priv->dev->stats.rx_packets++;
5453ec222003SJose Abreu 		priv->dev->stats.rx_bytes += len;
5454cda4985aSJose Abreu 		count++;
54557ac6653aSJeff Kirsher 	}
5456ec222003SJose Abreu 
545788ebe2cfSJose Abreu 	if (status & rx_not_ls || skb) {
5458ec222003SJose Abreu 		rx_q->state_saved = true;
5459ec222003SJose Abreu 		rx_q->state.skb = skb;
5460ec222003SJose Abreu 		rx_q->state.error = error;
5461ec222003SJose Abreu 		rx_q->state.len = len;
54627ac6653aSJeff Kirsher 	}
54637ac6653aSJeff Kirsher 
5464be8b38a7SOng Boon Leong 	stmmac_finalize_xdp_rx(priv, xdp_status);
5465be8b38a7SOng Boon Leong 
546654139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
54677ac6653aSJeff Kirsher 
54687ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
546968e9c5deSVijayakannan Ayyathurai 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
54707ac6653aSJeff Kirsher 
54717ac6653aSJeff Kirsher 	return count;
54727ac6653aSJeff Kirsher }
54737ac6653aSJeff Kirsher 
54744ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
54757ac6653aSJeff Kirsher {
54768fce3331SJose Abreu 	struct stmmac_channel *ch =
54774ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
54788fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
54798fce3331SJose Abreu 	u32 chan = ch->index;
54804ccb4585SJose Abreu 	int work_done;
54817ac6653aSJeff Kirsher 
54829125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
5483ce736788SJoao Pinto 
5484132c32eeSOng Boon Leong 	work_done = stmmac_rx(priv, budget, chan);
5485021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5486021bd5e3SJose Abreu 		unsigned long flags;
5487021bd5e3SJose Abreu 
5488021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5489021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5490021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5491021bd5e3SJose Abreu 	}
5492021bd5e3SJose Abreu 
54934ccb4585SJose Abreu 	return work_done;
54944ccb4585SJose Abreu }
5495ce736788SJoao Pinto 
54964ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
54974ccb4585SJose Abreu {
54984ccb4585SJose Abreu 	struct stmmac_channel *ch =
54994ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
55004ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
55014ccb4585SJose Abreu 	u32 chan = ch->index;
55024ccb4585SJose Abreu 	int work_done;
55034ccb4585SJose Abreu 
55044ccb4585SJose Abreu 	priv->xstats.napi_poll++;
55054ccb4585SJose Abreu 
5506132c32eeSOng Boon Leong 	work_done = stmmac_tx_clean(priv, budget, chan);
5507fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
55088fce3331SJose Abreu 
5509021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5510021bd5e3SJose Abreu 		unsigned long flags;
55114ccb4585SJose Abreu 
5512021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5513021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5514021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5515fa0be0a4SJose Abreu 	}
55168fce3331SJose Abreu 
55177ac6653aSJeff Kirsher 	return work_done;
55187ac6653aSJeff Kirsher }
55197ac6653aSJeff Kirsher 
5520132c32eeSOng Boon Leong static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5521132c32eeSOng Boon Leong {
5522132c32eeSOng Boon Leong 	struct stmmac_channel *ch =
5523132c32eeSOng Boon Leong 		container_of(napi, struct stmmac_channel, rxtx_napi);
5524132c32eeSOng Boon Leong 	struct stmmac_priv *priv = ch->priv_data;
552581d0885dSSong Yoong Siang 	int rx_done, tx_done, rxtx_done;
5526132c32eeSOng Boon Leong 	u32 chan = ch->index;
5527132c32eeSOng Boon Leong 
5528132c32eeSOng Boon Leong 	priv->xstats.napi_poll++;
5529132c32eeSOng Boon Leong 
5530132c32eeSOng Boon Leong 	tx_done = stmmac_tx_clean(priv, budget, chan);
5531132c32eeSOng Boon Leong 	tx_done = min(tx_done, budget);
5532132c32eeSOng Boon Leong 
5533132c32eeSOng Boon Leong 	rx_done = stmmac_rx_zc(priv, budget, chan);
5534132c32eeSOng Boon Leong 
553581d0885dSSong Yoong Siang 	rxtx_done = max(tx_done, rx_done);
553681d0885dSSong Yoong Siang 
5537132c32eeSOng Boon Leong 	/* If either TX or RX work is not complete, return budget
5538132c32eeSOng Boon Leong 	 * and keep pooling
5539132c32eeSOng Boon Leong 	 */
554081d0885dSSong Yoong Siang 	if (rxtx_done >= budget)
5541132c32eeSOng Boon Leong 		return budget;
5542132c32eeSOng Boon Leong 
5543132c32eeSOng Boon Leong 	/* all work done, exit the polling mode */
554481d0885dSSong Yoong Siang 	if (napi_complete_done(napi, rxtx_done)) {
5545132c32eeSOng Boon Leong 		unsigned long flags;
5546132c32eeSOng Boon Leong 
5547132c32eeSOng Boon Leong 		spin_lock_irqsave(&ch->lock, flags);
5548132c32eeSOng Boon Leong 		/* Both RX and TX work done are compelte,
5549132c32eeSOng Boon Leong 		 * so enable both RX & TX IRQs.
5550132c32eeSOng Boon Leong 		 */
5551132c32eeSOng Boon Leong 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5552132c32eeSOng Boon Leong 		spin_unlock_irqrestore(&ch->lock, flags);
5553132c32eeSOng Boon Leong 	}
5554132c32eeSOng Boon Leong 
555581d0885dSSong Yoong Siang 	return min(rxtx_done, budget - 1);
5556132c32eeSOng Boon Leong }
5557132c32eeSOng Boon Leong 
55587ac6653aSJeff Kirsher /**
55597ac6653aSJeff Kirsher  *  stmmac_tx_timeout
55607ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
5561d0ea5cbdSJesse Brandeburg  *  @txqueue: the index of the hanging transmit queue
55627ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
55637284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
55647ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
55657ac6653aSJeff Kirsher  *   in order to transmit a new packet.
55667ac6653aSJeff Kirsher  */
55670290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
55687ac6653aSJeff Kirsher {
55697ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
55707ac6653aSJeff Kirsher 
557134877a15SJose Abreu 	stmmac_global_err(priv);
55727ac6653aSJeff Kirsher }
55737ac6653aSJeff Kirsher 
55747ac6653aSJeff Kirsher /**
557501789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
55767ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
55777ac6653aSJeff Kirsher  *  Description:
55787ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
55797ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
55807ac6653aSJeff Kirsher  *  Return value:
55817ac6653aSJeff Kirsher  *  void.
55827ac6653aSJeff Kirsher  */
558301789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
55847ac6653aSJeff Kirsher {
55857ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
55867ac6653aSJeff Kirsher 
5587c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
55887ac6653aSJeff Kirsher }
55897ac6653aSJeff Kirsher 
55907ac6653aSJeff Kirsher /**
55917ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
55927ac6653aSJeff Kirsher  *  @dev : device pointer.
55937ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
55947ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
55957ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
55967ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
55977ac6653aSJeff Kirsher  *  Return value:
55987ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
55997ac6653aSJeff Kirsher  *  file on failure.
56007ac6653aSJeff Kirsher  */
56017ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
56027ac6653aSJeff Kirsher {
560338ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
5604eaf4fac4SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
560534700796SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
56065b55299eSDavid Wu 	const int mtu = new_mtu;
560734700796SChristian Marangi 	int ret;
5608eaf4fac4SJose Abreu 
5609eaf4fac4SJose Abreu 	if (txfifosz == 0)
5610eaf4fac4SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
5611eaf4fac4SJose Abreu 
5612eaf4fac4SJose Abreu 	txfifosz /= priv->plat->tx_queues_to_use;
561338ddc59dSLABBE Corentin 
56145fabb012SOng Boon Leong 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
56155fabb012SOng Boon Leong 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
56165fabb012SOng Boon Leong 		return -EINVAL;
56175fabb012SOng Boon Leong 	}
56185fabb012SOng Boon Leong 
5619eaf4fac4SJose Abreu 	new_mtu = STMMAC_ALIGN(new_mtu);
5620eaf4fac4SJose Abreu 
5621eaf4fac4SJose Abreu 	/* If condition true, FIFO is too small or MTU too large */
5622eaf4fac4SJose Abreu 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5623eaf4fac4SJose Abreu 		return -EINVAL;
5624eaf4fac4SJose Abreu 
562534700796SChristian Marangi 	if (netif_running(dev)) {
562634700796SChristian Marangi 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
562734700796SChristian Marangi 		/* Try to allocate the new DMA conf with the new mtu */
562834700796SChristian Marangi 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
562934700796SChristian Marangi 		if (IS_ERR(dma_conf)) {
563034700796SChristian Marangi 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
563134700796SChristian Marangi 				   mtu);
563234700796SChristian Marangi 			return PTR_ERR(dma_conf);
563334700796SChristian Marangi 		}
5634f748be53SAlexandre TORGUE 
563534700796SChristian Marangi 		stmmac_release(dev);
563634700796SChristian Marangi 
563734700796SChristian Marangi 		ret = __stmmac_open(dev, dma_conf);
563834700796SChristian Marangi 		if (ret) {
5639*30134b7cSChristian Marangi 			free_dma_desc_resources(priv, dma_conf);
5640*30134b7cSChristian Marangi 			kfree(dma_conf);
564134700796SChristian Marangi 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
564234700796SChristian Marangi 			return ret;
564334700796SChristian Marangi 		}
564434700796SChristian Marangi 
5645*30134b7cSChristian Marangi 		kfree(dma_conf);
5646*30134b7cSChristian Marangi 
564734700796SChristian Marangi 		stmmac_set_rx_mode(dev);
564834700796SChristian Marangi 	}
564934700796SChristian Marangi 
565034700796SChristian Marangi 	dev->mtu = mtu;
56517ac6653aSJeff Kirsher 	netdev_update_features(dev);
56527ac6653aSJeff Kirsher 
56537ac6653aSJeff Kirsher 	return 0;
56547ac6653aSJeff Kirsher }
56557ac6653aSJeff Kirsher 
5656c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
5657c8f44affSMichał Mirosław 					     netdev_features_t features)
56587ac6653aSJeff Kirsher {
56597ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
56607ac6653aSJeff Kirsher 
566138912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
56627ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
5663d2afb5bdSGiuseppe CAVALLARO 
56647ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
5665a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
56667ac6653aSJeff Kirsher 
56677ac6653aSJeff Kirsher 	/* Some GMAC devices have a bugged Jumbo frame support that
56687ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
56697ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
5670ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and not use SF.
5671ceb69499SGiuseppe CAVALLARO 	 */
56727ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5673a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
56747ac6653aSJeff Kirsher 
5675f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
5676f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5677f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
5678f748be53SAlexandre TORGUE 			priv->tso = true;
5679f748be53SAlexandre TORGUE 		else
5680f748be53SAlexandre TORGUE 			priv->tso = false;
5681f748be53SAlexandre TORGUE 	}
5682f748be53SAlexandre TORGUE 
56837ac6653aSJeff Kirsher 	return features;
56847ac6653aSJeff Kirsher }
56857ac6653aSJeff Kirsher 
5686d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
5687d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
5688d2afb5bdSGiuseppe CAVALLARO {
5689d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
5690d2afb5bdSGiuseppe CAVALLARO 
5691d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
5692d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
5693d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
5694d2afb5bdSGiuseppe CAVALLARO 	else
5695d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
5696d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
5697d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of issue.
5698d2afb5bdSGiuseppe CAVALLARO 	 */
5699c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
5700d2afb5bdSGiuseppe CAVALLARO 
5701f8e7dfd6SVincent Whitchurch 	if (priv->sph_cap) {
5702f8e7dfd6SVincent Whitchurch 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5703f8e7dfd6SVincent Whitchurch 		u32 chan;
57045fabb012SOng Boon Leong 
570567afd6d1SJose Abreu 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
570667afd6d1SJose Abreu 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5707f8e7dfd6SVincent Whitchurch 	}
570867afd6d1SJose Abreu 
5709d2afb5bdSGiuseppe CAVALLARO 	return 0;
5710d2afb5bdSGiuseppe CAVALLARO }
5711d2afb5bdSGiuseppe CAVALLARO 
57125a558611SOng Boon Leong static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
57135a558611SOng Boon Leong {
57145a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
57155a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
57165a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
57175a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
57185a558611SOng Boon Leong 
57195a558611SOng Boon Leong 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
57205a558611SOng Boon Leong 		return;
57215a558611SOng Boon Leong 
57225a558611SOng Boon Leong 	/* If LP has sent verify mPacket, LP is FPE capable */
57235a558611SOng Boon Leong 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
57245a558611SOng Boon Leong 		if (*lp_state < FPE_STATE_CAPABLE)
57255a558611SOng Boon Leong 			*lp_state = FPE_STATE_CAPABLE;
57265a558611SOng Boon Leong 
57275a558611SOng Boon Leong 		/* If user has requested FPE enable, quickly response */
57285a558611SOng Boon Leong 		if (*hs_enable)
57295a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
57305a558611SOng Boon Leong 						MPACKET_RESPONSE);
57315a558611SOng Boon Leong 	}
57325a558611SOng Boon Leong 
57335a558611SOng Boon Leong 	/* If Local has sent verify mPacket, Local is FPE capable */
57345a558611SOng Boon Leong 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
57355a558611SOng Boon Leong 		if (*lo_state < FPE_STATE_CAPABLE)
57365a558611SOng Boon Leong 			*lo_state = FPE_STATE_CAPABLE;
57375a558611SOng Boon Leong 	}
57385a558611SOng Boon Leong 
57395a558611SOng Boon Leong 	/* If LP has sent response mPacket, LP is entering FPE ON */
57405a558611SOng Boon Leong 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
57415a558611SOng Boon Leong 		*lp_state = FPE_STATE_ENTERING_ON;
57425a558611SOng Boon Leong 
57435a558611SOng Boon Leong 	/* If Local has sent response mPacket, Local is entering FPE ON */
57445a558611SOng Boon Leong 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
57455a558611SOng Boon Leong 		*lo_state = FPE_STATE_ENTERING_ON;
57465a558611SOng Boon Leong 
57475a558611SOng Boon Leong 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
57485a558611SOng Boon Leong 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
57495a558611SOng Boon Leong 	    priv->fpe_wq) {
57505a558611SOng Boon Leong 		queue_work(priv->fpe_wq, &priv->fpe_task);
57515a558611SOng Boon Leong 	}
57525a558611SOng Boon Leong }
57535a558611SOng Boon Leong 
575429e6573cSOng Boon Leong static void stmmac_common_interrupt(struct stmmac_priv *priv)
57557ac6653aSJeff Kirsher {
57567bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
57577bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
57587bac4e1eSJoao Pinto 	u32 queues_count;
57597bac4e1eSJoao Pinto 	u32 queue;
57607d9e6c5aSJose Abreu 	bool xmac;
57617bac4e1eSJoao Pinto 
57627d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
57637bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
57647ac6653aSJeff Kirsher 
576589f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
576689f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
576789f7f2cfSSrinivas Kandagatla 
5768e49aa315SVoon Weifeng 	if (priv->dma_cap.estsel)
57699f298959SOng Boon Leong 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
57709f298959SOng Boon Leong 				      &priv->xstats, tx_cnt);
5771e49aa315SVoon Weifeng 
57725a558611SOng Boon Leong 	if (priv->dma_cap.fpesel) {
57735a558611SOng Boon Leong 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
57745a558611SOng Boon Leong 						   priv->dev);
57755a558611SOng Boon Leong 
57765a558611SOng Boon Leong 		stmmac_fpe_event_status(priv, status);
57775a558611SOng Boon Leong 	}
57785a558611SOng Boon Leong 
57797ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
57807d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
5781c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
57828f71a88dSJoao Pinto 
5783d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
5784d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
57850982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5786d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
57870982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5788d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
57897bac4e1eSJoao Pinto 		}
57907bac4e1eSJoao Pinto 
57917bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
57928a7cb245SYannick Vignon 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
57937bac4e1eSJoao Pinto 							    queue);
57947bac4e1eSJoao Pinto 		}
579570523e63SGiuseppe CAVALLARO 
579670523e63SGiuseppe CAVALLARO 		/* PCS link status */
57973fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
579870523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
579929e6573cSOng Boon Leong 				netif_carrier_on(priv->dev);
580070523e63SGiuseppe CAVALLARO 			else
580129e6573cSOng Boon Leong 				netif_carrier_off(priv->dev);
580270523e63SGiuseppe CAVALLARO 		}
5803f4da5652STan Tee Min 
5804f4da5652STan Tee Min 		stmmac_timestamp_interrupt(priv, priv);
5805d765955dSGiuseppe CAVALLARO 	}
580629e6573cSOng Boon Leong }
580729e6573cSOng Boon Leong 
580829e6573cSOng Boon Leong /**
580929e6573cSOng Boon Leong  *  stmmac_interrupt - main ISR
581029e6573cSOng Boon Leong  *  @irq: interrupt number.
581129e6573cSOng Boon Leong  *  @dev_id: to pass the net device pointer.
581229e6573cSOng Boon Leong  *  Description: this is the main driver interrupt service routine.
581329e6573cSOng Boon Leong  *  It can call:
581429e6573cSOng Boon Leong  *  o DMA service routine (to manage incoming frame reception and transmission
581529e6573cSOng Boon Leong  *    status)
581629e6573cSOng Boon Leong  *  o Core interrupts to manage: remote wake-up, management counter, LPI
581729e6573cSOng Boon Leong  *    interrupts.
581829e6573cSOng Boon Leong  */
581929e6573cSOng Boon Leong static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
582029e6573cSOng Boon Leong {
582129e6573cSOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
582229e6573cSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
582329e6573cSOng Boon Leong 
582429e6573cSOng Boon Leong 	/* Check if adapter is up */
582529e6573cSOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
582629e6573cSOng Boon Leong 		return IRQ_HANDLED;
582729e6573cSOng Boon Leong 
582829e6573cSOng Boon Leong 	/* Check if a fatal error happened */
582929e6573cSOng Boon Leong 	if (stmmac_safety_feat_interrupt(priv))
583029e6573cSOng Boon Leong 		return IRQ_HANDLED;
583129e6573cSOng Boon Leong 
583229e6573cSOng Boon Leong 	/* To handle Common interrupts */
583329e6573cSOng Boon Leong 	stmmac_common_interrupt(priv);
5834d765955dSGiuseppe CAVALLARO 
5835d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
58367ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
58377ac6653aSJeff Kirsher 
58387ac6653aSJeff Kirsher 	return IRQ_HANDLED;
58397ac6653aSJeff Kirsher }
58407ac6653aSJeff Kirsher 
58418532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
58428532f613SOng Boon Leong {
58438532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
58448532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
58458532f613SOng Boon Leong 
58468532f613SOng Boon Leong 	if (unlikely(!dev)) {
58478532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
58488532f613SOng Boon Leong 		return IRQ_NONE;
58498532f613SOng Boon Leong 	}
58508532f613SOng Boon Leong 
58518532f613SOng Boon Leong 	/* Check if adapter is up */
58528532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
58538532f613SOng Boon Leong 		return IRQ_HANDLED;
58548532f613SOng Boon Leong 
58558532f613SOng Boon Leong 	/* To handle Common interrupts */
58568532f613SOng Boon Leong 	stmmac_common_interrupt(priv);
58578532f613SOng Boon Leong 
58588532f613SOng Boon Leong 	return IRQ_HANDLED;
58598532f613SOng Boon Leong }
58608532f613SOng Boon Leong 
58618532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
58628532f613SOng Boon Leong {
58638532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
58648532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
58658532f613SOng Boon Leong 
58668532f613SOng Boon Leong 	if (unlikely(!dev)) {
58678532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
58688532f613SOng Boon Leong 		return IRQ_NONE;
58698532f613SOng Boon Leong 	}
58708532f613SOng Boon Leong 
58718532f613SOng Boon Leong 	/* Check if adapter is up */
58728532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
58738532f613SOng Boon Leong 		return IRQ_HANDLED;
58748532f613SOng Boon Leong 
58758532f613SOng Boon Leong 	/* Check if a fatal error happened */
58768532f613SOng Boon Leong 	stmmac_safety_feat_interrupt(priv);
58778532f613SOng Boon Leong 
58788532f613SOng Boon Leong 	return IRQ_HANDLED;
58798532f613SOng Boon Leong }
58808532f613SOng Boon Leong 
58818532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
58828532f613SOng Boon Leong {
58838532f613SOng Boon Leong 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
58848531c808SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
58858532f613SOng Boon Leong 	int chan = tx_q->queue_index;
58868532f613SOng Boon Leong 	struct stmmac_priv *priv;
58878532f613SOng Boon Leong 	int status;
58888532f613SOng Boon Leong 
58898531c808SChristian Marangi 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
58908531c808SChristian Marangi 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
58918532f613SOng Boon Leong 
58928532f613SOng Boon Leong 	if (unlikely(!data)) {
58938532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
58948532f613SOng Boon Leong 		return IRQ_NONE;
58958532f613SOng Boon Leong 	}
58968532f613SOng Boon Leong 
58978532f613SOng Boon Leong 	/* Check if adapter is up */
58988532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
58998532f613SOng Boon Leong 		return IRQ_HANDLED;
59008532f613SOng Boon Leong 
59018532f613SOng Boon Leong 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
59028532f613SOng Boon Leong 
59038532f613SOng Boon Leong 	if (unlikely(status & tx_hard_error_bump_tc)) {
59048532f613SOng Boon Leong 		/* Try to bump up the dma threshold on this failure */
59053a6c12a0SXiaoliang Yang 		stmmac_bump_dma_threshold(priv, chan);
59068532f613SOng Boon Leong 	} else if (unlikely(status == tx_hard_error)) {
59078532f613SOng Boon Leong 		stmmac_tx_err(priv, chan);
59088532f613SOng Boon Leong 	}
59098532f613SOng Boon Leong 
59108532f613SOng Boon Leong 	return IRQ_HANDLED;
59118532f613SOng Boon Leong }
59128532f613SOng Boon Leong 
59138532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
59148532f613SOng Boon Leong {
59158532f613SOng Boon Leong 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
59168531c808SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
59178532f613SOng Boon Leong 	int chan = rx_q->queue_index;
59188532f613SOng Boon Leong 	struct stmmac_priv *priv;
59198532f613SOng Boon Leong 
59208531c808SChristian Marangi 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
59218531c808SChristian Marangi 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
59228532f613SOng Boon Leong 
59238532f613SOng Boon Leong 	if (unlikely(!data)) {
59248532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
59258532f613SOng Boon Leong 		return IRQ_NONE;
59268532f613SOng Boon Leong 	}
59278532f613SOng Boon Leong 
59288532f613SOng Boon Leong 	/* Check if adapter is up */
59298532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
59308532f613SOng Boon Leong 		return IRQ_HANDLED;
59318532f613SOng Boon Leong 
59328532f613SOng Boon Leong 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
59338532f613SOng Boon Leong 
59348532f613SOng Boon Leong 	return IRQ_HANDLED;
59358532f613SOng Boon Leong }
59368532f613SOng Boon Leong 
59377ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
59387ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
5939ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
5940ceb69499SGiuseppe CAVALLARO  */
59417ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
59427ac6653aSJeff Kirsher {
59438532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
59448532f613SOng Boon Leong 	int i;
59458532f613SOng Boon Leong 
59468532f613SOng Boon Leong 	/* If adapter is down, do nothing */
59478532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
59488532f613SOng Boon Leong 		return;
59498532f613SOng Boon Leong 
59508532f613SOng Boon Leong 	if (priv->plat->multi_msi_en) {
59518532f613SOng Boon Leong 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
59528531c808SChristian Marangi 			stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
59538532f613SOng Boon Leong 
59548532f613SOng Boon Leong 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
59558531c808SChristian Marangi 			stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
59568532f613SOng Boon Leong 	} else {
59577ac6653aSJeff Kirsher 		disable_irq(dev->irq);
59587ac6653aSJeff Kirsher 		stmmac_interrupt(dev->irq, dev);
59597ac6653aSJeff Kirsher 		enable_irq(dev->irq);
59607ac6653aSJeff Kirsher 	}
59618532f613SOng Boon Leong }
59627ac6653aSJeff Kirsher #endif
59637ac6653aSJeff Kirsher 
59647ac6653aSJeff Kirsher /**
59657ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
59667ac6653aSJeff Kirsher  *  @dev: Device pointer.
59677ac6653aSJeff Kirsher  *  @rq: An IOCTL specefic structure, that can contain a pointer to
59687ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
59697ac6653aSJeff Kirsher  *  @cmd: IOCTL command
59707ac6653aSJeff Kirsher  *  Description:
597132ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
59727ac6653aSJeff Kirsher  */
59737ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
59747ac6653aSJeff Kirsher {
597574371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv (dev);
5976891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
59777ac6653aSJeff Kirsher 
59787ac6653aSJeff Kirsher 	if (!netif_running(dev))
59797ac6653aSJeff Kirsher 		return -EINVAL;
59807ac6653aSJeff Kirsher 
5981891434b1SRayagond Kokatanur 	switch (cmd) {
5982891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
5983891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
5984891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
598574371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5986891434b1SRayagond Kokatanur 		break;
5987891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
5988d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
5989d6228b7cSArtem Panfilov 		break;
5990d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
5991d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
5992891434b1SRayagond Kokatanur 		break;
5993891434b1SRayagond Kokatanur 	default:
5994891434b1SRayagond Kokatanur 		break;
5995891434b1SRayagond Kokatanur 	}
59967ac6653aSJeff Kirsher 
59977ac6653aSJeff Kirsher 	return ret;
59987ac6653aSJeff Kirsher }
59997ac6653aSJeff Kirsher 
60004dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
60014dbbe8ddSJose Abreu 				    void *cb_priv)
60024dbbe8ddSJose Abreu {
60034dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
60044dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
60054dbbe8ddSJose Abreu 
6006425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6007425eabddSJose Abreu 		return ret;
6008425eabddSJose Abreu 
6009bba2556eSOng Boon Leong 	__stmmac_disable_all_queues(priv);
60104dbbe8ddSJose Abreu 
60114dbbe8ddSJose Abreu 	switch (type) {
60124dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
60134dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
60144dbbe8ddSJose Abreu 		break;
6015425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
6016425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6017425eabddSJose Abreu 		break;
60184dbbe8ddSJose Abreu 	default:
60194dbbe8ddSJose Abreu 		break;
60204dbbe8ddSJose Abreu 	}
60214dbbe8ddSJose Abreu 
60224dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
60234dbbe8ddSJose Abreu 	return ret;
60244dbbe8ddSJose Abreu }
60254dbbe8ddSJose Abreu 
6026955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
6027955bcb6eSPablo Neira Ayuso 
60284dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
60294dbbe8ddSJose Abreu 			   void *type_data)
60304dbbe8ddSJose Abreu {
60314dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
60324dbbe8ddSJose Abreu 
60334dbbe8ddSJose Abreu 	switch (type) {
6034522d15eaSVladimir Oltean 	case TC_QUERY_CAPS:
6035522d15eaSVladimir Oltean 		return stmmac_tc_query_caps(priv, priv, type_data);
60364dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
6037955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
6038955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
60394e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
60404e95bc26SPablo Neira Ayuso 						  priv, priv, true);
60411f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
60421f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6043b60189e0SJose Abreu 	case TC_SETUP_QDISC_TAPRIO:
6044b60189e0SJose Abreu 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6045430b383cSJose Abreu 	case TC_SETUP_QDISC_ETF:
6046430b383cSJose Abreu 		return stmmac_tc_setup_etf(priv, priv, type_data);
60474dbbe8ddSJose Abreu 	default:
60484dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
60494dbbe8ddSJose Abreu 	}
60504dbbe8ddSJose Abreu }
60514dbbe8ddSJose Abreu 
60524993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
60534993e5b3SJose Abreu 			       struct net_device *sb_dev)
60544993e5b3SJose Abreu {
6055b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
6056b7766206SJose Abreu 
6057b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
60584993e5b3SJose Abreu 		/*
6059b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
60604993e5b3SJose Abreu 		 * capable Queues. Let's use always the Queue 0
6061b7766206SJose Abreu 		 * because if TSO/USO is supported then at least this
60624993e5b3SJose Abreu 		 * one will be capable.
60634993e5b3SJose Abreu 		 */
60644993e5b3SJose Abreu 		return 0;
60654993e5b3SJose Abreu 	}
60664993e5b3SJose Abreu 
60674993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
60684993e5b3SJose Abreu }
60694993e5b3SJose Abreu 
6070a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6071a830405eSBhadram Varka {
6072a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
6073a830405eSBhadram Varka 	int ret = 0;
6074a830405eSBhadram Varka 
607585648865SMinghao Chi 	ret = pm_runtime_resume_and_get(priv->device);
607685648865SMinghao Chi 	if (ret < 0)
60774691ffb1SJoakim Zhang 		return ret;
60784691ffb1SJoakim Zhang 
6079a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
6080a830405eSBhadram Varka 	if (ret)
60814691ffb1SJoakim Zhang 		goto set_mac_error;
6082a830405eSBhadram Varka 
6083c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6084a830405eSBhadram Varka 
60854691ffb1SJoakim Zhang set_mac_error:
60864691ffb1SJoakim Zhang 	pm_runtime_put(priv->device);
60874691ffb1SJoakim Zhang 
6088a830405eSBhadram Varka 	return ret;
6089a830405eSBhadram Varka }
6090a830405eSBhadram Varka 
609150fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
60927ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
60937ac29055SGiuseppe CAVALLARO 
6094c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
6095bfaf91caSJoakim Zhang 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
60967ac29055SGiuseppe CAVALLARO {
60977ac29055SGiuseppe CAVALLARO 	int i;
6098c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6099c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
6100bfaf91caSJoakim Zhang 	dma_addr_t dma_addr;
61017ac29055SGiuseppe CAVALLARO 
6102c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
6103c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
6104bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6105bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6106bfaf91caSJoakim Zhang 				   i, &dma_addr,
6107f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
6108f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
6109f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
6110f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
6111c24602efSGiuseppe CAVALLARO 			ep++;
6112c24602efSGiuseppe CAVALLARO 		} else {
6113bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*p);
6114bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6115bfaf91caSJoakim Zhang 				   i, &dma_addr,
6116f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6117f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6118c24602efSGiuseppe CAVALLARO 			p++;
6119c24602efSGiuseppe CAVALLARO 		}
61207ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
61217ac29055SGiuseppe CAVALLARO 	}
6122c24602efSGiuseppe CAVALLARO }
61237ac29055SGiuseppe CAVALLARO 
6124fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6125c24602efSGiuseppe CAVALLARO {
6126c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
6127c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
612854139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
6129ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
613054139cf3SJoao Pinto 	u32 queue;
613154139cf3SJoao Pinto 
61325f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
61335f2b8b62SThierry Reding 		return 0;
61345f2b8b62SThierry Reding 
613554139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
61368531c808SChristian Marangi 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
613754139cf3SJoao Pinto 
613854139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
61397ac29055SGiuseppe CAVALLARO 
6140c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
614154139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
614254139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
61438531c808SChristian Marangi 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
614454139cf3SJoao Pinto 		} else {
614554139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
614654139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
61478531c808SChristian Marangi 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
614854139cf3SJoao Pinto 		}
614954139cf3SJoao Pinto 	}
615054139cf3SJoao Pinto 
6151ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
61528531c808SChristian Marangi 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6153ce736788SJoao Pinto 
6154ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
6155ce736788SJoao Pinto 
615654139cf3SJoao Pinto 		if (priv->extend_desc) {
6157ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
6158ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
61598531c808SChristian Marangi 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6160579a25a8SJose Abreu 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6161ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
6162ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
61638531c808SChristian Marangi 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6164ce736788SJoao Pinto 		}
61657ac29055SGiuseppe CAVALLARO 	}
61667ac29055SGiuseppe CAVALLARO 
61677ac29055SGiuseppe CAVALLARO 	return 0;
61687ac29055SGiuseppe CAVALLARO }
6169fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
61707ac29055SGiuseppe CAVALLARO 
6171fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6172e7434821SGiuseppe CAVALLARO {
6173e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
6174e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
6175e7434821SGiuseppe CAVALLARO 
617619e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
6177e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
6178e7434821SGiuseppe CAVALLARO 		return 0;
6179e7434821SGiuseppe CAVALLARO 	}
6180e7434821SGiuseppe CAVALLARO 
6181e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
6182e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
6183e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
6184e7434821SGiuseppe CAVALLARO 
618522d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6186e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
618722d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
6188e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
618922d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
6190e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6191e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
6192e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
6193e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6194e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
61958d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6196e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
6197e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6198e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6199e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6200e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6201e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6202e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6203e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
6204e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
6205e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6206e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6207e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6208e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
620922d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6210e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
6211e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6212e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6213e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6214f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6215f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6216f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6217f748be53SAlexandre TORGUE 	} else {
6218e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6219e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6220e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6221e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6222f748be53SAlexandre TORGUE 	}
6223e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6224e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6225e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6226e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
6227e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6228e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
62297d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
62307d0b447aSJose Abreu 		   priv->dma_cap.number_rx_queues);
62317d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
62327d0b447aSJose Abreu 		   priv->dma_cap.number_tx_queues);
6233e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6234e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
62357d0b447aSJose Abreu 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
62367d0b447aSJose Abreu 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
62377d0b447aSJose Abreu 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
62387d0b447aSJose Abreu 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
62397d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
62407d0b447aSJose Abreu 		   priv->dma_cap.pps_out_num);
62417d0b447aSJose Abreu 	seq_printf(seq, "\tSafety Features: %s\n",
62427d0b447aSJose Abreu 		   priv->dma_cap.asp ? "Y" : "N");
62437d0b447aSJose Abreu 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
62447d0b447aSJose Abreu 		   priv->dma_cap.frpsel ? "Y" : "N");
62457d0b447aSJose Abreu 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6246070246e4SJochen Henneberg 		   priv->dma_cap.host_dma_width);
62477d0b447aSJose Abreu 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
62487d0b447aSJose Abreu 		   priv->dma_cap.rssen ? "Y" : "N");
62497d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
62507d0b447aSJose Abreu 		   priv->dma_cap.vlhash ? "Y" : "N");
62517d0b447aSJose Abreu 	seq_printf(seq, "\tSplit Header: %s\n",
62527d0b447aSJose Abreu 		   priv->dma_cap.sphen ? "Y" : "N");
62537d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
62547d0b447aSJose Abreu 		   priv->dma_cap.vlins ? "Y" : "N");
62557d0b447aSJose Abreu 	seq_printf(seq, "\tDouble VLAN: %s\n",
62567d0b447aSJose Abreu 		   priv->dma_cap.dvlan ? "Y" : "N");
62577d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
62587d0b447aSJose Abreu 		   priv->dma_cap.l3l4fnum);
62597d0b447aSJose Abreu 	seq_printf(seq, "\tARP Offloading: %s\n",
62607d0b447aSJose Abreu 		   priv->dma_cap.arpoffsel ? "Y" : "N");
626144e65475SJose Abreu 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
626244e65475SJose Abreu 		   priv->dma_cap.estsel ? "Y" : "N");
626344e65475SJose Abreu 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
626444e65475SJose Abreu 		   priv->dma_cap.fpesel ? "Y" : "N");
626544e65475SJose Abreu 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
626644e65475SJose Abreu 		   priv->dma_cap.tbssel ? "Y" : "N");
6267e7434821SGiuseppe CAVALLARO 	return 0;
6268e7434821SGiuseppe CAVALLARO }
6269fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6270e7434821SGiuseppe CAVALLARO 
6271481a7d15SJiping Ma /* Use network device events to rename debugfs file entries.
6272481a7d15SJiping Ma  */
6273481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused,
6274481a7d15SJiping Ma 			       unsigned long event, void *ptr)
6275481a7d15SJiping Ma {
6276481a7d15SJiping Ma 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6277481a7d15SJiping Ma 	struct stmmac_priv *priv = netdev_priv(dev);
6278481a7d15SJiping Ma 
6279481a7d15SJiping Ma 	if (dev->netdev_ops != &stmmac_netdev_ops)
6280481a7d15SJiping Ma 		goto done;
6281481a7d15SJiping Ma 
6282481a7d15SJiping Ma 	switch (event) {
6283481a7d15SJiping Ma 	case NETDEV_CHANGENAME:
6284481a7d15SJiping Ma 		if (priv->dbgfs_dir)
6285481a7d15SJiping Ma 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6286481a7d15SJiping Ma 							 priv->dbgfs_dir,
6287481a7d15SJiping Ma 							 stmmac_fs_dir,
6288481a7d15SJiping Ma 							 dev->name);
6289481a7d15SJiping Ma 		break;
6290481a7d15SJiping Ma 	}
6291481a7d15SJiping Ma done:
6292481a7d15SJiping Ma 	return NOTIFY_DONE;
6293481a7d15SJiping Ma }
6294481a7d15SJiping Ma 
6295481a7d15SJiping Ma static struct notifier_block stmmac_notifier = {
6296481a7d15SJiping Ma 	.notifier_call = stmmac_device_event,
6297481a7d15SJiping Ma };
6298481a7d15SJiping Ma 
62998d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev)
63007ac29055SGiuseppe CAVALLARO {
6301466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
63027ac29055SGiuseppe CAVALLARO 
6303474a31e1SAaro Koskinen 	rtnl_lock();
6304474a31e1SAaro Koskinen 
6305466c5ac8SMathieu Olivari 	/* Create per netdev entries */
6306466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6307466c5ac8SMathieu Olivari 
63087ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
63098d72ab11SGreg Kroah-Hartman 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
63107ac29055SGiuseppe CAVALLARO 			    &stmmac_rings_status_fops);
63117ac29055SGiuseppe CAVALLARO 
6312e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
63138d72ab11SGreg Kroah-Hartman 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
63148d72ab11SGreg Kroah-Hartman 			    &stmmac_dma_cap_fops);
6315481a7d15SJiping Ma 
6316474a31e1SAaro Koskinen 	rtnl_unlock();
63177ac29055SGiuseppe CAVALLARO }
63187ac29055SGiuseppe CAVALLARO 
6319466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
63207ac29055SGiuseppe CAVALLARO {
6321466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
6322466c5ac8SMathieu Olivari 
6323466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
63247ac29055SGiuseppe CAVALLARO }
632550fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
63267ac29055SGiuseppe CAVALLARO 
63273cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
63283cd1cfcbSJose Abreu {
63293cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
63303cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
63313cd1cfcbSJose Abreu 	u32 crc = ~0x0;
63323cd1cfcbSJose Abreu 	u32 temp = 0;
63333cd1cfcbSJose Abreu 	int i, bits;
63343cd1cfcbSJose Abreu 
63353cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
63363cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
63373cd1cfcbSJose Abreu 		if ((i % 8) == 0)
63383cd1cfcbSJose Abreu 			data_byte = data[i / 8];
63393cd1cfcbSJose Abreu 
63403cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
63413cd1cfcbSJose Abreu 		crc >>= 1;
63423cd1cfcbSJose Abreu 		data_byte >>= 1;
63433cd1cfcbSJose Abreu 
63443cd1cfcbSJose Abreu 		if (temp)
63453cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
63463cd1cfcbSJose Abreu 	}
63473cd1cfcbSJose Abreu 
63483cd1cfcbSJose Abreu 	return crc;
63493cd1cfcbSJose Abreu }
63503cd1cfcbSJose Abreu 
63513cd1cfcbSJose Abreu static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
63523cd1cfcbSJose Abreu {
63533cd1cfcbSJose Abreu 	u32 crc, hash = 0;
6354a24cae70SJose Abreu 	__le16 pmatch = 0;
6355c7ab0b80SJose Abreu 	int count = 0;
6356c7ab0b80SJose Abreu 	u16 vid = 0;
63573cd1cfcbSJose Abreu 
63583cd1cfcbSJose Abreu 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
63593cd1cfcbSJose Abreu 		__le16 vid_le = cpu_to_le16(vid);
63603cd1cfcbSJose Abreu 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
63613cd1cfcbSJose Abreu 		hash |= (1 << crc);
6362c7ab0b80SJose Abreu 		count++;
63633cd1cfcbSJose Abreu 	}
63643cd1cfcbSJose Abreu 
6365c7ab0b80SJose Abreu 	if (!priv->dma_cap.vlhash) {
6366c7ab0b80SJose Abreu 		if (count > 2) /* VID = 0 always passes filter */
6367c7ab0b80SJose Abreu 			return -EOPNOTSUPP;
6368c7ab0b80SJose Abreu 
6369a24cae70SJose Abreu 		pmatch = cpu_to_le16(vid);
6370c7ab0b80SJose Abreu 		hash = 0;
6371c7ab0b80SJose Abreu 	}
6372c7ab0b80SJose Abreu 
6373a24cae70SJose Abreu 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
63743cd1cfcbSJose Abreu }
63753cd1cfcbSJose Abreu 
63763cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
63773cd1cfcbSJose Abreu {
63783cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
63793cd1cfcbSJose Abreu 	bool is_double = false;
63803cd1cfcbSJose Abreu 	int ret;
63813cd1cfcbSJose Abreu 
638235226750SYan Wang 	ret = pm_runtime_resume_and_get(priv->device);
638335226750SYan Wang 	if (ret < 0)
638435226750SYan Wang 		return ret;
638535226750SYan Wang 
63863cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
63873cd1cfcbSJose Abreu 		is_double = true;
63883cd1cfcbSJose Abreu 
63893cd1cfcbSJose Abreu 	set_bit(vid, priv->active_vlans);
63903cd1cfcbSJose Abreu 	ret = stmmac_vlan_update(priv, is_double);
63913cd1cfcbSJose Abreu 	if (ret) {
63923cd1cfcbSJose Abreu 		clear_bit(vid, priv->active_vlans);
639335226750SYan Wang 		goto err_pm_put;
63943cd1cfcbSJose Abreu 	}
63953cd1cfcbSJose Abreu 
6396dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
6397ed64639bSWong Vee Khee 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6398dd6a4998SJose Abreu 		if (ret)
639935226750SYan Wang 			goto err_pm_put;
64003cd1cfcbSJose Abreu 	}
640135226750SYan Wang err_pm_put:
640235226750SYan Wang 	pm_runtime_put(priv->device);
64033cd1cfcbSJose Abreu 
640435226750SYan Wang 	return ret;
6405dd6a4998SJose Abreu }
6406dd6a4998SJose Abreu 
64073cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
64083cd1cfcbSJose Abreu {
64093cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
64103cd1cfcbSJose Abreu 	bool is_double = false;
6411ed64639bSWong Vee Khee 	int ret;
64123cd1cfcbSJose Abreu 
641385648865SMinghao Chi 	ret = pm_runtime_resume_and_get(priv->device);
641485648865SMinghao Chi 	if (ret < 0)
6415b3dcb312SJoakim Zhang 		return ret;
6416b3dcb312SJoakim Zhang 
64173cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
64183cd1cfcbSJose Abreu 		is_double = true;
64193cd1cfcbSJose Abreu 
64203cd1cfcbSJose Abreu 	clear_bit(vid, priv->active_vlans);
6421dd6a4998SJose Abreu 
6422dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
6423ed64639bSWong Vee Khee 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6424ed64639bSWong Vee Khee 		if (ret)
64255ec55823SJoakim Zhang 			goto del_vlan_error;
6426dd6a4998SJose Abreu 	}
6427ed64639bSWong Vee Khee 
64285ec55823SJoakim Zhang 	ret = stmmac_vlan_update(priv, is_double);
64295ec55823SJoakim Zhang 
64305ec55823SJoakim Zhang del_vlan_error:
64315ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
64325ec55823SJoakim Zhang 
64335ec55823SJoakim Zhang 	return ret;
64343cd1cfcbSJose Abreu }
64353cd1cfcbSJose Abreu 
64365fabb012SOng Boon Leong static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
64375fabb012SOng Boon Leong {
64385fabb012SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
64395fabb012SOng Boon Leong 
64405fabb012SOng Boon Leong 	switch (bpf->command) {
64415fabb012SOng Boon Leong 	case XDP_SETUP_PROG:
64425fabb012SOng Boon Leong 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6443bba2556eSOng Boon Leong 	case XDP_SETUP_XSK_POOL:
6444bba2556eSOng Boon Leong 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6445bba2556eSOng Boon Leong 					     bpf->xsk.queue_id);
64465fabb012SOng Boon Leong 	default:
64475fabb012SOng Boon Leong 		return -EOPNOTSUPP;
64485fabb012SOng Boon Leong 	}
64495fabb012SOng Boon Leong }
64505fabb012SOng Boon Leong 
64518b278a5bSOng Boon Leong static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
64528b278a5bSOng Boon Leong 			   struct xdp_frame **frames, u32 flags)
64538b278a5bSOng Boon Leong {
64548b278a5bSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
64558b278a5bSOng Boon Leong 	int cpu = smp_processor_id();
64568b278a5bSOng Boon Leong 	struct netdev_queue *nq;
64578b278a5bSOng Boon Leong 	int i, nxmit = 0;
64588b278a5bSOng Boon Leong 	int queue;
64598b278a5bSOng Boon Leong 
64608b278a5bSOng Boon Leong 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
64618b278a5bSOng Boon Leong 		return -ENETDOWN;
64628b278a5bSOng Boon Leong 
64638b278a5bSOng Boon Leong 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
64648b278a5bSOng Boon Leong 		return -EINVAL;
64658b278a5bSOng Boon Leong 
64668b278a5bSOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
64678b278a5bSOng Boon Leong 	nq = netdev_get_tx_queue(priv->dev, queue);
64688b278a5bSOng Boon Leong 
64698b278a5bSOng Boon Leong 	__netif_tx_lock(nq, cpu);
64708b278a5bSOng Boon Leong 	/* Avoids TX time-out as we are sharing with slow path */
64715337824fSEric Dumazet 	txq_trans_cond_update(nq);
64728b278a5bSOng Boon Leong 
64738b278a5bSOng Boon Leong 	for (i = 0; i < num_frames; i++) {
64748b278a5bSOng Boon Leong 		int res;
64758b278a5bSOng Boon Leong 
64768b278a5bSOng Boon Leong 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
64778b278a5bSOng Boon Leong 		if (res == STMMAC_XDP_CONSUMED)
64788b278a5bSOng Boon Leong 			break;
64798b278a5bSOng Boon Leong 
64808b278a5bSOng Boon Leong 		nxmit++;
64818b278a5bSOng Boon Leong 	}
64828b278a5bSOng Boon Leong 
64838b278a5bSOng Boon Leong 	if (flags & XDP_XMIT_FLUSH) {
64848b278a5bSOng Boon Leong 		stmmac_flush_tx_descriptors(priv, queue);
64858b278a5bSOng Boon Leong 		stmmac_tx_timer_arm(priv, queue);
64868b278a5bSOng Boon Leong 	}
64878b278a5bSOng Boon Leong 
64888b278a5bSOng Boon Leong 	__netif_tx_unlock(nq);
64898b278a5bSOng Boon Leong 
64908b278a5bSOng Boon Leong 	return nxmit;
64918b278a5bSOng Boon Leong }
64928b278a5bSOng Boon Leong 
6493bba2556eSOng Boon Leong void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6494bba2556eSOng Boon Leong {
6495bba2556eSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
6496bba2556eSOng Boon Leong 	unsigned long flags;
6497bba2556eSOng Boon Leong 
6498bba2556eSOng Boon Leong 	spin_lock_irqsave(&ch->lock, flags);
6499bba2556eSOng Boon Leong 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6500bba2556eSOng Boon Leong 	spin_unlock_irqrestore(&ch->lock, flags);
6501bba2556eSOng Boon Leong 
6502bba2556eSOng Boon Leong 	stmmac_stop_rx_dma(priv, queue);
6503ba39b344SChristian Marangi 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6504bba2556eSOng Boon Leong }
6505bba2556eSOng Boon Leong 
6506bba2556eSOng Boon Leong void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6507bba2556eSOng Boon Leong {
65088531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6509bba2556eSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
6510bba2556eSOng Boon Leong 	unsigned long flags;
6511bba2556eSOng Boon Leong 	u32 buf_size;
6512bba2556eSOng Boon Leong 	int ret;
6513bba2556eSOng Boon Leong 
6514ba39b344SChristian Marangi 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6515bba2556eSOng Boon Leong 	if (ret) {
6516bba2556eSOng Boon Leong 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6517bba2556eSOng Boon Leong 		return;
6518bba2556eSOng Boon Leong 	}
6519bba2556eSOng Boon Leong 
6520ba39b344SChristian Marangi 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6521bba2556eSOng Boon Leong 	if (ret) {
6522ba39b344SChristian Marangi 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6523bba2556eSOng Boon Leong 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6524bba2556eSOng Boon Leong 		return;
6525bba2556eSOng Boon Leong 	}
6526bba2556eSOng Boon Leong 
6527f9ec5723SChristian Marangi 	stmmac_reset_rx_queue(priv, queue);
6528ba39b344SChristian Marangi 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6529bba2556eSOng Boon Leong 
6530bba2556eSOng Boon Leong 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6531bba2556eSOng Boon Leong 			    rx_q->dma_rx_phy, rx_q->queue_index);
6532bba2556eSOng Boon Leong 
6533bba2556eSOng Boon Leong 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6534bba2556eSOng Boon Leong 			     sizeof(struct dma_desc));
6535bba2556eSOng Boon Leong 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6536bba2556eSOng Boon Leong 			       rx_q->rx_tail_addr, rx_q->queue_index);
6537bba2556eSOng Boon Leong 
6538bba2556eSOng Boon Leong 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6539bba2556eSOng Boon Leong 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6540bba2556eSOng Boon Leong 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6541bba2556eSOng Boon Leong 				      buf_size,
6542bba2556eSOng Boon Leong 				      rx_q->queue_index);
6543bba2556eSOng Boon Leong 	} else {
6544bba2556eSOng Boon Leong 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
65458531c808SChristian Marangi 				      priv->dma_conf.dma_buf_sz,
6546bba2556eSOng Boon Leong 				      rx_q->queue_index);
6547bba2556eSOng Boon Leong 	}
6548bba2556eSOng Boon Leong 
6549bba2556eSOng Boon Leong 	stmmac_start_rx_dma(priv, queue);
6550bba2556eSOng Boon Leong 
6551bba2556eSOng Boon Leong 	spin_lock_irqsave(&ch->lock, flags);
6552bba2556eSOng Boon Leong 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6553bba2556eSOng Boon Leong 	spin_unlock_irqrestore(&ch->lock, flags);
6554bba2556eSOng Boon Leong }
6555bba2556eSOng Boon Leong 
6556132c32eeSOng Boon Leong void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6557132c32eeSOng Boon Leong {
6558132c32eeSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
6559132c32eeSOng Boon Leong 	unsigned long flags;
6560132c32eeSOng Boon Leong 
6561132c32eeSOng Boon Leong 	spin_lock_irqsave(&ch->lock, flags);
6562132c32eeSOng Boon Leong 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6563132c32eeSOng Boon Leong 	spin_unlock_irqrestore(&ch->lock, flags);
6564132c32eeSOng Boon Leong 
6565132c32eeSOng Boon Leong 	stmmac_stop_tx_dma(priv, queue);
6566ba39b344SChristian Marangi 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6567132c32eeSOng Boon Leong }
6568132c32eeSOng Boon Leong 
6569132c32eeSOng Boon Leong void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6570132c32eeSOng Boon Leong {
65718531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6572132c32eeSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
6573132c32eeSOng Boon Leong 	unsigned long flags;
6574132c32eeSOng Boon Leong 	int ret;
6575132c32eeSOng Boon Leong 
6576ba39b344SChristian Marangi 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6577132c32eeSOng Boon Leong 	if (ret) {
6578132c32eeSOng Boon Leong 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6579132c32eeSOng Boon Leong 		return;
6580132c32eeSOng Boon Leong 	}
6581132c32eeSOng Boon Leong 
6582ba39b344SChristian Marangi 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6583132c32eeSOng Boon Leong 	if (ret) {
6584ba39b344SChristian Marangi 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6585132c32eeSOng Boon Leong 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6586132c32eeSOng Boon Leong 		return;
6587132c32eeSOng Boon Leong 	}
6588132c32eeSOng Boon Leong 
6589f9ec5723SChristian Marangi 	stmmac_reset_tx_queue(priv, queue);
6590ba39b344SChristian Marangi 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6591132c32eeSOng Boon Leong 
6592132c32eeSOng Boon Leong 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6593132c32eeSOng Boon Leong 			    tx_q->dma_tx_phy, tx_q->queue_index);
6594132c32eeSOng Boon Leong 
6595132c32eeSOng Boon Leong 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6596132c32eeSOng Boon Leong 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6597132c32eeSOng Boon Leong 
6598132c32eeSOng Boon Leong 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6599132c32eeSOng Boon Leong 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6600132c32eeSOng Boon Leong 			       tx_q->tx_tail_addr, tx_q->queue_index);
6601132c32eeSOng Boon Leong 
6602132c32eeSOng Boon Leong 	stmmac_start_tx_dma(priv, queue);
6603132c32eeSOng Boon Leong 
6604132c32eeSOng Boon Leong 	spin_lock_irqsave(&ch->lock, flags);
6605132c32eeSOng Boon Leong 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6606132c32eeSOng Boon Leong 	spin_unlock_irqrestore(&ch->lock, flags);
6607132c32eeSOng Boon Leong }
6608132c32eeSOng Boon Leong 
6609ac746c85SOng Boon Leong void stmmac_xdp_release(struct net_device *dev)
6610ac746c85SOng Boon Leong {
6611ac746c85SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6612ac746c85SOng Boon Leong 	u32 chan;
6613ac746c85SOng Boon Leong 
661477711683SMohd Faizal Abdul Rahim 	/* Ensure tx function is not running */
661577711683SMohd Faizal Abdul Rahim 	netif_tx_disable(dev);
661677711683SMohd Faizal Abdul Rahim 
6617ac746c85SOng Boon Leong 	/* Disable NAPI process */
6618ac746c85SOng Boon Leong 	stmmac_disable_all_queues(priv);
6619ac746c85SOng Boon Leong 
6620ac746c85SOng Boon Leong 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
66218531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6622ac746c85SOng Boon Leong 
6623ac746c85SOng Boon Leong 	/* Free the IRQ lines */
6624ac746c85SOng Boon Leong 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6625ac746c85SOng Boon Leong 
6626ac746c85SOng Boon Leong 	/* Stop TX/RX DMA channels */
6627ac746c85SOng Boon Leong 	stmmac_stop_all_dma(priv);
6628ac746c85SOng Boon Leong 
6629ac746c85SOng Boon Leong 	/* Release and free the Rx/Tx resources */
6630ba39b344SChristian Marangi 	free_dma_desc_resources(priv, &priv->dma_conf);
6631ac746c85SOng Boon Leong 
6632ac746c85SOng Boon Leong 	/* Disable the MAC Rx/Tx */
6633ac746c85SOng Boon Leong 	stmmac_mac_set(priv, priv->ioaddr, false);
6634ac746c85SOng Boon Leong 
6635ac746c85SOng Boon Leong 	/* set trans_start so we don't get spurious
6636ac746c85SOng Boon Leong 	 * watchdogs during reset
6637ac746c85SOng Boon Leong 	 */
6638ac746c85SOng Boon Leong 	netif_trans_update(dev);
6639ac746c85SOng Boon Leong 	netif_carrier_off(dev);
6640ac746c85SOng Boon Leong }
6641ac746c85SOng Boon Leong 
6642ac746c85SOng Boon Leong int stmmac_xdp_open(struct net_device *dev)
6643ac746c85SOng Boon Leong {
6644ac746c85SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6645ac746c85SOng Boon Leong 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6646ac746c85SOng Boon Leong 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6647ac746c85SOng Boon Leong 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6648ac746c85SOng Boon Leong 	struct stmmac_rx_queue *rx_q;
6649ac746c85SOng Boon Leong 	struct stmmac_tx_queue *tx_q;
6650ac746c85SOng Boon Leong 	u32 buf_size;
6651ac746c85SOng Boon Leong 	bool sph_en;
6652ac746c85SOng Boon Leong 	u32 chan;
6653ac746c85SOng Boon Leong 	int ret;
6654ac746c85SOng Boon Leong 
6655ba39b344SChristian Marangi 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6656ac746c85SOng Boon Leong 	if (ret < 0) {
6657ac746c85SOng Boon Leong 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6658ac746c85SOng Boon Leong 			   __func__);
6659ac746c85SOng Boon Leong 		goto dma_desc_error;
6660ac746c85SOng Boon Leong 	}
6661ac746c85SOng Boon Leong 
6662ba39b344SChristian Marangi 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6663ac746c85SOng Boon Leong 	if (ret < 0) {
6664ac746c85SOng Boon Leong 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6665ac746c85SOng Boon Leong 			   __func__);
6666ac746c85SOng Boon Leong 		goto init_error;
6667ac746c85SOng Boon Leong 	}
6668ac746c85SOng Boon Leong 
666924e3fce0SSong Yoong Siang 	stmmac_reset_queues_param(priv);
667024e3fce0SSong Yoong Siang 
6671ac746c85SOng Boon Leong 	/* DMA CSR Channel configuration */
6672087a7b94SVincent Whitchurch 	for (chan = 0; chan < dma_csr_ch; chan++) {
6673ac746c85SOng Boon Leong 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6674087a7b94SVincent Whitchurch 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6675087a7b94SVincent Whitchurch 	}
6676ac746c85SOng Boon Leong 
6677ac746c85SOng Boon Leong 	/* Adjust Split header */
6678ac746c85SOng Boon Leong 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6679ac746c85SOng Boon Leong 
6680ac746c85SOng Boon Leong 	/* DMA RX Channel Configuration */
6681ac746c85SOng Boon Leong 	for (chan = 0; chan < rx_cnt; chan++) {
66828531c808SChristian Marangi 		rx_q = &priv->dma_conf.rx_queue[chan];
6683ac746c85SOng Boon Leong 
6684ac746c85SOng Boon Leong 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6685ac746c85SOng Boon Leong 				    rx_q->dma_rx_phy, chan);
6686ac746c85SOng Boon Leong 
6687ac746c85SOng Boon Leong 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6688ac746c85SOng Boon Leong 				     (rx_q->buf_alloc_num *
6689ac746c85SOng Boon Leong 				      sizeof(struct dma_desc));
6690ac746c85SOng Boon Leong 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6691ac746c85SOng Boon Leong 				       rx_q->rx_tail_addr, chan);
6692ac746c85SOng Boon Leong 
6693ac746c85SOng Boon Leong 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6694ac746c85SOng Boon Leong 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6695ac746c85SOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6696ac746c85SOng Boon Leong 					      buf_size,
6697ac746c85SOng Boon Leong 					      rx_q->queue_index);
6698ac746c85SOng Boon Leong 		} else {
6699ac746c85SOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
67008531c808SChristian Marangi 					      priv->dma_conf.dma_buf_sz,
6701ac746c85SOng Boon Leong 					      rx_q->queue_index);
6702ac746c85SOng Boon Leong 		}
6703ac746c85SOng Boon Leong 
6704ac746c85SOng Boon Leong 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6705ac746c85SOng Boon Leong 	}
6706ac746c85SOng Boon Leong 
6707ac746c85SOng Boon Leong 	/* DMA TX Channel Configuration */
6708ac746c85SOng Boon Leong 	for (chan = 0; chan < tx_cnt; chan++) {
67098531c808SChristian Marangi 		tx_q = &priv->dma_conf.tx_queue[chan];
6710ac746c85SOng Boon Leong 
6711ac746c85SOng Boon Leong 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6712ac746c85SOng Boon Leong 				    tx_q->dma_tx_phy, chan);
6713ac746c85SOng Boon Leong 
6714ac746c85SOng Boon Leong 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6715ac746c85SOng Boon Leong 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6716ac746c85SOng Boon Leong 				       tx_q->tx_tail_addr, chan);
671761da6ac7SOng Boon Leong 
671861da6ac7SOng Boon Leong 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
671961da6ac7SOng Boon Leong 		tx_q->txtimer.function = stmmac_tx_timer;
6720ac746c85SOng Boon Leong 	}
6721ac746c85SOng Boon Leong 
6722ac746c85SOng Boon Leong 	/* Enable the MAC Rx/Tx */
6723ac746c85SOng Boon Leong 	stmmac_mac_set(priv, priv->ioaddr, true);
6724ac746c85SOng Boon Leong 
6725ac746c85SOng Boon Leong 	/* Start Rx & Tx DMA Channels */
6726ac746c85SOng Boon Leong 	stmmac_start_all_dma(priv);
6727ac746c85SOng Boon Leong 
6728ac746c85SOng Boon Leong 	ret = stmmac_request_irq(dev);
6729ac746c85SOng Boon Leong 	if (ret)
6730ac746c85SOng Boon Leong 		goto irq_error;
6731ac746c85SOng Boon Leong 
6732ac746c85SOng Boon Leong 	/* Enable NAPI process*/
6733ac746c85SOng Boon Leong 	stmmac_enable_all_queues(priv);
6734ac746c85SOng Boon Leong 	netif_carrier_on(dev);
6735ac746c85SOng Boon Leong 	netif_tx_start_all_queues(dev);
6736087a7b94SVincent Whitchurch 	stmmac_enable_all_dma_irq(priv);
6737ac746c85SOng Boon Leong 
6738ac746c85SOng Boon Leong 	return 0;
6739ac746c85SOng Boon Leong 
6740ac746c85SOng Boon Leong irq_error:
6741ac746c85SOng Boon Leong 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
67428531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6743ac746c85SOng Boon Leong 
6744ac746c85SOng Boon Leong 	stmmac_hw_teardown(dev);
6745ac746c85SOng Boon Leong init_error:
6746ba39b344SChristian Marangi 	free_dma_desc_resources(priv, &priv->dma_conf);
6747ac746c85SOng Boon Leong dma_desc_error:
6748ac746c85SOng Boon Leong 	return ret;
6749ac746c85SOng Boon Leong }
6750ac746c85SOng Boon Leong 
6751bba2556eSOng Boon Leong int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6752bba2556eSOng Boon Leong {
6753bba2556eSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6754bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q;
6755132c32eeSOng Boon Leong 	struct stmmac_tx_queue *tx_q;
6756bba2556eSOng Boon Leong 	struct stmmac_channel *ch;
6757bba2556eSOng Boon Leong 
6758bba2556eSOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6759bba2556eSOng Boon Leong 	    !netif_carrier_ok(priv->dev))
6760bba2556eSOng Boon Leong 		return -ENETDOWN;
6761bba2556eSOng Boon Leong 
6762bba2556eSOng Boon Leong 	if (!stmmac_xdp_is_enabled(priv))
6763a817ead4SMaciej Fijalkowski 		return -EINVAL;
6764bba2556eSOng Boon Leong 
6765132c32eeSOng Boon Leong 	if (queue >= priv->plat->rx_queues_to_use ||
6766132c32eeSOng Boon Leong 	    queue >= priv->plat->tx_queues_to_use)
6767bba2556eSOng Boon Leong 		return -EINVAL;
6768bba2556eSOng Boon Leong 
67698531c808SChristian Marangi 	rx_q = &priv->dma_conf.rx_queue[queue];
67708531c808SChristian Marangi 	tx_q = &priv->dma_conf.tx_queue[queue];
6771bba2556eSOng Boon Leong 	ch = &priv->channel[queue];
6772bba2556eSOng Boon Leong 
6773132c32eeSOng Boon Leong 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6774a817ead4SMaciej Fijalkowski 		return -EINVAL;
6775bba2556eSOng Boon Leong 
6776132c32eeSOng Boon Leong 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6777bba2556eSOng Boon Leong 		/* EQoS does not have per-DMA channel SW interrupt,
6778bba2556eSOng Boon Leong 		 * so we schedule RX Napi straight-away.
6779bba2556eSOng Boon Leong 		 */
6780132c32eeSOng Boon Leong 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6781132c32eeSOng Boon Leong 			__napi_schedule(&ch->rxtx_napi);
6782bba2556eSOng Boon Leong 	}
6783bba2556eSOng Boon Leong 
6784bba2556eSOng Boon Leong 	return 0;
6785bba2556eSOng Boon Leong }
6786bba2556eSOng Boon Leong 
67877ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
67887ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
67897ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
67907ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
67917ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
67927ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
6793d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
679401789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
67957ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
6796a7605370SArnd Bergmann 	.ndo_eth_ioctl = stmmac_ioctl,
67974dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
67984993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
67997ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
68007ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
68017ac6653aSJeff Kirsher #endif
6802a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
68033cd1cfcbSJose Abreu 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
68043cd1cfcbSJose Abreu 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
68055fabb012SOng Boon Leong 	.ndo_bpf = stmmac_bpf,
68068b278a5bSOng Boon Leong 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6807bba2556eSOng Boon Leong 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
68087ac6653aSJeff Kirsher };
68097ac6653aSJeff Kirsher 
681034877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
681134877a15SJose Abreu {
681234877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
681334877a15SJose Abreu 		return;
681434877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
681534877a15SJose Abreu 		return;
681634877a15SJose Abreu 
681734877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
681834877a15SJose Abreu 
681934877a15SJose Abreu 	rtnl_lock();
682034877a15SJose Abreu 	netif_trans_update(priv->dev);
682134877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
682234877a15SJose Abreu 		usleep_range(1000, 2000);
682334877a15SJose Abreu 
682434877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
682534877a15SJose Abreu 	dev_close(priv->dev);
682600f54e68SPetr Machata 	dev_open(priv->dev, NULL);
682734877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
682834877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
682934877a15SJose Abreu 	rtnl_unlock();
683034877a15SJose Abreu }
683134877a15SJose Abreu 
683234877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
683334877a15SJose Abreu {
683434877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
683534877a15SJose Abreu 			service_task);
683634877a15SJose Abreu 
683734877a15SJose Abreu 	stmmac_reset_subtask(priv);
683834877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
683934877a15SJose Abreu }
684034877a15SJose Abreu 
68417ac6653aSJeff Kirsher /**
6842cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
684332ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
6844732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
6845732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
6846732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to setup either enhanced or
6847732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
6848cf3f047bSGiuseppe CAVALLARO  */
6849cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
6850cf3f047bSGiuseppe CAVALLARO {
68515f0456b4SJose Abreu 	int ret;
6852cf3f047bSGiuseppe CAVALLARO 
68539f93ac8dSLABBE Corentin 	/* dwmac-sun8i only work in chain mode */
68549f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
68559f93ac8dSLABBE Corentin 		chain_mode = 1;
68565f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
68579f93ac8dSLABBE Corentin 
68585f0456b4SJose Abreu 	/* Initialize HW Interface */
68595f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
68605f0456b4SJose Abreu 	if (ret)
68615f0456b4SJose Abreu 		return ret;
68624a7d666aSGiuseppe CAVALLARO 
6863cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
6864cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
6865cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
686638ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
6867cf3f047bSGiuseppe CAVALLARO 
6868cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields: e.g.
6869cf3f047bSGiuseppe CAVALLARO 		 * enh_desc, tx_coe (e.g. that are passed through the
6870cf3f047bSGiuseppe CAVALLARO 		 * platform) with the values from the HW capability
6871cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
6872cf3f047bSGiuseppe CAVALLARO 		 */
6873cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
68745a9b876eSLing Pei Lee 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
68755a9b876eSLing Pei Lee 				!priv->plat->use_phy_wol;
68763fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
6877b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
6878b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
6879b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
6880b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
6881b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
6882b8ef7020SBiao Huang 		}
688338912bdbSDeepak SIKRI 
6884a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
6885a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
6886a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
6887a8df35d4SEzequiel Garcia 		else
688838912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
6889a8df35d4SEzequiel Garcia 
6890f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
6891f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
689238912bdbSDeepak SIKRI 
689338912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
689438912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
689538912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
689638912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
689738912bdbSDeepak SIKRI 
689838ddc59dSLABBE Corentin 	} else {
689938ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
690038ddc59dSLABBE Corentin 	}
6901cf3f047bSGiuseppe CAVALLARO 
6902d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
6903d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
690438ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6905f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
690638ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6907d2afb5bdSGiuseppe CAVALLARO 	}
6908cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
690938ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
6910cf3f047bSGiuseppe CAVALLARO 
6911cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
691238ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
6913cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
6914cf3f047bSGiuseppe CAVALLARO 	}
6915cf3f047bSGiuseppe CAVALLARO 
6916f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
691738ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
6918f748be53SAlexandre TORGUE 
6919e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6920e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6921e0f9956aSChuah, Kim Tatt 
69227cfde0afSJose Abreu 	/* Run HW quirks, if any */
69237cfde0afSJose Abreu 	if (priv->hwif_quirks) {
69247cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
69257cfde0afSJose Abreu 		if (ret)
69267cfde0afSJose Abreu 			return ret;
69277cfde0afSJose Abreu 	}
69287cfde0afSJose Abreu 
69293b509466SJose Abreu 	/* Rx Watchdog is available in the COREs newer than the 3.40.
69303b509466SJose Abreu 	 * In some case, for example on bugged HW this feature
69313b509466SJose Abreu 	 * has to be disable and this can be done by passing the
69323b509466SJose Abreu 	 * riwt_off field from the platform.
69333b509466SJose Abreu 	 */
69343b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
69353b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
69363b509466SJose Abreu 		priv->use_riwt = 1;
69373b509466SJose Abreu 		dev_info(priv->device,
69383b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
69393b509466SJose Abreu 	}
69403b509466SJose Abreu 
6941c24602efSGiuseppe CAVALLARO 	return 0;
6942cf3f047bSGiuseppe CAVALLARO }
6943cf3f047bSGiuseppe CAVALLARO 
69440366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev)
69450366f7e0SOng Boon Leong {
69460366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
69470366f7e0SOng Boon Leong 	u32 queue, maxq;
69480366f7e0SOng Boon Leong 
69490366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
69500366f7e0SOng Boon Leong 
69510366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
69520366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
69530366f7e0SOng Boon Leong 
69540366f7e0SOng Boon Leong 		ch->priv_data = priv;
69550366f7e0SOng Boon Leong 		ch->index = queue;
69562b94f526SMarek Szyprowski 		spin_lock_init(&ch->lock);
69570366f7e0SOng Boon Leong 
69580366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use) {
6959b48b89f9SJakub Kicinski 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
69600366f7e0SOng Boon Leong 		}
69610366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use) {
696216d083e2SJakub Kicinski 			netif_napi_add_tx(dev, &ch->tx_napi,
696316d083e2SJakub Kicinski 					  stmmac_napi_poll_tx);
69640366f7e0SOng Boon Leong 		}
6965132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
6966132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
6967132c32eeSOng Boon Leong 			netif_napi_add(dev, &ch->rxtx_napi,
6968b48b89f9SJakub Kicinski 				       stmmac_napi_poll_rxtx);
6969132c32eeSOng Boon Leong 		}
69700366f7e0SOng Boon Leong 	}
69710366f7e0SOng Boon Leong }
69720366f7e0SOng Boon Leong 
69730366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev)
69740366f7e0SOng Boon Leong {
69750366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
69760366f7e0SOng Boon Leong 	u32 queue, maxq;
69770366f7e0SOng Boon Leong 
69780366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
69790366f7e0SOng Boon Leong 
69800366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
69810366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
69820366f7e0SOng Boon Leong 
69830366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use)
69840366f7e0SOng Boon Leong 			netif_napi_del(&ch->rx_napi);
69850366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use)
69860366f7e0SOng Boon Leong 			netif_napi_del(&ch->tx_napi);
6987132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
6988132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
6989132c32eeSOng Boon Leong 			netif_napi_del(&ch->rxtx_napi);
6990132c32eeSOng Boon Leong 		}
69910366f7e0SOng Boon Leong 	}
69920366f7e0SOng Boon Leong }
69930366f7e0SOng Boon Leong 
69940366f7e0SOng Boon Leong int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
69950366f7e0SOng Boon Leong {
69960366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6997218c5973SCorinna Vinschen 	int ret = 0, i;
69980366f7e0SOng Boon Leong 
69990366f7e0SOng Boon Leong 	if (netif_running(dev))
70000366f7e0SOng Boon Leong 		stmmac_release(dev);
70010366f7e0SOng Boon Leong 
70020366f7e0SOng Boon Leong 	stmmac_napi_del(dev);
70030366f7e0SOng Boon Leong 
70040366f7e0SOng Boon Leong 	priv->plat->rx_queues_to_use = rx_cnt;
70050366f7e0SOng Boon Leong 	priv->plat->tx_queues_to_use = tx_cnt;
7006218c5973SCorinna Vinschen 	if (!netif_is_rxfh_configured(dev))
7007218c5973SCorinna Vinschen 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7008218c5973SCorinna Vinschen 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7009218c5973SCorinna Vinschen 									rx_cnt);
70100366f7e0SOng Boon Leong 
70110366f7e0SOng Boon Leong 	stmmac_napi_add(dev);
70120366f7e0SOng Boon Leong 
70130366f7e0SOng Boon Leong 	if (netif_running(dev))
70140366f7e0SOng Boon Leong 		ret = stmmac_open(dev);
70150366f7e0SOng Boon Leong 
70160366f7e0SOng Boon Leong 	return ret;
70170366f7e0SOng Boon Leong }
70180366f7e0SOng Boon Leong 
7019aa042f60SSong, Yoong Siang int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7020aa042f60SSong, Yoong Siang {
7021aa042f60SSong, Yoong Siang 	struct stmmac_priv *priv = netdev_priv(dev);
7022aa042f60SSong, Yoong Siang 	int ret = 0;
7023aa042f60SSong, Yoong Siang 
7024aa042f60SSong, Yoong Siang 	if (netif_running(dev))
7025aa042f60SSong, Yoong Siang 		stmmac_release(dev);
7026aa042f60SSong, Yoong Siang 
70278531c808SChristian Marangi 	priv->dma_conf.dma_rx_size = rx_size;
70288531c808SChristian Marangi 	priv->dma_conf.dma_tx_size = tx_size;
7029aa042f60SSong, Yoong Siang 
7030aa042f60SSong, Yoong Siang 	if (netif_running(dev))
7031aa042f60SSong, Yoong Siang 		ret = stmmac_open(dev);
7032aa042f60SSong, Yoong Siang 
7033aa042f60SSong, Yoong Siang 	return ret;
7034aa042f60SSong, Yoong Siang }
7035aa042f60SSong, Yoong Siang 
70365a558611SOng Boon Leong #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
70375a558611SOng Boon Leong static void stmmac_fpe_lp_task(struct work_struct *work)
70385a558611SOng Boon Leong {
70395a558611SOng Boon Leong 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
70405a558611SOng Boon Leong 						fpe_task);
70415a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
70425a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
70435a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
70445a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
70455a558611SOng Boon Leong 	bool *enable = &fpe_cfg->enable;
70465a558611SOng Boon Leong 	int retries = 20;
70475a558611SOng Boon Leong 
70485a558611SOng Boon Leong 	while (retries-- > 0) {
70495a558611SOng Boon Leong 		/* Bail out immediately if FPE handshake is OFF */
70505a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
70515a558611SOng Boon Leong 			break;
70525a558611SOng Boon Leong 
70535a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_ENTERING_ON &&
70545a558611SOng Boon Leong 		    *lp_state == FPE_STATE_ENTERING_ON) {
70555a558611SOng Boon Leong 			stmmac_fpe_configure(priv, priv->ioaddr,
70565a558611SOng Boon Leong 					     priv->plat->tx_queues_to_use,
70575a558611SOng Boon Leong 					     priv->plat->rx_queues_to_use,
70585a558611SOng Boon Leong 					     *enable);
70595a558611SOng Boon Leong 
70605a558611SOng Boon Leong 			netdev_info(priv->dev, "configured FPE\n");
70615a558611SOng Boon Leong 
70625a558611SOng Boon Leong 			*lo_state = FPE_STATE_ON;
70635a558611SOng Boon Leong 			*lp_state = FPE_STATE_ON;
70645a558611SOng Boon Leong 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
70655a558611SOng Boon Leong 			break;
70665a558611SOng Boon Leong 		}
70675a558611SOng Boon Leong 
70685a558611SOng Boon Leong 		if ((*lo_state == FPE_STATE_CAPABLE ||
70695a558611SOng Boon Leong 		     *lo_state == FPE_STATE_ENTERING_ON) &&
70705a558611SOng Boon Leong 		     *lp_state != FPE_STATE_ON) {
70715a558611SOng Boon Leong 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
70725a558611SOng Boon Leong 				    *lo_state, *lp_state);
70735a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
70745a558611SOng Boon Leong 						MPACKET_VERIFY);
70755a558611SOng Boon Leong 		}
70765a558611SOng Boon Leong 		/* Sleep then retry */
70775a558611SOng Boon Leong 		msleep(500);
70785a558611SOng Boon Leong 	}
70795a558611SOng Boon Leong 
70805a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
70815a558611SOng Boon Leong }
70825a558611SOng Boon Leong 
70835a558611SOng Boon Leong void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
70845a558611SOng Boon Leong {
70855a558611SOng Boon Leong 	if (priv->plat->fpe_cfg->hs_enable != enable) {
70865a558611SOng Boon Leong 		if (enable) {
70875a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
70885a558611SOng Boon Leong 						MPACKET_VERIFY);
70895a558611SOng Boon Leong 		} else {
70905a558611SOng Boon Leong 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
70915a558611SOng Boon Leong 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
70925a558611SOng Boon Leong 		}
70935a558611SOng Boon Leong 
70945a558611SOng Boon Leong 		priv->plat->fpe_cfg->hs_enable = enable;
70955a558611SOng Boon Leong 	}
70965a558611SOng Boon Leong }
70975a558611SOng Boon Leong 
7098e3f9c3e3SSong Yoong Siang static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7099e3f9c3e3SSong Yoong Siang {
7100e3f9c3e3SSong Yoong Siang 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7101e3f9c3e3SSong Yoong Siang 	struct dma_desc *desc_contains_ts = ctx->desc;
7102e3f9c3e3SSong Yoong Siang 	struct stmmac_priv *priv = ctx->priv;
7103e3f9c3e3SSong Yoong Siang 	struct dma_desc *ndesc = ctx->ndesc;
7104e3f9c3e3SSong Yoong Siang 	struct dma_desc *desc = ctx->desc;
7105e3f9c3e3SSong Yoong Siang 	u64 ns = 0;
7106e3f9c3e3SSong Yoong Siang 
7107e3f9c3e3SSong Yoong Siang 	if (!priv->hwts_rx_en)
7108e3f9c3e3SSong Yoong Siang 		return -ENODATA;
7109e3f9c3e3SSong Yoong Siang 
7110e3f9c3e3SSong Yoong Siang 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7111e3f9c3e3SSong Yoong Siang 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7112e3f9c3e3SSong Yoong Siang 		desc_contains_ts = ndesc;
7113e3f9c3e3SSong Yoong Siang 
7114e3f9c3e3SSong Yoong Siang 	/* Check if timestamp is available */
7115e3f9c3e3SSong Yoong Siang 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7116e3f9c3e3SSong Yoong Siang 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7117e3f9c3e3SSong Yoong Siang 		ns -= priv->plat->cdc_error_adj;
7118e3f9c3e3SSong Yoong Siang 		*timestamp = ns_to_ktime(ns);
7119e3f9c3e3SSong Yoong Siang 		return 0;
7120e3f9c3e3SSong Yoong Siang 	}
7121e3f9c3e3SSong Yoong Siang 
7122e3f9c3e3SSong Yoong Siang 	return -ENODATA;
7123e3f9c3e3SSong Yoong Siang }
7124e3f9c3e3SSong Yoong Siang 
7125e3f9c3e3SSong Yoong Siang static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7126e3f9c3e3SSong Yoong Siang 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7127e3f9c3e3SSong Yoong Siang };
7128e3f9c3e3SSong Yoong Siang 
7129cf3f047bSGiuseppe CAVALLARO /**
7130bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
7131bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
7132ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
7133e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
7134bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
7135bfab27a1SGiuseppe CAVALLARO  * call the alloc_etherdev, allocate the priv structure.
71369afec6efSAndy Shevchenko  * Return:
713715ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
71387ac6653aSJeff Kirsher  */
713915ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
7140cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
7141e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
71427ac6653aSJeff Kirsher {
7143bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
7144bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
71450366f7e0SOng Boon Leong 	u32 rxq;
714676067459SJose Abreu 	int i, ret = 0;
71477ac6653aSJeff Kirsher 
71489737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
71499737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
715041de8d4cSJoe Perches 	if (!ndev)
715115ffac73SJoachim Eastwood 		return -ENOMEM;
71527ac6653aSJeff Kirsher 
7153bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
71547ac6653aSJeff Kirsher 
7155bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
7156bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
7157bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
7158bfab27a1SGiuseppe CAVALLARO 
7159bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
7160cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
7161cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
7162e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
7163e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
71646ccf12aeSWong, Vee Khee 	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
7165e56788cfSJoachim Eastwood 
7166e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
7167e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
7168e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
71698532f613SOng Boon Leong 	priv->sfty_ce_irq = res->sfty_ce_irq;
71708532f613SOng Boon Leong 	priv->sfty_ue_irq = res->sfty_ue_irq;
71718532f613SOng Boon Leong 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
71728532f613SOng Boon Leong 		priv->rx_irq[i] = res->rx_irq[i];
71738532f613SOng Boon Leong 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
71748532f613SOng Boon Leong 		priv->tx_irq[i] = res->tx_irq[i];
7175e56788cfSJoachim Eastwood 
717683216e39SMichael Walle 	if (!is_zero_ether_addr(res->mac))
7177a96d317fSJakub Kicinski 		eth_hw_addr_set(priv->dev, res->mac);
7178bfab27a1SGiuseppe CAVALLARO 
7179a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
7180803f8fc4SJoachim Eastwood 
7181cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
7182cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
7183cf3f047bSGiuseppe CAVALLARO 
7184bba2556eSOng Boon Leong 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7185bba2556eSOng Boon Leong 	if (!priv->af_xdp_zc_qps)
7186bba2556eSOng Boon Leong 		return -ENOMEM;
7187bba2556eSOng Boon Leong 
718834877a15SJose Abreu 	/* Allocate workqueue */
718934877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
719034877a15SJose Abreu 	if (!priv->wq) {
719134877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
71922cb815cfSGaosheng Cui 		ret = -ENOMEM;
7193a137f3f2SGaosheng Cui 		goto error_wq_init;
719434877a15SJose Abreu 	}
719534877a15SJose Abreu 
719634877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
719734877a15SJose Abreu 
71985a558611SOng Boon Leong 	/* Initialize Link Partner FPE workqueue */
71995a558611SOng Boon Leong 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
72005a558611SOng Boon Leong 
7201cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
7202ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
7203ceb69499SGiuseppe CAVALLARO 	 */
7204cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
7205cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
7206cf3f047bSGiuseppe CAVALLARO 
720790f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
720890f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
7209f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
721090f522a2SEugeniy Paltsev 		/* Some reset controllers have only reset callback instead of
721190f522a2SEugeniy Paltsev 		 * assert + deassert callbacks pair.
721290f522a2SEugeniy Paltsev 		 */
721390f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
721490f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
721590f522a2SEugeniy Paltsev 	}
7216c5e4ddbdSChen-Yu Tsai 
7217e67f325eSMatthew Hagan 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7218e67f325eSMatthew Hagan 	if (ret == -ENOTSUPP)
7219e67f325eSMatthew Hagan 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7220e67f325eSMatthew Hagan 			ERR_PTR(ret));
7221e67f325eSMatthew Hagan 
7222cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
7223c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
7224c24602efSGiuseppe CAVALLARO 	if (ret)
722562866e98SChen-Yu Tsai 		goto error_hw_init;
7226cf3f047bSGiuseppe CAVALLARO 
722796874c61SMohammad Athari Bin Ismail 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
722896874c61SMohammad Athari Bin Ismail 	 */
722996874c61SMohammad Athari Bin Ismail 	if (priv->synopsys_id < DWMAC_CORE_5_20)
723096874c61SMohammad Athari Bin Ismail 		priv->plat->dma_cfg->dche = false;
723196874c61SMohammad Athari Bin Ismail 
7232b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
7233b561af36SVinod Koul 
7234cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
7235cf3f047bSGiuseppe CAVALLARO 
7236e3f9c3e3SSong Yoong Siang 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7237e3f9c3e3SSong Yoong Siang 
7238cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7239cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
724066c0e13aSMarek Majtyka 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7241ffb33221SWei Fang 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7242f748be53SAlexandre TORGUE 
72434dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
72444dbbe8ddSJose Abreu 	if (!ret) {
72454dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
72464dbbe8ddSJose Abreu 	}
72474dbbe8ddSJose Abreu 
7248f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
72499edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7250b7766206SJose Abreu 		if (priv->plat->has_gmac4)
7251b7766206SJose Abreu 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7252f748be53SAlexandre TORGUE 		priv->tso = true;
725338ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
7254f748be53SAlexandre TORGUE 	}
7255a993db88SJose Abreu 
725647f753c1STan Tee Min 	if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
725767afd6d1SJose Abreu 		ndev->hw_features |= NETIF_F_GRO;
7258d08d32d1SOng Boon Leong 		priv->sph_cap = true;
7259d08d32d1SOng Boon Leong 		priv->sph = priv->sph_cap;
726067afd6d1SJose Abreu 		dev_info(priv->device, "SPH feature enabled\n");
726167afd6d1SJose Abreu 	}
726267afd6d1SJose Abreu 
7263070246e4SJochen Henneberg 	/* Ideally our host DMA address width is the same as for the
7264070246e4SJochen Henneberg 	 * device. However, it may differ and then we have to use our
7265070246e4SJochen Henneberg 	 * host DMA width for allocation and the device DMA width for
7266070246e4SJochen Henneberg 	 * register handling.
7267f119cc98SFugang Duan 	 */
7268070246e4SJochen Henneberg 	if (priv->plat->host_dma_width)
7269070246e4SJochen Henneberg 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7270070246e4SJochen Henneberg 	else
7271070246e4SJochen Henneberg 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7272f119cc98SFugang Duan 
7273070246e4SJochen Henneberg 	if (priv->dma_cap.host_dma_width) {
7274a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
7275070246e4SJochen Henneberg 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7276a993db88SJose Abreu 		if (!ret) {
7277070246e4SJochen Henneberg 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7278070246e4SJochen Henneberg 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7279968a2978SThierry Reding 
7280968a2978SThierry Reding 			/*
7281968a2978SThierry Reding 			 * If more than 32 bits can be addressed, make sure to
7282968a2978SThierry Reding 			 * enable enhanced addressing mode.
7283968a2978SThierry Reding 			 */
7284968a2978SThierry Reding 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7285968a2978SThierry Reding 				priv->plat->dma_cfg->eame = true;
7286a993db88SJose Abreu 		} else {
7287a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7288a993db88SJose Abreu 			if (ret) {
7289a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
7290a993db88SJose Abreu 				goto error_hw_init;
7291a993db88SJose Abreu 			}
7292a993db88SJose Abreu 
7293070246e4SJochen Henneberg 			priv->dma_cap.host_dma_width = 32;
7294a993db88SJose Abreu 		}
7295a993db88SJose Abreu 	}
7296a993db88SJose Abreu 
7297bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7298bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
72997ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
73007ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
7301ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
73023cd1cfcbSJose Abreu 	if (priv->dma_cap.vlhash) {
73033cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
73043cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
73053cd1cfcbSJose Abreu 	}
730630d93227SJose Abreu 	if (priv->dma_cap.vlins) {
730730d93227SJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
730830d93227SJose Abreu 		if (priv->dma_cap.dvlan)
730930d93227SJose Abreu 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
731030d93227SJose Abreu 	}
73117ac6653aSJeff Kirsher #endif
73127ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
73137ac6653aSJeff Kirsher 
731476067459SJose Abreu 	/* Initialize RSS */
731576067459SJose Abreu 	rxq = priv->plat->rx_queues_to_use;
731676067459SJose Abreu 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
731776067459SJose Abreu 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
731876067459SJose Abreu 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
731976067459SJose Abreu 
732076067459SJose Abreu 	if (priv->dma_cap.rssen && priv->plat->rss_en)
732176067459SJose Abreu 		ndev->features |= NETIF_F_RXHASH;
732276067459SJose Abreu 
73236b2c6e4aSCorinna Vinschen 	ndev->vlan_features |= ndev->features;
73246b2c6e4aSCorinna Vinschen 	/* TSO doesn't work on VLANs yet */
73256b2c6e4aSCorinna Vinschen 	ndev->vlan_features &= ~NETIF_F_TSO;
73266b2c6e4aSCorinna Vinschen 
732744770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
732844770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
732956bcd591SJose Abreu 	if (priv->plat->has_xgmac)
73307d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
733156bcd591SJose Abreu 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
733256bcd591SJose Abreu 		ndev->max_mtu = JUMBO_LEN;
733344770e11SJarod Wilson 	else
733444770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7335a2cd64f3SKweh, Hock Leong 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7336a2cd64f3SKweh, Hock Leong 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7337a2cd64f3SKweh, Hock Leong 	 */
7338a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7339a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
734044770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
7341a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
7342b618ab45SHeiner Kallweit 		dev_warn(priv->device,
7343a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
7344a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
734544770e11SJarod Wilson 
73467ac6653aSJeff Kirsher 	if (flow_ctrl)
73477ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
73487ac6653aSJeff Kirsher 
73494e195166SCorinna Vinschen 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
73504e195166SCorinna Vinschen 
73518fce3331SJose Abreu 	/* Setup channels NAPI */
73520366f7e0SOng Boon Leong 	stmmac_napi_add(ndev);
73537ac6653aSJeff Kirsher 
735429555fa3SThierry Reding 	mutex_init(&priv->lock);
73557ac6653aSJeff Kirsher 
7356cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
7357cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
7358cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7359cd7201f4SGiuseppe CAVALLARO 	 * set the MDC clock dynamically according to the csr actual
7360cd7201f4SGiuseppe CAVALLARO 	 * clock input.
7361cd7201f4SGiuseppe CAVALLARO 	 */
73625e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
7363cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
73645e7f7fc5SBiao Huang 	else
73655e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
7366cd7201f4SGiuseppe CAVALLARO 
7367e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
7368e58bb43fSGiuseppe CAVALLARO 
73695ec55823SJoakim Zhang 	pm_runtime_get_noresume(device);
73705ec55823SJoakim Zhang 	pm_runtime_set_active(device);
7371d90d0c17SKai-Heng Feng 	if (!pm_runtime_enabled(device))
73725ec55823SJoakim Zhang 		pm_runtime_enable(device);
73735ec55823SJoakim Zhang 
7374a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
73753fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
73764bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
73774bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
73784bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
7379839612d2SRasmus Villemoes 			dev_err_probe(priv->device, ret,
7380839612d2SRasmus Villemoes 				      "%s: MDIO bus (id: %d) registration failed\n",
73814bfcbd7aSFrancesco Virlinzi 				      __func__, priv->plat->bus_id);
73826a81c26fSViresh Kumar 			goto error_mdio_register;
73834bfcbd7aSFrancesco Virlinzi 		}
7384e58bb43fSGiuseppe CAVALLARO 	}
73854bfcbd7aSFrancesco Virlinzi 
738646682cb8SVoon Weifeng 	if (priv->plat->speed_mode_2500)
738746682cb8SVoon Weifeng 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
738846682cb8SVoon Weifeng 
73897413f9a6SVladimir Oltean 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7390597a68ceSVoon Weifeng 		ret = stmmac_xpcs_setup(priv->mii);
7391597a68ceSVoon Weifeng 		if (ret)
7392597a68ceSVoon Weifeng 			goto error_xpcs_setup;
7393597a68ceSVoon Weifeng 	}
7394597a68ceSVoon Weifeng 
739574371272SJose Abreu 	ret = stmmac_phy_setup(priv);
739674371272SJose Abreu 	if (ret) {
739774371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
739874371272SJose Abreu 		goto error_phy_setup;
739974371272SJose Abreu 	}
740074371272SJose Abreu 
740157016590SFlorian Fainelli 	ret = register_netdev(ndev);
7402b2eb09afSFlorian Fainelli 	if (ret) {
7403b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
740457016590SFlorian Fainelli 			__func__, ret);
7405b2eb09afSFlorian Fainelli 		goto error_netdev_register;
7406b2eb09afSFlorian Fainelli 	}
74077ac6653aSJeff Kirsher 
74085f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
74098d72ab11SGreg Kroah-Hartman 	stmmac_init_fs(ndev);
74105f2b8b62SThierry Reding #endif
74115f2b8b62SThierry Reding 
74124047b9dbSBhupesh Sharma 	if (priv->plat->dump_debug_regs)
74134047b9dbSBhupesh Sharma 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
74144047b9dbSBhupesh Sharma 
74155ec55823SJoakim Zhang 	/* Let pm_runtime_put() disable the clocks.
74165ec55823SJoakim Zhang 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
74175ec55823SJoakim Zhang 	 */
74185ec55823SJoakim Zhang 	pm_runtime_put(device);
74195ec55823SJoakim Zhang 
742057016590SFlorian Fainelli 	return ret;
74217ac6653aSJeff Kirsher 
74226a81c26fSViresh Kumar error_netdev_register:
742374371272SJose Abreu 	phylink_destroy(priv->phylink);
7424597a68ceSVoon Weifeng error_xpcs_setup:
742574371272SJose Abreu error_phy_setup:
7426a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7427b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7428b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
74297ac6653aSJeff Kirsher error_mdio_register:
74300366f7e0SOng Boon Leong 	stmmac_napi_del(ndev);
743162866e98SChen-Yu Tsai error_hw_init:
743234877a15SJose Abreu 	destroy_workqueue(priv->wq);
7433a137f3f2SGaosheng Cui error_wq_init:
7434d7f576dcSWong Vee Khee 	bitmap_free(priv->af_xdp_zc_qps);
74357ac6653aSJeff Kirsher 
743615ffac73SJoachim Eastwood 	return ret;
74377ac6653aSJeff Kirsher }
7438b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
74397ac6653aSJeff Kirsher 
74407ac6653aSJeff Kirsher /**
74417ac6653aSJeff Kirsher  * stmmac_dvr_remove
7442f4e7bd81SJoachim Eastwood  * @dev: device pointer
74437ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7444bfab27a1SGiuseppe CAVALLARO  * changes the link status, releases the DMA descriptor rings.
74457ac6653aSJeff Kirsher  */
7446ff0011cfSUwe Kleine-König void stmmac_dvr_remove(struct device *dev)
74477ac6653aSJeff Kirsher {
7448f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
74497ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
74507ac6653aSJeff Kirsher 
745138ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
74527ac6653aSJeff Kirsher 
745364495203SJisheng Zhang 	pm_runtime_get_sync(dev);
745464495203SJisheng Zhang 
7455ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
7456c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
74577ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
74587ac6653aSJeff Kirsher 	unregister_netdev(ndev);
74599a7b3950SOng Boon Leong 
74609a7b3950SOng Boon Leong 	/* Serdes power down needs to happen after VLAN filter
74619a7b3950SOng Boon Leong 	 * is deleted that is triggered by unregister_netdev().
74629a7b3950SOng Boon Leong 	 */
74639a7b3950SOng Boon Leong 	if (priv->plat->serdes_powerdown)
74649a7b3950SOng Boon Leong 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
74659a7b3950SOng Boon Leong 
7466474a31e1SAaro Koskinen #ifdef CONFIG_DEBUG_FS
7467474a31e1SAaro Koskinen 	stmmac_exit_fs(ndev);
7468474a31e1SAaro Koskinen #endif
746974371272SJose Abreu 	phylink_destroy(priv->phylink);
7470f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
7471f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
7472e67f325eSMatthew Hagan 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7473a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
74743fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7475e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
747634877a15SJose Abreu 	destroy_workqueue(priv->wq);
747729555fa3SThierry Reding 	mutex_destroy(&priv->lock);
7478d7f576dcSWong Vee Khee 	bitmap_free(priv->af_xdp_zc_qps);
74797ac6653aSJeff Kirsher 
74800d9a1591SBiao Huang 	pm_runtime_disable(dev);
74810d9a1591SBiao Huang 	pm_runtime_put_noidle(dev);
74827ac6653aSJeff Kirsher }
7483b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
74847ac6653aSJeff Kirsher 
7485732fdf0eSGiuseppe CAVALLARO /**
7486732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
7487f4e7bd81SJoachim Eastwood  * @dev: device pointer
7488732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device and it is called
7489732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queue, release the resources,
7490732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL), clean and release driver resources.
7491732fdf0eSGiuseppe CAVALLARO  */
7492f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
74937ac6653aSJeff Kirsher {
7494f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
74957ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
749614b41a29SNicolin Chen 	u32 chan;
74977ac6653aSJeff Kirsher 
74987ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
74997ac6653aSJeff Kirsher 		return 0;
75007ac6653aSJeff Kirsher 
7501134cc4ceSThierry Reding 	mutex_lock(&priv->lock);
750219e13cb2SJose Abreu 
75037ac6653aSJeff Kirsher 	netif_device_detach(ndev);
75047ac6653aSJeff Kirsher 
7505c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
75067ac6653aSJeff Kirsher 
750714b41a29SNicolin Chen 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
75088531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
750914b41a29SNicolin Chen 
75105f585913SFugang Duan 	if (priv->eee_enabled) {
75115f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
75125f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
75135f585913SFugang Duan 	}
75145f585913SFugang Duan 
75157ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
7516ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
7517c24602efSGiuseppe CAVALLARO 
7518b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerdown)
7519b9663b7cSVoon Weifeng 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7520b9663b7cSVoon Weifeng 
75217ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
7522e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7523c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
752489f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
752589f7f2cfSSrinivas Kandagatla 	} else {
7526c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
7527db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
752830f347aeSYang Yingliang 	}
75295a558611SOng Boon Leong 
753029555fa3SThierry Reding 	mutex_unlock(&priv->lock);
75312d871aa0SVince Bridgers 
753290702dcdSJoakim Zhang 	rtnl_lock();
753390702dcdSJoakim Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
753490702dcdSJoakim Zhang 		phylink_suspend(priv->phylink, true);
753590702dcdSJoakim Zhang 	} else {
753690702dcdSJoakim Zhang 		if (device_may_wakeup(priv->device))
753790702dcdSJoakim Zhang 			phylink_speed_down(priv->phylink, false);
753890702dcdSJoakim Zhang 		phylink_suspend(priv->phylink, false);
753990702dcdSJoakim Zhang 	}
754090702dcdSJoakim Zhang 	rtnl_unlock();
754190702dcdSJoakim Zhang 
75425a558611SOng Boon Leong 	if (priv->dma_cap.fpesel) {
75435a558611SOng Boon Leong 		/* Disable FPE */
75445a558611SOng Boon Leong 		stmmac_fpe_configure(priv, priv->ioaddr,
75455a558611SOng Boon Leong 				     priv->plat->tx_queues_to_use,
75465a558611SOng Boon Leong 				     priv->plat->rx_queues_to_use, false);
75475a558611SOng Boon Leong 
75485a558611SOng Boon Leong 		stmmac_fpe_handshake(priv, false);
75496b28a86dSMohammad Athari Bin Ismail 		stmmac_fpe_stop_wq(priv);
75505a558611SOng Boon Leong 	}
75515a558611SOng Boon Leong 
7552bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
75537ac6653aSJeff Kirsher 	return 0;
75547ac6653aSJeff Kirsher }
7555b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
75567ac6653aSJeff Kirsher 
7557f9ec5723SChristian Marangi static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7558f9ec5723SChristian Marangi {
75598531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7560f9ec5723SChristian Marangi 
7561f9ec5723SChristian Marangi 	rx_q->cur_rx = 0;
7562f9ec5723SChristian Marangi 	rx_q->dirty_rx = 0;
7563f9ec5723SChristian Marangi }
7564f9ec5723SChristian Marangi 
7565f9ec5723SChristian Marangi static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7566f9ec5723SChristian Marangi {
75678531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7568f9ec5723SChristian Marangi 
7569f9ec5723SChristian Marangi 	tx_q->cur_tx = 0;
7570f9ec5723SChristian Marangi 	tx_q->dirty_tx = 0;
7571f9ec5723SChristian Marangi 	tx_q->mss = 0;
7572f9ec5723SChristian Marangi 
7573f9ec5723SChristian Marangi 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7574f9ec5723SChristian Marangi }
7575f9ec5723SChristian Marangi 
7576732fdf0eSGiuseppe CAVALLARO /**
757754139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
7578d0ea5cbdSJesse Brandeburg  * @priv: device pointer
757954139cf3SJoao Pinto  */
758054139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
758154139cf3SJoao Pinto {
758254139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7583ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
758454139cf3SJoao Pinto 	u32 queue;
758554139cf3SJoao Pinto 
7586f9ec5723SChristian Marangi 	for (queue = 0; queue < rx_cnt; queue++)
7587f9ec5723SChristian Marangi 		stmmac_reset_rx_queue(priv, queue);
758854139cf3SJoao Pinto 
7589f9ec5723SChristian Marangi 	for (queue = 0; queue < tx_cnt; queue++)
7590f9ec5723SChristian Marangi 		stmmac_reset_tx_queue(priv, queue);
759154139cf3SJoao Pinto }
759254139cf3SJoao Pinto 
759354139cf3SJoao Pinto /**
7594732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
7595f4e7bd81SJoachim Eastwood  * @dev: device pointer
7596732fdf0eSGiuseppe CAVALLARO  * Description: when resume this function is invoked to setup the DMA and CORE
7597732fdf0eSGiuseppe CAVALLARO  * in a usable state.
7598732fdf0eSGiuseppe CAVALLARO  */
7599f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
76007ac6653aSJeff Kirsher {
7601f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
76027ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
7603b9663b7cSVoon Weifeng 	int ret;
76047ac6653aSJeff Kirsher 
76057ac6653aSJeff Kirsher 	if (!netif_running(ndev))
76067ac6653aSJeff Kirsher 		return 0;
76077ac6653aSJeff Kirsher 
76087ac6653aSJeff Kirsher 	/* Power Down bit, into the PM register, is cleared
76097ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
76107ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
76117ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
7612ceb69499SGiuseppe CAVALLARO 	 * from another devices (e.g. serial console).
7613ceb69499SGiuseppe CAVALLARO 	 */
7614e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
761529555fa3SThierry Reding 		mutex_lock(&priv->lock);
7616c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
761729555fa3SThierry Reding 		mutex_unlock(&priv->lock);
761889f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
7619623997fbSSrinivas Kandagatla 	} else {
7620db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
7621623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
7622623997fbSSrinivas Kandagatla 		if (priv->mii)
7623623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
7624623997fbSSrinivas Kandagatla 	}
76257ac6653aSJeff Kirsher 
7626a46e9010SRevanth Kumar Uppala 	if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
7627b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
7628b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
7629b9663b7cSVoon Weifeng 
7630b9663b7cSVoon Weifeng 		if (ret < 0)
7631b9663b7cSVoon Weifeng 			return ret;
7632b9663b7cSVoon Weifeng 	}
7633b9663b7cSVoon Weifeng 
763436d18b56SFugang Duan 	rtnl_lock();
763590702dcdSJoakim Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
763690702dcdSJoakim Zhang 		phylink_resume(priv->phylink);
763790702dcdSJoakim Zhang 	} else {
763890702dcdSJoakim Zhang 		phylink_resume(priv->phylink);
763990702dcdSJoakim Zhang 		if (device_may_wakeup(priv->device))
764036d18b56SFugang Duan 			phylink_speed_up(priv->phylink);
764136d18b56SFugang Duan 	}
764290702dcdSJoakim Zhang 	rtnl_unlock();
764336d18b56SFugang Duan 
76448e5debedSWong Vee Khee 	rtnl_lock();
764529555fa3SThierry Reding 	mutex_lock(&priv->lock);
7646f55d84b0SVincent Palatin 
764754139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
764800423969SThierry Reding 
76494ec236c7SFugang Duan 	stmmac_free_tx_skbufs(priv);
7650ba39b344SChristian Marangi 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7651ae79a639SGiuseppe CAVALLARO 
7652fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
7653d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
7654ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
76557ac6653aSJeff Kirsher 
7656ed64639bSWong Vee Khee 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7657ed64639bSWong Vee Khee 
7658c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
7659087a7b94SVincent Whitchurch 	stmmac_enable_all_dma_irq(priv);
76607ac6653aSJeff Kirsher 
7661134cc4ceSThierry Reding 	mutex_unlock(&priv->lock);
76628e5debedSWong Vee Khee 	rtnl_unlock();
7663134cc4ceSThierry Reding 
766431096c3eSLeon Yu 	netif_device_attach(ndev);
766531096c3eSLeon Yu 
76667ac6653aSJeff Kirsher 	return 0;
76677ac6653aSJeff Kirsher }
7668b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
7669ba27ec66SGiuseppe CAVALLARO 
76707ac6653aSJeff Kirsher #ifndef MODULE
76717ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
76727ac6653aSJeff Kirsher {
76737ac6653aSJeff Kirsher 	char *opt;
76747ac6653aSJeff Kirsher 
76757ac6653aSJeff Kirsher 	if (!str || !*str)
7676e01b042eSRandy Dunlap 		return 1;
76777ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
7678469d258dSVladimir Oltean 		if (!strncmp(opt, "debug:", 6)) {
7679ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
76807ac6653aSJeff Kirsher 				goto err;
7681469d258dSVladimir Oltean 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7682ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
76837ac6653aSJeff Kirsher 				goto err;
7684469d258dSVladimir Oltean 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7685ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
76867ac6653aSJeff Kirsher 				goto err;
7687469d258dSVladimir Oltean 		} else if (!strncmp(opt, "tc:", 3)) {
7688ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
76897ac6653aSJeff Kirsher 				goto err;
7690469d258dSVladimir Oltean 		} else if (!strncmp(opt, "watchdog:", 9)) {
7691ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
76927ac6653aSJeff Kirsher 				goto err;
7693469d258dSVladimir Oltean 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7694ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
76957ac6653aSJeff Kirsher 				goto err;
7696469d258dSVladimir Oltean 		} else if (!strncmp(opt, "pause:", 6)) {
7697ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
76987ac6653aSJeff Kirsher 				goto err;
7699469d258dSVladimir Oltean 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7700d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
7701d765955dSGiuseppe CAVALLARO 				goto err;
7702469d258dSVladimir Oltean 		} else if (!strncmp(opt, "chain_mode:", 11)) {
77034a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
77044a7d666aSGiuseppe CAVALLARO 				goto err;
77057ac6653aSJeff Kirsher 		}
77067ac6653aSJeff Kirsher 	}
7707e01b042eSRandy Dunlap 	return 1;
77087ac6653aSJeff Kirsher 
77097ac6653aSJeff Kirsher err:
77107ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7711e01b042eSRandy Dunlap 	return 1;
77127ac6653aSJeff Kirsher }
77137ac6653aSJeff Kirsher 
77147ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
7715ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
77166fc0d0f2SGiuseppe Cavallaro 
7717466c5ac8SMathieu Olivari static int __init stmmac_init(void)
7718466c5ac8SMathieu Olivari {
7719466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
7720466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
77218d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
7722466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7723474a31e1SAaro Koskinen 	register_netdevice_notifier(&stmmac_notifier);
7724466c5ac8SMathieu Olivari #endif
7725466c5ac8SMathieu Olivari 
7726466c5ac8SMathieu Olivari 	return 0;
7727466c5ac8SMathieu Olivari }
7728466c5ac8SMathieu Olivari 
7729466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
7730466c5ac8SMathieu Olivari {
7731466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
7732474a31e1SAaro Koskinen 	unregister_netdevice_notifier(&stmmac_notifier);
7733466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
7734466c5ac8SMathieu Olivari #endif
7735466c5ac8SMathieu Olivari }
7736466c5ac8SMathieu Olivari 
7737466c5ac8SMathieu Olivari module_init(stmmac_init)
7738466c5ac8SMathieu Olivari module_exit(stmmac_exit)
7739466c5ac8SMathieu Olivari 
77406fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
77416fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
77426fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
7743