14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27ac6653aSJeff Kirsher /*******************************************************************************
37ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
47ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
57ac6653aSJeff Kirsher 
6286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
77ac6653aSJeff Kirsher 
87ac6653aSJeff Kirsher 
97ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   Documentation available at:
127ac6653aSJeff Kirsher 	http://www.stlinux.com
137ac6653aSJeff Kirsher   Support available at:
147ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
157ac6653aSJeff Kirsher *******************************************************************************/
167ac6653aSJeff Kirsher 
176a81c26fSViresh Kumar #include <linux/clk.h>
187ac6653aSJeff Kirsher #include <linux/kernel.h>
197ac6653aSJeff Kirsher #include <linux/interrupt.h>
207ac6653aSJeff Kirsher #include <linux/ip.h>
217ac6653aSJeff Kirsher #include <linux/tcp.h>
227ac6653aSJeff Kirsher #include <linux/skbuff.h>
237ac6653aSJeff Kirsher #include <linux/ethtool.h>
247ac6653aSJeff Kirsher #include <linux/if_ether.h>
257ac6653aSJeff Kirsher #include <linux/crc32.h>
267ac6653aSJeff Kirsher #include <linux/mii.h>
2701789349SJiri Pirko #include <linux/if.h>
287ac6653aSJeff Kirsher #include <linux/if_vlan.h>
297ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
307ac6653aSJeff Kirsher #include <linux/slab.h>
315ec55823SJoakim Zhang #include <linux/pm_runtime.h>
327ac6653aSJeff Kirsher #include <linux/prefetch.h>
33db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
3450fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
357ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
367ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
3750fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
38891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
39eeef2f6bSJose Abreu #include <linux/phylink.h>
40b7766206SJose Abreu #include <linux/udp.h>
415fabb012SOng Boon Leong #include <linux/bpf_trace.h>
424dbbe8ddSJose Abreu #include <net/pkt_cls.h>
43bba2556eSOng Boon Leong #include <net/xdp_sock_drv.h>
44891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
45286a8372SGiuseppe CAVALLARO #include "stmmac.h"
465fabb012SOng Boon Leong #include "stmmac_xdp.h"
47c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
485790cf3cSMathieu Olivari #include <linux/of_mdio.h>
4919d857c9SPhil Reid #include "dwmac1000.h"
507d9e6c5aSJose Abreu #include "dwxgmac2.h"
5142de047dSJose Abreu #include "hwif.h"
527ac6653aSJeff Kirsher 
/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

/* Align to the cache line, then round up to a multiple of 16 bytes */
#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
/* Transmit watchdog timeout, in milliseconds */
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

/* netif message level bitmap; -1 selects default_msg_level below */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

/* MDIO address of the PHY; -1 means auto-probe. Read-only (0444). */
static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

/* TX/RX wake-up thresholds: one quarter of the configured ring size */
#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

/* XDP verdict flags returned by the RX fast path */
#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

/* RX copybreak threshold, in bytes */
#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
/* Convert an LPI timer value (usec) into an absolute jiffies deadline */
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring.
 * NOTE(review): declared unsigned int but exported with the "int" param type —
 * matches upstream; verify if module_param type checking ever complains.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

/* Convert a coalescing timer value (usec) into a ktime_t */
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
1459125cdd1SGiuseppe CAVALLARO 
/**
 * stmmac_bus_clks_config - enable or disable the bus (CSR) clocks
 * @priv: driver private structure
 * @enabled: true to prepare+enable the clocks, false to disable+unprepare
 *
 * Handles the main stmmac clock, the peripheral (APB) clock and any extra
 * platform-specific clocks through the optional clks_config() callback.
 * On a failure during enable, clocks already enabled are rolled back.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			/* Roll back the already-enabled main clock */
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				/* Roll back both core clocks on platform failure */
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		/* Platform clocks go down last; failure is ignored on disable */
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
1775ec55823SJoakim Zhang 
1787ac6653aSJeff Kirsher /**
1797ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
180732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and set a default in case of
181732fdf0eSGiuseppe CAVALLARO  * errors.
1827ac6653aSJeff Kirsher  */
1837ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1847ac6653aSJeff Kirsher {
1857ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1867ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
187d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
188d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1897ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1907ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1917ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1927ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1937ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1947ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
195d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
196d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1977ac6653aSJeff Kirsher }
1987ac6653aSJeff Kirsher 
/**
 * __stmmac_disable_all_queues - disable NAPI on every RX/TX channel
 * @priv: driver private structure
 */
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		/* AF_XDP zero-copy queues use a single combined rx/tx NAPI
		 * instance instead of the separate rx/tx ones.
		 */
		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}
221c22a3f48SJoao Pinto 
/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->rx_queue[queue];
		if (rx_q->xsk_pool) {
			/* One grace period covers all queues, so stop at the
			 * first queue with an XSK pool attached.
			 */
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}
243bba2556eSOng Boon Leong 
/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 * Description: re-enable NAPI on every channel; counterpart of the
 * queue-disable path.
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		/* AF_XDP zero-copy queues use a single combined rx/tx NAPI */
		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}
270c22a3f48SJoao Pinto 
27134877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
27234877a15SJose Abreu {
27334877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
27434877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
27534877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
27634877a15SJose Abreu }
27734877a15SJose Abreu 
/* Fatal-error handler: take the carrier down, flag that a full reset is
 * required and let the service task perform it.
 */
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
28434877a15SJose Abreu 
285c22a3f48SJoao Pinto /**
28632ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
28732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
28832ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
28932ceabcaSGiuseppe CAVALLARO  * clock input.
29032ceabcaSGiuseppe CAVALLARO  * Note:
29132ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
29232ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
29332ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
29432ceabcaSGiuseppe CAVALLARO  *	documentation). Viceversa the driver will try to set the MDC
29532ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
29632ceabcaSGiuseppe CAVALLARO  */
297cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv)
298cd7201f4SGiuseppe CAVALLARO {
299cd7201f4SGiuseppe CAVALLARO 	u32 clk_rate;
300cd7201f4SGiuseppe CAVALLARO 
301f573c0b9Sjpinto 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
302cd7201f4SGiuseppe CAVALLARO 
303cd7201f4SGiuseppe CAVALLARO 	/* Platform provided default clk_csr would be assumed valid
304ceb69499SGiuseppe CAVALLARO 	 * for all other cases except for the below mentioned ones.
305ceb69499SGiuseppe CAVALLARO 	 * For values higher than the IEEE 802.3 specified frequency
306ceb69499SGiuseppe CAVALLARO 	 * we can not estimate the proper divider as it is not known
307ceb69499SGiuseppe CAVALLARO 	 * the frequency of clk_csr_i. So we do not change the default
308ceb69499SGiuseppe CAVALLARO 	 * divider.
309ceb69499SGiuseppe CAVALLARO 	 */
310cd7201f4SGiuseppe CAVALLARO 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
311cd7201f4SGiuseppe CAVALLARO 		if (clk_rate < CSR_F_35M)
312cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_20_35M;
313cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
314cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_35_60M;
315cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
316cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_60_100M;
317cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
318cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_100_150M;
319cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
320cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_150_250M;
32108dad2f4SJesper Nilsson 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
322cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_250_300M;
323ceb69499SGiuseppe CAVALLARO 	}
3249f93ac8dSLABBE Corentin 
3259f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i) {
3269f93ac8dSLABBE Corentin 		if (clk_rate > 160000000)
3279f93ac8dSLABBE Corentin 			priv->clk_csr = 0x03;
3289f93ac8dSLABBE Corentin 		else if (clk_rate > 80000000)
3299f93ac8dSLABBE Corentin 			priv->clk_csr = 0x02;
3309f93ac8dSLABBE Corentin 		else if (clk_rate > 40000000)
3319f93ac8dSLABBE Corentin 			priv->clk_csr = 0x01;
3329f93ac8dSLABBE Corentin 		else
3339f93ac8dSLABBE Corentin 			priv->clk_csr = 0;
3349f93ac8dSLABBE Corentin 	}
3357d9e6c5aSJose Abreu 
3367d9e6c5aSJose Abreu 	if (priv->plat->has_xgmac) {
3377d9e6c5aSJose Abreu 		if (clk_rate > 400000000)
3387d9e6c5aSJose Abreu 			priv->clk_csr = 0x5;
3397d9e6c5aSJose Abreu 		else if (clk_rate > 350000000)
3407d9e6c5aSJose Abreu 			priv->clk_csr = 0x4;
3417d9e6c5aSJose Abreu 		else if (clk_rate > 300000000)
3427d9e6c5aSJose Abreu 			priv->clk_csr = 0x3;
3437d9e6c5aSJose Abreu 		else if (clk_rate > 250000000)
3447d9e6c5aSJose Abreu 			priv->clk_csr = 0x2;
3457d9e6c5aSJose Abreu 		else if (clk_rate > 150000000)
3467d9e6c5aSJose Abreu 			priv->clk_csr = 0x1;
3477d9e6c5aSJose Abreu 		else
3487d9e6c5aSJose Abreu 			priv->clk_csr = 0x0;
3497d9e6c5aSJose Abreu 	}
350cd7201f4SGiuseppe CAVALLARO }
351cd7201f4SGiuseppe CAVALLARO 
/* Dump @len bytes of the packet at @buf to the log at debug level */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
3577ac6653aSJeff Kirsher 
358ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
3597ac6653aSJeff Kirsher {
360ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
361a6a3e026SLABBE Corentin 	u32 avail;
362e3ad57c9SGiuseppe Cavallaro 
363ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
364ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
365e3ad57c9SGiuseppe Cavallaro 	else
366aa042f60SSong, Yoong Siang 		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
367e3ad57c9SGiuseppe Cavallaro 
368e3ad57c9SGiuseppe Cavallaro 	return avail;
369e3ad57c9SGiuseppe Cavallaro }
370e3ad57c9SGiuseppe Cavallaro 
37154139cf3SJoao Pinto /**
37254139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
37354139cf3SJoao Pinto  * @priv: driver private structure
37454139cf3SJoao Pinto  * @queue: RX queue index
37554139cf3SJoao Pinto  */
37654139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
377e3ad57c9SGiuseppe Cavallaro {
37854139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
379a6a3e026SLABBE Corentin 	u32 dirty;
380e3ad57c9SGiuseppe Cavallaro 
38154139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
38254139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
383e3ad57c9SGiuseppe Cavallaro 	else
384aa042f60SSong, Yoong Siang 		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
385e3ad57c9SGiuseppe Cavallaro 
386e3ad57c9SGiuseppe Cavallaro 	return dirty;
3877ac6653aSJeff Kirsher }
3887ac6653aSJeff Kirsher 
389be1c7eaeSVineetha G. Jaya Kumaran static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
390be1c7eaeSVineetha G. Jaya Kumaran {
391be1c7eaeSVineetha G. Jaya Kumaran 	int tx_lpi_timer;
392be1c7eaeSVineetha G. Jaya Kumaran 
393be1c7eaeSVineetha G. Jaya Kumaran 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
394be1c7eaeSVineetha G. Jaya Kumaran 	priv->eee_sw_timer_en = en ? 0 : 1;
395be1c7eaeSVineetha G. Jaya Kumaran 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
396be1c7eaeSVineetha G. Jaya Kumaran 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
397be1c7eaeSVineetha G. Jaya Kumaran }
398be1c7eaeSVineetha G. Jaya Kumaran 
/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE. Called when the TX path may be idle (e.g. from the EEE control timer).
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}
423d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	/* When the HW LPI entry timer is in use, turning it off is enough;
	 * the SW timer teardown below does not apply.
	 */
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
441d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	/* Try to enter LPI; re-arm so we retry while TX is still busy */
	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
456d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enable the LPI state and start related
 *  timer. Returns true if EEE ends up active, false otherwise.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot dial with the phy registers at this stage
	 * so we do not support extra feature like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	/* First activation: set up the SW timer and program the HW timers */
	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	/* GMAC4 with a short enough LPI timer can use the HW entry timer;
	 * otherwise fall back to the SW timer.
	 */
	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}
522d765955dSGiuseppe CAVALLARO 
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read timestamp from the descriptor & pass it to stack.
 * and also perform some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		/* Fallback: timestamp latched in the MAC rather than in the
		 * descriptor (zero return means success here).
		 */
		found = true;
	}

	if (found) {
		/* Compensate for the CDC synchronization error */
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}
564891434b1SRayagond Kokatanur 
565732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
56632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
567ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
568ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
569891434b1SRayagond Kokatanur  * @skb : the socket buffer
570891434b1SRayagond Kokatanur  * Description :
571891434b1SRayagond Kokatanur  * This function will read received packet's timestamp from the descriptor
 * and pass it to stack. It also performs some sanity checks.
573891434b1SRayagond Kokatanur  */
574ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
575ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
576891434b1SRayagond Kokatanur {
577891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
57898870943SJose Abreu 	struct dma_desc *desc = p;
579df103170SNathan Chancellor 	u64 ns = 0;
580891434b1SRayagond Kokatanur 
581891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
582891434b1SRayagond Kokatanur 		return;
583ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
5847d9e6c5aSJose Abreu 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
58598870943SJose Abreu 		desc = np;
586891434b1SRayagond Kokatanur 
58798870943SJose Abreu 	/* Check if timestamp is available */
58842de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
58942de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
5903600be5fSVoon Weifeng 
591c6d5f193SKurt Kanzenbach 		ns -= priv->plat->cdc_error_adj;
5923600be5fSVoon Weifeng 
59333d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
594891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
595891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
596891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
597ba1ffd74SGiuseppe CAVALLARO 	} else  {
59833d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
599ba1ffd74SGiuseppe CAVALLARO 	}
600891434b1SRayagond Kokatanur }
601891434b1SRayagond Kokatanur 
602891434b1SRayagond Kokatanur /**
603d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_set - control hardware timestamping.
604891434b1SRayagond Kokatanur  *  @dev: device pointer.
6058d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
606891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
607891434b1SRayagond Kokatanur  *  Description:
608891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing(TX)
609891434b1SRayagond Kokatanur  *  and incoming(RX) packets time stamping based on user input.
610891434b1SRayagond Kokatanur  *  Return Value:
611891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
612891434b1SRayagond Kokatanur  */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	/* Individual PTP_TCR bit fields, accumulated from the requested
	 * filter below and finally OR'ed into priv->systime_flags.
	 */
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	/* Require either basic (IEEE 1588-2002) or advanced (IEEE 1588-2008)
	 * timestamping support in the hardware capabilities.
	 */
	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* Advanced-timestamping cores support fine-grained RX filters; older
	 * cores (else branch) can only timestamp PTP v1 over UDP or nothing.
	 */
	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	/* Rebuild the TCR value from scratch; keeping it in priv lets it be
	 * restored by stmmac_init_tstamp_counter() after a suspend/resume.
	 */
	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	/* Program the timestamp control register with the accumulated flags */
	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* Remember the applied configuration for stmmac_hwtstamp_get() */
	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
798d6228b7cSArtem Panfilov 
799d6228b7cSArtem Panfilov /**
800d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_get - read hardware timestamping.
801d6228b7cSArtem Panfilov  *  @dev: device pointer.
802d6228b7cSArtem Panfilov  *  @ifr: An IOCTL specific structure, that can contain a pointer to
803d6228b7cSArtem Panfilov  *  a proprietary structure used to pass information to the driver.
804d6228b7cSArtem Panfilov  *  Description:
 *  This function obtains the current hardware timestamping settings
806d0ea5cbdSJesse Brandeburg  *  as requested.
807d6228b7cSArtem Panfilov  */
808d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
809d6228b7cSArtem Panfilov {
810d6228b7cSArtem Panfilov 	struct stmmac_priv *priv = netdev_priv(dev);
811d6228b7cSArtem Panfilov 	struct hwtstamp_config *config = &priv->tstamp_config;
812d6228b7cSArtem Panfilov 
813d6228b7cSArtem Panfilov 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
814d6228b7cSArtem Panfilov 		return -EOPNOTSUPP;
815d6228b7cSArtem Panfilov 
816d6228b7cSArtem Panfilov 	return copy_to_user(ifr->ifr_data, config,
817d6228b7cSArtem Panfilov 			    sizeof(*config)) ? -EFAULT : 0;
818891434b1SRayagond Kokatanur }
819891434b1SRayagond Kokatanur 
82032ceabcaSGiuseppe CAVALLARO /**
821a6da2bbbSHolger Assmann  * stmmac_init_tstamp_counter - init hardware timestamping counter
822a6da2bbbSHolger Assmann  * @priv: driver private structure
823a6da2bbbSHolger Assmann  * @systime_flags: timestamping flags
824a6da2bbbSHolger Assmann  * Description:
825a6da2bbbSHolger Assmann  * Initialize hardware counter for packet timestamping.
826a6da2bbbSHolger Assmann  * This is valid as long as the interface is open and not suspended.
827a6da2bbbSHolger Assmann  * Will be rerun after resuming from suspend, case in which the timestamping
828a6da2bbbSHolger Assmann  * flags updated by stmmac_hwtstamp_set() also need to be restored.
829a6da2bbbSHolger Assmann  */
830a6da2bbbSHolger Assmann int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
831a6da2bbbSHolger Assmann {
832a6da2bbbSHolger Assmann 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
833a6da2bbbSHolger Assmann 	struct timespec64 now;
834a6da2bbbSHolger Assmann 	u32 sec_inc = 0;
835a6da2bbbSHolger Assmann 	u64 temp = 0;
836a6da2bbbSHolger Assmann 	int ret;
837a6da2bbbSHolger Assmann 
838a6da2bbbSHolger Assmann 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
839a6da2bbbSHolger Assmann 		return -EOPNOTSUPP;
840a6da2bbbSHolger Assmann 
841a6da2bbbSHolger Assmann 	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
842a6da2bbbSHolger Assmann 	if (ret < 0) {
843a6da2bbbSHolger Assmann 		netdev_warn(priv->dev,
844a6da2bbbSHolger Assmann 			    "failed to enable PTP reference clock: %pe\n",
845a6da2bbbSHolger Assmann 			    ERR_PTR(ret));
846a6da2bbbSHolger Assmann 		return ret;
847a6da2bbbSHolger Assmann 	}
848a6da2bbbSHolger Assmann 
849a6da2bbbSHolger Assmann 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
850a6da2bbbSHolger Assmann 	priv->systime_flags = systime_flags;
851a6da2bbbSHolger Assmann 
852a6da2bbbSHolger Assmann 	/* program Sub Second Increment reg */
853a6da2bbbSHolger Assmann 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
854a6da2bbbSHolger Assmann 					   priv->plat->clk_ptp_rate,
855a6da2bbbSHolger Assmann 					   xmac, &sec_inc);
856a6da2bbbSHolger Assmann 	temp = div_u64(1000000000ULL, sec_inc);
857a6da2bbbSHolger Assmann 
858a6da2bbbSHolger Assmann 	/* Store sub second increment for later use */
859a6da2bbbSHolger Assmann 	priv->sub_second_inc = sec_inc;
860a6da2bbbSHolger Assmann 
861a6da2bbbSHolger Assmann 	/* calculate default added value:
862a6da2bbbSHolger Assmann 	 * formula is :
863a6da2bbbSHolger Assmann 	 * addend = (2^32)/freq_div_ratio;
864a6da2bbbSHolger Assmann 	 * where, freq_div_ratio = 1e9ns/sec_inc
865a6da2bbbSHolger Assmann 	 */
866a6da2bbbSHolger Assmann 	temp = (u64)(temp << 32);
867a6da2bbbSHolger Assmann 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
868a6da2bbbSHolger Assmann 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
869a6da2bbbSHolger Assmann 
870a6da2bbbSHolger Assmann 	/* initialize system time */
871a6da2bbbSHolger Assmann 	ktime_get_real_ts64(&now);
872a6da2bbbSHolger Assmann 
873a6da2bbbSHolger Assmann 	/* lower 32 bits of tv_sec are safe until y2106 */
874a6da2bbbSHolger Assmann 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
875a6da2bbbSHolger Assmann 
876a6da2bbbSHolger Assmann 	return 0;
877a6da2bbbSHolger Assmann }
878a6da2bbbSHolger Assmann EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
879a6da2bbbSHolger Assmann 
880a6da2bbbSHolger Assmann /**
881732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
88232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
883732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
88432ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
885732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
88632ceabcaSGiuseppe CAVALLARO  */
88792ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
888891434b1SRayagond Kokatanur {
8897d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
890a6da2bbbSHolger Assmann 	int ret;
8917d9e6c5aSJose Abreu 
892a6da2bbbSHolger Assmann 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
893a6da2bbbSHolger Assmann 	if (ret)
894a6da2bbbSHolger Assmann 		return ret;
89592ba6888SRayagond Kokatanur 
896891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
8977d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
8987d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
899be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
900be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
901be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
902891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
9037cd01399SVince Bridgers 
904be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
905be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
9067cd01399SVince Bridgers 
907be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
908be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
909be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
910891434b1SRayagond Kokatanur 
911891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
912891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
91392ba6888SRayagond Kokatanur 
914c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
915c30a70d3SGiuseppe CAVALLARO 
916c30a70d3SGiuseppe CAVALLARO 	return 0;
91792ba6888SRayagond Kokatanur }
91892ba6888SRayagond Kokatanur 
/* Undo stmmac_init_ptp(): stop the PTP reference clock and unregister
 * the PTP clock device.
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
924891434b1SRayagond Kokatanur 
9257ac6653aSJeff Kirsher /**
92629feff39SJoao Pinto  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
92729feff39SJoao Pinto  *  @priv: driver private structure
928d0ea5cbdSJesse Brandeburg  *  @duplex: duplex passed to the next function
92929feff39SJoao Pinto  *  Description: It is used for configuring the flow control in all queues
93029feff39SJoao Pinto  */
93129feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
93229feff39SJoao Pinto {
93329feff39SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
93429feff39SJoao Pinto 
935c10d4c82SJose Abreu 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
93629feff39SJoao Pinto 			priv->pause, tx_cnt);
93729feff39SJoao Pinto }
93829feff39SJoao Pinto 
939eeef2f6bSJose Abreu static void stmmac_validate(struct phylink_config *config,
940eeef2f6bSJose Abreu 			    unsigned long *supported,
941eeef2f6bSJose Abreu 			    struct phylink_link_state *state)
942eeef2f6bSJose Abreu {
943eeef2f6bSJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
9445b0d7d7dSJose Abreu 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
945eeef2f6bSJose Abreu 
946*92c3807bSRussell King (Oracle) 	/* This is very similar to phylink_generic_validate() except that
947*92c3807bSRussell King (Oracle) 	 * we always use PHY_INTERFACE_MODE_INTERNAL to get all capabilities.
948*92c3807bSRussell King (Oracle) 	 * This is because we don't always have config->supported_interfaces
949*92c3807bSRussell King (Oracle) 	 * populated (only when we have the XPCS.)
950*92c3807bSRussell King (Oracle) 	 *
951*92c3807bSRussell King (Oracle) 	 * When we do have an XPCS, we could pass state->interface, as XPCS
952*92c3807bSRussell King (Oracle) 	 * limits to a subset of the ethtool link modes allowed here.
953*92c3807bSRussell King (Oracle) 	 */
9545b0d7d7dSJose Abreu 	phylink_set(mac_supported, Autoneg);
9555b0d7d7dSJose Abreu 	phylink_set_port_modes(mac_supported);
956*92c3807bSRussell King (Oracle) 	phylink_get_linkmodes(mac_supported, PHY_INTERFACE_MODE_INTERNAL,
957*92c3807bSRussell King (Oracle) 			      config->mac_capabilities);
958eeef2f6bSJose Abreu 
959422829f9SJose Abreu 	linkmode_and(supported, supported, mac_supported);
960422829f9SJose Abreu 	linkmode_and(state->advertising, state->advertising, mac_supported);
961f213bbe8SJose Abreu 
962f213bbe8SJose Abreu 	/* If PCS is supported, check which modes it supports. */
963a1a753edSVladimir Oltean 	if (priv->hw->xpcs)
96411059740SVladimir Oltean 		xpcs_validate(priv->hw->xpcs, supported, state);
965eeef2f6bSJose Abreu }
966eeef2f6bSJose Abreu 
/* phylink .mac_config callback: intentionally empty for this driver. */
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}
972eeef2f6bSJose Abreu 
9735a558611SOng Boon Leong static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
9745a558611SOng Boon Leong {
9755a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
9765a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
9775a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
9785a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
9795a558611SOng Boon Leong 
9805a558611SOng Boon Leong 	if (is_up && *hs_enable) {
9815a558611SOng Boon Leong 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
9825a558611SOng Boon Leong 	} else {
9831f7096f0SWong Vee Khee 		*lo_state = FPE_STATE_OFF;
9841f7096f0SWong Vee Khee 		*lp_state = FPE_STATE_OFF;
9855a558611SOng Boon Leong 	}
9865a558611SOng Boon Leong }
9875a558611SOng Boon Leong 
/* phylink .mac_link_down callback: stop the MAC and tear down EEE/FPE
 * state for the lost link.
 */
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	/* Clear the EEE software state before re-running stmmac_eee_init(),
	 * then reflect link-down in the EEE PLS hardware bit.
	 */
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	/* Notify the Frame Preemption logic only when the HW supports it */
	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}
10029ad372fcSJose Abreu 
/* phylink .mac_link_up callback: program MAC_CTRL_REG for the resolved
 * speed/duplex, set up flow control, enable the MAC, and bring up
 * EEE/FPE state for the new link.
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	/* Clear the old speed field before OR-ing in the new one */
	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	/* Each interface family encodes speeds in different register bits;
	 * an unsupported speed leaves the register untouched (early return).
	 */
	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	/* Give platform glue a chance to retune clocks for the new speed */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	/* Re-negotiate EEE with the attached PHY when the HW supports it */
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}
11019ad372fcSJose Abreu 
/* phylink MAC operations implemented by this driver */
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};
1108eeef2f6bSJose Abreu 
110929feff39SJoao Pinto /**
1110732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
111132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
111232ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
111332ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
111432ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
111532ceabcaSGiuseppe CAVALLARO  */
1116e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1117e58bb43fSGiuseppe CAVALLARO {
1118e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
1119e58bb43fSGiuseppe CAVALLARO 
1120e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
11210d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
11220d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
11230d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
11240d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
112538ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
11263fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
11270d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
112838ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
11293fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1130e58bb43fSGiuseppe CAVALLARO 		}
1131e58bb43fSGiuseppe CAVALLARO 	}
1132e58bb43fSGiuseppe CAVALLARO }
1133e58bb43fSGiuseppe CAVALLARO 
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	/* Note: when !node, 'ret' is still uninitialized here, but the ||
	 * short-circuits so 'ret' is only read after being assigned above. */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	/* Without hardware PMT support, wake-on-lan capability (if any)
	 * comes from the PHY; mirror it into the device wakeup flags. */
	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
	}

	return ret;
}
117874371272SJose Abreu 
/**
 * stmmac_phy_setup - create and configure the phylink instance
 * @priv: driver private structure
 * Description: builds phylink_config from platform data (pause support and
 * speed capabilities bounded by plat->max_speed), creates the phylink
 * instance and, when an XPCS is present, attaches it as the PCS.
 * Return: 0 on success, a negative errno from phylink_create() otherwise.
 */
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int max_speed = priv->plat->max_speed;
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	/* No DT phylink node: fall back to the device's own fwnode */
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	/* Baseline capabilities; higher speeds are added below only when
	 * allowed by plat->max_speed (0 means "no limit"). */
	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
		MAC_10 | MAC_100;

	if (!max_speed || max_speed >= 1000)
		priv->phylink_config.mac_capabilities |= MAC_1000;

	if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500)
			priv->phylink_config.mac_capabilities |= MAC_2500FD;
	} else if (priv->plat->has_xgmac) {
		/* XGMAC cores support the full multi-gig ladder */
		if (!max_speed || max_speed >= 2500)
			priv->phylink_config.mac_capabilities |= MAC_2500FD;
		if (!max_speed || max_speed >= 5000)
			priv->phylink_config.mac_capabilities |= MAC_5000FD;
		if (!max_speed || max_speed >= 10000)
			priv->phylink_config.mac_capabilities |= MAC_10000FD;
		if (!max_speed || max_speed >= 25000)
			priv->phylink_config.mac_capabilities |= MAC_25000FD;
		if (!max_speed || max_speed >= 40000)
			priv->phylink_config.mac_capabilities |= MAC_40000FD;
		if (!max_speed || max_speed >= 50000)
			priv->phylink_config.mac_capabilities |= MAC_50000FD;
		if (!max_speed || max_speed >= 100000)
			priv->phylink_config.mac_capabilities |= MAC_100000FD;
	}

	/* Half-Duplex can only work with single queue */
	if (priv->plat->tx_queues_to_use > 1)
		priv->phylink_config.mac_capabilities &=
			~(MAC_10HD | MAC_100HD | MAC_1000HD);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	if (priv->hw->xpcs)
		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);

	priv->phylink = phylink;
	return 0;
}
12397ac6653aSJeff Kirsher 
124071fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1241c24602efSGiuseppe CAVALLARO {
124254139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1243bfaf91caSJoakim Zhang 	unsigned int desc_size;
124471fedb01SJoao Pinto 	void *head_rx;
124554139cf3SJoao Pinto 	u32 queue;
124654139cf3SJoao Pinto 
124754139cf3SJoao Pinto 	/* Display RX rings */
124854139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
124954139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
125054139cf3SJoao Pinto 
125154139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1252d0225e7dSAlexandre TORGUE 
1253bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
125454139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
1255bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1256bfaf91caSJoakim Zhang 		} else {
125754139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
1258bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1259bfaf91caSJoakim Zhang 		}
126071fedb01SJoao Pinto 
126171fedb01SJoao Pinto 		/* Display RX ring */
1262bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1263bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
12645bacd778SLABBE Corentin 	}
126554139cf3SJoao Pinto }
1266d0225e7dSAlexandre TORGUE 
126771fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
126871fedb01SJoao Pinto {
1269ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1270bfaf91caSJoakim Zhang 	unsigned int desc_size;
127171fedb01SJoao Pinto 	void *head_tx;
1272ce736788SJoao Pinto 	u32 queue;
1273ce736788SJoao Pinto 
1274ce736788SJoao Pinto 	/* Display TX rings */
1275ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1276ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1277ce736788SJoao Pinto 
1278ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
127971fedb01SJoao Pinto 
1280bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
1281ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
1282bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1283bfaf91caSJoakim Zhang 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1284579a25a8SJose Abreu 			head_tx = (void *)tx_q->dma_entx;
1285bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_edesc);
1286bfaf91caSJoakim Zhang 		} else {
1287ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
1288bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1289bfaf91caSJoakim Zhang 		}
129071fedb01SJoao Pinto 
1291bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1292bfaf91caSJoakim Zhang 				    tx_q->dma_tx_phy, desc_size);
1293c24602efSGiuseppe CAVALLARO 	}
1294ce736788SJoao Pinto }
1295c24602efSGiuseppe CAVALLARO 
/* Dump all RX rings followed by all TX rings. */
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	stmmac_display_rx_rings(priv);
	stmmac_display_tx_rings(priv);
}
130471fedb01SJoao Pinto 
1305286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1306286a8372SGiuseppe CAVALLARO {
1307286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1308286a8372SGiuseppe CAVALLARO 
1309b2f3a481SJose Abreu 	if (mtu >= BUF_SIZE_8KiB)
1310b2f3a481SJose Abreu 		ret = BUF_SIZE_16KiB;
1311b2f3a481SJose Abreu 	else if (mtu >= BUF_SIZE_4KiB)
1312286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1313286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1314286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1315d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1316286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1317286a8372SGiuseppe CAVALLARO 	else
1318d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1319286a8372SGiuseppe CAVALLARO 
1320286a8372SGiuseppe CAVALLARO 	return ret;
1321286a8372SGiuseppe CAVALLARO }
1322286a8372SGiuseppe CAVALLARO 
132332ceabcaSGiuseppe CAVALLARO /**
132471fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
132532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
132654139cf3SJoao Pinto  * @queue: RX queue index
132771fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
132832ceabcaSGiuseppe CAVALLARO  * in case of both basic and extended descriptors are used.
132932ceabcaSGiuseppe CAVALLARO  */
133054139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1331c24602efSGiuseppe CAVALLARO {
133254139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
13335bacd778SLABBE Corentin 	int i;
1334c24602efSGiuseppe CAVALLARO 
133571fedb01SJoao Pinto 	/* Clear the RX descriptors */
1336aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_rx_size; i++)
13375bacd778SLABBE Corentin 		if (priv->extend_desc)
133842de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
13395bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1340aa042f60SSong, Yoong Siang 					(i == priv->dma_rx_size - 1),
1341583e6361SAaro Koskinen 					priv->dma_buf_sz);
13425bacd778SLABBE Corentin 		else
134342de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
13445bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1345aa042f60SSong, Yoong Siang 					(i == priv->dma_rx_size - 1),
1346583e6361SAaro Koskinen 					priv->dma_buf_sz);
134771fedb01SJoao Pinto }
134871fedb01SJoao Pinto 
134971fedb01SJoao Pinto /**
135071fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
135171fedb01SJoao Pinto  * @priv: driver private structure
1352ce736788SJoao Pinto  * @queue: TX queue index.
135371fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
135471fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
135571fedb01SJoao Pinto  */
1356ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
135771fedb01SJoao Pinto {
1358ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
135971fedb01SJoao Pinto 	int i;
136071fedb01SJoao Pinto 
136171fedb01SJoao Pinto 	/* Clear the TX descriptors */
1362aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++) {
1363aa042f60SSong, Yoong Siang 		int last = (i == (priv->dma_tx_size - 1));
1364579a25a8SJose Abreu 		struct dma_desc *p;
1365579a25a8SJose Abreu 
13665bacd778SLABBE Corentin 		if (priv->extend_desc)
1367579a25a8SJose Abreu 			p = &tx_q->dma_etx[i].basic;
1368579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1369579a25a8SJose Abreu 			p = &tx_q->dma_entx[i].basic;
13705bacd778SLABBE Corentin 		else
1371579a25a8SJose Abreu 			p = &tx_q->dma_tx[i];
1372579a25a8SJose Abreu 
1373579a25a8SJose Abreu 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1374579a25a8SJose Abreu 	}
1375c24602efSGiuseppe CAVALLARO }
1376c24602efSGiuseppe CAVALLARO 
1377732fdf0eSGiuseppe CAVALLARO /**
137871fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
137971fedb01SJoao Pinto  * @priv: driver private structure
138071fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
138171fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
138271fedb01SJoao Pinto  */
138371fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
138471fedb01SJoao Pinto {
138554139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1386ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
138754139cf3SJoao Pinto 	u32 queue;
138854139cf3SJoao Pinto 
138971fedb01SJoao Pinto 	/* Clear the RX descriptors */
139054139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
139154139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
139271fedb01SJoao Pinto 
139371fedb01SJoao Pinto 	/* Clear the TX descriptors */
1394ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1395ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
139671fedb01SJoao Pinto }
139771fedb01SJoao Pinto 
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* DMA engine limited to 32-bit addressing: force pages below 4GB */
	if (priv->dma_cap.addr64 <= 32)
		gfp |= GFP_DMA32;

	/* The slot may already hold a page (e.g. ring re-init on resume);
	 * only allocate when empty. */
	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	/* With sph (presumably Split Header) enabled, a secondary page is
	 * programmed into the descriptor; otherwise the secondary address is
	 * marked invalid.
	 * NOTE(review): if sph is on and buf->sec_page is already set, this
	 * else path overwrites the pointer with NULL without releasing the
	 * page — confirm no caller can reach that combination. */
	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	/* Program the primary buffer DMA address (page base + headroom) */
	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
1445c24602efSGiuseppe CAVALLARO 
144671fedb01SJoao Pinto /**
144771fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
144871fedb01SJoao Pinto  * @priv: private structure
144954139cf3SJoao Pinto  * @queue: RX queue index
145071fedb01SJoao Pinto  * @i: buffer index.
145171fedb01SJoao Pinto  */
145254139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
145356329137SBartlomiej Zolnierkiewicz {
145454139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
14552af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
145654139cf3SJoao Pinto 
14572af6106aSJose Abreu 	if (buf->page)
1458458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
14592af6106aSJose Abreu 	buf->page = NULL;
146067afd6d1SJose Abreu 
146167afd6d1SJose Abreu 	if (buf->sec_page)
1462458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
146367afd6d1SJose Abreu 	buf->sec_page = NULL;
146456329137SBartlomiej Zolnierkiewicz }
146556329137SBartlomiej Zolnierkiewicz 
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	/* Unmap the descriptor's DMA buffer; XDP_TX buffers are skipped here */
	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Return XDP frames (both XDP_TX and ndo_xdp_xmit) to their owner */
	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	/* Count completed AF_XDP zero-copy frames for later accounting */
	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	/* Free the socket buffer for regular SKB transmissions */
	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	/* Reset bookkeeping for this ring slot */
	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
150971fedb01SJoao Pinto 
151071fedb01SJoao Pinto /**
15114298255fSOng Boon Leong  * dma_free_rx_skbufs - free RX dma buffers
15124298255fSOng Boon Leong  * @priv: private structure
15134298255fSOng Boon Leong  * @queue: RX queue index
15144298255fSOng Boon Leong  */
15154298255fSOng Boon Leong static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
15164298255fSOng Boon Leong {
15174298255fSOng Boon Leong 	int i;
15184298255fSOng Boon Leong 
15194298255fSOng Boon Leong 	for (i = 0; i < priv->dma_rx_size; i++)
15204298255fSOng Boon Leong 		stmmac_free_rx_buffer(priv, queue, i);
15214298255fSOng Boon Leong }
15224298255fSOng Boon Leong 
15234298255fSOng Boon Leong static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
15244298255fSOng Boon Leong 				   gfp_t flags)
15254298255fSOng Boon Leong {
15264298255fSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
15274298255fSOng Boon Leong 	int i;
15284298255fSOng Boon Leong 
15294298255fSOng Boon Leong 	for (i = 0; i < priv->dma_rx_size; i++) {
15304298255fSOng Boon Leong 		struct dma_desc *p;
15314298255fSOng Boon Leong 		int ret;
15324298255fSOng Boon Leong 
15334298255fSOng Boon Leong 		if (priv->extend_desc)
15344298255fSOng Boon Leong 			p = &((rx_q->dma_erx + i)->basic);
15354298255fSOng Boon Leong 		else
15364298255fSOng Boon Leong 			p = rx_q->dma_rx + i;
15374298255fSOng Boon Leong 
15384298255fSOng Boon Leong 		ret = stmmac_init_rx_buffers(priv, p, i, flags,
15394298255fSOng Boon Leong 					     queue);
15404298255fSOng Boon Leong 		if (ret)
15414298255fSOng Boon Leong 			return ret;
1542bba2556eSOng Boon Leong 
1543bba2556eSOng Boon Leong 		rx_q->buf_alloc_num++;
15444298255fSOng Boon Leong 	}
15454298255fSOng Boon Leong 
15464298255fSOng Boon Leong 	return 0;
15474298255fSOng Boon Leong }
15484298255fSOng Boon Leong 
15494298255fSOng Boon Leong /**
1550bba2556eSOng Boon Leong  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1551bba2556eSOng Boon Leong  * @priv: private structure
1552bba2556eSOng Boon Leong  * @queue: RX queue index
1553bba2556eSOng Boon Leong  */
1554bba2556eSOng Boon Leong static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1555bba2556eSOng Boon Leong {
1556bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1557bba2556eSOng Boon Leong 	int i;
1558bba2556eSOng Boon Leong 
1559bba2556eSOng Boon Leong 	for (i = 0; i < priv->dma_rx_size; i++) {
1560bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1561bba2556eSOng Boon Leong 
1562bba2556eSOng Boon Leong 		if (!buf->xdp)
1563bba2556eSOng Boon Leong 			continue;
1564bba2556eSOng Boon Leong 
1565bba2556eSOng Boon Leong 		xsk_buff_free(buf->xdp);
1566bba2556eSOng Boon Leong 		buf->xdp = NULL;
1567bba2556eSOng Boon Leong 	}
1568bba2556eSOng Boon Leong }
1569bba2556eSOng Boon Leong 
1570bba2556eSOng Boon Leong static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1571bba2556eSOng Boon Leong {
1572bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1573bba2556eSOng Boon Leong 	int i;
1574bba2556eSOng Boon Leong 
1575bba2556eSOng Boon Leong 	for (i = 0; i < priv->dma_rx_size; i++) {
1576bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf;
1577bba2556eSOng Boon Leong 		dma_addr_t dma_addr;
1578bba2556eSOng Boon Leong 		struct dma_desc *p;
1579bba2556eSOng Boon Leong 
1580bba2556eSOng Boon Leong 		if (priv->extend_desc)
1581bba2556eSOng Boon Leong 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1582bba2556eSOng Boon Leong 		else
1583bba2556eSOng Boon Leong 			p = rx_q->dma_rx + i;
1584bba2556eSOng Boon Leong 
1585bba2556eSOng Boon Leong 		buf = &rx_q->buf_pool[i];
1586bba2556eSOng Boon Leong 
1587bba2556eSOng Boon Leong 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1588bba2556eSOng Boon Leong 		if (!buf->xdp)
1589bba2556eSOng Boon Leong 			return -ENOMEM;
1590bba2556eSOng Boon Leong 
1591bba2556eSOng Boon Leong 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1592bba2556eSOng Boon Leong 		stmmac_set_desc_addr(priv, p, dma_addr);
1593bba2556eSOng Boon Leong 		rx_q->buf_alloc_num++;
1594bba2556eSOng Boon Leong 	}
1595bba2556eSOng Boon Leong 
1596bba2556eSOng Boon Leong 	return 0;
1597bba2556eSOng Boon Leong }
1598bba2556eSOng Boon Leong 
1599bba2556eSOng Boon Leong static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1600bba2556eSOng Boon Leong {
1601bba2556eSOng Boon Leong 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1602bba2556eSOng Boon Leong 		return NULL;
1603bba2556eSOng Boon Leong 
1604bba2556eSOng Boon Leong 	return xsk_get_pool_from_qid(priv->dev, queue);
1605bba2556eSOng Boon Leong }
1606bba2556eSOng Boon Leong 
/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, queue);

	/* Drop any previously registered memory model before re-registering
	 * below (the queue may switch between page_pool and XSK modes). */
	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		/* AF_XDP zero-copy: buffers are drawn from the XSK pool */
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		/* Default: buffers are drawn from the page_pool allocator */
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 0);
	}

	return 0;
}
1676de0b90e5SOng Boon Leong 
1677de0b90e5SOng Boon Leong static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1678de0b90e5SOng Boon Leong {
1679de0b90e5SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
1680de0b90e5SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
1681de0b90e5SOng Boon Leong 	u32 queue;
1682de0b90e5SOng Boon Leong 	int ret;
1683de0b90e5SOng Boon Leong 
1684de0b90e5SOng Boon Leong 	/* RX INITIALIZATION */
1685de0b90e5SOng Boon Leong 	netif_dbg(priv, probe, priv->dev,
1686de0b90e5SOng Boon Leong 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1687de0b90e5SOng Boon Leong 
1688de0b90e5SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++) {
1689de0b90e5SOng Boon Leong 		ret = __init_dma_rx_desc_rings(priv, queue, flags);
1690de0b90e5SOng Boon Leong 		if (ret)
1691de0b90e5SOng Boon Leong 			goto err_init_rx_buffers;
169254139cf3SJoao Pinto 	}
169354139cf3SJoao Pinto 
169471fedb01SJoao Pinto 	return 0;
169554139cf3SJoao Pinto 
169671fedb01SJoao Pinto err_init_rx_buffers:
169754139cf3SJoao Pinto 	while (queue >= 0) {
1698bba2556eSOng Boon Leong 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1699bba2556eSOng Boon Leong 
1700bba2556eSOng Boon Leong 		if (rx_q->xsk_pool)
1701bba2556eSOng Boon Leong 			dma_free_rx_xskbufs(priv, queue);
1702bba2556eSOng Boon Leong 		else
17034298255fSOng Boon Leong 			dma_free_rx_skbufs(priv, queue);
170454139cf3SJoao Pinto 
1705bba2556eSOng Boon Leong 		rx_q->buf_alloc_num = 0;
1706bba2556eSOng Boon Leong 		rx_q->xsk_pool = NULL;
1707bba2556eSOng Boon Leong 
170854139cf3SJoao Pinto 		if (queue == 0)
170954139cf3SJoao Pinto 			break;
171054139cf3SJoao Pinto 
171154139cf3SJoao Pinto 		queue--;
171254139cf3SJoao Pinto 	}
171354139cf3SJoao Pinto 
171471fedb01SJoao Pinto 	return ret;
171571fedb01SJoao Pinto }
171671fedb01SJoao Pinto 
171771fedb01SJoao Pinto /**
1718de0b90e5SOng Boon Leong  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1719de0b90e5SOng Boon Leong  * @priv: driver private structure
1720de0b90e5SOng Boon Leong  * @queue : TX queue index
172171fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
172271fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
172371fedb01SJoao Pinto  * modes.
172471fedb01SJoao Pinto  */
1725de0b90e5SOng Boon Leong static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
172671fedb01SJoao Pinto {
1727ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1728de0b90e5SOng Boon Leong 	int i;
1729ce736788SJoao Pinto 
173071fedb01SJoao Pinto 	netif_dbg(priv, probe, priv->dev,
1731ce736788SJoao Pinto 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1732ce736788SJoao Pinto 		  (u32)tx_q->dma_tx_phy);
173371fedb01SJoao Pinto 
173471fedb01SJoao Pinto 	/* Setup the chained descriptor addresses */
173571fedb01SJoao Pinto 	if (priv->mode == STMMAC_CHAIN_MODE) {
173671fedb01SJoao Pinto 		if (priv->extend_desc)
17372c520b1cSJose Abreu 			stmmac_mode_init(priv, tx_q->dma_etx,
1738aa042f60SSong, Yoong Siang 					 tx_q->dma_tx_phy,
1739aa042f60SSong, Yoong Siang 					 priv->dma_tx_size, 1);
1740579a25a8SJose Abreu 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
17412c520b1cSJose Abreu 			stmmac_mode_init(priv, tx_q->dma_tx,
1742aa042f60SSong, Yoong Siang 					 tx_q->dma_tx_phy,
1743aa042f60SSong, Yoong Siang 					 priv->dma_tx_size, 0);
1744c24602efSGiuseppe CAVALLARO 	}
1745286a8372SGiuseppe CAVALLARO 
1746132c32eeSOng Boon Leong 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1747132c32eeSOng Boon Leong 
1748aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++) {
1749c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1750de0b90e5SOng Boon Leong 
1751c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1752ce736788SJoao Pinto 			p = &((tx_q->dma_etx + i)->basic);
1753579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1754579a25a8SJose Abreu 			p = &((tx_q->dma_entx + i)->basic);
1755c24602efSGiuseppe CAVALLARO 		else
1756ce736788SJoao Pinto 			p = tx_q->dma_tx + i;
1757f748be53SAlexandre TORGUE 
175844c67f85SJose Abreu 		stmmac_clear_desc(priv, p);
1759f748be53SAlexandre TORGUE 
1760ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1761ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1762ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].len = 0;
1763ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].last_segment = false;
1764ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
17654a7d666aSGiuseppe CAVALLARO 	}
1766c24602efSGiuseppe CAVALLARO 
1767ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
1768ce736788SJoao Pinto 	tx_q->cur_tx = 0;
17698d212a9eSNiklas Cassel 	tx_q->mss = 0;
1770ce736788SJoao Pinto 
1771c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1772de0b90e5SOng Boon Leong 
1773de0b90e5SOng Boon Leong 	return 0;
1774c22a3f48SJoao Pinto }
17757ac6653aSJeff Kirsher 
1776de0b90e5SOng Boon Leong static int init_dma_tx_desc_rings(struct net_device *dev)
1777de0b90e5SOng Boon Leong {
1778de0b90e5SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
1779de0b90e5SOng Boon Leong 	u32 tx_queue_cnt;
1780de0b90e5SOng Boon Leong 	u32 queue;
1781de0b90e5SOng Boon Leong 
1782de0b90e5SOng Boon Leong 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1783de0b90e5SOng Boon Leong 
1784de0b90e5SOng Boon Leong 	for (queue = 0; queue < tx_queue_cnt; queue++)
1785de0b90e5SOng Boon Leong 		__init_dma_tx_desc_rings(priv, queue);
1786de0b90e5SOng Boon Leong 
178771fedb01SJoao Pinto 	return 0;
178871fedb01SJoao Pinto }
178971fedb01SJoao Pinto 
179071fedb01SJoao Pinto /**
179171fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
179271fedb01SJoao Pinto  * @dev: net device structure
179371fedb01SJoao Pinto  * @flags: gfp flag.
179471fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
179571fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
179671fedb01SJoao Pinto  * modes.
179771fedb01SJoao Pinto  */
179871fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
179971fedb01SJoao Pinto {
180071fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
180171fedb01SJoao Pinto 	int ret;
180271fedb01SJoao Pinto 
180371fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
180471fedb01SJoao Pinto 	if (ret)
180571fedb01SJoao Pinto 		return ret;
180671fedb01SJoao Pinto 
180771fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
180871fedb01SJoao Pinto 
18095bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
18107ac6653aSJeff Kirsher 
1811c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1812c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
181356329137SBartlomiej Zolnierkiewicz 
181456329137SBartlomiej Zolnierkiewicz 	return ret;
18157ac6653aSJeff Kirsher }
18167ac6653aSJeff Kirsher 
181771fedb01SJoao Pinto /**
181871fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
181971fedb01SJoao Pinto  * @priv: private structure
1820ce736788SJoao Pinto  * @queue: TX queue index
182171fedb01SJoao Pinto  */
1822ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
18237ac6653aSJeff Kirsher {
1824132c32eeSOng Boon Leong 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
18257ac6653aSJeff Kirsher 	int i;
18267ac6653aSJeff Kirsher 
1827132c32eeSOng Boon Leong 	tx_q->xsk_frames_done = 0;
1828132c32eeSOng Boon Leong 
1829aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++)
1830ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
1831132c32eeSOng Boon Leong 
1832132c32eeSOng Boon Leong 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1833132c32eeSOng Boon Leong 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1834132c32eeSOng Boon Leong 		tx_q->xsk_frames_done = 0;
1835132c32eeSOng Boon Leong 		tx_q->xsk_pool = NULL;
1836132c32eeSOng Boon Leong 	}
18377ac6653aSJeff Kirsher }
18387ac6653aSJeff Kirsher 
1839732fdf0eSGiuseppe CAVALLARO /**
18404ec236c7SFugang Duan  * stmmac_free_tx_skbufs - free TX skb buffers
18414ec236c7SFugang Duan  * @priv: private structure
18424ec236c7SFugang Duan  */
18434ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
18444ec236c7SFugang Duan {
18454ec236c7SFugang Duan 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
18464ec236c7SFugang Duan 	u32 queue;
18474ec236c7SFugang Duan 
18484ec236c7SFugang Duan 	for (queue = 0; queue < tx_queue_cnt; queue++)
18494ec236c7SFugang Duan 		dma_free_tx_skbufs(priv, queue);
18504ec236c7SFugang Duan }
18514ec236c7SFugang Duan 
18524ec236c7SFugang Duan /**
1853da5ec7f2SOng Boon Leong  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
185454139cf3SJoao Pinto  * @priv: private structure
1855da5ec7f2SOng Boon Leong  * @queue: RX queue index
185654139cf3SJoao Pinto  */
1857da5ec7f2SOng Boon Leong static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
185854139cf3SJoao Pinto {
185954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
186054139cf3SJoao Pinto 
186154139cf3SJoao Pinto 	/* Release the DMA RX socket buffers */
1862bba2556eSOng Boon Leong 	if (rx_q->xsk_pool)
1863bba2556eSOng Boon Leong 		dma_free_rx_xskbufs(priv, queue);
1864bba2556eSOng Boon Leong 	else
186554139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
186654139cf3SJoao Pinto 
1867bba2556eSOng Boon Leong 	rx_q->buf_alloc_num = 0;
1868bba2556eSOng Boon Leong 	rx_q->xsk_pool = NULL;
1869bba2556eSOng Boon Leong 
187054139cf3SJoao Pinto 	/* Free DMA regions of consistent memory previously allocated */
187154139cf3SJoao Pinto 	if (!priv->extend_desc)
1872aa042f60SSong, Yoong Siang 		dma_free_coherent(priv->device, priv->dma_rx_size *
1873aa042f60SSong, Yoong Siang 				  sizeof(struct dma_desc),
187454139cf3SJoao Pinto 				  rx_q->dma_rx, rx_q->dma_rx_phy);
187554139cf3SJoao Pinto 	else
1876aa042f60SSong, Yoong Siang 		dma_free_coherent(priv->device, priv->dma_rx_size *
187754139cf3SJoao Pinto 				  sizeof(struct dma_extended_desc),
187854139cf3SJoao Pinto 				  rx_q->dma_erx, rx_q->dma_rx_phy);
187954139cf3SJoao Pinto 
1880be8b38a7SOng Boon Leong 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1881be8b38a7SOng Boon Leong 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1882be8b38a7SOng Boon Leong 
18832af6106aSJose Abreu 	kfree(rx_q->buf_pool);
1884c3f812ceSJonathan Lemon 	if (rx_q->page_pool)
18852af6106aSJose Abreu 		page_pool_destroy(rx_q->page_pool);
18862af6106aSJose Abreu }
1887da5ec7f2SOng Boon Leong 
1888da5ec7f2SOng Boon Leong static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1889da5ec7f2SOng Boon Leong {
1890da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
1891da5ec7f2SOng Boon Leong 	u32 queue;
1892da5ec7f2SOng Boon Leong 
1893da5ec7f2SOng Boon Leong 	/* Free RX queue resources */
1894da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++)
1895da5ec7f2SOng Boon Leong 		__free_dma_rx_desc_resources(priv, queue);
189654139cf3SJoao Pinto }
189754139cf3SJoao Pinto 
189854139cf3SJoao Pinto /**
1899da5ec7f2SOng Boon Leong  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1900ce736788SJoao Pinto  * @priv: private structure
1901da5ec7f2SOng Boon Leong  * @queue: TX queue index
1902ce736788SJoao Pinto  */
1903da5ec7f2SOng Boon Leong static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1904ce736788SJoao Pinto {
1905ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1906579a25a8SJose Abreu 	size_t size;
1907579a25a8SJose Abreu 	void *addr;
1908ce736788SJoao Pinto 
1909ce736788SJoao Pinto 	/* Release the DMA TX socket buffers */
1910ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, queue);
1911ce736788SJoao Pinto 
1912579a25a8SJose Abreu 	if (priv->extend_desc) {
1913579a25a8SJose Abreu 		size = sizeof(struct dma_extended_desc);
1914579a25a8SJose Abreu 		addr = tx_q->dma_etx;
1915579a25a8SJose Abreu 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1916579a25a8SJose Abreu 		size = sizeof(struct dma_edesc);
1917579a25a8SJose Abreu 		addr = tx_q->dma_entx;
1918579a25a8SJose Abreu 	} else {
1919579a25a8SJose Abreu 		size = sizeof(struct dma_desc);
1920579a25a8SJose Abreu 		addr = tx_q->dma_tx;
1921579a25a8SJose Abreu 	}
1922579a25a8SJose Abreu 
1923aa042f60SSong, Yoong Siang 	size *= priv->dma_tx_size;
1924579a25a8SJose Abreu 
1925579a25a8SJose Abreu 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1926ce736788SJoao Pinto 
1927ce736788SJoao Pinto 	kfree(tx_q->tx_skbuff_dma);
1928ce736788SJoao Pinto 	kfree(tx_q->tx_skbuff);
1929ce736788SJoao Pinto }
1930da5ec7f2SOng Boon Leong 
1931da5ec7f2SOng Boon Leong static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1932da5ec7f2SOng Boon Leong {
1933da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
1934da5ec7f2SOng Boon Leong 	u32 queue;
1935da5ec7f2SOng Boon Leong 
1936da5ec7f2SOng Boon Leong 	/* Free TX queue resources */
1937da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++)
1938da5ec7f2SOng Boon Leong 		__free_dma_tx_desc_resources(priv, queue);
1939ce736788SJoao Pinto }
1940ce736788SJoao Pinto 
/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @queue: RX queue index
 * Description: allocates the per-queue RX resources: the page_pool that
 * backs receive buffers, the buf_pool bookkeeping array, the coherent
 * DMA descriptor ring (extended or basic layout), and registers the
 * queue's xdp_rxq info. On failure, any pieces already allocated are
 * released by the caller (see alloc_dma_rx_desc_resources()).
 * Return: 0 on success, a negative error code otherwise.
 */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int num_pages;
	unsigned int napi_id;
	int ret;

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;

	/* One page_pool per RX queue backs all of its receive buffers.
	 * A buffer may span multiple pages, hence the order computation.
	 */
	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = priv->dma_rx_size;
	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
	pp_params.order = ilog2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	/* XDP may transmit out of these buffers, so map bidirectionally */
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	pp_params.offset = stmmac_rx_offset(priv);
	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);

	rx_q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_q->page_pool)) {
		ret = PTR_ERR(rx_q->page_pool);
		/* Clear the error-encoded pointer so cleanup can test it */
		rx_q->page_pool = NULL;
		return ret;
	}

	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
				 sizeof(*rx_q->buf_pool),
				 GFP_KERNEL);
	if (!rx_q->buf_pool)
		return -ENOMEM;

	/* Coherent descriptor ring; entry size depends on extend_desc */
	if (priv->extend_desc) {
		rx_q->dma_erx = dma_alloc_coherent(priv->device,
						   priv->dma_rx_size *
						   sizeof(struct dma_extended_desc),
						   &rx_q->dma_rx_phy,
						   GFP_KERNEL);
		if (!rx_q->dma_erx)
			return -ENOMEM;

	} else {
		rx_q->dma_rx = dma_alloc_coherent(priv->device,
						  priv->dma_rx_size *
						  sizeof(struct dma_desc),
						  &rx_q->dma_rx_phy,
						  GFP_KERNEL);
		if (!rx_q->dma_rx)
			return -ENOMEM;
	}

	/* AF_XDP zero-copy queues are serviced by the combined rxtx NAPI */
	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		napi_id = ch->rxtx_napi.napi_id;
	else
		napi_id = ch->rx_napi.napi_id;

	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
			       rx_q->queue_index,
			       napi_id);
	if (ret) {
		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		/* NOTE(review): the real xdp_rxq_info_reg() error code is
		 * masked as -EINVAL here — confirm whether propagating ret
		 * would be preferable.
		 */
		return -EINVAL;
	}

	return 0;
}
2021da5ec7f2SOng Boon Leong 
2022da5ec7f2SOng Boon Leong static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2023da5ec7f2SOng Boon Leong {
2024da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
2025da5ec7f2SOng Boon Leong 	u32 queue;
2026da5ec7f2SOng Boon Leong 	int ret;
2027da5ec7f2SOng Boon Leong 
2028da5ec7f2SOng Boon Leong 	/* RX queues buffers and DMA */
2029da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++) {
2030da5ec7f2SOng Boon Leong 		ret = __alloc_dma_rx_desc_resources(priv, queue);
2031da5ec7f2SOng Boon Leong 		if (ret)
2032da5ec7f2SOng Boon Leong 			goto err_dma;
203354139cf3SJoao Pinto 	}
203471fedb01SJoao Pinto 
203571fedb01SJoao Pinto 	return 0;
203671fedb01SJoao Pinto 
203771fedb01SJoao Pinto err_dma:
203854139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
203954139cf3SJoao Pinto 
204071fedb01SJoao Pinto 	return ret;
204171fedb01SJoao Pinto }
204271fedb01SJoao Pinto 
204371fedb01SJoao Pinto /**
2044da5ec7f2SOng Boon Leong  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
204571fedb01SJoao Pinto  * @priv: private structure
2046da5ec7f2SOng Boon Leong  * @queue: TX queue index
204771fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
204871fedb01SJoao Pinto  * this function allocates the resources for TX and RX paths. In case of
204971fedb01SJoao Pinto  * reception, for example, it pre-allocated the RX socket buffer in order to
205071fedb01SJoao Pinto  * allow zero-copy mechanism.
205171fedb01SJoao Pinto  */
2052da5ec7f2SOng Boon Leong static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
205371fedb01SJoao Pinto {
2054ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2055579a25a8SJose Abreu 	size_t size;
2056579a25a8SJose Abreu 	void *addr;
2057ce736788SJoao Pinto 
2058ce736788SJoao Pinto 	tx_q->queue_index = queue;
2059ce736788SJoao Pinto 	tx_q->priv_data = priv;
2060ce736788SJoao Pinto 
2061aa042f60SSong, Yoong Siang 	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2062ce736788SJoao Pinto 				      sizeof(*tx_q->tx_skbuff_dma),
206371fedb01SJoao Pinto 				      GFP_KERNEL);
2064ce736788SJoao Pinto 	if (!tx_q->tx_skbuff_dma)
2065da5ec7f2SOng Boon Leong 		return -ENOMEM;
206671fedb01SJoao Pinto 
2067aa042f60SSong, Yoong Siang 	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2068ce736788SJoao Pinto 				  sizeof(struct sk_buff *),
206971fedb01SJoao Pinto 				  GFP_KERNEL);
2070ce736788SJoao Pinto 	if (!tx_q->tx_skbuff)
2071da5ec7f2SOng Boon Leong 		return -ENOMEM;
207271fedb01SJoao Pinto 
2073579a25a8SJose Abreu 	if (priv->extend_desc)
2074579a25a8SJose Abreu 		size = sizeof(struct dma_extended_desc);
2075579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2076579a25a8SJose Abreu 		size = sizeof(struct dma_edesc);
2077579a25a8SJose Abreu 	else
2078579a25a8SJose Abreu 		size = sizeof(struct dma_desc);
2079579a25a8SJose Abreu 
2080aa042f60SSong, Yoong Siang 	size *= priv->dma_tx_size;
2081579a25a8SJose Abreu 
2082579a25a8SJose Abreu 	addr = dma_alloc_coherent(priv->device, size,
2083579a25a8SJose Abreu 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2084579a25a8SJose Abreu 	if (!addr)
2085da5ec7f2SOng Boon Leong 		return -ENOMEM;
2086579a25a8SJose Abreu 
2087579a25a8SJose Abreu 	if (priv->extend_desc)
2088579a25a8SJose Abreu 		tx_q->dma_etx = addr;
2089579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2090579a25a8SJose Abreu 		tx_q->dma_entx = addr;
2091579a25a8SJose Abreu 	else
2092579a25a8SJose Abreu 		tx_q->dma_tx = addr;
2093da5ec7f2SOng Boon Leong 
2094da5ec7f2SOng Boon Leong 	return 0;
2095da5ec7f2SOng Boon Leong }
2096da5ec7f2SOng Boon Leong 
2097da5ec7f2SOng Boon Leong static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2098da5ec7f2SOng Boon Leong {
2099da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
2100da5ec7f2SOng Boon Leong 	u32 queue;
2101da5ec7f2SOng Boon Leong 	int ret;
2102da5ec7f2SOng Boon Leong 
2103da5ec7f2SOng Boon Leong 	/* TX queues buffers and DMA */
2104da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++) {
2105da5ec7f2SOng Boon Leong 		ret = __alloc_dma_tx_desc_resources(priv, queue);
2106da5ec7f2SOng Boon Leong 		if (ret)
2107da5ec7f2SOng Boon Leong 			goto err_dma;
21085bacd778SLABBE Corentin 	}
21095bacd778SLABBE Corentin 
21105bacd778SLABBE Corentin 	return 0;
21115bacd778SLABBE Corentin 
211262242260SChristophe Jaillet err_dma:
2113ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
211409f8d696SSrinivas Kandagatla 	return ret;
21155bacd778SLABBE Corentin }
211609f8d696SSrinivas Kandagatla 
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: allocates the RX queue resources first, then the TX
 * queue resources.
 * Return: 0 on success, a negative error code otherwise.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret;

	/* RX Allocation */
	ret = alloc_dma_rx_desc_resources(priv);
	if (ret)
		return ret;

	return alloc_dma_tx_desc_resources(priv);
}
213771fedb01SJoao Pinto 
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * Description: releases all TX queue resources and then all RX queue
 * resources. The ordering is deliberate: TX is torn down first so that
 * any pending XDP_TX buffers are returned before the RX resources they
 * came from are freed.
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);

	/* Release the DMA RX socket buffers later
	 * to ensure all pending XDP_TX buffers are returned.
	 */
	free_dma_rx_desc_resources(priv);
}
215271fedb01SJoao Pinto 
215371fedb01SJoao Pinto /**
21549eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
21559eb12474Sjpinto  *  @priv: driver private structure
21569eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
21579eb12474Sjpinto  */
21589eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
21599eb12474Sjpinto {
21604f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
21614f6046f5SJoao Pinto 	int queue;
21624f6046f5SJoao Pinto 	u8 mode;
21639eb12474Sjpinto 
21644f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
21654f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2166c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
21674f6046f5SJoao Pinto 	}
21689eb12474Sjpinto }
21699eb12474Sjpinto 
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	/* stmmac_start_rx() performs the actual HW access for this channel */
	stmmac_start_rx(priv, priv->ioaddr, chan);
}
2182ae4f0d46SJoao Pinto 
/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	/* stmmac_start_tx() performs the actual HW access for this channel */
	stmmac_start_tx(priv, priv->ioaddr, chan);
}
2195ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	/* stmmac_stop_rx() performs the actual HW access for this channel */
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}
2208ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	/* stmmac_stop_tx() performs the actual HW access for this channel */
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
2221ae4f0d46SJoao Pinto 
2222ae4f0d46SJoao Pinto /**
2223ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
2224ae4f0d46SJoao Pinto  * @priv: driver private structure
2225ae4f0d46SJoao Pinto  * Description:
2226ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
2227ae4f0d46SJoao Pinto  */
2228ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
2229ae4f0d46SJoao Pinto {
2230ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2231ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2232ae4f0d46SJoao Pinto 	u32 chan = 0;
2233ae4f0d46SJoao Pinto 
2234ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2235ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
2236ae4f0d46SJoao Pinto 
2237ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2238ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
2239ae4f0d46SJoao Pinto }
2240ae4f0d46SJoao Pinto 
2241ae4f0d46SJoao Pinto /**
2242ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2243ae4f0d46SJoao Pinto  * @priv: driver private structure
2244ae4f0d46SJoao Pinto  * Description:
2245ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
2246ae4f0d46SJoao Pinto  */
2247ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2248ae4f0d46SJoao Pinto {
2249ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2250ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2251ae4f0d46SJoao Pinto 	u32 chan = 0;
2252ae4f0d46SJoao Pinto 
2253ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2254ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
2255ae4f0d46SJoao Pinto 
2256ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2257ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
2258ae4f0d46SJoao Pinto }
2259ae4f0d46SJoao Pinto 
/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 *  The chosen mode and per-queue FIFO share are applied to every RX and
 *  TX channel, along with the RX DMA buffer size (taken from the XSK
 *  pool frame size for zero-copy queues).
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	/* Fall back to the HW-reported FIFO sizes when the platform
	 * data provides none.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	/* Select threshold vs Store-And-Forward mode for each direction */
	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
		u32 buf_size;

		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				rxfifosz, qmode);

		/* Zero-copy queues size DMA buffers from the XSK pool frame */
		if (rx_q->xsk_pool) {
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      chan);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_buf_sz,
					      chan);
		}
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				txfifosz, qmode);
	}
}
23347ac6653aSJeff Kirsher 
2335132c32eeSOng Boon Leong static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2336132c32eeSOng Boon Leong {
2337132c32eeSOng Boon Leong 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2338132c32eeSOng Boon Leong 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2339132c32eeSOng Boon Leong 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2340132c32eeSOng Boon Leong 	unsigned int entry = tx_q->cur_tx;
2341132c32eeSOng Boon Leong 	struct dma_desc *tx_desc = NULL;
2342132c32eeSOng Boon Leong 	struct xdp_desc xdp_desc;
2343132c32eeSOng Boon Leong 	bool work_done = true;
2344132c32eeSOng Boon Leong 
2345132c32eeSOng Boon Leong 	/* Avoids TX time-out as we are sharing with slow path */
2346e92af33eSAlexander Lobakin 	txq_trans_cond_update(nq);
2347132c32eeSOng Boon Leong 
2348132c32eeSOng Boon Leong 	budget = min(budget, stmmac_tx_avail(priv, queue));
2349132c32eeSOng Boon Leong 
2350132c32eeSOng Boon Leong 	while (budget-- > 0) {
2351132c32eeSOng Boon Leong 		dma_addr_t dma_addr;
2352132c32eeSOng Boon Leong 		bool set_ic;
2353132c32eeSOng Boon Leong 
2354132c32eeSOng Boon Leong 		/* We are sharing with slow path and stop XSK TX desc submission when
2355132c32eeSOng Boon Leong 		 * available TX ring is less than threshold.
2356132c32eeSOng Boon Leong 		 */
2357132c32eeSOng Boon Leong 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2358132c32eeSOng Boon Leong 		    !netif_carrier_ok(priv->dev)) {
2359132c32eeSOng Boon Leong 			work_done = false;
2360132c32eeSOng Boon Leong 			break;
2361132c32eeSOng Boon Leong 		}
2362132c32eeSOng Boon Leong 
2363132c32eeSOng Boon Leong 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2364132c32eeSOng Boon Leong 			break;
2365132c32eeSOng Boon Leong 
2366132c32eeSOng Boon Leong 		if (likely(priv->extend_desc))
2367132c32eeSOng Boon Leong 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2368132c32eeSOng Boon Leong 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2369132c32eeSOng Boon Leong 			tx_desc = &tx_q->dma_entx[entry].basic;
2370132c32eeSOng Boon Leong 		else
2371132c32eeSOng Boon Leong 			tx_desc = tx_q->dma_tx + entry;
2372132c32eeSOng Boon Leong 
2373132c32eeSOng Boon Leong 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2374132c32eeSOng Boon Leong 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2375132c32eeSOng Boon Leong 
2376132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2377132c32eeSOng Boon Leong 
2378132c32eeSOng Boon Leong 		/* To return XDP buffer to XSK pool, we simple call
2379132c32eeSOng Boon Leong 		 * xsk_tx_completed(), so we don't need to fill up
2380132c32eeSOng Boon Leong 		 * 'buf' and 'xdpf'.
2381132c32eeSOng Boon Leong 		 */
2382132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf = 0;
2383132c32eeSOng Boon Leong 		tx_q->xdpf[entry] = NULL;
2384132c32eeSOng Boon Leong 
2385132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2386132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2387132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2388132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2389132c32eeSOng Boon Leong 
2390132c32eeSOng Boon Leong 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2391132c32eeSOng Boon Leong 
2392132c32eeSOng Boon Leong 		tx_q->tx_count_frames++;
2393132c32eeSOng Boon Leong 
2394132c32eeSOng Boon Leong 		if (!priv->tx_coal_frames[queue])
2395132c32eeSOng Boon Leong 			set_ic = false;
2396132c32eeSOng Boon Leong 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2397132c32eeSOng Boon Leong 			set_ic = true;
2398132c32eeSOng Boon Leong 		else
2399132c32eeSOng Boon Leong 			set_ic = false;
2400132c32eeSOng Boon Leong 
2401132c32eeSOng Boon Leong 		if (set_ic) {
2402132c32eeSOng Boon Leong 			tx_q->tx_count_frames = 0;
2403132c32eeSOng Boon Leong 			stmmac_set_tx_ic(priv, tx_desc);
2404132c32eeSOng Boon Leong 			priv->xstats.tx_set_ic_bit++;
2405132c32eeSOng Boon Leong 		}
2406132c32eeSOng Boon Leong 
2407132c32eeSOng Boon Leong 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2408132c32eeSOng Boon Leong 				       true, priv->mode, true, true,
2409132c32eeSOng Boon Leong 				       xdp_desc.len);
2410132c32eeSOng Boon Leong 
2411132c32eeSOng Boon Leong 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2412132c32eeSOng Boon Leong 
2413132c32eeSOng Boon Leong 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2414132c32eeSOng Boon Leong 		entry = tx_q->cur_tx;
2415132c32eeSOng Boon Leong 	}
2416132c32eeSOng Boon Leong 
2417132c32eeSOng Boon Leong 	if (tx_desc) {
2418132c32eeSOng Boon Leong 		stmmac_flush_tx_descriptors(priv, queue);
2419132c32eeSOng Boon Leong 		xsk_tx_release(pool);
2420132c32eeSOng Boon Leong 	}
2421132c32eeSOng Boon Leong 
2422132c32eeSOng Boon Leong 	/* Return true if all of the 3 conditions are met
2423132c32eeSOng Boon Leong 	 *  a) TX Budget is still available
2424132c32eeSOng Boon Leong 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2425132c32eeSOng Boon Leong 	 *     pending XSK TX for transmission)
2426132c32eeSOng Boon Leong 	 */
2427132c32eeSOng Boon Leong 	return !!budget && work_done;
2428132c32eeSOng Boon Leong }
2429132c32eeSOng Boon Leong 
24303a6c12a0SXiaoliang Yang static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
24313a6c12a0SXiaoliang Yang {
24323a6c12a0SXiaoliang Yang 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
24333a6c12a0SXiaoliang Yang 		tc += 64;
24343a6c12a0SXiaoliang Yang 
24353a6c12a0SXiaoliang Yang 		if (priv->plat->force_thresh_dma_mode)
24363a6c12a0SXiaoliang Yang 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
24373a6c12a0SXiaoliang Yang 		else
24383a6c12a0SXiaoliang Yang 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
24393a6c12a0SXiaoliang Yang 						      chan);
24403a6c12a0SXiaoliang Yang 
24413a6c12a0SXiaoliang Yang 		priv->xstats.threshold = tc;
24423a6c12a0SXiaoliang Yang 	}
24433a6c12a0SXiaoliang Yang }
24443a6c12a0SXiaoliang Yang 
24457ac6653aSJeff Kirsher /**
2446732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
244732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
2448d0ea5cbdSJesse Brandeburg  * @budget: napi budget limiting this functions packet handling
2449ce736788SJoao Pinto  * @queue: TX queue index
2450732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
24517ac6653aSJeff Kirsher  */
24528fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
24537ac6653aSJeff Kirsher {
2454ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
245538979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
2456132c32eeSOng Boon Leong 	unsigned int entry, xmits = 0, count = 0;
24577ac6653aSJeff Kirsher 
24588fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2459a9097a96SGiuseppe CAVALLARO 
24609125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
24619125cdd1SGiuseppe CAVALLARO 
2462132c32eeSOng Boon Leong 	tx_q->xsk_frames_done = 0;
2463132c32eeSOng Boon Leong 
24648d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
2465132c32eeSOng Boon Leong 
2466132c32eeSOng Boon Leong 	/* Try to clean all TX complete frame in 1 shot */
2467132c32eeSOng Boon Leong 	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
2468be8b38a7SOng Boon Leong 		struct xdp_frame *xdpf;
2469be8b38a7SOng Boon Leong 		struct sk_buff *skb;
2470c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
2471c363b658SFabrice Gasnier 		int status;
2472c24602efSGiuseppe CAVALLARO 
24738b278a5bSOng Boon Leong 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
24748b278a5bSOng Boon Leong 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2475be8b38a7SOng Boon Leong 			xdpf = tx_q->xdpf[entry];
2476be8b38a7SOng Boon Leong 			skb = NULL;
2477be8b38a7SOng Boon Leong 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2478be8b38a7SOng Boon Leong 			xdpf = NULL;
2479be8b38a7SOng Boon Leong 			skb = tx_q->tx_skbuff[entry];
2480be8b38a7SOng Boon Leong 		} else {
2481be8b38a7SOng Boon Leong 			xdpf = NULL;
2482be8b38a7SOng Boon Leong 			skb = NULL;
2483be8b38a7SOng Boon Leong 		}
2484be8b38a7SOng Boon Leong 
2485c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
2486ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2487579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2488579a25a8SJose Abreu 			p = &tx_q->dma_entx[entry].basic;
2489c24602efSGiuseppe CAVALLARO 		else
2490ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
24917ac6653aSJeff Kirsher 
249242de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
249342de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
2494c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
2495c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
2496c363b658SFabrice Gasnier 			break;
2497c363b658SFabrice Gasnier 
24988fce3331SJose Abreu 		count++;
24998fce3331SJose Abreu 
2500a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
2501a6b25da5SNiklas Cassel 		 * the own bit.
2502a6b25da5SNiklas Cassel 		 */
2503a6b25da5SNiklas Cassel 		dma_rmb();
2504a6b25da5SNiklas Cassel 
2505c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
2506c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
2507c363b658SFabrice Gasnier 			/* ... verify the status error condition */
2508c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
2509c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
25103a6c12a0SXiaoliang Yang 				if (unlikely(status & tx_err_bump_tc))
25113a6c12a0SXiaoliang Yang 					stmmac_bump_dma_threshold(priv, queue);
2512c363b658SFabrice Gasnier 			} else {
25137ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
25147ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
251568e9c5deSVijayakannan Ayyathurai 				priv->xstats.txq_stats[queue].tx_pkt_n++;
2516c363b658SFabrice Gasnier 			}
2517be8b38a7SOng Boon Leong 			if (skb)
2518ba1ffd74SGiuseppe CAVALLARO 				stmmac_get_tx_hwtstamp(priv, p, skb);
25197ac6653aSJeff Kirsher 		}
25207ac6653aSJeff Kirsher 
2521be8b38a7SOng Boon Leong 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2522be8b38a7SOng Boon Leong 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2523ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2524362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
2525ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
2526ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
25277ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
2528362b37beSGiuseppe CAVALLARO 			else
2529362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
2530ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
2531ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
2532362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
2533ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
2534ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
2535ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2536cf32deecSRayagond Kokatanur 		}
2537f748be53SAlexandre TORGUE 
25382c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
2539f748be53SAlexandre TORGUE 
2540ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2541ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
25427ac6653aSJeff Kirsher 
2543be8b38a7SOng Boon Leong 		if (xdpf &&
2544be8b38a7SOng Boon Leong 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2545be8b38a7SOng Boon Leong 			xdp_return_frame_rx_napi(xdpf);
2546be8b38a7SOng Boon Leong 			tx_q->xdpf[entry] = NULL;
2547be8b38a7SOng Boon Leong 		}
2548be8b38a7SOng Boon Leong 
25498b278a5bSOng Boon Leong 		if (xdpf &&
25508b278a5bSOng Boon Leong 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
25518b278a5bSOng Boon Leong 			xdp_return_frame(xdpf);
25528b278a5bSOng Boon Leong 			tx_q->xdpf[entry] = NULL;
25538b278a5bSOng Boon Leong 		}
25548b278a5bSOng Boon Leong 
2555132c32eeSOng Boon Leong 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2556132c32eeSOng Boon Leong 			tx_q->xsk_frames_done++;
2557132c32eeSOng Boon Leong 
2558be8b38a7SOng Boon Leong 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2559be8b38a7SOng Boon Leong 			if (likely(skb)) {
256038979574SBeniamino Galvani 				pkts_compl++;
256138979574SBeniamino Galvani 				bytes_compl += skb->len;
25627c565c33SEric W. Biederman 				dev_consume_skb_any(skb);
2563ce736788SJoao Pinto 				tx_q->tx_skbuff[entry] = NULL;
25647ac6653aSJeff Kirsher 			}
2565be8b38a7SOng Boon Leong 		}
25667ac6653aSJeff Kirsher 
256742de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
25687ac6653aSJeff Kirsher 
2569aa042f60SSong, Yoong Siang 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
25707ac6653aSJeff Kirsher 	}
2571ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
257238979574SBeniamino Galvani 
2573c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2574c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
257538979574SBeniamino Galvani 
2576c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2577c22a3f48SJoao Pinto 								queue))) &&
2578aa042f60SSong, Yoong Siang 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2579c22a3f48SJoao Pinto 
2580b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
2581b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
2582c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
25837ac6653aSJeff Kirsher 	}
2584d765955dSGiuseppe CAVALLARO 
2585132c32eeSOng Boon Leong 	if (tx_q->xsk_pool) {
2586132c32eeSOng Boon Leong 		bool work_done;
2587132c32eeSOng Boon Leong 
2588132c32eeSOng Boon Leong 		if (tx_q->xsk_frames_done)
2589132c32eeSOng Boon Leong 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2590132c32eeSOng Boon Leong 
2591132c32eeSOng Boon Leong 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2592132c32eeSOng Boon Leong 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2593132c32eeSOng Boon Leong 
2594132c32eeSOng Boon Leong 		/* For XSK TX, we try to send as many as possible.
2595132c32eeSOng Boon Leong 		 * If XSK work done (XSK TX desc empty and budget still
2596132c32eeSOng Boon Leong 		 * available), return "budget - 1" to reenable TX IRQ.
2597132c32eeSOng Boon Leong 		 * Else, return "budget" to make NAPI continue polling.
2598132c32eeSOng Boon Leong 		 */
2599132c32eeSOng Boon Leong 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2600132c32eeSOng Boon Leong 					       STMMAC_XSK_TX_BUDGET_MAX);
2601132c32eeSOng Boon Leong 		if (work_done)
2602132c32eeSOng Boon Leong 			xmits = budget - 1;
2603132c32eeSOng Boon Leong 		else
2604132c32eeSOng Boon Leong 			xmits = budget;
2605132c32eeSOng Boon Leong 	}
2606132c32eeSOng Boon Leong 
2607be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2608be1c7eaeSVineetha G. Jaya Kumaran 	    priv->eee_sw_timer_en) {
2609d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
2610388e201dSVineetha G. Jaya Kumaran 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2611d765955dSGiuseppe CAVALLARO 	}
26128fce3331SJose Abreu 
26134ccb4585SJose Abreu 	/* We still have pending packets, let's call for a new scheduling */
26144ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
2615db2f2842SOng Boon Leong 		hrtimer_start(&tx_q->txtimer,
2616db2f2842SOng Boon Leong 			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2617d5a05e69SVincent Whitchurch 			      HRTIMER_MODE_REL);
26184ccb4585SJose Abreu 
26198fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
26208fce3331SJose Abreu 
2621132c32eeSOng Boon Leong 	/* Combine decisions from TX clean and XSK TX */
2622132c32eeSOng Boon Leong 	return max(count, xmits);
26237ac6653aSJeff Kirsher }
26247ac6653aSJeff Kirsher 
26257ac6653aSJeff Kirsher /**
2626732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
262732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
26285bacd778SLABBE Corentin  * @chan: channel index
26297ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2630732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
26317ac6653aSJeff Kirsher  */
26325bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
26337ac6653aSJeff Kirsher {
2634ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2635ce736788SJoao Pinto 
2636c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
26377ac6653aSJeff Kirsher 
2638ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
2639ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
2640579a25a8SJose Abreu 	stmmac_clear_tx_descriptors(priv, chan);
2641ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
2642ce736788SJoao Pinto 	tx_q->cur_tx = 0;
26438d212a9eSNiklas Cassel 	tx_q->mss = 0;
2644c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2645f421031eSJongsung Kim 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2646f421031eSJongsung Kim 			    tx_q->dma_tx_phy, chan);
2647ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
26487ac6653aSJeff Kirsher 
26497ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
2650c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
26517ac6653aSJeff Kirsher }
26527ac6653aSJeff Kirsher 
265332ceabcaSGiuseppe CAVALLARO /**
26546deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
26556deee222SJoao Pinto  *  @priv: driver private structure
26566deee222SJoao Pinto  *  @txmode: TX operating mode
26576deee222SJoao Pinto  *  @rxmode: RX operating mode
26586deee222SJoao Pinto  *  @chan: channel index
26596deee222SJoao Pinto  *  Description: it is used for configuring of the DMA operation mode in
26606deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
26616deee222SJoao Pinto  *  mode.
26626deee222SJoao Pinto  */
26636deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
26646deee222SJoao Pinto 					  u32 rxmode, u32 chan)
26656deee222SJoao Pinto {
2666a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2667a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
266852a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
266952a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
26706deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
267152a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
26726deee222SJoao Pinto 
26736deee222SJoao Pinto 	if (rxfifosz == 0)
26746deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
267552a76235SJose Abreu 	if (txfifosz == 0)
267652a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
267752a76235SJose Abreu 
267852a76235SJose Abreu 	/* Adjust for real per queue fifo size */
267952a76235SJose Abreu 	rxfifosz /= rx_channels_count;
268052a76235SJose Abreu 	txfifosz /= tx_channels_count;
26816deee222SJoao Pinto 
2682ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2683ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
26846deee222SJoao Pinto }
26856deee222SJoao Pinto 
26868bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
26878bf993a5SJose Abreu {
268863a550fcSJose Abreu 	int ret;
26898bf993a5SJose Abreu 
2690c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
26918bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2692c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
26938bf993a5SJose Abreu 		stmmac_global_err(priv);
2694c10d4c82SJose Abreu 		return true;
2695c10d4c82SJose Abreu 	}
2696c10d4c82SJose Abreu 
2697c10d4c82SJose Abreu 	return false;
26988bf993a5SJose Abreu }
26998bf993a5SJose Abreu 
27007e1c520cSOng Boon Leong static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
27018fce3331SJose Abreu {
27028fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
27037e1c520cSOng Boon Leong 						 &priv->xstats, chan, dir);
2704132c32eeSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2705132c32eeSOng Boon Leong 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
27068fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
2707132c32eeSOng Boon Leong 	struct napi_struct *rx_napi;
2708132c32eeSOng Boon Leong 	struct napi_struct *tx_napi;
2709021bd5e3SJose Abreu 	unsigned long flags;
27108fce3331SJose Abreu 
2711132c32eeSOng Boon Leong 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2712132c32eeSOng Boon Leong 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2713132c32eeSOng Boon Leong 
27144ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2715132c32eeSOng Boon Leong 		if (napi_schedule_prep(rx_napi)) {
2716021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2717021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2718021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2719132c32eeSOng Boon Leong 			__napi_schedule(rx_napi);
27203ba07debSJose Abreu 		}
27214ccb4585SJose Abreu 	}
27224ccb4585SJose Abreu 
2723021bd5e3SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2724132c32eeSOng Boon Leong 		if (napi_schedule_prep(tx_napi)) {
2725021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2726021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2727021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2728132c32eeSOng Boon Leong 			__napi_schedule(tx_napi);
2729021bd5e3SJose Abreu 		}
2730021bd5e3SJose Abreu 	}
27318fce3331SJose Abreu 
27328fce3331SJose Abreu 	return status;
27338fce3331SJose Abreu }
27348fce3331SJose Abreu 
27356deee222SJoao Pinto /**
2736732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
273732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
273832ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2739732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedule poll method in case of some
2740732fdf0eSGiuseppe CAVALLARO  * work can be done.
274132ceabcaSGiuseppe CAVALLARO  */
27427ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
27437ac6653aSJeff Kirsher {
2744d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
27455a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
27465a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
27475a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2748d62a107aSJoao Pinto 	u32 chan;
27498ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
27508ac60ffbSKees Cook 
27518ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
27528ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
27538ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
275468e5cfafSJoao Pinto 
27555a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
27567e1c520cSOng Boon Leong 		status[chan] = stmmac_napi_check(priv, chan,
27577e1c520cSOng Boon Leong 						 DMA_DIR_RXTX);
2758d62a107aSJoao Pinto 
27595a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
27605a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
27617ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
27623a6c12a0SXiaoliang Yang 			stmmac_bump_dma_threshold(priv, chan);
27635a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
27644e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
27657ac6653aSJeff Kirsher 		}
2766d62a107aSJoao Pinto 	}
2767d62a107aSJoao Pinto }
27687ac6653aSJeff Kirsher 
276932ceabcaSGiuseppe CAVALLARO /**
277032ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
277132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
277232ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
277332ceabcaSGiuseppe CAVALLARO  */
27741c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
27751c901a46SGiuseppe CAVALLARO {
27761c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
27771c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
27781c901a46SGiuseppe CAVALLARO 
27793b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
27804f795b25SGiuseppe CAVALLARO 
27814f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
27823b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
27831c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
27844f795b25SGiuseppe CAVALLARO 	} else
278538ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
27861c901a46SGiuseppe CAVALLARO }
27871c901a46SGiuseppe CAVALLARO 
2788732fdf0eSGiuseppe CAVALLARO /**
2789732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
279032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
279119e30c14SGiuseppe CAVALLARO  * Description:
279219e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2793e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
279419e30c14SGiuseppe CAVALLARO  *  This can be also used to override the value passed through the
279519e30c14SGiuseppe CAVALLARO  *  platform and necessary for old MAC10/100 and GMAC chips.
2796e7434821SGiuseppe CAVALLARO  */
2797e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2798e7434821SGiuseppe CAVALLARO {
2799a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2800e7434821SGiuseppe CAVALLARO }
2801e7434821SGiuseppe CAVALLARO 
280232ceabcaSGiuseppe CAVALLARO /**
2803732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
280432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
280532ceabcaSGiuseppe CAVALLARO  * Description:
280632ceabcaSGiuseppe CAVALLARO  * it is to verify if the MAC address is valid, in case of failures it
280732ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
280832ceabcaSGiuseppe CAVALLARO  */
2809bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2810bfab27a1SGiuseppe CAVALLARO {
28117f9b8fe5SJakub Kicinski 	u8 addr[ETH_ALEN];
28127f9b8fe5SJakub Kicinski 
2813bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
28147f9b8fe5SJakub Kicinski 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
28157f9b8fe5SJakub Kicinski 		if (is_valid_ether_addr(addr))
28167f9b8fe5SJakub Kicinski 			eth_hw_addr_set(priv->dev, addr);
28177f9b8fe5SJakub Kicinski 		else
2818f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2819af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2820bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2821bfab27a1SGiuseppe CAVALLARO 	}
2822c88460b7SHans de Goede }
2823bfab27a1SGiuseppe CAVALLARO 
282432ceabcaSGiuseppe CAVALLARO /**
2825732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
282632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
282732ceabcaSGiuseppe CAVALLARO  * Description:
282832ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
282932ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
283032ceabcaSGiuseppe CAVALLARO  * in case of these are not passed a default is kept for the MAC or GMAC.
283132ceabcaSGiuseppe CAVALLARO  */
28320f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
28330f1f88a8SGiuseppe CAVALLARO {
283447f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
283547f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
283624aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
283754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2838ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
283947f2a9ceSJoao Pinto 	u32 chan = 0;
2840c24602efSGiuseppe CAVALLARO 	int atds = 0;
2841495db273SGiuseppe Cavallaro 	int ret = 0;
28420f1f88a8SGiuseppe CAVALLARO 
2843a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2844a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
284589ab75bfSNiklas Cassel 		return -EINVAL;
28460f1f88a8SGiuseppe CAVALLARO 	}
28470f1f88a8SGiuseppe CAVALLARO 
2848c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2849c24602efSGiuseppe CAVALLARO 		atds = 1;
2850c24602efSGiuseppe CAVALLARO 
2851a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2852495db273SGiuseppe Cavallaro 	if (ret) {
2853495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2854495db273SGiuseppe Cavallaro 		return ret;
2855495db273SGiuseppe Cavallaro 	}
2856495db273SGiuseppe Cavallaro 
28577d9e6c5aSJose Abreu 	/* DMA Configuration */
28587d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
28597d9e6c5aSJose Abreu 
28607d9e6c5aSJose Abreu 	if (priv->plat->axi)
28617d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
28627d9e6c5aSJose Abreu 
2863af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2864af8f3fb7SWeifeng Voon 	for (chan = 0; chan < dma_csr_ch; chan++)
2865af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2866af8f3fb7SWeifeng Voon 
286747f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
286847f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
286954139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
287054139cf3SJoao Pinto 
287124aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
287224aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
287347f2a9ceSJoao Pinto 
287454139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2875bba2556eSOng Boon Leong 				     (rx_q->buf_alloc_num *
2876aa042f60SSong, Yoong Siang 				      sizeof(struct dma_desc));
2877a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2878a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
287947f2a9ceSJoao Pinto 	}
288047f2a9ceSJoao Pinto 
288147f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
288247f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2883ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2884ce736788SJoao Pinto 
288524aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
288624aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2887f748be53SAlexandre TORGUE 
28880431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2889a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2890a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
289147f2a9ceSJoao Pinto 	}
289224aaed0cSJose Abreu 
2893495db273SGiuseppe Cavallaro 	return ret;
28940f1f88a8SGiuseppe CAVALLARO }
28950f1f88a8SGiuseppe CAVALLARO 
28968fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
28978fce3331SJose Abreu {
28988fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
28998fce3331SJose Abreu 
2900db2f2842SOng Boon Leong 	hrtimer_start(&tx_q->txtimer,
2901db2f2842SOng Boon Leong 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2902d5a05e69SVincent Whitchurch 		      HRTIMER_MODE_REL);
29038fce3331SJose Abreu }
29048fce3331SJose Abreu 
2905bfab27a1SGiuseppe CAVALLARO /**
2906732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
2907d0ea5cbdSJesse Brandeburg  * @t: data pointer
29089125cdd1SGiuseppe CAVALLARO  * Description:
29099125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
29109125cdd1SGiuseppe CAVALLARO  */
2911d5a05e69SVincent Whitchurch static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
29129125cdd1SGiuseppe CAVALLARO {
2913d5a05e69SVincent Whitchurch 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
29148fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
29158fce3331SJose Abreu 	struct stmmac_channel *ch;
2916132c32eeSOng Boon Leong 	struct napi_struct *napi;
29179125cdd1SGiuseppe CAVALLARO 
29188fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
2919132c32eeSOng Boon Leong 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
29208fce3331SJose Abreu 
2921132c32eeSOng Boon Leong 	if (likely(napi_schedule_prep(napi))) {
2922021bd5e3SJose Abreu 		unsigned long flags;
2923021bd5e3SJose Abreu 
2924021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
2925021bd5e3SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2926021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
2927132c32eeSOng Boon Leong 		__napi_schedule(napi);
2928021bd5e3SJose Abreu 	}
2929d5a05e69SVincent Whitchurch 
2930d5a05e69SVincent Whitchurch 	return HRTIMER_NORESTART;
29319125cdd1SGiuseppe CAVALLARO }
29329125cdd1SGiuseppe CAVALLARO 
29339125cdd1SGiuseppe CAVALLARO /**
2934d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
293532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
29369125cdd1SGiuseppe CAVALLARO  * Description:
2937d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
29389125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
29399125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
29409125cdd1SGiuseppe CAVALLARO  */
2941d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
29429125cdd1SGiuseppe CAVALLARO {
29438fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2944db2f2842SOng Boon Leong 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
29458fce3331SJose Abreu 	u32 chan;
29468fce3331SJose Abreu 
29478fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
29488fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
29498fce3331SJose Abreu 
2950db2f2842SOng Boon Leong 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2951db2f2842SOng Boon Leong 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2952db2f2842SOng Boon Leong 
2953d5a05e69SVincent Whitchurch 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2954d5a05e69SVincent Whitchurch 		tx_q->txtimer.function = stmmac_tx_timer;
29558fce3331SJose Abreu 	}
2956db2f2842SOng Boon Leong 
2957db2f2842SOng Boon Leong 	for (chan = 0; chan < rx_channel_count; chan++)
2958db2f2842SOng Boon Leong 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
29599125cdd1SGiuseppe CAVALLARO }
29609125cdd1SGiuseppe CAVALLARO 
29614854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
29624854ab99SJoao Pinto {
29634854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
29644854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
29654854ab99SJoao Pinto 	u32 chan;
29664854ab99SJoao Pinto 
29674854ab99SJoao Pinto 	/* set TX ring length */
29684854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2969a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2970aa042f60SSong, Yoong Siang 				       (priv->dma_tx_size - 1), chan);
29714854ab99SJoao Pinto 
29724854ab99SJoao Pinto 	/* set RX ring length */
29734854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2974a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2975aa042f60SSong, Yoong Siang 				       (priv->dma_rx_size - 1), chan);
29764854ab99SJoao Pinto }
29774854ab99SJoao Pinto 
29789125cdd1SGiuseppe CAVALLARO /**
29796a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
29806a3a7193SJoao Pinto  *  @priv: driver private structure
29816a3a7193SJoao Pinto  *  Description: It is used for setting TX queues weight
29826a3a7193SJoao Pinto  */
29836a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
29846a3a7193SJoao Pinto {
29856a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
29866a3a7193SJoao Pinto 	u32 weight;
29876a3a7193SJoao Pinto 	u32 queue;
29886a3a7193SJoao Pinto 
29896a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
29906a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2991c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
29926a3a7193SJoao Pinto 	}
29936a3a7193SJoao Pinto }
29946a3a7193SJoao Pinto 
29956a3a7193SJoao Pinto /**
299619d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
299719d91873SJoao Pinto  *  @priv: driver private structure
299819d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
299919d91873SJoao Pinto  */
300019d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
300119d91873SJoao Pinto {
300219d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
300319d91873SJoao Pinto 	u32 mode_to_use;
300419d91873SJoao Pinto 	u32 queue;
300519d91873SJoao Pinto 
300644781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
300744781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
300819d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
300919d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
301019d91873SJoao Pinto 			continue;
301119d91873SJoao Pinto 
3012c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
301319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
301419d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
301519d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
301619d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
301719d91873SJoao Pinto 				queue);
301819d91873SJoao Pinto 	}
301919d91873SJoao Pinto }
302019d91873SJoao Pinto 
302119d91873SJoao Pinto /**
3022d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3023d43042f4SJoao Pinto  *  @priv: driver private structure
3024d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
3025d43042f4SJoao Pinto  */
3026d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3027d43042f4SJoao Pinto {
3028d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3029d43042f4SJoao Pinto 	u32 queue;
3030d43042f4SJoao Pinto 	u32 chan;
3031d43042f4SJoao Pinto 
3032d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3033d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
3034c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3035d43042f4SJoao Pinto 	}
3036d43042f4SJoao Pinto }
3037d43042f4SJoao Pinto 
3038d43042f4SJoao Pinto /**
3039a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3040a8f5102aSJoao Pinto  *  @priv: driver private structure
3041a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
3042a8f5102aSJoao Pinto  */
3043a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3044a8f5102aSJoao Pinto {
3045a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3046a8f5102aSJoao Pinto 	u32 queue;
3047a8f5102aSJoao Pinto 	u32 prio;
3048a8f5102aSJoao Pinto 
3049a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3050a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3051a8f5102aSJoao Pinto 			continue;
3052a8f5102aSJoao Pinto 
3053a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
3054c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3055a8f5102aSJoao Pinto 	}
3056a8f5102aSJoao Pinto }
3057a8f5102aSJoao Pinto 
3058a8f5102aSJoao Pinto /**
3059a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3060a8f5102aSJoao Pinto  *  @priv: driver private structure
3061a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
3062a8f5102aSJoao Pinto  */
3063a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3064a8f5102aSJoao Pinto {
3065a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3066a8f5102aSJoao Pinto 	u32 queue;
3067a8f5102aSJoao Pinto 	u32 prio;
3068a8f5102aSJoao Pinto 
3069a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
3070a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3071a8f5102aSJoao Pinto 			continue;
3072a8f5102aSJoao Pinto 
3073a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
3074c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3075a8f5102aSJoao Pinto 	}
3076a8f5102aSJoao Pinto }
3077a8f5102aSJoao Pinto 
3078a8f5102aSJoao Pinto /**
3079abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3080abe80fdcSJoao Pinto  *  @priv: driver private structure
3081abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
3082abe80fdcSJoao Pinto  */
3083abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3084abe80fdcSJoao Pinto {
3085abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3086abe80fdcSJoao Pinto 	u32 queue;
3087abe80fdcSJoao Pinto 	u8 packet;
3088abe80fdcSJoao Pinto 
3089abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3090abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
3091abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3092abe80fdcSJoao Pinto 			continue;
3093abe80fdcSJoao Pinto 
3094abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3095c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3096abe80fdcSJoao Pinto 	}
3097abe80fdcSJoao Pinto }
3098abe80fdcSJoao Pinto 
309976067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
310076067459SJose Abreu {
310176067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
310276067459SJose Abreu 		priv->rss.enable = false;
310376067459SJose Abreu 		return;
310476067459SJose Abreu 	}
310576067459SJose Abreu 
310676067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
310776067459SJose Abreu 		priv->rss.enable = true;
310876067459SJose Abreu 	else
310976067459SJose Abreu 		priv->rss.enable = false;
311076067459SJose Abreu 
311176067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
311276067459SJose Abreu 			     priv->plat->rx_queues_to_use);
311376067459SJose Abreu }
311476067459SJose Abreu 
3115abe80fdcSJoao Pinto /**
3116d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
3117d0a9c9f9SJoao Pinto  *  @priv: driver private structure
3118d0a9c9f9SJoao Pinto  *  Description: It is used for configurring MTL
3119d0a9c9f9SJoao Pinto  */
3120d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3121d0a9c9f9SJoao Pinto {
3122d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3123d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3124d0a9c9f9SJoao Pinto 
3125c10d4c82SJose Abreu 	if (tx_queues_count > 1)
31266a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
31276a3a7193SJoao Pinto 
3128d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
3129c10d4c82SJose Abreu 	if (rx_queues_count > 1)
3130c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3131d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
3132d0a9c9f9SJoao Pinto 
3133d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
3134c10d4c82SJose Abreu 	if (tx_queues_count > 1)
3135c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3136d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
3137d0a9c9f9SJoao Pinto 
313819d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
3139c10d4c82SJose Abreu 	if (tx_queues_count > 1)
314019d91873SJoao Pinto 		stmmac_configure_cbs(priv);
314119d91873SJoao Pinto 
3142d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
3143d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
3144d43042f4SJoao Pinto 
3145d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
3146d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
31476deee222SJoao Pinto 
3148a8f5102aSJoao Pinto 	/* Set RX priorities */
3149c10d4c82SJose Abreu 	if (rx_queues_count > 1)
3150a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
3151a8f5102aSJoao Pinto 
3152a8f5102aSJoao Pinto 	/* Set TX priorities */
3153c10d4c82SJose Abreu 	if (tx_queues_count > 1)
3154a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
3155abe80fdcSJoao Pinto 
3156abe80fdcSJoao Pinto 	/* Set RX routing */
3157c10d4c82SJose Abreu 	if (rx_queues_count > 1)
3158abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
315976067459SJose Abreu 
316076067459SJose Abreu 	/* Receive Side Scaling */
316176067459SJose Abreu 	if (rx_queues_count > 1)
316276067459SJose Abreu 		stmmac_mac_config_rss(priv);
3163d0a9c9f9SJoao Pinto }
3164d0a9c9f9SJoao Pinto 
31658bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
31668bf993a5SJose Abreu {
3167c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
31688bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
31695ac712dcSWong Vee Khee 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
31705ac712dcSWong Vee Khee 					  priv->plat->safety_feat_cfg);
31718bf993a5SJose Abreu 	} else {
31728bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
31738bf993a5SJose Abreu 	}
31748bf993a5SJose Abreu }
31758bf993a5SJose Abreu 
31765a558611SOng Boon Leong static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
31775a558611SOng Boon Leong {
31785a558611SOng Boon Leong 	char *name;
31795a558611SOng Boon Leong 
31805a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3181db7c691dSMohammad Athari Bin Ismail 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
31825a558611SOng Boon Leong 
31835a558611SOng Boon Leong 	name = priv->wq_name;
31845a558611SOng Boon Leong 	sprintf(name, "%s-fpe", priv->dev->name);
31855a558611SOng Boon Leong 
31865a558611SOng Boon Leong 	priv->fpe_wq = create_singlethread_workqueue(name);
31875a558611SOng Boon Leong 	if (!priv->fpe_wq) {
31885a558611SOng Boon Leong 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
31895a558611SOng Boon Leong 
31905a558611SOng Boon Leong 		return -ENOMEM;
31915a558611SOng Boon Leong 	}
31925a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue start");
31935a558611SOng Boon Leong 
31945a558611SOng Boon Leong 	return 0;
31955a558611SOng Boon Leong }
31965a558611SOng Boon Leong 
3197d0a9c9f9SJoao Pinto /**
3198732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
3199523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
3200d0ea5cbdSJesse Brandeburg  *  @init_ptp: initialize PTP if set
3201523f11b5SSrinivas Kandagatla  *  Description:
3202732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state because the
3203732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
3204732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers). The DMA is ready to start receiving and
3205732fdf0eSGiuseppe CAVALLARO  *  transmitting.
3206523f11b5SSrinivas Kandagatla  *  Return value:
3207523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3208523f11b5SSrinivas Kandagatla  *  file on failure.
3209523f11b5SSrinivas Kandagatla  */
3210fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3211523f11b5SSrinivas Kandagatla {
3212523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
32133c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3214146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3215d08d32d1SOng Boon Leong 	bool sph_en;
3216146617b8SJoao Pinto 	u32 chan;
3217523f11b5SSrinivas Kandagatla 	int ret;
3218523f11b5SSrinivas Kandagatla 
3219523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
3220523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
3221523f11b5SSrinivas Kandagatla 	if (ret < 0) {
322238ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
322338ddc59dSLABBE Corentin 			   __func__);
3224523f11b5SSrinivas Kandagatla 		return ret;
3225523f11b5SSrinivas Kandagatla 	}
3226523f11b5SSrinivas Kandagatla 
3227523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
3228c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3229523f11b5SSrinivas Kandagatla 
323002e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
323102e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
323202e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
323302e57b9dSGiuseppe CAVALLARO 
323402e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
323502e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
323602e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
323702e57b9dSGiuseppe CAVALLARO 		} else {
323802e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
323902e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
324002e57b9dSGiuseppe CAVALLARO 		}
324102e57b9dSGiuseppe CAVALLARO 	}
324202e57b9dSGiuseppe CAVALLARO 
3243523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
3244c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
3245523f11b5SSrinivas Kandagatla 
3246d0a9c9f9SJoao Pinto 	/* Initialize MTL*/
3247d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
32489eb12474Sjpinto 
32498bf993a5SJose Abreu 	/* Initialize Safety Features */
32508bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
32518bf993a5SJose Abreu 
3252c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
3253978aded4SGiuseppe CAVALLARO 	if (!ret) {
325438ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3255978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3256d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3257978aded4SGiuseppe CAVALLARO 	}
3258978aded4SGiuseppe CAVALLARO 
3259523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
3260c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
3261523f11b5SSrinivas Kandagatla 
3262b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
3263b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
3264b4f0a661SJoao Pinto 
3265523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
3266523f11b5SSrinivas Kandagatla 
3267fe131929SHuacai Chen 	if (init_ptp) {
3268523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
3269722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
3270722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
3271722eef28SHeiner Kallweit 		else if (ret)
3272722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
3273fe131929SHuacai Chen 	}
3274523f11b5SSrinivas Kandagatla 
3275388e201dSVineetha G. Jaya Kumaran 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3276388e201dSVineetha G. Jaya Kumaran 
3277388e201dSVineetha G. Jaya Kumaran 	/* Convert the timer from msec to usec */
3278388e201dSVineetha G. Jaya Kumaran 	if (!priv->tx_lpi_timer)
3279388e201dSVineetha G. Jaya Kumaran 		priv->tx_lpi_timer = eee_timer * 1000;
3280523f11b5SSrinivas Kandagatla 
3281a4e887faSJose Abreu 	if (priv->use_riwt) {
3282db2f2842SOng Boon Leong 		u32 queue;
32834e4337ccSJose Abreu 
3284db2f2842SOng Boon Leong 		for (queue = 0; queue < rx_cnt; queue++) {
3285db2f2842SOng Boon Leong 			if (!priv->rx_riwt[queue])
3286db2f2842SOng Boon Leong 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3287db2f2842SOng Boon Leong 
3288db2f2842SOng Boon Leong 			stmmac_rx_watchdog(priv, priv->ioaddr,
3289db2f2842SOng Boon Leong 					   priv->rx_riwt[queue], queue);
3290db2f2842SOng Boon Leong 		}
3291523f11b5SSrinivas Kandagatla 	}
3292523f11b5SSrinivas Kandagatla 
3293c10d4c82SJose Abreu 	if (priv->hw->pcs)
3294c9ad4c10SBen Dooks (Codethink) 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3295523f11b5SSrinivas Kandagatla 
32964854ab99SJoao Pinto 	/* set TX and RX rings length */
32974854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
32984854ab99SJoao Pinto 
3299f748be53SAlexandre TORGUE 	/* Enable TSO */
3300146617b8SJoao Pinto 	if (priv->tso) {
33015e6038b8SOng Boon Leong 		for (chan = 0; chan < tx_cnt; chan++) {
33025e6038b8SOng Boon Leong 			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
33035e6038b8SOng Boon Leong 
33045e6038b8SOng Boon Leong 			/* TSO and TBS cannot co-exist */
33055e6038b8SOng Boon Leong 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
33065e6038b8SOng Boon Leong 				continue;
33075e6038b8SOng Boon Leong 
3308a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3309146617b8SJoao Pinto 		}
33105e6038b8SOng Boon Leong 	}
3311f748be53SAlexandre TORGUE 
331267afd6d1SJose Abreu 	/* Enable Split Header */
3313d08d32d1SOng Boon Leong 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
331467afd6d1SJose Abreu 	for (chan = 0; chan < rx_cnt; chan++)
3315d08d32d1SOng Boon Leong 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3316d08d32d1SOng Boon Leong 
331767afd6d1SJose Abreu 
331830d93227SJose Abreu 	/* VLAN Tag Insertion */
331930d93227SJose Abreu 	if (priv->dma_cap.vlins)
332030d93227SJose Abreu 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
332130d93227SJose Abreu 
3322579a25a8SJose Abreu 	/* TBS */
3323579a25a8SJose Abreu 	for (chan = 0; chan < tx_cnt; chan++) {
3324579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3325579a25a8SJose Abreu 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3326579a25a8SJose Abreu 
3327579a25a8SJose Abreu 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3328579a25a8SJose Abreu 	}
3329579a25a8SJose Abreu 
3330686cff3dSAashish Verma 	/* Configure real RX and TX queues */
3331686cff3dSAashish Verma 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3332686cff3dSAashish Verma 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3333686cff3dSAashish Verma 
33347d9e6c5aSJose Abreu 	/* Start the ball rolling... */
33357d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
33367d9e6c5aSJose Abreu 
33375a558611SOng Boon Leong 	if (priv->dma_cap.fpesel) {
33385a558611SOng Boon Leong 		stmmac_fpe_start_wq(priv);
33395a558611SOng Boon Leong 
33405a558611SOng Boon Leong 		if (priv->plat->fpe_cfg->enable)
33415a558611SOng Boon Leong 			stmmac_fpe_handshake(priv, true);
33425a558611SOng Boon Leong 	}
33435a558611SOng Boon Leong 
3344523f11b5SSrinivas Kandagatla 	return 0;
3345523f11b5SSrinivas Kandagatla }
3346523f11b5SSrinivas Kandagatla 
3347c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
3348c66f6c37SThierry Reding {
3349c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
3350c66f6c37SThierry Reding 
3351c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3352c66f6c37SThierry Reding }
3353c66f6c37SThierry Reding 
33548532f613SOng Boon Leong static void stmmac_free_irq(struct net_device *dev,
33558532f613SOng Boon Leong 			    enum request_irq_err irq_err, int irq_idx)
33568532f613SOng Boon Leong {
33578532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
33588532f613SOng Boon Leong 	int j;
33598532f613SOng Boon Leong 
33608532f613SOng Boon Leong 	switch (irq_err) {
33618532f613SOng Boon Leong 	case REQ_IRQ_ERR_ALL:
33628532f613SOng Boon Leong 		irq_idx = priv->plat->tx_queues_to_use;
33638532f613SOng Boon Leong 		fallthrough;
33648532f613SOng Boon Leong 	case REQ_IRQ_ERR_TX:
33658532f613SOng Boon Leong 		for (j = irq_idx - 1; j >= 0; j--) {
33668deec94cSOng Boon Leong 			if (priv->tx_irq[j] > 0) {
33678deec94cSOng Boon Leong 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
33688532f613SOng Boon Leong 				free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
33698532f613SOng Boon Leong 			}
33708deec94cSOng Boon Leong 		}
33718532f613SOng Boon Leong 		irq_idx = priv->plat->rx_queues_to_use;
33728532f613SOng Boon Leong 		fallthrough;
33738532f613SOng Boon Leong 	case REQ_IRQ_ERR_RX:
33748532f613SOng Boon Leong 		for (j = irq_idx - 1; j >= 0; j--) {
33758deec94cSOng Boon Leong 			if (priv->rx_irq[j] > 0) {
33768deec94cSOng Boon Leong 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
33778532f613SOng Boon Leong 				free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
33788532f613SOng Boon Leong 			}
33798deec94cSOng Boon Leong 		}
33808532f613SOng Boon Leong 
33818532f613SOng Boon Leong 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
33828532f613SOng Boon Leong 			free_irq(priv->sfty_ue_irq, dev);
33838532f613SOng Boon Leong 		fallthrough;
33848532f613SOng Boon Leong 	case REQ_IRQ_ERR_SFTY_UE:
33858532f613SOng Boon Leong 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
33868532f613SOng Boon Leong 			free_irq(priv->sfty_ce_irq, dev);
33878532f613SOng Boon Leong 		fallthrough;
33888532f613SOng Boon Leong 	case REQ_IRQ_ERR_SFTY_CE:
33898532f613SOng Boon Leong 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
33908532f613SOng Boon Leong 			free_irq(priv->lpi_irq, dev);
33918532f613SOng Boon Leong 		fallthrough;
33928532f613SOng Boon Leong 	case REQ_IRQ_ERR_LPI:
33938532f613SOng Boon Leong 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
33948532f613SOng Boon Leong 			free_irq(priv->wol_irq, dev);
33958532f613SOng Boon Leong 		fallthrough;
33968532f613SOng Boon Leong 	case REQ_IRQ_ERR_WOL:
33978532f613SOng Boon Leong 		free_irq(dev->irq, dev);
33988532f613SOng Boon Leong 		fallthrough;
33998532f613SOng Boon Leong 	case REQ_IRQ_ERR_MAC:
34008532f613SOng Boon Leong 	case REQ_IRQ_ERR_NO:
34018532f613SOng Boon Leong 		/* If MAC IRQ request error, no more IRQ to free */
34028532f613SOng Boon Leong 		break;
34038532f613SOng Boon Leong 	}
34048532f613SOng Boon Leong }
34058532f613SOng Boon Leong 
34068532f613SOng Boon Leong static int stmmac_request_irq_multi_msi(struct net_device *dev)
34078532f613SOng Boon Leong {
34088532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
34093e6dc7b6SWong Vee Khee 	enum request_irq_err irq_err;
34108deec94cSOng Boon Leong 	cpumask_t cpu_mask;
34118532f613SOng Boon Leong 	int irq_idx = 0;
34128532f613SOng Boon Leong 	char *int_name;
34138532f613SOng Boon Leong 	int ret;
34148532f613SOng Boon Leong 	int i;
34158532f613SOng Boon Leong 
34168532f613SOng Boon Leong 	/* For common interrupt */
34178532f613SOng Boon Leong 	int_name = priv->int_name_mac;
34188532f613SOng Boon Leong 	sprintf(int_name, "%s:%s", dev->name, "mac");
34198532f613SOng Boon Leong 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
34208532f613SOng Boon Leong 			  0, int_name, dev);
34218532f613SOng Boon Leong 	if (unlikely(ret < 0)) {
34228532f613SOng Boon Leong 		netdev_err(priv->dev,
34238532f613SOng Boon Leong 			   "%s: alloc mac MSI %d (error: %d)\n",
34248532f613SOng Boon Leong 			   __func__, dev->irq, ret);
34258532f613SOng Boon Leong 		irq_err = REQ_IRQ_ERR_MAC;
34268532f613SOng Boon Leong 		goto irq_error;
34278532f613SOng Boon Leong 	}
34288532f613SOng Boon Leong 
34298532f613SOng Boon Leong 	/* Request the Wake IRQ in case of another line
34308532f613SOng Boon Leong 	 * is used for WoL
34318532f613SOng Boon Leong 	 */
34328532f613SOng Boon Leong 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
34338532f613SOng Boon Leong 		int_name = priv->int_name_wol;
34348532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "wol");
34358532f613SOng Boon Leong 		ret = request_irq(priv->wol_irq,
34368532f613SOng Boon Leong 				  stmmac_mac_interrupt,
34378532f613SOng Boon Leong 				  0, int_name, dev);
34388532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
34398532f613SOng Boon Leong 			netdev_err(priv->dev,
34408532f613SOng Boon Leong 				   "%s: alloc wol MSI %d (error: %d)\n",
34418532f613SOng Boon Leong 				   __func__, priv->wol_irq, ret);
34428532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_WOL;
34438532f613SOng Boon Leong 			goto irq_error;
34448532f613SOng Boon Leong 		}
34458532f613SOng Boon Leong 	}
34468532f613SOng Boon Leong 
34478532f613SOng Boon Leong 	/* Request the LPI IRQ in case of another line
34488532f613SOng Boon Leong 	 * is used for LPI
34498532f613SOng Boon Leong 	 */
34508532f613SOng Boon Leong 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
34518532f613SOng Boon Leong 		int_name = priv->int_name_lpi;
34528532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "lpi");
34538532f613SOng Boon Leong 		ret = request_irq(priv->lpi_irq,
34548532f613SOng Boon Leong 				  stmmac_mac_interrupt,
34558532f613SOng Boon Leong 				  0, int_name, dev);
34568532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
34578532f613SOng Boon Leong 			netdev_err(priv->dev,
34588532f613SOng Boon Leong 				   "%s: alloc lpi MSI %d (error: %d)\n",
34598532f613SOng Boon Leong 				   __func__, priv->lpi_irq, ret);
34608532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_LPI;
34618532f613SOng Boon Leong 			goto irq_error;
34628532f613SOng Boon Leong 		}
34638532f613SOng Boon Leong 	}
34648532f613SOng Boon Leong 
34658532f613SOng Boon Leong 	/* Request the Safety Feature Correctible Error line in
34668532f613SOng Boon Leong 	 * case of another line is used
34678532f613SOng Boon Leong 	 */
34688532f613SOng Boon Leong 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
34698532f613SOng Boon Leong 		int_name = priv->int_name_sfty_ce;
34708532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
34718532f613SOng Boon Leong 		ret = request_irq(priv->sfty_ce_irq,
34728532f613SOng Boon Leong 				  stmmac_safety_interrupt,
34738532f613SOng Boon Leong 				  0, int_name, dev);
34748532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
34758532f613SOng Boon Leong 			netdev_err(priv->dev,
34768532f613SOng Boon Leong 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
34778532f613SOng Boon Leong 				   __func__, priv->sfty_ce_irq, ret);
34788532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_SFTY_CE;
34798532f613SOng Boon Leong 			goto irq_error;
34808532f613SOng Boon Leong 		}
34818532f613SOng Boon Leong 	}
34828532f613SOng Boon Leong 
34838532f613SOng Boon Leong 	/* Request the Safety Feature Uncorrectible Error line in
34848532f613SOng Boon Leong 	 * case of another line is used
34858532f613SOng Boon Leong 	 */
34868532f613SOng Boon Leong 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
34878532f613SOng Boon Leong 		int_name = priv->int_name_sfty_ue;
34888532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
34898532f613SOng Boon Leong 		ret = request_irq(priv->sfty_ue_irq,
34908532f613SOng Boon Leong 				  stmmac_safety_interrupt,
34918532f613SOng Boon Leong 				  0, int_name, dev);
34928532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
34938532f613SOng Boon Leong 			netdev_err(priv->dev,
34948532f613SOng Boon Leong 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
34958532f613SOng Boon Leong 				   __func__, priv->sfty_ue_irq, ret);
34968532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_SFTY_UE;
34978532f613SOng Boon Leong 			goto irq_error;
34988532f613SOng Boon Leong 		}
34998532f613SOng Boon Leong 	}
35008532f613SOng Boon Leong 
35018532f613SOng Boon Leong 	/* Request Rx MSI irq */
35028532f613SOng Boon Leong 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3503d68c2e1dSArnd Bergmann 		if (i >= MTL_MAX_RX_QUEUES)
35043e0d5699SArnd Bergmann 			break;
35058532f613SOng Boon Leong 		if (priv->rx_irq[i] == 0)
35068532f613SOng Boon Leong 			continue;
35078532f613SOng Boon Leong 
35088532f613SOng Boon Leong 		int_name = priv->int_name_rx_irq[i];
35098532f613SOng Boon Leong 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
35108532f613SOng Boon Leong 		ret = request_irq(priv->rx_irq[i],
35118532f613SOng Boon Leong 				  stmmac_msi_intr_rx,
35128532f613SOng Boon Leong 				  0, int_name, &priv->rx_queue[i]);
35138532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35148532f613SOng Boon Leong 			netdev_err(priv->dev,
35158532f613SOng Boon Leong 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
35168532f613SOng Boon Leong 				   __func__, i, priv->rx_irq[i], ret);
35178532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_RX;
35188532f613SOng Boon Leong 			irq_idx = i;
35198532f613SOng Boon Leong 			goto irq_error;
35208532f613SOng Boon Leong 		}
35218deec94cSOng Boon Leong 		cpumask_clear(&cpu_mask);
35228deec94cSOng Boon Leong 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
35238deec94cSOng Boon Leong 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
35248532f613SOng Boon Leong 	}
35258532f613SOng Boon Leong 
35268532f613SOng Boon Leong 	/* Request Tx MSI irq */
35278532f613SOng Boon Leong 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3528d68c2e1dSArnd Bergmann 		if (i >= MTL_MAX_TX_QUEUES)
35293e0d5699SArnd Bergmann 			break;
35308532f613SOng Boon Leong 		if (priv->tx_irq[i] == 0)
35318532f613SOng Boon Leong 			continue;
35328532f613SOng Boon Leong 
35338532f613SOng Boon Leong 		int_name = priv->int_name_tx_irq[i];
35348532f613SOng Boon Leong 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
35358532f613SOng Boon Leong 		ret = request_irq(priv->tx_irq[i],
35368532f613SOng Boon Leong 				  stmmac_msi_intr_tx,
35378532f613SOng Boon Leong 				  0, int_name, &priv->tx_queue[i]);
35388532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35398532f613SOng Boon Leong 			netdev_err(priv->dev,
35408532f613SOng Boon Leong 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
35418532f613SOng Boon Leong 				   __func__, i, priv->tx_irq[i], ret);
35428532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_TX;
35438532f613SOng Boon Leong 			irq_idx = i;
35448532f613SOng Boon Leong 			goto irq_error;
35458532f613SOng Boon Leong 		}
35468deec94cSOng Boon Leong 		cpumask_clear(&cpu_mask);
35478deec94cSOng Boon Leong 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
35488deec94cSOng Boon Leong 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
35498532f613SOng Boon Leong 	}
35508532f613SOng Boon Leong 
35518532f613SOng Boon Leong 	return 0;
35528532f613SOng Boon Leong 
35538532f613SOng Boon Leong irq_error:
35548532f613SOng Boon Leong 	stmmac_free_irq(dev, irq_err, irq_idx);
35558532f613SOng Boon Leong 	return ret;
35568532f613SOng Boon Leong }
35578532f613SOng Boon Leong 
/* stmmac_request_irq_single - request IRQ lines for the shared-IRQ setup
 * @dev: network device pointer
 *
 * Requests the main device interrupt and, when dedicated lines exist
 * (irq number > 0 and different from dev->irq), the WoL and LPI lines.
 * Returns 0 on success or the negative errno from request_irq(); on
 * failure, stmmac_free_irq() releases every line acquired so far based
 * on the recorded irq_err stage.
 */
static int stmmac_request_irq_single(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	enum request_irq_err irq_err;
	int ret;

	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the LPI IRQ in case another line is used for LPI */
	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			irq_err = REQ_IRQ_ERR_LPI;
			goto irq_error;
		}
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, 0);
	return ret;
}
36088532f613SOng Boon Leong 
36098532f613SOng Boon Leong static int stmmac_request_irq(struct net_device *dev)
36108532f613SOng Boon Leong {
36118532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
36128532f613SOng Boon Leong 	int ret;
36138532f613SOng Boon Leong 
36148532f613SOng Boon Leong 	/* Request the IRQ lines */
36158532f613SOng Boon Leong 	if (priv->plat->multi_msi_en)
36168532f613SOng Boon Leong 		ret = stmmac_request_irq_multi_msi(dev);
36178532f613SOng Boon Leong 	else
36188532f613SOng Boon Leong 		ret = stmmac_request_irq_single(dev);
36198532f613SOng Boon Leong 
36208532f613SOng Boon Leong 	return ret;
36218532f613SOng Boon Leong }
36228532f613SOng Boon Leong 
/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.  It powers up
 *  the device, attaches the PHY (unless the PCS/XPCS handles the link),
 *  sizes and allocates the DMA rings, programs the hardware, starts
 *  phylink and finally requests the IRQ lines before enabling the
 *  queues.  The error path unwinds those steps in reverse order.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int mode = priv->plat->phy_interface;
	int bfsize = 0;
	u32 chan;
	int ret;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		/* Drop the usage count taken by get_sync even on failure */
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	/* Attach the PHY only when the link is not fully managed by the
	 * (X)PCS: TBI/RTBI PCS modes and XPCS clause-73 autoneg skip it.
	 */
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI &&
	    (!priv->hw->xpcs ||
	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			goto init_phy_error;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	/* Pick the RX buffer size from the MTU; fall back to the generic
	 * bfsize helper when the 16KiB path does not apply.
	 */
	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	/* Fall back to default ring sizes if not configured yet */
	if (!priv->dma_tx_size)
		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
	if (!priv->dma_rx_size)
		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;

	/* Earlier check for TBS */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;

		/* Setup per-TXQ tbs flag before TX descriptor alloc */
		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
	}

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);
	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);

	return 0;

irq_error:
	phylink_stop(priv->phylink);

	/* Cancel any coalescing timers armed since stmmac_init_coalesce() */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	phylink_disconnect_phy(priv->phylink);
init_phy_error:
	pm_runtime_put(priv->device);
	return ret;
}
37397ac6653aSJeff Kirsher 
37405a558611SOng Boon Leong static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
37415a558611SOng Boon Leong {
37425a558611SOng Boon Leong 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
37435a558611SOng Boon Leong 
37445a558611SOng Boon Leong 	if (priv->fpe_wq)
37455a558611SOng Boon Leong 		destroy_workqueue(priv->fpe_wq);
37465a558611SOng Boon Leong 
37475a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue stop");
37485a558611SOng Boon Leong }
37495a558611SOng Boon Leong 
/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.  It undoes stmmac_open()
 *  in reverse: stops the TX queues and the PHY, quiesces NAPI/timers,
 *  frees the IRQ lines, halts the DMA and releases the ring resources
 *  before disabling the MAC and dropping the runtime-PM reference.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	netif_tx_disable(dev);

	/* Undo the phylink_speed_up() done at open when WoL is armed */
	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-queue TX coalescing timers */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop EEE software timekeeping if it was active */
	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	pm_runtime_put(priv->device);

	/* Tear down the FPE workqueue when the HW supports preemption */
	if (priv->dma_cap.fpesel)
		stmmac_fpe_stop_wq(priv);

	return 0;
}
38027ac6653aSJeff Kirsher 
380330d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
380430d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
380530d93227SJose Abreu {
380630d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
380730d93227SJose Abreu 	u32 inner_type = 0x0;
380830d93227SJose Abreu 	struct dma_desc *p;
380930d93227SJose Abreu 
381030d93227SJose Abreu 	if (!priv->dma_cap.vlins)
381130d93227SJose Abreu 		return false;
381230d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
381330d93227SJose Abreu 		return false;
381430d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
381530d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
381630d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
381730d93227SJose Abreu 	}
381830d93227SJose Abreu 
381930d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
382030d93227SJose Abreu 
3821579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3822579a25a8SJose Abreu 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3823579a25a8SJose Abreu 	else
3824579a25a8SJose Abreu 		p = &tx_q->dma_tx[tx_q->cur_tx];
3825579a25a8SJose Abreu 
382630d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
382730d93227SJose Abreu 		return false;
382830d93227SJose Abreu 
382930d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
3830aa042f60SSong, Yoong Siang 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
383130d93227SJose Abreu 	return true;
383230d93227SJose Abreu }
383330d93227SJose Abreu 
/**
 *  stmmac_tso_allocator - fill TX descriptors for a TSO payload chunk
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills descriptor and request new descriptors according to
 *  buffer length to fill.  Each descriptor covers at most
 *  TSO_MAX_BUFF_SIZE bytes; the last-segment flag is only set on the
 *  descriptor that carries the final chunk.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		/* TBS rings embed the basic descriptor in an enhanced one */
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		/* Older IPs take the 32-bit address directly in des0 */
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
				0, 1,
				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
				0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
3884f748be53SAlexandre TORGUE 
3885d96febedSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3886d96febedSOng Boon Leong {
3887d96febedSOng Boon Leong 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3888d96febedSOng Boon Leong 	int desc_size;
3889d96febedSOng Boon Leong 
3890d96febedSOng Boon Leong 	if (likely(priv->extend_desc))
3891d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_extended_desc);
3892d96febedSOng Boon Leong 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3893d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_edesc);
3894d96febedSOng Boon Leong 	else
3895d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_desc);
3896d96febedSOng Boon Leong 
3897d96febedSOng Boon Leong 	/* The own bit must be the latest setting done when prepare the
3898d96febedSOng Boon Leong 	 * descriptor and then barrier is needed to make sure that
3899d96febedSOng Boon Leong 	 * all is coherent before granting the DMA engine.
3900d96febedSOng Boon Leong 	 */
3901d96febedSOng Boon Leong 	wmb();
3902d96febedSOng Boon Leong 
3903d96febedSOng Boon Leong 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3904d96febedSOng Boon Leong 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3905d96febedSOng Boon Leong }
3906d96febedSOng Boon Leong 
3907f748be53SAlexandre TORGUE /**
3908f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3909f748be53SAlexandre TORGUE  *  @skb : the socket buffer
3910f748be53SAlexandre TORGUE  *  @dev : device pointer
3911f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
3912f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
3913f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
3914f748be53SAlexandre TORGUE  *
3915f748be53SAlexandre TORGUE  *  First Descriptor
3916f748be53SAlexandre TORGUE  *   --------
3917f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
3918f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
3919f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
3920f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3921f748be53SAlexandre TORGUE  *   --------
3922f748be53SAlexandre TORGUE  *	|
3923f748be53SAlexandre TORGUE  *     ...
3924f748be53SAlexandre TORGUE  *	|
3925f748be53SAlexandre TORGUE  *   --------
3926f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3927f748be53SAlexandre TORGUE  *   | DES1 | --|
3928f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
3929f748be53SAlexandre TORGUE  *   | DES3 |
3930f748be53SAlexandre TORGUE  *   --------
3931f748be53SAlexandre TORGUE  *
3932f748be53SAlexandre TORGUE  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
3933f748be53SAlexandre TORGUE  */
3934f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3935f748be53SAlexandre TORGUE {
3936ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
3937f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
3938f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
3939ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
3940c2837423SJose Abreu 	unsigned int first_entry, tx_packets;
3941d96febedSOng Boon Leong 	int tmp_pay_len = 0, first_tx;
3942ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3943c2837423SJose Abreu 	bool has_vlan, set_ic;
3944579a25a8SJose Abreu 	u8 proto_hdr_len, hdr;
3945ce736788SJoao Pinto 	u32 pay_len, mss;
3946a993db88SJose Abreu 	dma_addr_t des;
3947f748be53SAlexandre TORGUE 	int i;
3948f748be53SAlexandre TORGUE 
3949ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3950c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
3951ce736788SJoao Pinto 
3952f748be53SAlexandre TORGUE 	/* Compute header lengths */
3953b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3954b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3955b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
3956b7766206SJose Abreu 	} else {
3957f748be53SAlexandre TORGUE 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3958b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
3959b7766206SJose Abreu 	}
3960f748be53SAlexandre TORGUE 
3961f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
3962ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
3963f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3964c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3965c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3966c22a3f48SJoao Pinto 								queue));
3967f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
396838ddc59dSLABBE Corentin 			netdev_err(priv->dev,
396938ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
397038ddc59dSLABBE Corentin 				   __func__);
3971f748be53SAlexandre TORGUE 		}
3972f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
3973f748be53SAlexandre TORGUE 	}
3974f748be53SAlexandre TORGUE 
3975f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3976f748be53SAlexandre TORGUE 
3977f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
3978f748be53SAlexandre TORGUE 
3979f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
39808d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
3981579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3982579a25a8SJose Abreu 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3983579a25a8SJose Abreu 		else
3984579a25a8SJose Abreu 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3985579a25a8SJose Abreu 
398642de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
39878d212a9eSNiklas Cassel 		tx_q->mss = mss;
3988aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3989aa042f60SSong, Yoong Siang 						priv->dma_tx_size);
3990b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3991f748be53SAlexandre TORGUE 	}
3992f748be53SAlexandre TORGUE 
3993f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
3994b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3995b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
3996f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3997f748be53SAlexandre TORGUE 			skb->data_len);
3998f748be53SAlexandre TORGUE 	}
3999f748be53SAlexandre TORGUE 
400030d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
400130d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
400230d93227SJose Abreu 
4003ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
4004b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4005f748be53SAlexandre TORGUE 
4006579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4007579a25a8SJose Abreu 		desc = &tx_q->dma_entx[first_entry].basic;
4008579a25a8SJose Abreu 	else
4009579a25a8SJose Abreu 		desc = &tx_q->dma_tx[first_entry];
4010f748be53SAlexandre TORGUE 	first = desc;
4011f748be53SAlexandre TORGUE 
401230d93227SJose Abreu 	if (has_vlan)
401330d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
401430d93227SJose Abreu 
4015f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
4016f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4017f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
4018f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
4019f748be53SAlexandre TORGUE 		goto dma_map_err;
4020f748be53SAlexandre TORGUE 
4021ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4022ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4023be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4024be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4025f748be53SAlexandre TORGUE 
4026a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
4027f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
4028f748be53SAlexandre TORGUE 
4029f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
4030f748be53SAlexandre TORGUE 		if (pay_len)
4031f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4032f748be53SAlexandre TORGUE 
4033f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
4034f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4035a993db88SJose Abreu 	} else {
4036a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
4037a993db88SJose Abreu 		tmp_pay_len = pay_len;
403834c15202Syuqi jin 		des += proto_hdr_len;
4039b2f07199SJose Abreu 		pay_len = 0;
4040a993db88SJose Abreu 	}
4041f748be53SAlexandre TORGUE 
4042ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4043f748be53SAlexandre TORGUE 
4044f748be53SAlexandre TORGUE 	/* Prepare fragments */
4045f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
4046f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4047f748be53SAlexandre TORGUE 
4048f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
4049f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
4050f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
4051937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
4052937071c1SThierry Reding 			goto dma_map_err;
4053f748be53SAlexandre TORGUE 
4054f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4055ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
4056f748be53SAlexandre TORGUE 
4057ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4058ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4059ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4060be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4061f748be53SAlexandre TORGUE 	}
4062f748be53SAlexandre TORGUE 
4063ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4064f748be53SAlexandre TORGUE 
406505cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
406605cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4067be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
406805cf0d1bSNiklas Cassel 
40697df4a3a7SJose Abreu 	/* Manage tx mitigation */
4070c2837423SJose Abreu 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4071c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
4072c2837423SJose Abreu 
4073c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4074c2837423SJose Abreu 		set_ic = true;
4075db2f2842SOng Boon Leong 	else if (!priv->tx_coal_frames[queue])
4076c2837423SJose Abreu 		set_ic = false;
4077db2f2842SOng Boon Leong 	else if (tx_packets > priv->tx_coal_frames[queue])
4078c2837423SJose Abreu 		set_ic = true;
4079db2f2842SOng Boon Leong 	else if ((tx_q->tx_count_frames %
4080db2f2842SOng Boon Leong 		  priv->tx_coal_frames[queue]) < tx_packets)
4081c2837423SJose Abreu 		set_ic = true;
4082c2837423SJose Abreu 	else
4083c2837423SJose Abreu 		set_ic = false;
4084c2837423SJose Abreu 
4085c2837423SJose Abreu 	if (set_ic) {
4086579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4087579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4088579a25a8SJose Abreu 		else
40897df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4090579a25a8SJose Abreu 
40917df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
40927df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
40937df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
40947df4a3a7SJose Abreu 	}
40957df4a3a7SJose Abreu 
409605cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
409705cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
409805cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
409905cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
410005cf0d1bSNiklas Cassel 	 */
4101aa042f60SSong, Yoong Siang 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4102f748be53SAlexandre TORGUE 
4103ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4104b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
410538ddc59dSLABBE Corentin 			  __func__);
4106c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4107f748be53SAlexandre TORGUE 	}
4108f748be53SAlexandre TORGUE 
4109f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
4110f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
4111f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
4112f748be53SAlexandre TORGUE 
41138000ddc0SJose Abreu 	if (priv->sarc_type)
41148000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
41158000ddc0SJose Abreu 
4116f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
4117f748be53SAlexandre TORGUE 
4118f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4119f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
4120f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
4121f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
412242de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
4123f748be53SAlexandre TORGUE 	}
4124f748be53SAlexandre TORGUE 
4125f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
412642de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4127f748be53SAlexandre TORGUE 			proto_hdr_len,
4128f748be53SAlexandre TORGUE 			pay_len,
4129ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4130b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
4131f748be53SAlexandre TORGUE 
4132f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
413315d2ee42SNiklas Cassel 	if (mss_desc) {
413415d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
413515d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
413615d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
413715d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
413815d2ee42SNiklas Cassel 		 */
413915d2ee42SNiklas Cassel 		dma_wmb();
414042de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
414115d2ee42SNiklas Cassel 	}
4142f748be53SAlexandre TORGUE 
4143f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
4144f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4145ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4146ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
4147f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
4148f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
4149f748be53SAlexandre TORGUE 	}
4150f748be53SAlexandre TORGUE 
4151c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4152f748be53SAlexandre TORGUE 
4153d96febedSOng Boon Leong 	stmmac_flush_tx_descriptors(priv, queue);
41544772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
4155f748be53SAlexandre TORGUE 
4156f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4157f748be53SAlexandre TORGUE 
4158f748be53SAlexandre TORGUE dma_map_err:
4159f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
4160f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
4161f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
4162f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4163f748be53SAlexandre TORGUE }
4164f748be53SAlexandre TORGUE 
4165f748be53SAlexandre TORGUE /**
4166732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
41677ac6653aSJeff Kirsher  *  @skb : the socket buffer
41687ac6653aSJeff Kirsher  *  @dev : device pointer
416932ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
417032ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
417132ceabcaSGiuseppe CAVALLARO  *  and SG feature.
41727ac6653aSJeff Kirsher  */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int first_entry, tx_packets, enh_desc;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int gso = skb_shinfo(skb)->gso_type;
	struct dma_edesc *tbs_desc = NULL;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	int entry, first_tx;
	dma_addr_t des;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Leave software-timed EEE (LPI) before touching the TX path */
	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
		/* UDP GSO is only taken through the TSO path on GMAC4 */
		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
			return stmmac_tso_xmit(skb, dev);
	}

	/* Not enough free descriptors for head + all fragments: stop the
	 * queue and ask the stack to requeue this skb.
	 */
	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	/* Select the descriptor flavor matching this ring's layout */
	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[entry].basic;
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	/* Map each fragment and program one descriptor per fragment; the
	 * OWN bit for the first descriptor is deliberately set last, below.
	 */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		/* Bookkeeping used later to unmap this buffer */
		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;
	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_packets = (entry + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	/* Decide whether this frame requests a TX-complete interrupt:
	 * always for HW-timestamped frames, otherwise only when the
	 * per-queue frame coalescing threshold is crossed.
	 */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (likely(priv->extend_desc))
			desc = &tx_q->dma_etx[entry].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = &tx_q->dma_tx[entry];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	/* Stop the queue while a worst-case frame might not fit anymore */
	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	/* Source address replacement/insertion (SARC), if configured */
	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;
		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 0, last_segment,
				skb->len);
	}

	/* Time-based scheduling: program the launch time into the first
	 * (enhanced) descriptor.
	 */
	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		tbs_desc = &tx_q->dma_entx[first_entry];
		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
	}

	/* Hand the whole chain to the hardware only now */
	stmmac_set_tx_owner(priv, first);

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	/* Drop the frame but return OK so the stack does not retry it */
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
44077ac6653aSJeff Kirsher 
4408b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4409b9381985SVince Bridgers {
4410ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
4411ab188e8fSElad Nachman 	__be16 vlan_proto;
4412b9381985SVince Bridgers 	u16 vlanid;
4413b9381985SVince Bridgers 
4414ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
4415ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
4416ab188e8fSElad Nachman 
4417ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4418ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4419ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
4420ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4421b9381985SVince Bridgers 		/* pop the vlan tag */
4422ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
4423ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4424b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
4425ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4426b9381985SVince Bridgers 	}
4427b9381985SVince Bridgers }
4428b9381985SVince Bridgers 
/**
 * stmmac_rx_refill - refill used RX ring buffers from the page pool
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the RX ring buffers for the
 * zero-copy reception process.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* Hardware limited to <= 32-bit DMA addressing: allocate low pages */
	if (priv->dma_cap.addr64 <= 32)
		gfp |= GFP_DMA32;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* Allocate only if the slot lost its page; on failure stop
		 * here and let a later refill pass retry.
		 */
		if (!buf->page) {
			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->page)
				break;
		}

		/* Split-header mode needs a second buffer for the payload */
		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		/* NOTE(review): rx_count_frames is incremented AND bumped by
		 * the full per-queue coalesce count on every iteration, so
		 * the reset below fires almost every time - confirm this is
		 * the intended interrupt-coalescing accounting.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Arm the per-descriptor RX watchdog only when the timer
		 * based coalescing (RIWT) is in use.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* All descriptor fields must be visible before OWN is set */
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (rx_q->dirty_rx * sizeof(struct dma_desc));
	/* Move the ring tail pointer so the DMA sees the refilled entries */
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
44997ac6653aSJeff Kirsher 
450088ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
450188ebe2cfSJose Abreu 				       struct dma_desc *p,
450288ebe2cfSJose Abreu 				       int status, unsigned int len)
450388ebe2cfSJose Abreu {
450488ebe2cfSJose Abreu 	unsigned int plen = 0, hlen = 0;
450531f2760eSLuo Jiaxing 	int coe = priv->hw->rx_csum;
450688ebe2cfSJose Abreu 
450788ebe2cfSJose Abreu 	/* Not first descriptor, buffer is always zero */
450888ebe2cfSJose Abreu 	if (priv->sph && len)
450988ebe2cfSJose Abreu 		return 0;
451088ebe2cfSJose Abreu 
451188ebe2cfSJose Abreu 	/* First descriptor, get split header length */
451231f2760eSLuo Jiaxing 	stmmac_get_rx_header_len(priv, p, &hlen);
451388ebe2cfSJose Abreu 	if (priv->sph && hlen) {
451488ebe2cfSJose Abreu 		priv->xstats.rx_split_hdr_pkt_n++;
451588ebe2cfSJose Abreu 		return hlen;
451688ebe2cfSJose Abreu 	}
451788ebe2cfSJose Abreu 
451888ebe2cfSJose Abreu 	/* First descriptor, not last descriptor and not split header */
451988ebe2cfSJose Abreu 	if (status & rx_not_ls)
452088ebe2cfSJose Abreu 		return priv->dma_buf_sz;
452188ebe2cfSJose Abreu 
452288ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
452388ebe2cfSJose Abreu 
452488ebe2cfSJose Abreu 	/* First descriptor and last descriptor and not split header */
452588ebe2cfSJose Abreu 	return min_t(unsigned int, priv->dma_buf_sz, plen);
452688ebe2cfSJose Abreu }
452788ebe2cfSJose Abreu 
452888ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
452988ebe2cfSJose Abreu 				       struct dma_desc *p,
453088ebe2cfSJose Abreu 				       int status, unsigned int len)
453188ebe2cfSJose Abreu {
453288ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
453388ebe2cfSJose Abreu 	unsigned int plen = 0;
453488ebe2cfSJose Abreu 
453588ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
453688ebe2cfSJose Abreu 	if (!priv->sph)
453788ebe2cfSJose Abreu 		return 0;
453888ebe2cfSJose Abreu 
453988ebe2cfSJose Abreu 	/* Not last descriptor */
454088ebe2cfSJose Abreu 	if (status & rx_not_ls)
454188ebe2cfSJose Abreu 		return priv->dma_buf_sz;
454288ebe2cfSJose Abreu 
454388ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
454488ebe2cfSJose Abreu 
454588ebe2cfSJose Abreu 	/* Last descriptor */
454688ebe2cfSJose Abreu 	return plen - len;
454788ebe2cfSJose Abreu }
454888ebe2cfSJose Abreu 
/* Queue one XDP frame on TX queue @queue.
 *
 * @dma_map: true when called for ndo_xdp_xmit (foreign frame memory that
 *           must be DMA-mapped here); false for XDP_TX, where the data
 *           lives in a page-pool page that is already mapped and only
 *           needs a device sync.
 *
 * Returns STMMAC_XDP_TX on success, or STMMAC_XDP_CONSUMED when the ring
 * is below the free-descriptor threshold or DMA mapping fails (the caller
 * then drops the frame). The caller in this file serializes against the
 * slow (skb) TX path with __netif_tx_lock().
 */
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	/* Keep a descriptor reserve so the skb path cannot be starved */
	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	/* Pick the descriptor flavor matching this ring's layout */
	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		/* Page-pool page: payload sits past the xdp_frame struct
		 * and the headroom; sync that region for the device.
		 */
		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	/* Bookkeeping consumed later when the descriptor is reclaimed */
	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	/* Single-buffer frame: first and last segment, OWN bit set */
	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       true, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	/* Request a TX-complete interrupt every tx_coal_frames frames.
	 * NOTE(review): this modulo assumes tx_coal_frames[queue] != 0 on
	 * this path - verify the coalesce setting cannot be zero here.
	 */
	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		priv->xstats.tx_set_ic_bit++;
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	/* Advance cur_tx past the descriptor just handed to the DMA */
	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
4620be8b38a7SOng Boon Leong 
4621be8b38a7SOng Boon Leong static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4622be8b38a7SOng Boon Leong 				   int cpu)
4623be8b38a7SOng Boon Leong {
4624be8b38a7SOng Boon Leong 	int index = cpu;
4625be8b38a7SOng Boon Leong 
4626be8b38a7SOng Boon Leong 	if (unlikely(index < 0))
4627be8b38a7SOng Boon Leong 		index = 0;
4628be8b38a7SOng Boon Leong 
4629be8b38a7SOng Boon Leong 	while (index >= priv->plat->tx_queues_to_use)
4630be8b38a7SOng Boon Leong 		index -= priv->plat->tx_queues_to_use;
4631be8b38a7SOng Boon Leong 
4632be8b38a7SOng Boon Leong 	return index;
4633be8b38a7SOng Boon Leong }
4634be8b38a7SOng Boon Leong 
/* Handle an XDP_TX verdict: convert the xdp_buff into an xdp_frame and
 * transmit it on the TX queue selected for this CPU. Returns the stmmac
 * XDP verdict (STMMAC_XDP_TX on success, STMMAC_XDP_CONSUMED otherwise).
 */
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	/* Conversion can fail; drop the buffer in that case */
	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	/* Serialize with the regular (skb) TX path on this queue */
	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	/* dma_map=false: data is in an already-mapped page-pool page */
	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv, queue);

	__netif_tx_unlock(nq);

	return res;
}
4662be8b38a7SOng Boon Leong 
/* Run the BPF program @prog on @xdp and translate its verdict into the
 * driver's verdict codes: STMMAC_XDP_PASS / STMMAC_XDP_TX /
 * STMMAC_XDP_REDIRECT / STMMAC_XDP_CONSUMED (drop).
 */
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
				 struct bpf_prog *prog,
				 struct xdp_buff *xdp)
{
	u32 act;
	int res;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		/* Hand the buffer up to the normal network stack */
		res = STMMAC_XDP_PASS;
		break;
	case XDP_TX:
		/* Bounce the frame back out of this interface */
		res = stmmac_xdp_xmit_back(priv, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
			res = STMMAC_XDP_CONSUMED;
		else
			res = STMMAC_XDP_REDIRECT;
		break;
	default:
		/* Unknown verdicts are warned about, then treated like
		 * XDP_ABORTED (traced) and finally dropped.
		 */
		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		res = STMMAC_XDP_CONSUMED;
		break;
	}

	return res;
}
4697bba71cacSOng Boon Leong 
4698bba71cacSOng Boon Leong static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4699bba71cacSOng Boon Leong 					   struct xdp_buff *xdp)
4700bba71cacSOng Boon Leong {
4701bba71cacSOng Boon Leong 	struct bpf_prog *prog;
4702bba71cacSOng Boon Leong 	int res;
4703bba71cacSOng Boon Leong 
4704bba71cacSOng Boon Leong 	prog = READ_ONCE(priv->xdp_prog);
4705bba71cacSOng Boon Leong 	if (!prog) {
4706bba71cacSOng Boon Leong 		res = STMMAC_XDP_PASS;
47072f1e432dSToke Høiland-Jørgensen 		goto out;
4708bba71cacSOng Boon Leong 	}
4709bba71cacSOng Boon Leong 
4710bba71cacSOng Boon Leong 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
47112f1e432dSToke Høiland-Jørgensen out:
47125fabb012SOng Boon Leong 	return ERR_PTR(-res);
47135fabb012SOng Boon Leong }
47145fabb012SOng Boon Leong 
4715be8b38a7SOng Boon Leong static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4716be8b38a7SOng Boon Leong 				   int xdp_status)
4717be8b38a7SOng Boon Leong {
4718be8b38a7SOng Boon Leong 	int cpu = smp_processor_id();
4719be8b38a7SOng Boon Leong 	int queue;
4720be8b38a7SOng Boon Leong 
4721be8b38a7SOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4722be8b38a7SOng Boon Leong 
4723be8b38a7SOng Boon Leong 	if (xdp_status & STMMAC_XDP_TX)
4724be8b38a7SOng Boon Leong 		stmmac_tx_timer_arm(priv, queue);
47258b278a5bSOng Boon Leong 
47268b278a5bSOng Boon Leong 	if (xdp_status & STMMAC_XDP_REDIRECT)
47278b278a5bSOng Boon Leong 		xdp_do_flush();
4728be8b38a7SOng Boon Leong }
4729be8b38a7SOng Boon Leong 
4730bba2556eSOng Boon Leong static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4731bba2556eSOng Boon Leong 					       struct xdp_buff *xdp)
4732bba2556eSOng Boon Leong {
4733bba2556eSOng Boon Leong 	unsigned int metasize = xdp->data - xdp->data_meta;
4734bba2556eSOng Boon Leong 	unsigned int datasize = xdp->data_end - xdp->data;
4735bba2556eSOng Boon Leong 	struct sk_buff *skb;
4736bba2556eSOng Boon Leong 
4737132c32eeSOng Boon Leong 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4738bba2556eSOng Boon Leong 			       xdp->data_end - xdp->data_hard_start,
4739bba2556eSOng Boon Leong 			       GFP_ATOMIC | __GFP_NOWARN);
4740bba2556eSOng Boon Leong 	if (unlikely(!skb))
4741bba2556eSOng Boon Leong 		return NULL;
4742bba2556eSOng Boon Leong 
4743bba2556eSOng Boon Leong 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4744bba2556eSOng Boon Leong 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4745bba2556eSOng Boon Leong 	if (metasize)
4746bba2556eSOng Boon Leong 		skb_metadata_set(skb, metasize);
4747bba2556eSOng Boon Leong 
4748bba2556eSOng Boon Leong 	return skb;
4749bba2556eSOng Boon Leong }
4750bba2556eSOng Boon Leong 
4751bba2556eSOng Boon Leong static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4752bba2556eSOng Boon Leong 				   struct dma_desc *p, struct dma_desc *np,
4753bba2556eSOng Boon Leong 				   struct xdp_buff *xdp)
4754bba2556eSOng Boon Leong {
4755bba2556eSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
4756bba2556eSOng Boon Leong 	unsigned int len = xdp->data_end - xdp->data;
4757bba2556eSOng Boon Leong 	enum pkt_hash_types hash_type;
4758bba2556eSOng Boon Leong 	int coe = priv->hw->rx_csum;
4759bba2556eSOng Boon Leong 	struct sk_buff *skb;
4760bba2556eSOng Boon Leong 	u32 hash;
4761bba2556eSOng Boon Leong 
4762bba2556eSOng Boon Leong 	skb = stmmac_construct_skb_zc(ch, xdp);
4763bba2556eSOng Boon Leong 	if (!skb) {
4764bba2556eSOng Boon Leong 		priv->dev->stats.rx_dropped++;
4765bba2556eSOng Boon Leong 		return;
4766bba2556eSOng Boon Leong 	}
4767bba2556eSOng Boon Leong 
4768bba2556eSOng Boon Leong 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4769bba2556eSOng Boon Leong 	stmmac_rx_vlan(priv->dev, skb);
4770bba2556eSOng Boon Leong 	skb->protocol = eth_type_trans(skb, priv->dev);
4771bba2556eSOng Boon Leong 
4772bba2556eSOng Boon Leong 	if (unlikely(!coe))
4773bba2556eSOng Boon Leong 		skb_checksum_none_assert(skb);
4774bba2556eSOng Boon Leong 	else
4775bba2556eSOng Boon Leong 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4776bba2556eSOng Boon Leong 
4777bba2556eSOng Boon Leong 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4778bba2556eSOng Boon Leong 		skb_set_hash(skb, hash, hash_type);
4779bba2556eSOng Boon Leong 
4780bba2556eSOng Boon Leong 	skb_record_rx_queue(skb, queue);
4781132c32eeSOng Boon Leong 	napi_gro_receive(&ch->rxtx_napi, skb);
4782bba2556eSOng Boon Leong 
4783bba2556eSOng Boon Leong 	priv->dev->stats.rx_packets++;
4784bba2556eSOng Boon Leong 	priv->dev->stats.rx_bytes += len;
4785bba2556eSOng Boon Leong }
4786bba2556eSOng Boon Leong 
4787bba2556eSOng Boon Leong static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4788bba2556eSOng Boon Leong {
4789bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4790bba2556eSOng Boon Leong 	unsigned int entry = rx_q->dirty_rx;
4791bba2556eSOng Boon Leong 	struct dma_desc *rx_desc = NULL;
4792bba2556eSOng Boon Leong 	bool ret = true;
4793bba2556eSOng Boon Leong 
4794bba2556eSOng Boon Leong 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4795bba2556eSOng Boon Leong 
4796bba2556eSOng Boon Leong 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4797bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4798bba2556eSOng Boon Leong 		dma_addr_t dma_addr;
4799bba2556eSOng Boon Leong 		bool use_rx_wd;
4800bba2556eSOng Boon Leong 
4801bba2556eSOng Boon Leong 		if (!buf->xdp) {
4802bba2556eSOng Boon Leong 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4803bba2556eSOng Boon Leong 			if (!buf->xdp) {
4804bba2556eSOng Boon Leong 				ret = false;
4805bba2556eSOng Boon Leong 				break;
4806bba2556eSOng Boon Leong 			}
4807bba2556eSOng Boon Leong 		}
4808bba2556eSOng Boon Leong 
4809bba2556eSOng Boon Leong 		if (priv->extend_desc)
4810bba2556eSOng Boon Leong 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4811bba2556eSOng Boon Leong 		else
4812bba2556eSOng Boon Leong 			rx_desc = rx_q->dma_rx + entry;
4813bba2556eSOng Boon Leong 
4814bba2556eSOng Boon Leong 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4815bba2556eSOng Boon Leong 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4816bba2556eSOng Boon Leong 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4817bba2556eSOng Boon Leong 		stmmac_refill_desc3(priv, rx_q, rx_desc);
4818bba2556eSOng Boon Leong 
4819bba2556eSOng Boon Leong 		rx_q->rx_count_frames++;
4820bba2556eSOng Boon Leong 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4821bba2556eSOng Boon Leong 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4822bba2556eSOng Boon Leong 			rx_q->rx_count_frames = 0;
4823bba2556eSOng Boon Leong 
4824bba2556eSOng Boon Leong 		use_rx_wd = !priv->rx_coal_frames[queue];
4825bba2556eSOng Boon Leong 		use_rx_wd |= rx_q->rx_count_frames > 0;
4826bba2556eSOng Boon Leong 		if (!priv->use_riwt)
4827bba2556eSOng Boon Leong 			use_rx_wd = false;
4828bba2556eSOng Boon Leong 
4829bba2556eSOng Boon Leong 		dma_wmb();
4830bba2556eSOng Boon Leong 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4831bba2556eSOng Boon Leong 
4832bba2556eSOng Boon Leong 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4833bba2556eSOng Boon Leong 	}
4834bba2556eSOng Boon Leong 
4835bba2556eSOng Boon Leong 	if (rx_desc) {
4836bba2556eSOng Boon Leong 		rx_q->dirty_rx = entry;
4837bba2556eSOng Boon Leong 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4838bba2556eSOng Boon Leong 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
4839bba2556eSOng Boon Leong 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4840bba2556eSOng Boon Leong 	}
4841bba2556eSOng Boon Leong 
4842bba2556eSOng Boon Leong 	return ret;
4843bba2556eSOng Boon Leong }
4844bba2556eSOng Boon Leong 
4845bba2556eSOng Boon Leong static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4846bba2556eSOng Boon Leong {
4847bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4848bba2556eSOng Boon Leong 	unsigned int count = 0, error = 0, len = 0;
4849bba2556eSOng Boon Leong 	int dirty = stmmac_rx_dirty(priv, queue);
4850bba2556eSOng Boon Leong 	unsigned int next_entry = rx_q->cur_rx;
4851bba2556eSOng Boon Leong 	unsigned int desc_size;
4852bba2556eSOng Boon Leong 	struct bpf_prog *prog;
4853bba2556eSOng Boon Leong 	bool failure = false;
4854bba2556eSOng Boon Leong 	int xdp_status = 0;
4855bba2556eSOng Boon Leong 	int status = 0;
4856bba2556eSOng Boon Leong 
4857bba2556eSOng Boon Leong 	if (netif_msg_rx_status(priv)) {
4858bba2556eSOng Boon Leong 		void *rx_head;
4859bba2556eSOng Boon Leong 
4860bba2556eSOng Boon Leong 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4861bba2556eSOng Boon Leong 		if (priv->extend_desc) {
4862bba2556eSOng Boon Leong 			rx_head = (void *)rx_q->dma_erx;
4863bba2556eSOng Boon Leong 			desc_size = sizeof(struct dma_extended_desc);
4864bba2556eSOng Boon Leong 		} else {
4865bba2556eSOng Boon Leong 			rx_head = (void *)rx_q->dma_rx;
4866bba2556eSOng Boon Leong 			desc_size = sizeof(struct dma_desc);
4867bba2556eSOng Boon Leong 		}
4868bba2556eSOng Boon Leong 
4869bba2556eSOng Boon Leong 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4870bba2556eSOng Boon Leong 				    rx_q->dma_rx_phy, desc_size);
4871bba2556eSOng Boon Leong 	}
4872bba2556eSOng Boon Leong 	while (count < limit) {
4873bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf;
4874bba2556eSOng Boon Leong 		unsigned int buf1_len = 0;
4875bba2556eSOng Boon Leong 		struct dma_desc *np, *p;
4876bba2556eSOng Boon Leong 		int entry;
4877bba2556eSOng Boon Leong 		int res;
4878bba2556eSOng Boon Leong 
4879bba2556eSOng Boon Leong 		if (!count && rx_q->state_saved) {
4880bba2556eSOng Boon Leong 			error = rx_q->state.error;
4881bba2556eSOng Boon Leong 			len = rx_q->state.len;
4882bba2556eSOng Boon Leong 		} else {
4883bba2556eSOng Boon Leong 			rx_q->state_saved = false;
4884bba2556eSOng Boon Leong 			error = 0;
4885bba2556eSOng Boon Leong 			len = 0;
4886bba2556eSOng Boon Leong 		}
4887bba2556eSOng Boon Leong 
4888bba2556eSOng Boon Leong 		if (count >= limit)
4889bba2556eSOng Boon Leong 			break;
4890bba2556eSOng Boon Leong 
4891bba2556eSOng Boon Leong read_again:
4892bba2556eSOng Boon Leong 		buf1_len = 0;
4893bba2556eSOng Boon Leong 		entry = next_entry;
4894bba2556eSOng Boon Leong 		buf = &rx_q->buf_pool[entry];
4895bba2556eSOng Boon Leong 
4896bba2556eSOng Boon Leong 		if (dirty >= STMMAC_RX_FILL_BATCH) {
4897bba2556eSOng Boon Leong 			failure = failure ||
4898bba2556eSOng Boon Leong 				  !stmmac_rx_refill_zc(priv, queue, dirty);
4899bba2556eSOng Boon Leong 			dirty = 0;
4900bba2556eSOng Boon Leong 		}
4901bba2556eSOng Boon Leong 
4902bba2556eSOng Boon Leong 		if (priv->extend_desc)
4903bba2556eSOng Boon Leong 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4904bba2556eSOng Boon Leong 		else
4905bba2556eSOng Boon Leong 			p = rx_q->dma_rx + entry;
4906bba2556eSOng Boon Leong 
4907bba2556eSOng Boon Leong 		/* read the status of the incoming frame */
4908bba2556eSOng Boon Leong 		status = stmmac_rx_status(priv, &priv->dev->stats,
4909bba2556eSOng Boon Leong 					  &priv->xstats, p);
4910bba2556eSOng Boon Leong 		/* check if managed by the DMA otherwise go ahead */
4911bba2556eSOng Boon Leong 		if (unlikely(status & dma_own))
4912bba2556eSOng Boon Leong 			break;
4913bba2556eSOng Boon Leong 
4914bba2556eSOng Boon Leong 		/* Prefetch the next RX descriptor */
4915bba2556eSOng Boon Leong 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4916bba2556eSOng Boon Leong 						priv->dma_rx_size);
4917bba2556eSOng Boon Leong 		next_entry = rx_q->cur_rx;
4918bba2556eSOng Boon Leong 
4919bba2556eSOng Boon Leong 		if (priv->extend_desc)
4920bba2556eSOng Boon Leong 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4921bba2556eSOng Boon Leong 		else
4922bba2556eSOng Boon Leong 			np = rx_q->dma_rx + next_entry;
4923bba2556eSOng Boon Leong 
4924bba2556eSOng Boon Leong 		prefetch(np);
4925bba2556eSOng Boon Leong 
49262b9fff64SSong Yoong Siang 		/* Ensure a valid XSK buffer before proceed */
49272b9fff64SSong Yoong Siang 		if (!buf->xdp)
49282b9fff64SSong Yoong Siang 			break;
49292b9fff64SSong Yoong Siang 
4930bba2556eSOng Boon Leong 		if (priv->extend_desc)
4931bba2556eSOng Boon Leong 			stmmac_rx_extended_status(priv, &priv->dev->stats,
4932bba2556eSOng Boon Leong 						  &priv->xstats,
4933bba2556eSOng Boon Leong 						  rx_q->dma_erx + entry);
4934bba2556eSOng Boon Leong 		if (unlikely(status == discard_frame)) {
4935bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
4936bba2556eSOng Boon Leong 			buf->xdp = NULL;
4937bba2556eSOng Boon Leong 			dirty++;
4938bba2556eSOng Boon Leong 			error = 1;
4939bba2556eSOng Boon Leong 			if (!priv->hwts_rx_en)
4940bba2556eSOng Boon Leong 				priv->dev->stats.rx_errors++;
4941bba2556eSOng Boon Leong 		}
4942bba2556eSOng Boon Leong 
4943bba2556eSOng Boon Leong 		if (unlikely(error && (status & rx_not_ls)))
4944bba2556eSOng Boon Leong 			goto read_again;
4945bba2556eSOng Boon Leong 		if (unlikely(error)) {
4946bba2556eSOng Boon Leong 			count++;
4947bba2556eSOng Boon Leong 			continue;
4948bba2556eSOng Boon Leong 		}
4949bba2556eSOng Boon Leong 
4950bba2556eSOng Boon Leong 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
4951bba2556eSOng Boon Leong 		if (likely(status & rx_not_ls)) {
4952bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
4953bba2556eSOng Boon Leong 			buf->xdp = NULL;
4954bba2556eSOng Boon Leong 			dirty++;
4955bba2556eSOng Boon Leong 			count++;
4956bba2556eSOng Boon Leong 			goto read_again;
4957bba2556eSOng Boon Leong 		}
4958bba2556eSOng Boon Leong 
4959bba2556eSOng Boon Leong 		/* XDP ZC Frame only support primary buffers for now */
4960bba2556eSOng Boon Leong 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4961bba2556eSOng Boon Leong 		len += buf1_len;
4962bba2556eSOng Boon Leong 
4963bba2556eSOng Boon Leong 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4964bba2556eSOng Boon Leong 		 * Type frames (LLC/LLC-SNAP)
4965bba2556eSOng Boon Leong 		 *
4966bba2556eSOng Boon Leong 		 * llc_snap is never checked in GMAC >= 4, so this ACS
4967bba2556eSOng Boon Leong 		 * feature is always disabled and packets need to be
4968bba2556eSOng Boon Leong 		 * stripped manually.
4969bba2556eSOng Boon Leong 		 */
4970bba2556eSOng Boon Leong 		if (likely(!(status & rx_not_ls)) &&
4971bba2556eSOng Boon Leong 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4972bba2556eSOng Boon Leong 		     unlikely(status != llc_snap))) {
4973bba2556eSOng Boon Leong 			buf1_len -= ETH_FCS_LEN;
4974bba2556eSOng Boon Leong 			len -= ETH_FCS_LEN;
4975bba2556eSOng Boon Leong 		}
4976bba2556eSOng Boon Leong 
4977bba2556eSOng Boon Leong 		/* RX buffer is good and fit into a XSK pool buffer */
4978bba2556eSOng Boon Leong 		buf->xdp->data_end = buf->xdp->data + buf1_len;
4979bba2556eSOng Boon Leong 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
4980bba2556eSOng Boon Leong 
4981bba2556eSOng Boon Leong 		prog = READ_ONCE(priv->xdp_prog);
4982bba2556eSOng Boon Leong 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
4983bba2556eSOng Boon Leong 
4984bba2556eSOng Boon Leong 		switch (res) {
4985bba2556eSOng Boon Leong 		case STMMAC_XDP_PASS:
4986bba2556eSOng Boon Leong 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
4987bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
4988bba2556eSOng Boon Leong 			break;
4989bba2556eSOng Boon Leong 		case STMMAC_XDP_CONSUMED:
4990bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
4991bba2556eSOng Boon Leong 			priv->dev->stats.rx_dropped++;
4992bba2556eSOng Boon Leong 			break;
4993bba2556eSOng Boon Leong 		case STMMAC_XDP_TX:
4994bba2556eSOng Boon Leong 		case STMMAC_XDP_REDIRECT:
4995bba2556eSOng Boon Leong 			xdp_status |= res;
4996bba2556eSOng Boon Leong 			break;
4997bba2556eSOng Boon Leong 		}
4998bba2556eSOng Boon Leong 
4999bba2556eSOng Boon Leong 		buf->xdp = NULL;
5000bba2556eSOng Boon Leong 		dirty++;
5001bba2556eSOng Boon Leong 		count++;
5002bba2556eSOng Boon Leong 	}
5003bba2556eSOng Boon Leong 
5004bba2556eSOng Boon Leong 	if (status & rx_not_ls) {
5005bba2556eSOng Boon Leong 		rx_q->state_saved = true;
5006bba2556eSOng Boon Leong 		rx_q->state.error = error;
5007bba2556eSOng Boon Leong 		rx_q->state.len = len;
5008bba2556eSOng Boon Leong 	}
5009bba2556eSOng Boon Leong 
5010bba2556eSOng Boon Leong 	stmmac_finalize_xdp_rx(priv, xdp_status);
5011bba2556eSOng Boon Leong 
501268e9c5deSVijayakannan Ayyathurai 	priv->xstats.rx_pkt_n += count;
501368e9c5deSVijayakannan Ayyathurai 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
501468e9c5deSVijayakannan Ayyathurai 
5015bba2556eSOng Boon Leong 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5016bba2556eSOng Boon Leong 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5017bba2556eSOng Boon Leong 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5018bba2556eSOng Boon Leong 		else
5019bba2556eSOng Boon Leong 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5020bba2556eSOng Boon Leong 
5021bba2556eSOng Boon Leong 		return (int)count;
5022bba2556eSOng Boon Leong 	}
5023bba2556eSOng Boon Leong 
5024bba2556eSOng Boon Leong 	return failure ? limit : (int)count;
5025bba2556eSOng Boon Leong }
5026bba2556eSOng Boon Leong 
502732ceabcaSGiuseppe CAVALLARO /**
5028732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
502932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
503054139cf3SJoao Pinto  * @limit: napi bugget
503154139cf3SJoao Pinto  * @queue: RX queue index.
503232ceabcaSGiuseppe CAVALLARO  * Description :  this the function called by the napi poll method.
503332ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
503432ceabcaSGiuseppe CAVALLARO  */
503554139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
50367ac6653aSJeff Kirsher {
503754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
50388fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
5039ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
5040ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
504107b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
50425fabb012SOng Boon Leong 	enum dma_data_direction dma_dir;
5043bfaf91caSJoakim Zhang 	unsigned int desc_size;
5044ec222003SJose Abreu 	struct sk_buff *skb = NULL;
50455fabb012SOng Boon Leong 	struct xdp_buff xdp;
5046be8b38a7SOng Boon Leong 	int xdp_status = 0;
50475fabb012SOng Boon Leong 	int buf_sz;
50485fabb012SOng Boon Leong 
50495fabb012SOng Boon Leong 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
50505fabb012SOng Boon Leong 	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
50517ac6653aSJeff Kirsher 
505283d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
5053d0225e7dSAlexandre TORGUE 		void *rx_head;
5054d0225e7dSAlexandre TORGUE 
505538ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5056bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
505754139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
5058bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
5059bfaf91caSJoakim Zhang 		} else {
506054139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
5061bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
5062bfaf91caSJoakim Zhang 		}
5063d0225e7dSAlexandre TORGUE 
5064bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5065bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
50667ac6653aSJeff Kirsher 	}
5067c24602efSGiuseppe CAVALLARO 	while (count < limit) {
506888ebe2cfSJose Abreu 		unsigned int buf1_len = 0, buf2_len = 0;
5069ec222003SJose Abreu 		enum pkt_hash_types hash_type;
50702af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
50712af6106aSJose Abreu 		struct dma_desc *np, *p;
5072ec222003SJose Abreu 		int entry;
5073ec222003SJose Abreu 		u32 hash;
50747ac6653aSJeff Kirsher 
5075ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
5076ec222003SJose Abreu 			skb = rx_q->state.skb;
5077ec222003SJose Abreu 			error = rx_q->state.error;
5078ec222003SJose Abreu 			len = rx_q->state.len;
5079ec222003SJose Abreu 		} else {
5080ec222003SJose Abreu 			rx_q->state_saved = false;
5081ec222003SJose Abreu 			skb = NULL;
5082ec222003SJose Abreu 			error = 0;
5083ec222003SJose Abreu 			len = 0;
5084ec222003SJose Abreu 		}
5085ec222003SJose Abreu 
5086ec222003SJose Abreu 		if (count >= limit)
5087ec222003SJose Abreu 			break;
5088ec222003SJose Abreu 
5089ec222003SJose Abreu read_again:
509088ebe2cfSJose Abreu 		buf1_len = 0;
509188ebe2cfSJose Abreu 		buf2_len = 0;
509207b39753SAaro Koskinen 		entry = next_entry;
50932af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
509407b39753SAaro Koskinen 
5095c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
509654139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5097c24602efSGiuseppe CAVALLARO 		else
509854139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
5099c24602efSGiuseppe CAVALLARO 
5100c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
510142de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
5102c1fa3212SFabrice Gasnier 				&priv->xstats, p);
5103c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
5104c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
51057ac6653aSJeff Kirsher 			break;
51067ac6653aSJeff Kirsher 
5107aa042f60SSong, Yoong Siang 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5108aa042f60SSong, Yoong Siang 						priv->dma_rx_size);
510954139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
5110e3ad57c9SGiuseppe Cavallaro 
5111c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
511254139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5113c24602efSGiuseppe CAVALLARO 		else
511454139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
5115ba1ffd74SGiuseppe CAVALLARO 
5116ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
51177ac6653aSJeff Kirsher 
511842de047dSJose Abreu 		if (priv->extend_desc)
511942de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
512042de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
5121891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
51222af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
51232af6106aSJose Abreu 			buf->page = NULL;
5124ec222003SJose Abreu 			error = 1;
51250b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
51260b273ca4SJose Abreu 				priv->dev->stats.rx_errors++;
5127ec222003SJose Abreu 		}
5128f748be53SAlexandre TORGUE 
5129ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
5130ec222003SJose Abreu 			goto read_again;
5131ec222003SJose Abreu 		if (unlikely(error)) {
5132ec222003SJose Abreu 			dev_kfree_skb(skb);
513388ebe2cfSJose Abreu 			skb = NULL;
5134cda4985aSJose Abreu 			count++;
513507b39753SAaro Koskinen 			continue;
5136e527c4a7SGiuseppe CAVALLARO 		}
5137e527c4a7SGiuseppe CAVALLARO 
5138ec222003SJose Abreu 		/* Buffer is good. Go on. */
5139ec222003SJose Abreu 
51404744bf07SMatteo Croce 		prefetch(page_address(buf->page) + buf->page_offset);
514188ebe2cfSJose Abreu 		if (buf->sec_page)
514288ebe2cfSJose Abreu 			prefetch(page_address(buf->sec_page));
514388ebe2cfSJose Abreu 
514488ebe2cfSJose Abreu 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
514588ebe2cfSJose Abreu 		len += buf1_len;
514688ebe2cfSJose Abreu 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
514788ebe2cfSJose Abreu 		len += buf2_len;
5148ec222003SJose Abreu 
51497ac6653aSJeff Kirsher 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5150ceb69499SGiuseppe CAVALLARO 		 * Type frames (LLC/LLC-SNAP)
5151565020aaSJose Abreu 		 *
5152565020aaSJose Abreu 		 * llc_snap is never checked in GMAC >= 4, so this ACS
5153565020aaSJose Abreu 		 * feature is always disabled and packets need to be
5154565020aaSJose Abreu 		 * stripped manually.
5155ceb69499SGiuseppe CAVALLARO 		 */
515693b5dce4SJose Abreu 		if (likely(!(status & rx_not_ls)) &&
515793b5dce4SJose Abreu 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
515893b5dce4SJose Abreu 		     unlikely(status != llc_snap))) {
51590f296e78SZekun Shen 			if (buf2_len) {
516088ebe2cfSJose Abreu 				buf2_len -= ETH_FCS_LEN;
5161ec222003SJose Abreu 				len -= ETH_FCS_LEN;
51620f296e78SZekun Shen 			} else if (buf1_len) {
51630f296e78SZekun Shen 				buf1_len -= ETH_FCS_LEN;
51640f296e78SZekun Shen 				len -= ETH_FCS_LEN;
51650f296e78SZekun Shen 			}
516683d7af64SGiuseppe CAVALLARO 		}
516722ad3838SGiuseppe Cavallaro 
5168ec222003SJose Abreu 		if (!skb) {
5169be8b38a7SOng Boon Leong 			unsigned int pre_len, sync_len;
5170be8b38a7SOng Boon Leong 
51715fabb012SOng Boon Leong 			dma_sync_single_for_cpu(priv->device, buf->addr,
51725fabb012SOng Boon Leong 						buf1_len, dma_dir);
51735fabb012SOng Boon Leong 
5174d172268fSMatteo Croce 			xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5175d172268fSMatteo Croce 			xdp_prepare_buff(&xdp, page_address(buf->page),
5176d172268fSMatteo Croce 					 buf->page_offset, buf1_len, false);
51775fabb012SOng Boon Leong 
5178be8b38a7SOng Boon Leong 			pre_len = xdp.data_end - xdp.data_hard_start -
5179be8b38a7SOng Boon Leong 				  buf->page_offset;
51805fabb012SOng Boon Leong 			skb = stmmac_xdp_run_prog(priv, &xdp);
5181be8b38a7SOng Boon Leong 			/* Due xdp_adjust_tail: DMA sync for_device
5182be8b38a7SOng Boon Leong 			 * cover max len CPU touch
5183be8b38a7SOng Boon Leong 			 */
5184be8b38a7SOng Boon Leong 			sync_len = xdp.data_end - xdp.data_hard_start -
5185be8b38a7SOng Boon Leong 				   buf->page_offset;
5186be8b38a7SOng Boon Leong 			sync_len = max(sync_len, pre_len);
51875fabb012SOng Boon Leong 
51885fabb012SOng Boon Leong 			/* For Not XDP_PASS verdict */
51895fabb012SOng Boon Leong 			if (IS_ERR(skb)) {
51905fabb012SOng Boon Leong 				unsigned int xdp_res = -PTR_ERR(skb);
51915fabb012SOng Boon Leong 
51925fabb012SOng Boon Leong 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5193be8b38a7SOng Boon Leong 					page_pool_put_page(rx_q->page_pool,
5194be8b38a7SOng Boon Leong 							   virt_to_head_page(xdp.data),
5195be8b38a7SOng Boon Leong 							   sync_len, true);
51965fabb012SOng Boon Leong 					buf->page = NULL;
51975fabb012SOng Boon Leong 					priv->dev->stats.rx_dropped++;
51985fabb012SOng Boon Leong 
51995fabb012SOng Boon Leong 					/* Clear skb as it was set as
52005fabb012SOng Boon Leong 					 * status by XDP program.
52015fabb012SOng Boon Leong 					 */
52025fabb012SOng Boon Leong 					skb = NULL;
52035fabb012SOng Boon Leong 
52045fabb012SOng Boon Leong 					if (unlikely((status & rx_not_ls)))
52055fabb012SOng Boon Leong 						goto read_again;
52065fabb012SOng Boon Leong 
52075fabb012SOng Boon Leong 					count++;
52085fabb012SOng Boon Leong 					continue;
52098b278a5bSOng Boon Leong 				} else if (xdp_res & (STMMAC_XDP_TX |
52108b278a5bSOng Boon Leong 						      STMMAC_XDP_REDIRECT)) {
5211be8b38a7SOng Boon Leong 					xdp_status |= xdp_res;
5212be8b38a7SOng Boon Leong 					buf->page = NULL;
5213be8b38a7SOng Boon Leong 					skb = NULL;
5214be8b38a7SOng Boon Leong 					count++;
5215be8b38a7SOng Boon Leong 					continue;
52165fabb012SOng Boon Leong 				}
52175fabb012SOng Boon Leong 			}
52185fabb012SOng Boon Leong 		}
52195fabb012SOng Boon Leong 
52205fabb012SOng Boon Leong 		if (!skb) {
52215fabb012SOng Boon Leong 			/* XDP program may expand or reduce tail */
52225fabb012SOng Boon Leong 			buf1_len = xdp.data_end - xdp.data;
52235fabb012SOng Boon Leong 
522488ebe2cfSJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5225ec222003SJose Abreu 			if (!skb) {
522622ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
5227cda4985aSJose Abreu 				count++;
522888ebe2cfSJose Abreu 				goto drain_data;
522922ad3838SGiuseppe Cavallaro 			}
523022ad3838SGiuseppe Cavallaro 
52315fabb012SOng Boon Leong 			/* XDP program may adjust header */
52325fabb012SOng Boon Leong 			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
523388ebe2cfSJose Abreu 			skb_put(skb, buf1_len);
523422ad3838SGiuseppe Cavallaro 
5235ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
5236ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5237ec222003SJose Abreu 			buf->page = NULL;
523888ebe2cfSJose Abreu 		} else if (buf1_len) {
5239ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
52405fabb012SOng Boon Leong 						buf1_len, dma_dir);
5241ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
52425fabb012SOng Boon Leong 					buf->page, buf->page_offset, buf1_len,
5243ec222003SJose Abreu 					priv->dma_buf_sz);
5244ec222003SJose Abreu 
5245ec222003SJose Abreu 			/* Data payload appended into SKB */
5246ec222003SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->page);
5247ec222003SJose Abreu 			buf->page = NULL;
52487ac6653aSJeff Kirsher 		}
524983d7af64SGiuseppe CAVALLARO 
525088ebe2cfSJose Abreu 		if (buf2_len) {
525167afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
52525fabb012SOng Boon Leong 						buf2_len, dma_dir);
525367afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
525488ebe2cfSJose Abreu 					buf->sec_page, 0, buf2_len,
525567afd6d1SJose Abreu 					priv->dma_buf_sz);
525667afd6d1SJose Abreu 
525767afd6d1SJose Abreu 			/* Data payload appended into SKB */
525867afd6d1SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
525967afd6d1SJose Abreu 			buf->sec_page = NULL;
526067afd6d1SJose Abreu 		}
526167afd6d1SJose Abreu 
526288ebe2cfSJose Abreu drain_data:
5263ec222003SJose Abreu 		if (likely(status & rx_not_ls))
5264ec222003SJose Abreu 			goto read_again;
526588ebe2cfSJose Abreu 		if (!skb)
526688ebe2cfSJose Abreu 			continue;
5267ec222003SJose Abreu 
5268ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
5269ec222003SJose Abreu 
5270ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5271b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
52727ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
52737ac6653aSJeff Kirsher 
5274ceb69499SGiuseppe CAVALLARO 		if (unlikely(!coe))
52757ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
527662a2ab93SGiuseppe CAVALLARO 		else
52777ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
527862a2ab93SGiuseppe CAVALLARO 
527976067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
528076067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
528176067459SJose Abreu 
528276067459SJose Abreu 		skb_record_rx_queue(skb, queue);
52834ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
528488ebe2cfSJose Abreu 		skb = NULL;
52857ac6653aSJeff Kirsher 
52867ac6653aSJeff Kirsher 		priv->dev->stats.rx_packets++;
5287ec222003SJose Abreu 		priv->dev->stats.rx_bytes += len;
5288cda4985aSJose Abreu 		count++;
52897ac6653aSJeff Kirsher 	}
5290ec222003SJose Abreu 
529188ebe2cfSJose Abreu 	if (status & rx_not_ls || skb) {
5292ec222003SJose Abreu 		rx_q->state_saved = true;
5293ec222003SJose Abreu 		rx_q->state.skb = skb;
5294ec222003SJose Abreu 		rx_q->state.error = error;
5295ec222003SJose Abreu 		rx_q->state.len = len;
52967ac6653aSJeff Kirsher 	}
52977ac6653aSJeff Kirsher 
5298be8b38a7SOng Boon Leong 	stmmac_finalize_xdp_rx(priv, xdp_status);
5299be8b38a7SOng Boon Leong 
530054139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
53017ac6653aSJeff Kirsher 
53027ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
530368e9c5deSVijayakannan Ayyathurai 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
53047ac6653aSJeff Kirsher 
53057ac6653aSJeff Kirsher 	return count;
53067ac6653aSJeff Kirsher }
53077ac6653aSJeff Kirsher 
53084ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
53097ac6653aSJeff Kirsher {
53108fce3331SJose Abreu 	struct stmmac_channel *ch =
53114ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
53128fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
53138fce3331SJose Abreu 	u32 chan = ch->index;
53144ccb4585SJose Abreu 	int work_done;
53157ac6653aSJeff Kirsher 
53169125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
5317ce736788SJoao Pinto 
5318132c32eeSOng Boon Leong 	work_done = stmmac_rx(priv, budget, chan);
5319021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5320021bd5e3SJose Abreu 		unsigned long flags;
5321021bd5e3SJose Abreu 
5322021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5323021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5324021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5325021bd5e3SJose Abreu 	}
5326021bd5e3SJose Abreu 
53274ccb4585SJose Abreu 	return work_done;
53284ccb4585SJose Abreu }
5329ce736788SJoao Pinto 
53304ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
53314ccb4585SJose Abreu {
53324ccb4585SJose Abreu 	struct stmmac_channel *ch =
53334ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
53344ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
53354ccb4585SJose Abreu 	u32 chan = ch->index;
53364ccb4585SJose Abreu 	int work_done;
53374ccb4585SJose Abreu 
53384ccb4585SJose Abreu 	priv->xstats.napi_poll++;
53394ccb4585SJose Abreu 
5340132c32eeSOng Boon Leong 	work_done = stmmac_tx_clean(priv, budget, chan);
5341fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
53428fce3331SJose Abreu 
5343021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5344021bd5e3SJose Abreu 		unsigned long flags;
53454ccb4585SJose Abreu 
5346021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5347021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5348021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5349fa0be0a4SJose Abreu 	}
53508fce3331SJose Abreu 
53517ac6653aSJeff Kirsher 	return work_done;
53527ac6653aSJeff Kirsher }
53537ac6653aSJeff Kirsher 
/* NAPI poll handler for channels where RX and TX share a single NAPI
 * context (zero-copy / AF_XDP path): clean the TX ring and process
 * zero-copy RX in the same poll cycle, then re-enable both IRQs.
 */
static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rxtx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	int rx_done, tx_done, rxtx_done;
	u32 chan = ch->index;

	priv->xstats.napi_poll++;

	/* TX clean may report more than budget; clamp for NAPI accounting */
	tx_done = stmmac_tx_clean(priv, budget, chan);
	tx_done = min(tx_done, budget);

	rx_done = stmmac_rx_zc(priv, budget, chan);

	rxtx_done = max(tx_done, rx_done);

	/* If either TX or RX work is not complete, return budget
	 * and keep polling
	 */
	if (rxtx_done >= budget)
		return budget;

	/* all work done, exit the polling mode */
	if (napi_complete_done(napi, rxtx_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work done are complete,
		 * so enable both RX & TX IRQs.
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* A completed poll must report strictly less than budget,
	 * hence the budget - 1 clamp.
	 */
	return min(rxtx_done, budget - 1);
}
5391132c32eeSOng Boon Leong 
/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: the index of the hanging transmit queue
 *  Description: called by the networking core when a transmit queue has
 *  stalled for too long; trigger the driver's global error handling so
 *  the device gets reset back to a sane state and can transmit again.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	stmmac_global_err(netdev_priv(dev));
}
54077ac6653aSJeff Kirsher 
54087ac6653aSJeff Kirsher /**
540901789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
54107ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
54117ac6653aSJeff Kirsher  *  Description:
54127ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
54137ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
54147ac6653aSJeff Kirsher  *  Return value:
54157ac6653aSJeff Kirsher  *  void.
54167ac6653aSJeff Kirsher  */
541701789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
54187ac6653aSJeff Kirsher {
54197ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
54207ac6653aSJeff Kirsher 
5421c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
54227ac6653aSJeff Kirsher }
54237ac6653aSJeff Kirsher 
54247ac6653aSJeff Kirsher /**
54257ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
54267ac6653aSJeff Kirsher  *  @dev : device pointer.
54277ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
54287ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
54297ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
54307ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
54317ac6653aSJeff Kirsher  *  Return value:
54327ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
54337ac6653aSJeff Kirsher  *  file on failure.
54347ac6653aSJeff Kirsher  */
54357ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
54367ac6653aSJeff Kirsher {
543738ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
5438eaf4fac4SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
54395b55299eSDavid Wu 	const int mtu = new_mtu;
5440eaf4fac4SJose Abreu 
5441eaf4fac4SJose Abreu 	if (txfifosz == 0)
5442eaf4fac4SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
5443eaf4fac4SJose Abreu 
5444eaf4fac4SJose Abreu 	txfifosz /= priv->plat->tx_queues_to_use;
544538ddc59dSLABBE Corentin 
54467ac6653aSJeff Kirsher 	if (netif_running(dev)) {
544738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
54487ac6653aSJeff Kirsher 		return -EBUSY;
54497ac6653aSJeff Kirsher 	}
54507ac6653aSJeff Kirsher 
54515fabb012SOng Boon Leong 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
54525fabb012SOng Boon Leong 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
54535fabb012SOng Boon Leong 		return -EINVAL;
54545fabb012SOng Boon Leong 	}
54555fabb012SOng Boon Leong 
5456eaf4fac4SJose Abreu 	new_mtu = STMMAC_ALIGN(new_mtu);
5457eaf4fac4SJose Abreu 
5458eaf4fac4SJose Abreu 	/* If condition true, FIFO is too small or MTU too large */
5459eaf4fac4SJose Abreu 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5460eaf4fac4SJose Abreu 		return -EINVAL;
5461eaf4fac4SJose Abreu 
54625b55299eSDavid Wu 	dev->mtu = mtu;
5463f748be53SAlexandre TORGUE 
54647ac6653aSJeff Kirsher 	netdev_update_features(dev);
54657ac6653aSJeff Kirsher 
54667ac6653aSJeff Kirsher 	return 0;
54677ac6653aSJeff Kirsher }
54687ac6653aSJeff Kirsher 
5469c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
5470c8f44affSMichał Mirosław 					     netdev_features_t features)
54717ac6653aSJeff Kirsher {
54727ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
54737ac6653aSJeff Kirsher 
547438912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
54757ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
5476d2afb5bdSGiuseppe CAVALLARO 
54777ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
5478a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
54797ac6653aSJeff Kirsher 
54807ac6653aSJeff Kirsher 	/* Some GMAC devices have a bugged Jumbo frame support that
54817ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
54827ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
5483ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and not use SF.
5484ceb69499SGiuseppe CAVALLARO 	 */
54857ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5486a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
54877ac6653aSJeff Kirsher 
5488f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
5489f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5490f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
5491f748be53SAlexandre TORGUE 			priv->tso = true;
5492f748be53SAlexandre TORGUE 		else
5493f748be53SAlexandre TORGUE 			priv->tso = false;
5494f748be53SAlexandre TORGUE 	}
5495f748be53SAlexandre TORGUE 
54967ac6653aSJeff Kirsher 	return features;
54977ac6653aSJeff Kirsher }
54987ac6653aSJeff Kirsher 
5499d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
5500d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
5501d2afb5bdSGiuseppe CAVALLARO {
5502d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
5503d2afb5bdSGiuseppe CAVALLARO 
5504d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
5505d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
5506d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
5507d2afb5bdSGiuseppe CAVALLARO 	else
5508d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
5509d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
5510d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of issue.
5511d2afb5bdSGiuseppe CAVALLARO 	 */
5512c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
5513d2afb5bdSGiuseppe CAVALLARO 
5514f8e7dfd6SVincent Whitchurch 	if (priv->sph_cap) {
5515f8e7dfd6SVincent Whitchurch 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5516f8e7dfd6SVincent Whitchurch 		u32 chan;
55175fabb012SOng Boon Leong 
551867afd6d1SJose Abreu 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
551967afd6d1SJose Abreu 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5520f8e7dfd6SVincent Whitchurch 	}
552167afd6d1SJose Abreu 
5522d2afb5bdSGiuseppe CAVALLARO 	return 0;
5523d2afb5bdSGiuseppe CAVALLARO }
5524d2afb5bdSGiuseppe CAVALLARO 
/* Advance the Frame Preemption (FPE) handshake state machine from an
 * interrupt status word. @status is a bitmask of FPE_EVENT_* flags
 * reported by the MAC; "LP" is the link partner, "Local" is this side.
 * If a handshake step was made, the FPE worker is scheduled to continue.
 */
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	/* Nothing to do when no event fired or the handshake is disabled */
	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
		return;

	/* If LP has sent verify mPacket, LP is FPE capable */
	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
		if (*lp_state < FPE_STATE_CAPABLE)
			*lp_state = FPE_STATE_CAPABLE;

		/* If user has requested FPE enable, quickly response */
		/* NOTE(review): *hs_enable is already guaranteed true by the
		 * early return above; this re-check looks redundant.
		 */
		if (*hs_enable)
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_RESPONSE);
	}

	/* If Local has sent verify mPacket, Local is FPE capable */
	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
		if (*lo_state < FPE_STATE_CAPABLE)
			*lo_state = FPE_STATE_CAPABLE;
	}

	/* If LP has sent response mPacket, LP is entering FPE ON */
	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
		*lp_state = FPE_STATE_ENTERING_ON;

	/* If Local has sent response mPacket, Local is entering FPE ON */
	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
		*lo_state = FPE_STATE_ENTERING_ON;

	/* Schedule the FPE worker unless teardown is in progress or a run
	 * is already pending (__FPE_TASK_SCHED acts as a scheduling latch).
	 */
	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
	    priv->fpe_wq) {
		queue_work(priv->fpe_wq, &priv->fpe_task);
	}
}
55665a558611SOng Boon Leong 
/**
 * stmmac_common_interrupt - handle MAC-level (non-DMA) interrupt sources
 * @priv: driver private structure
 * Description: services the interrupt causes shared by all IRQ schemes
 * (single shared IRQ and per-queue MSI): wake events, EST, FPE, core
 * LPI status, per-queue MTL events, PCS link state and timestamping.
 */
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	/* Report a wakeup event when the IRQ is a wake source */
	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* EST (Enhanced Scheduled Traffic) interrupt, if supported */
	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
				      &priv->xstats, tx_cnt);

	/* Frame Preemption events feed the FPE handshake state machine */
	if (priv->dma_cap.fpesel) {
		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
						   priv->dev);

		stmmac_fpe_event_status(priv, status);
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		/* Ack/collect per-queue MTL interrupt status */
		for (queue = 0; queue < queues_count; queue++) {
			status = stmmac_host_mtl_irq_status(priv, priv->hw,
							    queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(priv->dev);
			else
				netif_carrier_off(priv->dev);
		}

		stmmac_timestamp_interrupt(priv, priv);
	}
}
562029e6573cSOng Boon Leong 
562129e6573cSOng Boon Leong /**
562229e6573cSOng Boon Leong  *  stmmac_interrupt - main ISR
562329e6573cSOng Boon Leong  *  @irq: interrupt number.
562429e6573cSOng Boon Leong  *  @dev_id: to pass the net device pointer.
562529e6573cSOng Boon Leong  *  Description: this is the main driver interrupt service routine.
562629e6573cSOng Boon Leong  *  It can call:
562729e6573cSOng Boon Leong  *  o DMA service routine (to manage incoming frame reception and transmission
562829e6573cSOng Boon Leong  *    status)
562929e6573cSOng Boon Leong  *  o Core interrupts to manage: remote wake-up, management counter, LPI
563029e6573cSOng Boon Leong  *    interrupts.
563129e6573cSOng Boon Leong  */
563229e6573cSOng Boon Leong static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
563329e6573cSOng Boon Leong {
563429e6573cSOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
563529e6573cSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
563629e6573cSOng Boon Leong 
563729e6573cSOng Boon Leong 	/* Check if adapter is up */
563829e6573cSOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
563929e6573cSOng Boon Leong 		return IRQ_HANDLED;
564029e6573cSOng Boon Leong 
564129e6573cSOng Boon Leong 	/* Check if a fatal error happened */
564229e6573cSOng Boon Leong 	if (stmmac_safety_feat_interrupt(priv))
564329e6573cSOng Boon Leong 		return IRQ_HANDLED;
564429e6573cSOng Boon Leong 
564529e6573cSOng Boon Leong 	/* To handle Common interrupts */
564629e6573cSOng Boon Leong 	stmmac_common_interrupt(priv);
5647d765955dSGiuseppe CAVALLARO 
5648d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
56497ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
56507ac6653aSJeff Kirsher 
56517ac6653aSJeff Kirsher 	return IRQ_HANDLED;
56527ac6653aSJeff Kirsher }
56537ac6653aSJeff Kirsher 
56548532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
56558532f613SOng Boon Leong {
56568532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
56578532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
56588532f613SOng Boon Leong 
56598532f613SOng Boon Leong 	if (unlikely(!dev)) {
56608532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
56618532f613SOng Boon Leong 		return IRQ_NONE;
56628532f613SOng Boon Leong 	}
56638532f613SOng Boon Leong 
56648532f613SOng Boon Leong 	/* Check if adapter is up */
56658532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
56668532f613SOng Boon Leong 		return IRQ_HANDLED;
56678532f613SOng Boon Leong 
56688532f613SOng Boon Leong 	/* To handle Common interrupts */
56698532f613SOng Boon Leong 	stmmac_common_interrupt(priv);
56708532f613SOng Boon Leong 
56718532f613SOng Boon Leong 	return IRQ_HANDLED;
56728532f613SOng Boon Leong }
56738532f613SOng Boon Leong 
56748532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
56758532f613SOng Boon Leong {
56768532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
56778532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
56788532f613SOng Boon Leong 
56798532f613SOng Boon Leong 	if (unlikely(!dev)) {
56808532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
56818532f613SOng Boon Leong 		return IRQ_NONE;
56828532f613SOng Boon Leong 	}
56838532f613SOng Boon Leong 
56848532f613SOng Boon Leong 	/* Check if adapter is up */
56858532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
56868532f613SOng Boon Leong 		return IRQ_HANDLED;
56878532f613SOng Boon Leong 
56888532f613SOng Boon Leong 	/* Check if a fatal error happened */
56898532f613SOng Boon Leong 	stmmac_safety_feat_interrupt(priv);
56908532f613SOng Boon Leong 
56918532f613SOng Boon Leong 	return IRQ_HANDLED;
56928532f613SOng Boon Leong }
56938532f613SOng Boon Leong 
56948532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
56958532f613SOng Boon Leong {
56968532f613SOng Boon Leong 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
56978532f613SOng Boon Leong 	int chan = tx_q->queue_index;
56988532f613SOng Boon Leong 	struct stmmac_priv *priv;
56998532f613SOng Boon Leong 	int status;
57008532f613SOng Boon Leong 
57018532f613SOng Boon Leong 	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
57028532f613SOng Boon Leong 
57038532f613SOng Boon Leong 	if (unlikely(!data)) {
57048532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
57058532f613SOng Boon Leong 		return IRQ_NONE;
57068532f613SOng Boon Leong 	}
57078532f613SOng Boon Leong 
57088532f613SOng Boon Leong 	/* Check if adapter is up */
57098532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
57108532f613SOng Boon Leong 		return IRQ_HANDLED;
57118532f613SOng Boon Leong 
57128532f613SOng Boon Leong 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
57138532f613SOng Boon Leong 
57148532f613SOng Boon Leong 	if (unlikely(status & tx_hard_error_bump_tc)) {
57158532f613SOng Boon Leong 		/* Try to bump up the dma threshold on this failure */
57163a6c12a0SXiaoliang Yang 		stmmac_bump_dma_threshold(priv, chan);
57178532f613SOng Boon Leong 	} else if (unlikely(status == tx_hard_error)) {
57188532f613SOng Boon Leong 		stmmac_tx_err(priv, chan);
57198532f613SOng Boon Leong 	}
57208532f613SOng Boon Leong 
57218532f613SOng Boon Leong 	return IRQ_HANDLED;
57228532f613SOng Boon Leong }
57238532f613SOng Boon Leong 
57248532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
57258532f613SOng Boon Leong {
57268532f613SOng Boon Leong 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
57278532f613SOng Boon Leong 	int chan = rx_q->queue_index;
57288532f613SOng Boon Leong 	struct stmmac_priv *priv;
57298532f613SOng Boon Leong 
57308532f613SOng Boon Leong 	priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
57318532f613SOng Boon Leong 
57328532f613SOng Boon Leong 	if (unlikely(!data)) {
57338532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
57348532f613SOng Boon Leong 		return IRQ_NONE;
57358532f613SOng Boon Leong 	}
57368532f613SOng Boon Leong 
57378532f613SOng Boon Leong 	/* Check if adapter is up */
57388532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
57398532f613SOng Boon Leong 		return IRQ_HANDLED;
57408532f613SOng Boon Leong 
57418532f613SOng Boon Leong 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
57428532f613SOng Boon Leong 
57438532f613SOng Boon Leong 	return IRQ_HANDLED;
57448532f613SOng Boon Leong }
57458532f613SOng Boon Leong 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int i;

	/* If adapter is down, do nothing */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	if (!priv->plat->multi_msi_en) {
		/* Single shared IRQ: run the main ISR with the line masked */
		disable_irq(dev->irq);
		stmmac_interrupt(dev->irq, dev);
		enable_irq(dev->irq);
		return;
	}

	/* Per-queue MSI vectors: invoke each RX and TX handler directly */
	for (i = 0; i < priv->plat->rx_queues_to_use; i++)
		stmmac_msi_intr_rx(0, &priv->rx_queue[i]);

	for (i = 0; i < priv->plat->tx_queues_to_use; i++)
		stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
}
#endif
57727ac6653aSJeff Kirsher 
57737ac6653aSJeff Kirsher /**
57747ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
57757ac6653aSJeff Kirsher  *  @dev: Device pointer.
57767ac6653aSJeff Kirsher  *  @rq: An IOCTL specefic structure, that can contain a pointer to
57777ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
57787ac6653aSJeff Kirsher  *  @cmd: IOCTL command
57797ac6653aSJeff Kirsher  *  Description:
578032ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
57817ac6653aSJeff Kirsher  */
57827ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
57837ac6653aSJeff Kirsher {
578474371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv (dev);
5785891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
57867ac6653aSJeff Kirsher 
57877ac6653aSJeff Kirsher 	if (!netif_running(dev))
57887ac6653aSJeff Kirsher 		return -EINVAL;
57897ac6653aSJeff Kirsher 
5790891434b1SRayagond Kokatanur 	switch (cmd) {
5791891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
5792891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
5793891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
579474371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5795891434b1SRayagond Kokatanur 		break;
5796891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
5797d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
5798d6228b7cSArtem Panfilov 		break;
5799d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
5800d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
5801891434b1SRayagond Kokatanur 		break;
5802891434b1SRayagond Kokatanur 	default:
5803891434b1SRayagond Kokatanur 		break;
5804891434b1SRayagond Kokatanur 	}
58057ac6653aSJeff Kirsher 
58067ac6653aSJeff Kirsher 	return ret;
58077ac6653aSJeff Kirsher }
58087ac6653aSJeff Kirsher 
58094dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
58104dbbe8ddSJose Abreu 				    void *cb_priv)
58114dbbe8ddSJose Abreu {
58124dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
58134dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
58144dbbe8ddSJose Abreu 
5815425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5816425eabddSJose Abreu 		return ret;
5817425eabddSJose Abreu 
5818bba2556eSOng Boon Leong 	__stmmac_disable_all_queues(priv);
58194dbbe8ddSJose Abreu 
58204dbbe8ddSJose Abreu 	switch (type) {
58214dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
58224dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
58234dbbe8ddSJose Abreu 		break;
5824425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
5825425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
5826425eabddSJose Abreu 		break;
58274dbbe8ddSJose Abreu 	default:
58284dbbe8ddSJose Abreu 		break;
58294dbbe8ddSJose Abreu 	}
58304dbbe8ddSJose Abreu 
58314dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
58324dbbe8ddSJose Abreu 	return ret;
58334dbbe8ddSJose Abreu }
58344dbbe8ddSJose Abreu 
/* Flow block callbacks registered via flow_block_cb_setup_simple() */
static LIST_HEAD(stmmac_block_cb_list);
5836955bcb6eSPablo Neira Ayuso 
58374dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
58384dbbe8ddSJose Abreu 			   void *type_data)
58394dbbe8ddSJose Abreu {
58404dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
58414dbbe8ddSJose Abreu 
58424dbbe8ddSJose Abreu 	switch (type) {
58434dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
5844955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
5845955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
58464e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
58474e95bc26SPablo Neira Ayuso 						  priv, priv, true);
58481f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
58491f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
5850b60189e0SJose Abreu 	case TC_SETUP_QDISC_TAPRIO:
5851b60189e0SJose Abreu 		return stmmac_tc_setup_taprio(priv, priv, type_data);
5852430b383cSJose Abreu 	case TC_SETUP_QDISC_ETF:
5853430b383cSJose Abreu 		return stmmac_tc_setup_etf(priv, priv, type_data);
58544dbbe8ddSJose Abreu 	default:
58554dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
58564dbbe8ddSJose Abreu 	}
58574dbbe8ddSJose Abreu }
58584dbbe8ddSJose Abreu 
58594993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
58604993e5b3SJose Abreu 			       struct net_device *sb_dev)
58614993e5b3SJose Abreu {
5862b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
5863b7766206SJose Abreu 
5864b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
58654993e5b3SJose Abreu 		/*
5866b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
58674993e5b3SJose Abreu 		 * capable Queues. Let's use always the Queue 0
5868b7766206SJose Abreu 		 * because if TSO/USO is supported then at least this
58694993e5b3SJose Abreu 		 * one will be capable.
58704993e5b3SJose Abreu 		 */
58714993e5b3SJose Abreu 		return 0;
58724993e5b3SJose Abreu 	}
58734993e5b3SJose Abreu 
58744993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
58754993e5b3SJose Abreu }
58764993e5b3SJose Abreu 
5877a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5878a830405eSBhadram Varka {
5879a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
5880a830405eSBhadram Varka 	int ret = 0;
5881a830405eSBhadram Varka 
58824691ffb1SJoakim Zhang 	ret = pm_runtime_get_sync(priv->device);
58834691ffb1SJoakim Zhang 	if (ret < 0) {
58844691ffb1SJoakim Zhang 		pm_runtime_put_noidle(priv->device);
58854691ffb1SJoakim Zhang 		return ret;
58864691ffb1SJoakim Zhang 	}
58874691ffb1SJoakim Zhang 
5888a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
5889a830405eSBhadram Varka 	if (ret)
58904691ffb1SJoakim Zhang 		goto set_mac_error;
5891a830405eSBhadram Varka 
5892c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
5893a830405eSBhadram Varka 
58944691ffb1SJoakim Zhang set_mac_error:
58954691ffb1SJoakim Zhang 	pm_runtime_put(priv->device);
58964691ffb1SJoakim Zhang 
5897a830405eSBhadram Varka 	return ret;
5898a830405eSBhadram Varka }
5899a830405eSBhadram Varka 
590050fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
59017ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
59027ac29055SGiuseppe CAVALLARO 
5903c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
5904bfaf91caSJoakim Zhang 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
59057ac29055SGiuseppe CAVALLARO {
59067ac29055SGiuseppe CAVALLARO 	int i;
5907c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5908c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
5909bfaf91caSJoakim Zhang 	dma_addr_t dma_addr;
59107ac29055SGiuseppe CAVALLARO 
5911c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
5912c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
5913bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*ep);
5914bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5915bfaf91caSJoakim Zhang 				   i, &dma_addr,
5916f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
5917f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
5918f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
5919f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
5920c24602efSGiuseppe CAVALLARO 			ep++;
5921c24602efSGiuseppe CAVALLARO 		} else {
5922bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*p);
5923bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5924bfaf91caSJoakim Zhang 				   i, &dma_addr,
5925f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5926f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5927c24602efSGiuseppe CAVALLARO 			p++;
5928c24602efSGiuseppe CAVALLARO 		}
59297ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
59307ac29055SGiuseppe CAVALLARO 	}
5931c24602efSGiuseppe CAVALLARO }
59327ac29055SGiuseppe CAVALLARO 
/* debugfs "descriptors_status" show callback: dump every RX and TX
 * descriptor ring of the device.
 *
 * TX queues whose descriptors are in TBS (Time-Based Scheduling) format
 * print only their header; that layout is not decoded here.
 */
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* The rings only exist while the interface is up. */
	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
59797ac29055SGiuseppe CAVALLARO 
/* debugfs "dma_cap" show callback: report the DMA HW feature/capability
 * fields discovered at probe time (priv->dma_cap).
 */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* RX checksum capability is reported differently before/after
	 * core 4.00: one combined bit vs. separate type1/type2 bits.
	 */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6079e7434821SGiuseppe CAVALLARO 
6080481a7d15SJiping Ma /* Use network device events to rename debugfs file entries.
6081481a7d15SJiping Ma  */
6082481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused,
6083481a7d15SJiping Ma 			       unsigned long event, void *ptr)
6084481a7d15SJiping Ma {
6085481a7d15SJiping Ma 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6086481a7d15SJiping Ma 	struct stmmac_priv *priv = netdev_priv(dev);
6087481a7d15SJiping Ma 
6088481a7d15SJiping Ma 	if (dev->netdev_ops != &stmmac_netdev_ops)
6089481a7d15SJiping Ma 		goto done;
6090481a7d15SJiping Ma 
6091481a7d15SJiping Ma 	switch (event) {
6092481a7d15SJiping Ma 	case NETDEV_CHANGENAME:
6093481a7d15SJiping Ma 		if (priv->dbgfs_dir)
6094481a7d15SJiping Ma 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6095481a7d15SJiping Ma 							 priv->dbgfs_dir,
6096481a7d15SJiping Ma 							 stmmac_fs_dir,
6097481a7d15SJiping Ma 							 dev->name);
6098481a7d15SJiping Ma 		break;
6099481a7d15SJiping Ma 	}
6100481a7d15SJiping Ma done:
6101481a7d15SJiping Ma 	return NOTIFY_DONE;
6102481a7d15SJiping Ma }
6103481a7d15SJiping Ma 
/* Netdev notifier used to rename the debugfs directory on NETDEV_CHANGENAME. */
static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};
6107481a7d15SJiping Ma 
/* Create the per-netdev debugfs directory with its "descriptors_status"
 * and "dma_cap" entries.
 */
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Hold RTNL so the interface cannot be renamed while the
	 * directory named after it is being created.
	 */
	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}
61277ac29055SGiuseppe CAVALLARO 
/* Remove the per-netdev debugfs directory and everything below it. */
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
613450fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
61357ac29055SGiuseppe CAVALLARO 
61363cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
61373cd1cfcbSJose Abreu {
61383cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
61393cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
61403cd1cfcbSJose Abreu 	u32 crc = ~0x0;
61413cd1cfcbSJose Abreu 	u32 temp = 0;
61423cd1cfcbSJose Abreu 	int i, bits;
61433cd1cfcbSJose Abreu 
61443cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
61453cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
61463cd1cfcbSJose Abreu 		if ((i % 8) == 0)
61473cd1cfcbSJose Abreu 			data_byte = data[i / 8];
61483cd1cfcbSJose Abreu 
61493cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
61503cd1cfcbSJose Abreu 		crc >>= 1;
61513cd1cfcbSJose Abreu 		data_byte >>= 1;
61523cd1cfcbSJose Abreu 
61533cd1cfcbSJose Abreu 		if (temp)
61543cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
61553cd1cfcbSJose Abreu 	}
61563cd1cfcbSJose Abreu 
61573cd1cfcbSJose Abreu 	return crc;
61583cd1cfcbSJose Abreu }
61593cd1cfcbSJose Abreu 
/* Reprogram the hardware VLAN filter from priv->active_vlans.
 *
 * With VLAN hash filter support, every active VID is folded into a
 * 16-bit hash (4-bit index per VID). Without it, fall back to a single
 * perfect-match entry: at most two VIDs may then be active (VID 0
 * always passes the filter), and after the loop 'vid' holds the highest
 * set VID, which becomes the perfect-match value.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		/* Top 4 bits of the bit-reversed CRC select the hash bit. */
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
61843cd1cfcbSJose Abreu 
61853cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
61863cd1cfcbSJose Abreu {
61873cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
61883cd1cfcbSJose Abreu 	bool is_double = false;
61893cd1cfcbSJose Abreu 	int ret;
61903cd1cfcbSJose Abreu 
61913cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
61923cd1cfcbSJose Abreu 		is_double = true;
61933cd1cfcbSJose Abreu 
61943cd1cfcbSJose Abreu 	set_bit(vid, priv->active_vlans);
61953cd1cfcbSJose Abreu 	ret = stmmac_vlan_update(priv, is_double);
61963cd1cfcbSJose Abreu 	if (ret) {
61973cd1cfcbSJose Abreu 		clear_bit(vid, priv->active_vlans);
61983cd1cfcbSJose Abreu 		return ret;
61993cd1cfcbSJose Abreu 	}
62003cd1cfcbSJose Abreu 
6201dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
6202ed64639bSWong Vee Khee 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6203dd6a4998SJose Abreu 		if (ret)
62043cd1cfcbSJose Abreu 			return ret;
62053cd1cfcbSJose Abreu 	}
62063cd1cfcbSJose Abreu 
6207dd6a4998SJose Abreu 	return 0;
6208dd6a4998SJose Abreu }
6209dd6a4998SJose Abreu 
62103cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
62113cd1cfcbSJose Abreu {
62123cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
62133cd1cfcbSJose Abreu 	bool is_double = false;
6214ed64639bSWong Vee Khee 	int ret;
62153cd1cfcbSJose Abreu 
6216b3dcb312SJoakim Zhang 	ret = pm_runtime_get_sync(priv->device);
6217b3dcb312SJoakim Zhang 	if (ret < 0) {
6218b3dcb312SJoakim Zhang 		pm_runtime_put_noidle(priv->device);
6219b3dcb312SJoakim Zhang 		return ret;
6220b3dcb312SJoakim Zhang 	}
6221b3dcb312SJoakim Zhang 
62223cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
62233cd1cfcbSJose Abreu 		is_double = true;
62243cd1cfcbSJose Abreu 
62253cd1cfcbSJose Abreu 	clear_bit(vid, priv->active_vlans);
6226dd6a4998SJose Abreu 
6227dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
6228ed64639bSWong Vee Khee 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6229ed64639bSWong Vee Khee 		if (ret)
62305ec55823SJoakim Zhang 			goto del_vlan_error;
6231dd6a4998SJose Abreu 	}
6232ed64639bSWong Vee Khee 
62335ec55823SJoakim Zhang 	ret = stmmac_vlan_update(priv, is_double);
62345ec55823SJoakim Zhang 
62355ec55823SJoakim Zhang del_vlan_error:
62365ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
62375ec55823SJoakim Zhang 
62385ec55823SJoakim Zhang 	return ret;
62393cd1cfcbSJose Abreu }
62403cd1cfcbSJose Abreu 
62415fabb012SOng Boon Leong static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
62425fabb012SOng Boon Leong {
62435fabb012SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
62445fabb012SOng Boon Leong 
62455fabb012SOng Boon Leong 	switch (bpf->command) {
62465fabb012SOng Boon Leong 	case XDP_SETUP_PROG:
62475fabb012SOng Boon Leong 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6248bba2556eSOng Boon Leong 	case XDP_SETUP_XSK_POOL:
6249bba2556eSOng Boon Leong 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6250bba2556eSOng Boon Leong 					     bpf->xsk.queue_id);
62515fabb012SOng Boon Leong 	default:
62525fabb012SOng Boon Leong 		return -EOPNOTSUPP;
62535fabb012SOng Boon Leong 	}
62545fabb012SOng Boon Leong }
62555fabb012SOng Boon Leong 
62568b278a5bSOng Boon Leong static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
62578b278a5bSOng Boon Leong 			   struct xdp_frame **frames, u32 flags)
62588b278a5bSOng Boon Leong {
62598b278a5bSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
62608b278a5bSOng Boon Leong 	int cpu = smp_processor_id();
62618b278a5bSOng Boon Leong 	struct netdev_queue *nq;
62628b278a5bSOng Boon Leong 	int i, nxmit = 0;
62638b278a5bSOng Boon Leong 	int queue;
62648b278a5bSOng Boon Leong 
62658b278a5bSOng Boon Leong 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
62668b278a5bSOng Boon Leong 		return -ENETDOWN;
62678b278a5bSOng Boon Leong 
62688b278a5bSOng Boon Leong 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
62698b278a5bSOng Boon Leong 		return -EINVAL;
62708b278a5bSOng Boon Leong 
62718b278a5bSOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
62728b278a5bSOng Boon Leong 	nq = netdev_get_tx_queue(priv->dev, queue);
62738b278a5bSOng Boon Leong 
62748b278a5bSOng Boon Leong 	__netif_tx_lock(nq, cpu);
62758b278a5bSOng Boon Leong 	/* Avoids TX time-out as we are sharing with slow path */
62765337824fSEric Dumazet 	txq_trans_cond_update(nq);
62778b278a5bSOng Boon Leong 
62788b278a5bSOng Boon Leong 	for (i = 0; i < num_frames; i++) {
62798b278a5bSOng Boon Leong 		int res;
62808b278a5bSOng Boon Leong 
62818b278a5bSOng Boon Leong 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
62828b278a5bSOng Boon Leong 		if (res == STMMAC_XDP_CONSUMED)
62838b278a5bSOng Boon Leong 			break;
62848b278a5bSOng Boon Leong 
62858b278a5bSOng Boon Leong 		nxmit++;
62868b278a5bSOng Boon Leong 	}
62878b278a5bSOng Boon Leong 
62888b278a5bSOng Boon Leong 	if (flags & XDP_XMIT_FLUSH) {
62898b278a5bSOng Boon Leong 		stmmac_flush_tx_descriptors(priv, queue);
62908b278a5bSOng Boon Leong 		stmmac_tx_timer_arm(priv, queue);
62918b278a5bSOng Boon Leong 	}
62928b278a5bSOng Boon Leong 
62938b278a5bSOng Boon Leong 	__netif_tx_unlock(nq);
62948b278a5bSOng Boon Leong 
62958b278a5bSOng Boon Leong 	return nxmit;
62968b278a5bSOng Boon Leong }
62978b278a5bSOng Boon Leong 
/* Quiesce one RX queue: mask its DMA RX interrupt, stop the RX DMA
 * channel and free its descriptor resources.
 */
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	/* The channel lock serializes per-channel interrupt masking. */
	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, queue);
}
6310bba2556eSOng Boon Leong 
/* Bring one RX queue (back) up: allocate and initialize its descriptor
 * ring, program the DMA channel base/tail pointers and RX buffer size,
 * start RX DMA and finally unmask the channel's RX interrupt.
 *
 * On allocation/init failure the queue is simply left disabled (errors
 * are logged but not propagated).
 */
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	u32 buf_size;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_clear_rx_descriptors(priv, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, rx_q->queue_index);

	/* Tail pointer sits right after the buffers already allocated. */
	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
			     sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
			       rx_q->rx_tail_addr, rx_q->queue_index);

	/* XSK-backed queues use the pool's frame size as the DMA buffer
	 * size; otherwise use the driver default.
	 */
	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      buf_size,
				      rx_q->queue_index);
	} else {
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      priv->dma_buf_sz,
				      rx_q->queue_index);
	}

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
6359bba2556eSOng Boon Leong 
/* Quiesce one TX queue: mask its DMA TX interrupt, stop the TX DMA
 * channel and free its descriptor resources.
 */
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	/* The channel lock serializes per-channel interrupt masking. */
	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, queue);
}
6372132c32eeSOng Boon Leong 
/* Bring one TX queue (back) up: allocate and initialize its descriptor
 * ring, program the DMA channel (including TBS when available), start
 * TX DMA and finally unmask the channel's TX interrupt.
 *
 * On allocation/init failure the queue is simply left disabled (errors
 * are logged but not propagated).
 */
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_clear_tx_descriptors(priv, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, tx_q->queue_index);

	/* Re-enable Time-Based Scheduling if this queue supports it. */
	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
			       tx_q->tx_tail_addr, tx_q->queue_index);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
6411132c32eeSOng Boon Leong 
/* Tear down the whole datapath (NAPI, TX timers, IRQs, DMA channels,
 * descriptor memory, MAC) so XDP state can be reconfigured. Counterpart
 * of stmmac_xdp_open(); the netdev itself stays registered.
 */
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	/* Cancel the per-queue TX coalescing timers. */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}
6441ac746c85SOng Boon Leong 
6442ac746c85SOng Boon Leong int stmmac_xdp_open(struct net_device *dev)
6443ac746c85SOng Boon Leong {
6444ac746c85SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6445ac746c85SOng Boon Leong 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6446ac746c85SOng Boon Leong 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6447ac746c85SOng Boon Leong 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6448ac746c85SOng Boon Leong 	struct stmmac_rx_queue *rx_q;
6449ac746c85SOng Boon Leong 	struct stmmac_tx_queue *tx_q;
6450ac746c85SOng Boon Leong 	u32 buf_size;
6451ac746c85SOng Boon Leong 	bool sph_en;
6452ac746c85SOng Boon Leong 	u32 chan;
6453ac746c85SOng Boon Leong 	int ret;
6454ac746c85SOng Boon Leong 
6455ac746c85SOng Boon Leong 	ret = alloc_dma_desc_resources(priv);
6456ac746c85SOng Boon Leong 	if (ret < 0) {
6457ac746c85SOng Boon Leong 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6458ac746c85SOng Boon Leong 			   __func__);
6459ac746c85SOng Boon Leong 		goto dma_desc_error;
6460ac746c85SOng Boon Leong 	}
6461ac746c85SOng Boon Leong 
6462ac746c85SOng Boon Leong 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
6463ac746c85SOng Boon Leong 	if (ret < 0) {
6464ac746c85SOng Boon Leong 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6465ac746c85SOng Boon Leong 			   __func__);
6466ac746c85SOng Boon Leong 		goto init_error;
6467ac746c85SOng Boon Leong 	}
6468ac746c85SOng Boon Leong 
6469ac746c85SOng Boon Leong 	/* DMA CSR Channel configuration */
6470ac746c85SOng Boon Leong 	for (chan = 0; chan < dma_csr_ch; chan++)
6471ac746c85SOng Boon Leong 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6472ac746c85SOng Boon Leong 
6473ac746c85SOng Boon Leong 	/* Adjust Split header */
6474ac746c85SOng Boon Leong 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6475ac746c85SOng Boon Leong 
6476ac746c85SOng Boon Leong 	/* DMA RX Channel Configuration */
6477ac746c85SOng Boon Leong 	for (chan = 0; chan < rx_cnt; chan++) {
6478ac746c85SOng Boon Leong 		rx_q = &priv->rx_queue[chan];
6479ac746c85SOng Boon Leong 
6480ac746c85SOng Boon Leong 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6481ac746c85SOng Boon Leong 				    rx_q->dma_rx_phy, chan);
6482ac746c85SOng Boon Leong 
6483ac746c85SOng Boon Leong 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6484ac746c85SOng Boon Leong 				     (rx_q->buf_alloc_num *
6485ac746c85SOng Boon Leong 				      sizeof(struct dma_desc));
6486ac746c85SOng Boon Leong 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6487ac746c85SOng Boon Leong 				       rx_q->rx_tail_addr, chan);
6488ac746c85SOng Boon Leong 
6489ac746c85SOng Boon Leong 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6490ac746c85SOng Boon Leong 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6491ac746c85SOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6492ac746c85SOng Boon Leong 					      buf_size,
6493ac746c85SOng Boon Leong 					      rx_q->queue_index);
6494ac746c85SOng Boon Leong 		} else {
6495ac746c85SOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6496ac746c85SOng Boon Leong 					      priv->dma_buf_sz,
6497ac746c85SOng Boon Leong 					      rx_q->queue_index);
6498ac746c85SOng Boon Leong 		}
6499ac746c85SOng Boon Leong 
6500ac746c85SOng Boon Leong 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6501ac746c85SOng Boon Leong 	}
6502ac746c85SOng Boon Leong 
6503ac746c85SOng Boon Leong 	/* DMA TX Channel Configuration */
6504ac746c85SOng Boon Leong 	for (chan = 0; chan < tx_cnt; chan++) {
6505ac746c85SOng Boon Leong 		tx_q = &priv->tx_queue[chan];
6506ac746c85SOng Boon Leong 
6507ac746c85SOng Boon Leong 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6508ac746c85SOng Boon Leong 				    tx_q->dma_tx_phy, chan);
6509ac746c85SOng Boon Leong 
6510ac746c85SOng Boon Leong 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6511ac746c85SOng Boon Leong 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6512ac746c85SOng Boon Leong 				       tx_q->tx_tail_addr, chan);
651361da6ac7SOng Boon Leong 
651461da6ac7SOng Boon Leong 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
651561da6ac7SOng Boon Leong 		tx_q->txtimer.function = stmmac_tx_timer;
6516ac746c85SOng Boon Leong 	}
6517ac746c85SOng Boon Leong 
6518ac746c85SOng Boon Leong 	/* Enable the MAC Rx/Tx */
6519ac746c85SOng Boon Leong 	stmmac_mac_set(priv, priv->ioaddr, true);
6520ac746c85SOng Boon Leong 
6521ac746c85SOng Boon Leong 	/* Start Rx & Tx DMA Channels */
6522ac746c85SOng Boon Leong 	stmmac_start_all_dma(priv);
6523ac746c85SOng Boon Leong 
6524ac746c85SOng Boon Leong 	ret = stmmac_request_irq(dev);
6525ac746c85SOng Boon Leong 	if (ret)
6526ac746c85SOng Boon Leong 		goto irq_error;
6527ac746c85SOng Boon Leong 
6528ac746c85SOng Boon Leong 	/* Enable NAPI process*/
6529ac746c85SOng Boon Leong 	stmmac_enable_all_queues(priv);
6530ac746c85SOng Boon Leong 	netif_carrier_on(dev);
6531ac746c85SOng Boon Leong 	netif_tx_start_all_queues(dev);
6532ac746c85SOng Boon Leong 
6533ac746c85SOng Boon Leong 	return 0;
6534ac746c85SOng Boon Leong 
6535ac746c85SOng Boon Leong irq_error:
6536ac746c85SOng Boon Leong 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6537ac746c85SOng Boon Leong 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6538ac746c85SOng Boon Leong 
6539ac746c85SOng Boon Leong 	stmmac_hw_teardown(dev);
6540ac746c85SOng Boon Leong init_error:
6541ac746c85SOng Boon Leong 	free_dma_desc_resources(priv);
6542ac746c85SOng Boon Leong dma_desc_error:
6543ac746c85SOng Boon Leong 	return ret;
6544ac746c85SOng Boon Leong }
6545ac746c85SOng Boon Leong 
6546bba2556eSOng Boon Leong int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6547bba2556eSOng Boon Leong {
6548bba2556eSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6549bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q;
6550132c32eeSOng Boon Leong 	struct stmmac_tx_queue *tx_q;
6551bba2556eSOng Boon Leong 	struct stmmac_channel *ch;
6552bba2556eSOng Boon Leong 
6553bba2556eSOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6554bba2556eSOng Boon Leong 	    !netif_carrier_ok(priv->dev))
6555bba2556eSOng Boon Leong 		return -ENETDOWN;
6556bba2556eSOng Boon Leong 
6557bba2556eSOng Boon Leong 	if (!stmmac_xdp_is_enabled(priv))
6558bba2556eSOng Boon Leong 		return -ENXIO;
6559bba2556eSOng Boon Leong 
6560132c32eeSOng Boon Leong 	if (queue >= priv->plat->rx_queues_to_use ||
6561132c32eeSOng Boon Leong 	    queue >= priv->plat->tx_queues_to_use)
6562bba2556eSOng Boon Leong 		return -EINVAL;
6563bba2556eSOng Boon Leong 
6564bba2556eSOng Boon Leong 	rx_q = &priv->rx_queue[queue];
6565132c32eeSOng Boon Leong 	tx_q = &priv->tx_queue[queue];
6566bba2556eSOng Boon Leong 	ch = &priv->channel[queue];
6567bba2556eSOng Boon Leong 
6568132c32eeSOng Boon Leong 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6569bba2556eSOng Boon Leong 		return -ENXIO;
6570bba2556eSOng Boon Leong 
6571132c32eeSOng Boon Leong 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6572bba2556eSOng Boon Leong 		/* EQoS does not have per-DMA channel SW interrupt,
6573bba2556eSOng Boon Leong 		 * so we schedule RX Napi straight-away.
6574bba2556eSOng Boon Leong 		 */
6575132c32eeSOng Boon Leong 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6576132c32eeSOng Boon Leong 			__napi_schedule(&ch->rxtx_napi);
6577bba2556eSOng Boon Leong 	}
6578bba2556eSOng Boon Leong 
6579bba2556eSOng Boon Leong 	return 0;
6580bba2556eSOng Boon Leong }
6581bba2556eSOng Boon Leong 
65827ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
65837ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
65847ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
65857ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
65867ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
65877ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
6588d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
658901789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
65907ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
6591a7605370SArnd Bergmann 	.ndo_eth_ioctl = stmmac_ioctl,
65924dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
65934993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
65947ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
65957ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
65967ac6653aSJeff Kirsher #endif
6597a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
65983cd1cfcbSJose Abreu 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
65993cd1cfcbSJose Abreu 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
66005fabb012SOng Boon Leong 	.ndo_bpf = stmmac_bpf,
66018b278a5bSOng Boon Leong 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6602bba2556eSOng Boon Leong 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
66037ac6653aSJeff Kirsher };
66047ac6653aSJeff Kirsher 
660534877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
660634877a15SJose Abreu {
660734877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
660834877a15SJose Abreu 		return;
660934877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
661034877a15SJose Abreu 		return;
661134877a15SJose Abreu 
661234877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
661334877a15SJose Abreu 
661434877a15SJose Abreu 	rtnl_lock();
661534877a15SJose Abreu 	netif_trans_update(priv->dev);
661634877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
661734877a15SJose Abreu 		usleep_range(1000, 2000);
661834877a15SJose Abreu 
661934877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
662034877a15SJose Abreu 	dev_close(priv->dev);
662100f54e68SPetr Machata 	dev_open(priv->dev, NULL);
662234877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
662334877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
662434877a15SJose Abreu 	rtnl_unlock();
662534877a15SJose Abreu }
662634877a15SJose Abreu 
662734877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
662834877a15SJose Abreu {
662934877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
663034877a15SJose Abreu 			service_task);
663134877a15SJose Abreu 
663234877a15SJose Abreu 	stmmac_reset_subtask(priv);
663334877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
663434877a15SJose Abreu }
663534877a15SJose Abreu 
66367ac6653aSJeff Kirsher /**
6637cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
663832ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
6639732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
6640732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
6641732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to setup either enhanced or
6642732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
6643cf3f047bSGiuseppe CAVALLARO  */
6644cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
6645cf3f047bSGiuseppe CAVALLARO {
66465f0456b4SJose Abreu 	int ret;
6647cf3f047bSGiuseppe CAVALLARO 
66489f93ac8dSLABBE Corentin 	/* dwmac-sun8i only work in chain mode */
66499f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
66509f93ac8dSLABBE Corentin 		chain_mode = 1;
66515f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
66529f93ac8dSLABBE Corentin 
66535f0456b4SJose Abreu 	/* Initialize HW Interface */
66545f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
66555f0456b4SJose Abreu 	if (ret)
66565f0456b4SJose Abreu 		return ret;
66574a7d666aSGiuseppe CAVALLARO 
6658cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
6659cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
6660cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
666138ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
6662cf3f047bSGiuseppe CAVALLARO 
6663cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields: e.g.
6664cf3f047bSGiuseppe CAVALLARO 		 * enh_desc, tx_coe (e.g. that are passed through the
6665cf3f047bSGiuseppe CAVALLARO 		 * platform) with the values from the HW capability
6666cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
6667cf3f047bSGiuseppe CAVALLARO 		 */
6668cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
66695a9b876eSLing Pei Lee 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
66705a9b876eSLing Pei Lee 				!priv->plat->use_phy_wol;
66713fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
6672b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
6673b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
6674b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
6675b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
6676b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
6677b8ef7020SBiao Huang 		}
667838912bdbSDeepak SIKRI 
6679a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
6680a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
6681a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
6682a8df35d4SEzequiel Garcia 		else
668338912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
6684a8df35d4SEzequiel Garcia 
6685f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
6686f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
668738912bdbSDeepak SIKRI 
668838912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
668938912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
669038912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
669138912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
669238912bdbSDeepak SIKRI 
669338ddc59dSLABBE Corentin 	} else {
669438ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
669538ddc59dSLABBE Corentin 	}
6696cf3f047bSGiuseppe CAVALLARO 
6697d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
6698d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
669938ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6700f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
670138ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6702d2afb5bdSGiuseppe CAVALLARO 	}
6703cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
670438ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
6705cf3f047bSGiuseppe CAVALLARO 
6706cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
670738ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
6708cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
6709cf3f047bSGiuseppe CAVALLARO 	}
6710cf3f047bSGiuseppe CAVALLARO 
6711f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
671238ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
6713f748be53SAlexandre TORGUE 
6714e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6715e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6716e0f9956aSChuah, Kim Tatt 
67177cfde0afSJose Abreu 	/* Run HW quirks, if any */
67187cfde0afSJose Abreu 	if (priv->hwif_quirks) {
67197cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
67207cfde0afSJose Abreu 		if (ret)
67217cfde0afSJose Abreu 			return ret;
67227cfde0afSJose Abreu 	}
67237cfde0afSJose Abreu 
67243b509466SJose Abreu 	/* Rx Watchdog is available in the COREs newer than the 3.40.
67253b509466SJose Abreu 	 * In some case, for example on bugged HW this feature
67263b509466SJose Abreu 	 * has to be disable and this can be done by passing the
67273b509466SJose Abreu 	 * riwt_off field from the platform.
67283b509466SJose Abreu 	 */
67293b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
67303b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
67313b509466SJose Abreu 		priv->use_riwt = 1;
67323b509466SJose Abreu 		dev_info(priv->device,
67333b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
67343b509466SJose Abreu 	}
67353b509466SJose Abreu 
6736c24602efSGiuseppe CAVALLARO 	return 0;
6737cf3f047bSGiuseppe CAVALLARO }
6738cf3f047bSGiuseppe CAVALLARO 
67390366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev)
67400366f7e0SOng Boon Leong {
67410366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
67420366f7e0SOng Boon Leong 	u32 queue, maxq;
67430366f7e0SOng Boon Leong 
67440366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
67450366f7e0SOng Boon Leong 
67460366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
67470366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
67480366f7e0SOng Boon Leong 
67490366f7e0SOng Boon Leong 		ch->priv_data = priv;
67500366f7e0SOng Boon Leong 		ch->index = queue;
67512b94f526SMarek Szyprowski 		spin_lock_init(&ch->lock);
67520366f7e0SOng Boon Leong 
67530366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use) {
67540366f7e0SOng Boon Leong 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
67550366f7e0SOng Boon Leong 				       NAPI_POLL_WEIGHT);
67560366f7e0SOng Boon Leong 		}
67570366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use) {
67580366f7e0SOng Boon Leong 			netif_tx_napi_add(dev, &ch->tx_napi,
67590366f7e0SOng Boon Leong 					  stmmac_napi_poll_tx,
67600366f7e0SOng Boon Leong 					  NAPI_POLL_WEIGHT);
67610366f7e0SOng Boon Leong 		}
6762132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
6763132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
6764132c32eeSOng Boon Leong 			netif_napi_add(dev, &ch->rxtx_napi,
6765132c32eeSOng Boon Leong 				       stmmac_napi_poll_rxtx,
6766132c32eeSOng Boon Leong 				       NAPI_POLL_WEIGHT);
6767132c32eeSOng Boon Leong 		}
67680366f7e0SOng Boon Leong 	}
67690366f7e0SOng Boon Leong }
67700366f7e0SOng Boon Leong 
67710366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev)
67720366f7e0SOng Boon Leong {
67730366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
67740366f7e0SOng Boon Leong 	u32 queue, maxq;
67750366f7e0SOng Boon Leong 
67760366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
67770366f7e0SOng Boon Leong 
67780366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
67790366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
67800366f7e0SOng Boon Leong 
67810366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use)
67820366f7e0SOng Boon Leong 			netif_napi_del(&ch->rx_napi);
67830366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use)
67840366f7e0SOng Boon Leong 			netif_napi_del(&ch->tx_napi);
6785132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
6786132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
6787132c32eeSOng Boon Leong 			netif_napi_del(&ch->rxtx_napi);
6788132c32eeSOng Boon Leong 		}
67890366f7e0SOng Boon Leong 	}
67900366f7e0SOng Boon Leong }
67910366f7e0SOng Boon Leong 
67920366f7e0SOng Boon Leong int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
67930366f7e0SOng Boon Leong {
67940366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
67950366f7e0SOng Boon Leong 	int ret = 0;
67960366f7e0SOng Boon Leong 
67970366f7e0SOng Boon Leong 	if (netif_running(dev))
67980366f7e0SOng Boon Leong 		stmmac_release(dev);
67990366f7e0SOng Boon Leong 
68000366f7e0SOng Boon Leong 	stmmac_napi_del(dev);
68010366f7e0SOng Boon Leong 
68020366f7e0SOng Boon Leong 	priv->plat->rx_queues_to_use = rx_cnt;
68030366f7e0SOng Boon Leong 	priv->plat->tx_queues_to_use = tx_cnt;
68040366f7e0SOng Boon Leong 
68050366f7e0SOng Boon Leong 	stmmac_napi_add(dev);
68060366f7e0SOng Boon Leong 
68070366f7e0SOng Boon Leong 	if (netif_running(dev))
68080366f7e0SOng Boon Leong 		ret = stmmac_open(dev);
68090366f7e0SOng Boon Leong 
68100366f7e0SOng Boon Leong 	return ret;
68110366f7e0SOng Boon Leong }
68120366f7e0SOng Boon Leong 
6813aa042f60SSong, Yoong Siang int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6814aa042f60SSong, Yoong Siang {
6815aa042f60SSong, Yoong Siang 	struct stmmac_priv *priv = netdev_priv(dev);
6816aa042f60SSong, Yoong Siang 	int ret = 0;
6817aa042f60SSong, Yoong Siang 
6818aa042f60SSong, Yoong Siang 	if (netif_running(dev))
6819aa042f60SSong, Yoong Siang 		stmmac_release(dev);
6820aa042f60SSong, Yoong Siang 
6821aa042f60SSong, Yoong Siang 	priv->dma_rx_size = rx_size;
6822aa042f60SSong, Yoong Siang 	priv->dma_tx_size = tx_size;
6823aa042f60SSong, Yoong Siang 
6824aa042f60SSong, Yoong Siang 	if (netif_running(dev))
6825aa042f60SSong, Yoong Siang 		ret = stmmac_open(dev);
6826aa042f60SSong, Yoong Siang 
6827aa042f60SSong, Yoong Siang 	return ret;
6828aa042f60SSong, Yoong Siang }
6829aa042f60SSong, Yoong Siang 
68305a558611SOng Boon Leong #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
68315a558611SOng Boon Leong static void stmmac_fpe_lp_task(struct work_struct *work)
68325a558611SOng Boon Leong {
68335a558611SOng Boon Leong 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
68345a558611SOng Boon Leong 						fpe_task);
68355a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
68365a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
68375a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
68385a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
68395a558611SOng Boon Leong 	bool *enable = &fpe_cfg->enable;
68405a558611SOng Boon Leong 	int retries = 20;
68415a558611SOng Boon Leong 
68425a558611SOng Boon Leong 	while (retries-- > 0) {
68435a558611SOng Boon Leong 		/* Bail out immediately if FPE handshake is OFF */
68445a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
68455a558611SOng Boon Leong 			break;
68465a558611SOng Boon Leong 
68475a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_ENTERING_ON &&
68485a558611SOng Boon Leong 		    *lp_state == FPE_STATE_ENTERING_ON) {
68495a558611SOng Boon Leong 			stmmac_fpe_configure(priv, priv->ioaddr,
68505a558611SOng Boon Leong 					     priv->plat->tx_queues_to_use,
68515a558611SOng Boon Leong 					     priv->plat->rx_queues_to_use,
68525a558611SOng Boon Leong 					     *enable);
68535a558611SOng Boon Leong 
68545a558611SOng Boon Leong 			netdev_info(priv->dev, "configured FPE\n");
68555a558611SOng Boon Leong 
68565a558611SOng Boon Leong 			*lo_state = FPE_STATE_ON;
68575a558611SOng Boon Leong 			*lp_state = FPE_STATE_ON;
68585a558611SOng Boon Leong 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
68595a558611SOng Boon Leong 			break;
68605a558611SOng Boon Leong 		}
68615a558611SOng Boon Leong 
68625a558611SOng Boon Leong 		if ((*lo_state == FPE_STATE_CAPABLE ||
68635a558611SOng Boon Leong 		     *lo_state == FPE_STATE_ENTERING_ON) &&
68645a558611SOng Boon Leong 		     *lp_state != FPE_STATE_ON) {
68655a558611SOng Boon Leong 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
68665a558611SOng Boon Leong 				    *lo_state, *lp_state);
68675a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
68685a558611SOng Boon Leong 						MPACKET_VERIFY);
68695a558611SOng Boon Leong 		}
68705a558611SOng Boon Leong 		/* Sleep then retry */
68715a558611SOng Boon Leong 		msleep(500);
68725a558611SOng Boon Leong 	}
68735a558611SOng Boon Leong 
68745a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
68755a558611SOng Boon Leong }
68765a558611SOng Boon Leong 
68775a558611SOng Boon Leong void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
68785a558611SOng Boon Leong {
68795a558611SOng Boon Leong 	if (priv->plat->fpe_cfg->hs_enable != enable) {
68805a558611SOng Boon Leong 		if (enable) {
68815a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
68825a558611SOng Boon Leong 						MPACKET_VERIFY);
68835a558611SOng Boon Leong 		} else {
68845a558611SOng Boon Leong 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
68855a558611SOng Boon Leong 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
68865a558611SOng Boon Leong 		}
68875a558611SOng Boon Leong 
68885a558611SOng Boon Leong 		priv->plat->fpe_cfg->hs_enable = enable;
68895a558611SOng Boon Leong 	}
68905a558611SOng Boon Leong }
68915a558611SOng Boon Leong 
6892cf3f047bSGiuseppe CAVALLARO /**
6893bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
6894bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
6895ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
6896e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
6897bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
6898bfab27a1SGiuseppe CAVALLARO  * call the alloc_etherdev, allocate the priv structure.
68999afec6efSAndy Shevchenko  * Return:
690015ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
69017ac6653aSJeff Kirsher  */
690215ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
6903cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
6904e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
69057ac6653aSJeff Kirsher {
6906bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
6907bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
69080366f7e0SOng Boon Leong 	u32 rxq;
690976067459SJose Abreu 	int i, ret = 0;
69107ac6653aSJeff Kirsher 
69119737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
69129737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
691341de8d4cSJoe Perches 	if (!ndev)
691415ffac73SJoachim Eastwood 		return -ENOMEM;
69157ac6653aSJeff Kirsher 
6916bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
69177ac6653aSJeff Kirsher 
6918bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
6919bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
6920bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
6921bfab27a1SGiuseppe CAVALLARO 
6922bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
6923cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
6924cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
6925e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
6926e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
69276ccf12aeSWong, Vee Khee 	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
6928e56788cfSJoachim Eastwood 
6929e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
6930e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
6931e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
69328532f613SOng Boon Leong 	priv->sfty_ce_irq = res->sfty_ce_irq;
69338532f613SOng Boon Leong 	priv->sfty_ue_irq = res->sfty_ue_irq;
69348532f613SOng Boon Leong 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
69358532f613SOng Boon Leong 		priv->rx_irq[i] = res->rx_irq[i];
69368532f613SOng Boon Leong 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
69378532f613SOng Boon Leong 		priv->tx_irq[i] = res->tx_irq[i];
6938e56788cfSJoachim Eastwood 
693983216e39SMichael Walle 	if (!is_zero_ether_addr(res->mac))
6940a96d317fSJakub Kicinski 		eth_hw_addr_set(priv->dev, res->mac);
6941bfab27a1SGiuseppe CAVALLARO 
6942a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
6943803f8fc4SJoachim Eastwood 
6944cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
6945cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
6946cf3f047bSGiuseppe CAVALLARO 
6947bba2556eSOng Boon Leong 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
6948bba2556eSOng Boon Leong 	if (!priv->af_xdp_zc_qps)
6949bba2556eSOng Boon Leong 		return -ENOMEM;
6950bba2556eSOng Boon Leong 
695134877a15SJose Abreu 	/* Allocate workqueue */
695234877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
695334877a15SJose Abreu 	if (!priv->wq) {
695434877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
69559737070cSJisheng Zhang 		return -ENOMEM;
695634877a15SJose Abreu 	}
695734877a15SJose Abreu 
695834877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
695934877a15SJose Abreu 
69605a558611SOng Boon Leong 	/* Initialize Link Partner FPE workqueue */
69615a558611SOng Boon Leong 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
69625a558611SOng Boon Leong 
6963cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
6964ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
6965ceb69499SGiuseppe CAVALLARO 	 */
6966cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
6967cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
6968cf3f047bSGiuseppe CAVALLARO 
696990f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
697090f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
6971f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
697290f522a2SEugeniy Paltsev 		/* Some reset controllers have only reset callback instead of
697390f522a2SEugeniy Paltsev 		 * assert + deassert callbacks pair.
697490f522a2SEugeniy Paltsev 		 */
697590f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
697690f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
697790f522a2SEugeniy Paltsev 	}
6978c5e4ddbdSChen-Yu Tsai 
6979e67f325eSMatthew Hagan 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
6980e67f325eSMatthew Hagan 	if (ret == -ENOTSUPP)
6981e67f325eSMatthew Hagan 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
6982e67f325eSMatthew Hagan 			ERR_PTR(ret));
6983e67f325eSMatthew Hagan 
6984cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
6985c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
6986c24602efSGiuseppe CAVALLARO 	if (ret)
698762866e98SChen-Yu Tsai 		goto error_hw_init;
6988cf3f047bSGiuseppe CAVALLARO 
698996874c61SMohammad Athari Bin Ismail 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
699096874c61SMohammad Athari Bin Ismail 	 */
699196874c61SMohammad Athari Bin Ismail 	if (priv->synopsys_id < DWMAC_CORE_5_20)
699296874c61SMohammad Athari Bin Ismail 		priv->plat->dma_cfg->dche = false;
699396874c61SMohammad Athari Bin Ismail 
6994b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
6995b561af36SVinod Koul 
6996cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
6997cf3f047bSGiuseppe CAVALLARO 
6998cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6999cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
7000f748be53SAlexandre TORGUE 
70014dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
70024dbbe8ddSJose Abreu 	if (!ret) {
70034dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
70044dbbe8ddSJose Abreu 	}
70054dbbe8ddSJose Abreu 
7006f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
70079edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7008b7766206SJose Abreu 		if (priv->plat->has_gmac4)
7009b7766206SJose Abreu 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7010f748be53SAlexandre TORGUE 		priv->tso = true;
701138ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
7012f748be53SAlexandre TORGUE 	}
7013a993db88SJose Abreu 
701467afd6d1SJose Abreu 	if (priv->dma_cap.sphen) {
701567afd6d1SJose Abreu 		ndev->hw_features |= NETIF_F_GRO;
7016d08d32d1SOng Boon Leong 		priv->sph_cap = true;
7017d08d32d1SOng Boon Leong 		priv->sph = priv->sph_cap;
701867afd6d1SJose Abreu 		dev_info(priv->device, "SPH feature enabled\n");
701967afd6d1SJose Abreu 	}
702067afd6d1SJose Abreu 
7021f119cc98SFugang Duan 	/* The current IP register MAC_HW_Feature1[ADDR64] only define
7022f119cc98SFugang Duan 	 * 32/40/64 bit width, but some SOC support others like i.MX8MP
7023f119cc98SFugang Duan 	 * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
7024f119cc98SFugang Duan 	 * So overwrite dma_cap.addr64 according to HW real design.
7025f119cc98SFugang Duan 	 */
7026f119cc98SFugang Duan 	if (priv->plat->addr64)
7027f119cc98SFugang Duan 		priv->dma_cap.addr64 = priv->plat->addr64;
7028f119cc98SFugang Duan 
7029a993db88SJose Abreu 	if (priv->dma_cap.addr64) {
7030a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
7031a993db88SJose Abreu 				DMA_BIT_MASK(priv->dma_cap.addr64));
7032a993db88SJose Abreu 		if (!ret) {
7033a993db88SJose Abreu 			dev_info(priv->device, "Using %d bits DMA width\n",
7034a993db88SJose Abreu 				 priv->dma_cap.addr64);
7035968a2978SThierry Reding 
7036968a2978SThierry Reding 			/*
7037968a2978SThierry Reding 			 * If more than 32 bits can be addressed, make sure to
7038968a2978SThierry Reding 			 * enable enhanced addressing mode.
7039968a2978SThierry Reding 			 */
7040968a2978SThierry Reding 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7041968a2978SThierry Reding 				priv->plat->dma_cfg->eame = true;
7042a993db88SJose Abreu 		} else {
7043a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7044a993db88SJose Abreu 			if (ret) {
7045a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
7046a993db88SJose Abreu 				goto error_hw_init;
7047a993db88SJose Abreu 			}
7048a993db88SJose Abreu 
7049a993db88SJose Abreu 			priv->dma_cap.addr64 = 32;
7050a993db88SJose Abreu 		}
7051a993db88SJose Abreu 	}
7052a993db88SJose Abreu 
7053bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7054bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
70557ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
70567ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
7057ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
70583cd1cfcbSJose Abreu 	if (priv->dma_cap.vlhash) {
70593cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
70603cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
70613cd1cfcbSJose Abreu 	}
706230d93227SJose Abreu 	if (priv->dma_cap.vlins) {
706330d93227SJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
706430d93227SJose Abreu 		if (priv->dma_cap.dvlan)
706530d93227SJose Abreu 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
706630d93227SJose Abreu 	}
70677ac6653aSJeff Kirsher #endif
70687ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
70697ac6653aSJeff Kirsher 
707076067459SJose Abreu 	/* Initialize RSS */
707176067459SJose Abreu 	rxq = priv->plat->rx_queues_to_use;
707276067459SJose Abreu 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
707376067459SJose Abreu 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
707476067459SJose Abreu 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
707576067459SJose Abreu 
707676067459SJose Abreu 	if (priv->dma_cap.rssen && priv->plat->rss_en)
707776067459SJose Abreu 		ndev->features |= NETIF_F_RXHASH;
707876067459SJose Abreu 
707944770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
708044770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
708156bcd591SJose Abreu 	if (priv->plat->has_xgmac)
70827d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
708356bcd591SJose Abreu 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
708456bcd591SJose Abreu 		ndev->max_mtu = JUMBO_LEN;
708544770e11SJarod Wilson 	else
708644770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7087a2cd64f3SKweh, Hock Leong 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7088a2cd64f3SKweh, Hock Leong 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7089a2cd64f3SKweh, Hock Leong 	 */
7090a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7091a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
709244770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
7093a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
7094b618ab45SHeiner Kallweit 		dev_warn(priv->device,
7095a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
7096a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
709744770e11SJarod Wilson 
70987ac6653aSJeff Kirsher 	if (flow_ctrl)
70997ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
71007ac6653aSJeff Kirsher 
71018fce3331SJose Abreu 	/* Setup channels NAPI */
71020366f7e0SOng Boon Leong 	stmmac_napi_add(ndev);
71037ac6653aSJeff Kirsher 
710429555fa3SThierry Reding 	mutex_init(&priv->lock);
71057ac6653aSJeff Kirsher 
7106cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
7107cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
7108cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7109cd7201f4SGiuseppe CAVALLARO 	 * set the MDC clock dynamically according to the csr actual
7110cd7201f4SGiuseppe CAVALLARO 	 * clock input.
7111cd7201f4SGiuseppe CAVALLARO 	 */
71125e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
7113cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
71145e7f7fc5SBiao Huang 	else
71155e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
7116cd7201f4SGiuseppe CAVALLARO 
7117e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
7118e58bb43fSGiuseppe CAVALLARO 
71195ec55823SJoakim Zhang 	pm_runtime_get_noresume(device);
71205ec55823SJoakim Zhang 	pm_runtime_set_active(device);
7121d90d0c17SKai-Heng Feng 	if (!pm_runtime_enabled(device))
71225ec55823SJoakim Zhang 		pm_runtime_enable(device);
71235ec55823SJoakim Zhang 
7124a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
71253fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
71264bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
71274bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
71284bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
7129b618ab45SHeiner Kallweit 			dev_err(priv->device,
713038ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
71314bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
71326a81c26fSViresh Kumar 			goto error_mdio_register;
71334bfcbd7aSFrancesco Virlinzi 		}
7134e58bb43fSGiuseppe CAVALLARO 	}
71354bfcbd7aSFrancesco Virlinzi 
713646682cb8SVoon Weifeng 	if (priv->plat->speed_mode_2500)
713746682cb8SVoon Weifeng 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
713846682cb8SVoon Weifeng 
71397413f9a6SVladimir Oltean 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7140597a68ceSVoon Weifeng 		ret = stmmac_xpcs_setup(priv->mii);
7141597a68ceSVoon Weifeng 		if (ret)
7142597a68ceSVoon Weifeng 			goto error_xpcs_setup;
7143597a68ceSVoon Weifeng 	}
7144597a68ceSVoon Weifeng 
714574371272SJose Abreu 	ret = stmmac_phy_setup(priv);
714674371272SJose Abreu 	if (ret) {
714774371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
714874371272SJose Abreu 		goto error_phy_setup;
714974371272SJose Abreu 	}
715074371272SJose Abreu 
715157016590SFlorian Fainelli 	ret = register_netdev(ndev);
7152b2eb09afSFlorian Fainelli 	if (ret) {
7153b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
715457016590SFlorian Fainelli 			__func__, ret);
7155b2eb09afSFlorian Fainelli 		goto error_netdev_register;
7156b2eb09afSFlorian Fainelli 	}
71577ac6653aSJeff Kirsher 
7158b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerup) {
7159b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
7160b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
7161b9663b7cSVoon Weifeng 
7162b9663b7cSVoon Weifeng 		if (ret < 0)
7163801eb050SAndy Shevchenko 			goto error_serdes_powerup;
7164b9663b7cSVoon Weifeng 	}
7165b9663b7cSVoon Weifeng 
71665f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
71678d72ab11SGreg Kroah-Hartman 	stmmac_init_fs(ndev);
71685f2b8b62SThierry Reding #endif
71695f2b8b62SThierry Reding 
71704047b9dbSBhupesh Sharma 	if (priv->plat->dump_debug_regs)
71714047b9dbSBhupesh Sharma 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
71724047b9dbSBhupesh Sharma 
71735ec55823SJoakim Zhang 	/* Let pm_runtime_put() disable the clocks.
71745ec55823SJoakim Zhang 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
71755ec55823SJoakim Zhang 	 */
71765ec55823SJoakim Zhang 	pm_runtime_put(device);
71775ec55823SJoakim Zhang 
717857016590SFlorian Fainelli 	return ret;
71797ac6653aSJeff Kirsher 
7180801eb050SAndy Shevchenko error_serdes_powerup:
7181801eb050SAndy Shevchenko 	unregister_netdev(ndev);
71826a81c26fSViresh Kumar error_netdev_register:
718374371272SJose Abreu 	phylink_destroy(priv->phylink);
7184597a68ceSVoon Weifeng error_xpcs_setup:
718574371272SJose Abreu error_phy_setup:
7186a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7187b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7188b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
71897ac6653aSJeff Kirsher error_mdio_register:
71900366f7e0SOng Boon Leong 	stmmac_napi_del(ndev);
719162866e98SChen-Yu Tsai error_hw_init:
719234877a15SJose Abreu 	destroy_workqueue(priv->wq);
7193d7f576dcSWong Vee Khee 	bitmap_free(priv->af_xdp_zc_qps);
71947ac6653aSJeff Kirsher 
719515ffac73SJoachim Eastwood 	return ret;
71967ac6653aSJeff Kirsher }
7197b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
71987ac6653aSJeff Kirsher 
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	/* Quiesce the hardware first: stop all DMA channels and disable the
	 * MAC RX/TX before unplugging the netdev from the stack.
	 */
	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

	/* Serdes power down needs to happen after VLAN filter
	 * is deleted that is triggered by unregister_netdev().
	 */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	/* stmmac_rst is optional; only assert it when the platform set it up */
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	/* Balance the pm_runtime_get_noresume()/pm_runtime_enable() done at
	 * probe time.
	 */
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	return 0;
}
7241b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
72427ac6653aSJeff Kirsher 
/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-TX-queue timers before stopping the DMA */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	/* Leave LPI mode and stop the EEE control timer, if EEE was on */
	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		/* No MAC-based WoL: fully disable the MAC and switch the
		 * pins to their sleep state.
		 */
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	/* phylink calls require the RTNL lock; note priv->lock is released
	 * first to keep the lock ordering consistent.
	 */
	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	/* Link speed must be re-detected on resume */
	priv->speed = SPEED_UNKNOWN;
	return 0;
}
7313b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
73147ac6653aSJeff Kirsher 
7315732fdf0eSGiuseppe CAVALLARO /**
731654139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
7317d0ea5cbdSJesse Brandeburg  * @priv: device pointer
731854139cf3SJoao Pinto  */
731954139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
732054139cf3SJoao Pinto {
732154139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7322ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
732354139cf3SJoao Pinto 	u32 queue;
732454139cf3SJoao Pinto 
732554139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
732654139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
732754139cf3SJoao Pinto 
732854139cf3SJoao Pinto 		rx_q->cur_rx = 0;
732954139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
733054139cf3SJoao Pinto 	}
733154139cf3SJoao Pinto 
7332ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
7333ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7334ce736788SJoao Pinto 
7335ce736788SJoao Pinto 		tx_q->cur_tx = 0;
7336ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
73378d212a9eSNiklas Cassel 		tx_q->mss = 0;
7338c511819dSJoakim Zhang 
7339c511819dSJoakim Zhang 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7340ce736788SJoao Pinto 	}
734154139cf3SJoao Pinto }
734254139cf3SJoao Pinto 
/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		/* Non-WoL path: restore the default pin state and reset
		 * the PHY via MDIO, mirroring the suspend-side actions.
		 */
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	/* phylink calls need the RTNL lock */
	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	/* Rings were left in an arbitrary state by suspend: rewind the
	 * queue indices, drop stale TX skbs and re-init the descriptors
	 * before reprogramming the hardware.
	 */
	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
7417b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
7418ba27ec66SGiuseppe CAVALLARO 
74197ac6653aSJeff Kirsher #ifndef MODULE
74207ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
74217ac6653aSJeff Kirsher {
74227ac6653aSJeff Kirsher 	char *opt;
74237ac6653aSJeff Kirsher 
74247ac6653aSJeff Kirsher 	if (!str || !*str)
74257ac6653aSJeff Kirsher 		return -EINVAL;
74267ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
74277ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
7428ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
74297ac6653aSJeff Kirsher 				goto err;
74307ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7431ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
74327ac6653aSJeff Kirsher 				goto err;
74337ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7434ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
74357ac6653aSJeff Kirsher 				goto err;
74367ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
7437ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
74387ac6653aSJeff Kirsher 				goto err;
74397ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
7440ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
74417ac6653aSJeff Kirsher 				goto err;
74427ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7443ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
74447ac6653aSJeff Kirsher 				goto err;
74457ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
7446ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
74477ac6653aSJeff Kirsher 				goto err;
7448506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7449d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
7450d765955dSGiuseppe CAVALLARO 				goto err;
74514a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
74524a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
74534a7d666aSGiuseppe CAVALLARO 				goto err;
74547ac6653aSJeff Kirsher 		}
74557ac6653aSJeff Kirsher 	}
74567ac6653aSJeff Kirsher 	return 0;
74577ac6653aSJeff Kirsher 
74587ac6653aSJeff Kirsher err:
74597ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
74607ac6653aSJeff Kirsher 	return -EINVAL;
74617ac6653aSJeff Kirsher }
74627ac6653aSJeff Kirsher 
74637ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
7464ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
74656fc0d0f2SGiuseppe Cavallaro 
7466466c5ac8SMathieu Olivari static int __init stmmac_init(void)
7467466c5ac8SMathieu Olivari {
7468466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
7469466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
74708d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
7471466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7472474a31e1SAaro Koskinen 	register_netdevice_notifier(&stmmac_notifier);
7473466c5ac8SMathieu Olivari #endif
7474466c5ac8SMathieu Olivari 
7475466c5ac8SMathieu Olivari 	return 0;
7476466c5ac8SMathieu Olivari }
7477466c5ac8SMathieu Olivari 
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Unhook the notifier first, then tear down the debugfs tree it
	 * presumably touches (mirror of stmmac_init()).
	 */
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}
7485466c5ac8SMathieu Olivari 
/* Module entry/exit points and metadata */
module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
7492