14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27ac6653aSJeff Kirsher /*******************************************************************************
37ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
47ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
57ac6653aSJeff Kirsher 
6286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
77ac6653aSJeff Kirsher 
87ac6653aSJeff Kirsher 
97ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   Documentation available at:
127ac6653aSJeff Kirsher 	http://www.stlinux.com
137ac6653aSJeff Kirsher   Support available at:
147ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
157ac6653aSJeff Kirsher *******************************************************************************/
167ac6653aSJeff Kirsher 
176a81c26fSViresh Kumar #include <linux/clk.h>
187ac6653aSJeff Kirsher #include <linux/kernel.h>
197ac6653aSJeff Kirsher #include <linux/interrupt.h>
207ac6653aSJeff Kirsher #include <linux/ip.h>
217ac6653aSJeff Kirsher #include <linux/tcp.h>
227ac6653aSJeff Kirsher #include <linux/skbuff.h>
237ac6653aSJeff Kirsher #include <linux/ethtool.h>
247ac6653aSJeff Kirsher #include <linux/if_ether.h>
257ac6653aSJeff Kirsher #include <linux/crc32.h>
267ac6653aSJeff Kirsher #include <linux/mii.h>
2701789349SJiri Pirko #include <linux/if.h>
287ac6653aSJeff Kirsher #include <linux/if_vlan.h>
297ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
307ac6653aSJeff Kirsher #include <linux/slab.h>
315ec55823SJoakim Zhang #include <linux/pm_runtime.h>
327ac6653aSJeff Kirsher #include <linux/prefetch.h>
33db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
3450fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
357ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
367ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
3750fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
38891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
39eeef2f6bSJose Abreu #include <linux/phylink.h>
40b7766206SJose Abreu #include <linux/udp.h>
415fabb012SOng Boon Leong #include <linux/bpf_trace.h>
424dbbe8ddSJose Abreu #include <net/pkt_cls.h>
43bba2556eSOng Boon Leong #include <net/xdp_sock_drv.h>
44891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
45286a8372SGiuseppe CAVALLARO #include "stmmac.h"
465fabb012SOng Boon Leong #include "stmmac_xdp.h"
47c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
485790cf3cSMathieu Olivari #include <linux/of_mdio.h>
4919d857c9SPhil Reid #include "dwmac1000.h"
507d9e6c5aSJose Abreu #include "dwxgmac2.h"
5142de047dSJose Abreu #include "hwif.h"
527ac6653aSJeff Kirsher 
53a6da2bbbSHolger Assmann /* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
55a6da2bbbSHolger Assmann  * (clock jumps) when changing timestamping settings at runtime.
56a6da2bbbSHolger Assmann  */
57a6da2bbbSHolger Assmann #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58a6da2bbbSHolger Assmann 				 PTP_TCR_TSCTRLSSR)
59a6da2bbbSHolger Assmann 
608d558f02SJose Abreu #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61f748be53SAlexandre TORGUE #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
627ac6653aSJeff Kirsher 
637ac6653aSJeff Kirsher /* Module parameters */
6432ceabcaSGiuseppe CAVALLARO #define TX_TIMEO	5000
657ac6653aSJeff Kirsher static int watchdog = TX_TIMEO;
66d3757ba4SJoe Perches module_param(watchdog, int, 0644);
6732ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
687ac6653aSJeff Kirsher 
6932ceabcaSGiuseppe CAVALLARO static int debug = -1;
70d3757ba4SJoe Perches module_param(debug, int, 0644);
7132ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
727ac6653aSJeff Kirsher 
7347d1f71fSstephen hemminger static int phyaddr = -1;
74d3757ba4SJoe Perches module_param(phyaddr, int, 0444);
757ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address");
767ac6653aSJeff Kirsher 
77aa042f60SSong, Yoong Siang #define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
78aa042f60SSong, Yoong Siang #define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
797ac6653aSJeff Kirsher 
80132c32eeSOng Boon Leong /* Limit to make sure XDP TX and slow path can coexist */
81132c32eeSOng Boon Leong #define STMMAC_XSK_TX_BUDGET_MAX	256
82132c32eeSOng Boon Leong #define STMMAC_TX_XSK_AVAIL		16
83bba2556eSOng Boon Leong #define STMMAC_RX_FILL_BATCH		16
84bba2556eSOng Boon Leong 
855fabb012SOng Boon Leong #define STMMAC_XDP_PASS		0
865fabb012SOng Boon Leong #define STMMAC_XDP_CONSUMED	BIT(0)
87be8b38a7SOng Boon Leong #define STMMAC_XDP_TX		BIT(1)
888b278a5bSOng Boon Leong #define STMMAC_XDP_REDIRECT	BIT(2)
895fabb012SOng Boon Leong 
90e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO;
91d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644);
927ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
937ac6653aSJeff Kirsher 
947ac6653aSJeff Kirsher static int pause = PAUSE_TIME;
95d3757ba4SJoe Perches module_param(pause, int, 0644);
967ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time");
977ac6653aSJeff Kirsher 
987ac6653aSJeff Kirsher #define TC_DEFAULT 64
997ac6653aSJeff Kirsher static int tc = TC_DEFAULT;
100d3757ba4SJoe Perches module_param(tc, int, 0644);
1017ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value");
1027ac6653aSJeff Kirsher 
103d916701cSGiuseppe CAVALLARO #define	DEFAULT_BUFSIZE	1536
104d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE;
105d3757ba4SJoe Perches module_param(buf_sz, int, 0644);
1067ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size");
1077ac6653aSJeff Kirsher 
10822ad3838SGiuseppe Cavallaro #define	STMMAC_RX_COPYBREAK	256
10922ad3838SGiuseppe Cavallaro 
1107ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
1117ac6653aSJeff Kirsher 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
1127ac6653aSJeff Kirsher 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
1137ac6653aSJeff Kirsher 
114d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER	1000
115d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116d3757ba4SJoe Perches module_param(eee_timer, int, 0644);
117d765955dSGiuseppe CAVALLARO MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118388e201dSVineetha G. Jaya Kumaran #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119d765955dSGiuseppe CAVALLARO 
/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow the user to force use of the chain instead of the ring
 */
1234a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode;
124d3757ba4SJoe Perches module_param(chain_mode, int, 0444);
1254a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
1264a7d666aSGiuseppe CAVALLARO 
1277ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
1288532f613SOng Boon Leong /* For MSI interrupts handling */
1298532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
1308532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
1318532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
1328532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133132c32eeSOng Boon Leong static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
134132c32eeSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
1357ac6653aSJeff Kirsher 
13650fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
137481a7d15SJiping Ma static const struct net_device_ops stmmac_netdev_ops;
1388d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev);
139466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev);
140bfab27a1SGiuseppe CAVALLARO #endif
141bfab27a1SGiuseppe CAVALLARO 
142d5a05e69SVincent Whitchurch #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
1439125cdd1SGiuseppe CAVALLARO 
1445ec55823SJoakim Zhang int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
1455ec55823SJoakim Zhang {
1465ec55823SJoakim Zhang 	int ret = 0;
1475ec55823SJoakim Zhang 
1485ec55823SJoakim Zhang 	if (enabled) {
1495ec55823SJoakim Zhang 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
1505ec55823SJoakim Zhang 		if (ret)
1515ec55823SJoakim Zhang 			return ret;
1525ec55823SJoakim Zhang 		ret = clk_prepare_enable(priv->plat->pclk);
1535ec55823SJoakim Zhang 		if (ret) {
1545ec55823SJoakim Zhang 			clk_disable_unprepare(priv->plat->stmmac_clk);
1555ec55823SJoakim Zhang 			return ret;
1565ec55823SJoakim Zhang 		}
157b4d45aeeSJoakim Zhang 		if (priv->plat->clks_config) {
158b4d45aeeSJoakim Zhang 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
159b4d45aeeSJoakim Zhang 			if (ret) {
160b4d45aeeSJoakim Zhang 				clk_disable_unprepare(priv->plat->stmmac_clk);
161b4d45aeeSJoakim Zhang 				clk_disable_unprepare(priv->plat->pclk);
162b4d45aeeSJoakim Zhang 				return ret;
163b4d45aeeSJoakim Zhang 			}
164b4d45aeeSJoakim Zhang 		}
1655ec55823SJoakim Zhang 	} else {
1665ec55823SJoakim Zhang 		clk_disable_unprepare(priv->plat->stmmac_clk);
1675ec55823SJoakim Zhang 		clk_disable_unprepare(priv->plat->pclk);
168b4d45aeeSJoakim Zhang 		if (priv->plat->clks_config)
169b4d45aeeSJoakim Zhang 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
1705ec55823SJoakim Zhang 	}
1715ec55823SJoakim Zhang 
1725ec55823SJoakim Zhang 	return ret;
1735ec55823SJoakim Zhang }
1745ec55823SJoakim Zhang EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
1755ec55823SJoakim Zhang 
1767ac6653aSJeff Kirsher /**
1777ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
178732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and set a default in case of
179732fdf0eSGiuseppe CAVALLARO  * errors.
1807ac6653aSJeff Kirsher  */
1817ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1827ac6653aSJeff Kirsher {
1837ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1847ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
185d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
186d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1877ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1887ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1897ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1907ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1917ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1927ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
193d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
194d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1957ac6653aSJeff Kirsher }
1967ac6653aSJeff Kirsher 
197bba2556eSOng Boon Leong static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
198c22a3f48SJoao Pinto {
199c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2008fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2018fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
202c22a3f48SJoao Pinto 	u32 queue;
203c22a3f48SJoao Pinto 
2048fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
2058fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
206c22a3f48SJoao Pinto 
207132c32eeSOng Boon Leong 		if (stmmac_xdp_is_enabled(priv) &&
208132c32eeSOng Boon Leong 		    test_bit(queue, priv->af_xdp_zc_qps)) {
209132c32eeSOng Boon Leong 			napi_disable(&ch->rxtx_napi);
210132c32eeSOng Boon Leong 			continue;
211132c32eeSOng Boon Leong 		}
212132c32eeSOng Boon Leong 
2134ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
2144ccb4585SJose Abreu 			napi_disable(&ch->rx_napi);
2154ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
2164ccb4585SJose Abreu 			napi_disable(&ch->tx_napi);
217c22a3f48SJoao Pinto 	}
218c22a3f48SJoao Pinto }
219c22a3f48SJoao Pinto 
220c22a3f48SJoao Pinto /**
221bba2556eSOng Boon Leong  * stmmac_disable_all_queues - Disable all queues
222bba2556eSOng Boon Leong  * @priv: driver private structure
223bba2556eSOng Boon Leong  */
224bba2556eSOng Boon Leong static void stmmac_disable_all_queues(struct stmmac_priv *priv)
225bba2556eSOng Boon Leong {
226bba2556eSOng Boon Leong 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
227bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q;
228bba2556eSOng Boon Leong 	u32 queue;
229bba2556eSOng Boon Leong 
230bba2556eSOng Boon Leong 	/* synchronize_rcu() needed for pending XDP buffers to drain */
231bba2556eSOng Boon Leong 	for (queue = 0; queue < rx_queues_cnt; queue++) {
232bba2556eSOng Boon Leong 		rx_q = &priv->rx_queue[queue];
233bba2556eSOng Boon Leong 		if (rx_q->xsk_pool) {
234bba2556eSOng Boon Leong 			synchronize_rcu();
235bba2556eSOng Boon Leong 			break;
236bba2556eSOng Boon Leong 		}
237bba2556eSOng Boon Leong 	}
238bba2556eSOng Boon Leong 
239bba2556eSOng Boon Leong 	__stmmac_disable_all_queues(priv);
240bba2556eSOng Boon Leong }
241bba2556eSOng Boon Leong 
242bba2556eSOng Boon Leong /**
243c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
244c22a3f48SJoao Pinto  * @priv: driver private structure
245c22a3f48SJoao Pinto  */
246c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
247c22a3f48SJoao Pinto {
248c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2498fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2508fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
251c22a3f48SJoao Pinto 	u32 queue;
252c22a3f48SJoao Pinto 
2538fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
2548fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
255c22a3f48SJoao Pinto 
256132c32eeSOng Boon Leong 		if (stmmac_xdp_is_enabled(priv) &&
257132c32eeSOng Boon Leong 		    test_bit(queue, priv->af_xdp_zc_qps)) {
258132c32eeSOng Boon Leong 			napi_enable(&ch->rxtx_napi);
259132c32eeSOng Boon Leong 			continue;
260132c32eeSOng Boon Leong 		}
261132c32eeSOng Boon Leong 
2624ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
2634ccb4585SJose Abreu 			napi_enable(&ch->rx_napi);
2644ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
2654ccb4585SJose Abreu 			napi_enable(&ch->tx_napi);
266c22a3f48SJoao Pinto 	}
267c22a3f48SJoao Pinto }
268c22a3f48SJoao Pinto 
/* Queue the deferred service task unless the interface is going down or
 * the task is already scheduled (STMMAC_SERVICE_SCHED acts as the guard
 * bit; it is set atomically here and cleared by the service task).
 */
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}
27534877a15SJose Abreu 
/* Handle a fatal (global) error: take the carrier down and ask the
 * service task to perform a full reset of the device.
 */
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
28234877a15SJose Abreu 
283c22a3f48SJoao Pinto /**
28432ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
28532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
28632ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
28732ceabcaSGiuseppe CAVALLARO  * clock input.
28832ceabcaSGiuseppe CAVALLARO  * Note:
28932ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
29032ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
29132ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
29232ceabcaSGiuseppe CAVALLARO  *	documentation). Viceversa the driver will try to set the MDC
29332ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
29432ceabcaSGiuseppe CAVALLARO  */
295cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv)
296cd7201f4SGiuseppe CAVALLARO {
297cd7201f4SGiuseppe CAVALLARO 	u32 clk_rate;
298cd7201f4SGiuseppe CAVALLARO 
299f573c0b9Sjpinto 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
300cd7201f4SGiuseppe CAVALLARO 
301cd7201f4SGiuseppe CAVALLARO 	/* Platform provided default clk_csr would be assumed valid
302ceb69499SGiuseppe CAVALLARO 	 * for all other cases except for the below mentioned ones.
303ceb69499SGiuseppe CAVALLARO 	 * For values higher than the IEEE 802.3 specified frequency
304ceb69499SGiuseppe CAVALLARO 	 * we can not estimate the proper divider as it is not known
305ceb69499SGiuseppe CAVALLARO 	 * the frequency of clk_csr_i. So we do not change the default
306ceb69499SGiuseppe CAVALLARO 	 * divider.
307ceb69499SGiuseppe CAVALLARO 	 */
308cd7201f4SGiuseppe CAVALLARO 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
309cd7201f4SGiuseppe CAVALLARO 		if (clk_rate < CSR_F_35M)
310cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_20_35M;
311cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
312cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_35_60M;
313cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
314cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_60_100M;
315cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
316cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_100_150M;
317cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
318cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_150_250M;
31908dad2f4SJesper Nilsson 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
320cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_250_300M;
321ceb69499SGiuseppe CAVALLARO 	}
3229f93ac8dSLABBE Corentin 
3239f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i) {
3249f93ac8dSLABBE Corentin 		if (clk_rate > 160000000)
3259f93ac8dSLABBE Corentin 			priv->clk_csr = 0x03;
3269f93ac8dSLABBE Corentin 		else if (clk_rate > 80000000)
3279f93ac8dSLABBE Corentin 			priv->clk_csr = 0x02;
3289f93ac8dSLABBE Corentin 		else if (clk_rate > 40000000)
3299f93ac8dSLABBE Corentin 			priv->clk_csr = 0x01;
3309f93ac8dSLABBE Corentin 		else
3319f93ac8dSLABBE Corentin 			priv->clk_csr = 0;
3329f93ac8dSLABBE Corentin 	}
3337d9e6c5aSJose Abreu 
3347d9e6c5aSJose Abreu 	if (priv->plat->has_xgmac) {
3357d9e6c5aSJose Abreu 		if (clk_rate > 400000000)
3367d9e6c5aSJose Abreu 			priv->clk_csr = 0x5;
3377d9e6c5aSJose Abreu 		else if (clk_rate > 350000000)
3387d9e6c5aSJose Abreu 			priv->clk_csr = 0x4;
3397d9e6c5aSJose Abreu 		else if (clk_rate > 300000000)
3407d9e6c5aSJose Abreu 			priv->clk_csr = 0x3;
3417d9e6c5aSJose Abreu 		else if (clk_rate > 250000000)
3427d9e6c5aSJose Abreu 			priv->clk_csr = 0x2;
3437d9e6c5aSJose Abreu 		else if (clk_rate > 150000000)
3447d9e6c5aSJose Abreu 			priv->clk_csr = 0x1;
3457d9e6c5aSJose Abreu 		else
3467d9e6c5aSJose Abreu 			priv->clk_csr = 0x0;
3477d9e6c5aSJose Abreu 	}
348cd7201f4SGiuseppe CAVALLARO }
349cd7201f4SGiuseppe CAVALLARO 
/* Debug helper: log a buffer's length and address, then hex-dump it. */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
3557ac6653aSJeff Kirsher 
356ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
3577ac6653aSJeff Kirsher {
358ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
359a6a3e026SLABBE Corentin 	u32 avail;
360e3ad57c9SGiuseppe Cavallaro 
361ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
362ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
363e3ad57c9SGiuseppe Cavallaro 	else
364aa042f60SSong, Yoong Siang 		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
365e3ad57c9SGiuseppe Cavallaro 
366e3ad57c9SGiuseppe Cavallaro 	return avail;
367e3ad57c9SGiuseppe Cavallaro }
368e3ad57c9SGiuseppe Cavallaro 
36954139cf3SJoao Pinto /**
37054139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
37154139cf3SJoao Pinto  * @priv: driver private structure
37254139cf3SJoao Pinto  * @queue: RX queue index
37354139cf3SJoao Pinto  */
37454139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
375e3ad57c9SGiuseppe Cavallaro {
37654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
377a6a3e026SLABBE Corentin 	u32 dirty;
378e3ad57c9SGiuseppe Cavallaro 
37954139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
38054139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
381e3ad57c9SGiuseppe Cavallaro 	else
382aa042f60SSong, Yoong Siang 		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
383e3ad57c9SGiuseppe Cavallaro 
384e3ad57c9SGiuseppe Cavallaro 	return dirty;
3857ac6653aSJeff Kirsher }
3867ac6653aSJeff Kirsher 
387be1c7eaeSVineetha G. Jaya Kumaran static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
388be1c7eaeSVineetha G. Jaya Kumaran {
389be1c7eaeSVineetha G. Jaya Kumaran 	int tx_lpi_timer;
390be1c7eaeSVineetha G. Jaya Kumaran 
391be1c7eaeSVineetha G. Jaya Kumaran 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
392be1c7eaeSVineetha G. Jaya Kumaran 	priv->eee_sw_timer_en = en ? 0 : 1;
393be1c7eaeSVineetha G. Jaya Kumaran 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
394be1c7eaeSVineetha G. Jaya Kumaran 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
395be1c7eaeSVineetha G. Jaya Kumaran }
396be1c7eaeSVineetha G. Jaya Kumaran 
39732ceabcaSGiuseppe CAVALLARO /**
398732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter in LPI mode
39932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
400732fdf0eSGiuseppe CAVALLARO  * Description: this function is to verify and enter in LPI mode in case of
401732fdf0eSGiuseppe CAVALLARO  * EEE.
40232ceabcaSGiuseppe CAVALLARO  */
403d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
404d765955dSGiuseppe CAVALLARO {
405ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
406ce736788SJoao Pinto 	u32 queue;
407ce736788SJoao Pinto 
408ce736788SJoao Pinto 	/* check if all TX queues have the work finished */
409ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
410ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
411ce736788SJoao Pinto 
412ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
413ce736788SJoao Pinto 			return; /* still unfinished work */
414ce736788SJoao Pinto 	}
415ce736788SJoao Pinto 
416d765955dSGiuseppe CAVALLARO 	/* Check and enter in LPI mode */
417ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
418c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
419b4b7b772Sjpinto 				priv->plat->en_tx_lpi_clockgating);
420d765955dSGiuseppe CAVALLARO }
421d765955dSGiuseppe CAVALLARO 
42232ceabcaSGiuseppe CAVALLARO /**
423732fdf0eSGiuseppe CAVALLARO  * stmmac_disable_eee_mode - disable and exit from LPI mode
42432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
42532ceabcaSGiuseppe CAVALLARO  * Description: this function is to exit and disable EEE in case of
42632ceabcaSGiuseppe CAVALLARO  * LPI state is true. This is called by the xmit.
42732ceabcaSGiuseppe CAVALLARO  */
428d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv)
429d765955dSGiuseppe CAVALLARO {
430be1c7eaeSVineetha G. Jaya Kumaran 	if (!priv->eee_sw_timer_en) {
431be1c7eaeSVineetha G. Jaya Kumaran 		stmmac_lpi_entry_timer_config(priv, 0);
432be1c7eaeSVineetha G. Jaya Kumaran 		return;
433be1c7eaeSVineetha G. Jaya Kumaran 	}
434be1c7eaeSVineetha G. Jaya Kumaran 
435c10d4c82SJose Abreu 	stmmac_reset_eee_mode(priv, priv->hw);
436d765955dSGiuseppe CAVALLARO 	del_timer_sync(&priv->eee_ctrl_timer);
437d765955dSGiuseppe CAVALLARO 	priv->tx_path_in_lpi_mode = false;
438d765955dSGiuseppe CAVALLARO }
439d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	/* Try to enter LPI; re-arm so entry is retried while TX is busy */
	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
454d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 * Return: true if EEE was (re)configured, false if unsupported or
 *  deactivated.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot dial with the phy registers at this stage
	 * so we do not support extra feature like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		/* Tear EEE down only if it was previously enabled */
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	/* First activation: set up the SW timer and the MAC EEE timers */
	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	/* Prefer the HW LPI entry timer when the core is GMAC4 and the
	 * configured timer value fits the ET field; otherwise fall back
	 * to the SW eee_ctrl_timer.
	 */
	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}
520d765955dSGiuseppe CAVALLARO 
5213751c3d3SThomas Gleixner static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
5223751c3d3SThomas Gleixner {
5233751c3d3SThomas Gleixner 	/* Correct the clk domain crossing(CDC) error */
5243751c3d3SThomas Gleixner 	if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
5253751c3d3SThomas Gleixner 		return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
5263751c3d3SThomas Gleixner 	return 0;
5273751c3d3SThomas Gleixner }
5283751c3d3SThomas Gleixner 
529732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps
53032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
531ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
532891434b1SRayagond Kokatanur  * @skb : the socket buffer
533891434b1SRayagond Kokatanur  * Description :
534891434b1SRayagond Kokatanur  * This function will read timestamp from the descriptor & pass it to stack.
535891434b1SRayagond Kokatanur  * and also perform some sanity checks.
536891434b1SRayagond Kokatanur  */
537891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
538ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *p, struct sk_buff *skb)
539891434b1SRayagond Kokatanur {
540891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps shhwtstamp;
54125e80cd0SJose Abreu 	bool found = false;
542df103170SNathan Chancellor 	u64 ns = 0;
543891434b1SRayagond Kokatanur 
544891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en)
545891434b1SRayagond Kokatanur 		return;
546891434b1SRayagond Kokatanur 
547ceb69499SGiuseppe CAVALLARO 	/* exit if skb doesn't support hw tstamp */
54875e4364fSdamuzi000 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
549891434b1SRayagond Kokatanur 		return;
550891434b1SRayagond Kokatanur 
551891434b1SRayagond Kokatanur 	/* check tx tstamp status */
55242de047dSJose Abreu 	if (stmmac_get_tx_timestamp_status(priv, p)) {
55342de047dSJose Abreu 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
55425e80cd0SJose Abreu 		found = true;
55525e80cd0SJose Abreu 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
55625e80cd0SJose Abreu 		found = true;
55725e80cd0SJose Abreu 	}
558891434b1SRayagond Kokatanur 
55925e80cd0SJose Abreu 	if (found) {
5603751c3d3SThomas Gleixner 		ns -= stmmac_cdc_adjust(priv);
5613600be5fSVoon Weifeng 
562891434b1SRayagond Kokatanur 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
563891434b1SRayagond Kokatanur 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
564ba1ffd74SGiuseppe CAVALLARO 
56533d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
566891434b1SRayagond Kokatanur 		/* pass tstamp to stack */
567891434b1SRayagond Kokatanur 		skb_tstamp_tx(skb, &shhwtstamp);
568ba1ffd74SGiuseppe CAVALLARO 	}
569891434b1SRayagond Kokatanur }
570891434b1SRayagond Kokatanur 
571732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
57232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
573ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
574ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
575891434b1SRayagond Kokatanur  * @skb : the socket buffer
576891434b1SRayagond Kokatanur  * Description :
577891434b1SRayagond Kokatanur  * This function will read received packet's timestamp from the descriptor
578891434b1SRayagond Kokatanur  * and pass it to stack. It also perform some sanity checks.
579891434b1SRayagond Kokatanur  */
580ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
581ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
582891434b1SRayagond Kokatanur {
583891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
58498870943SJose Abreu 	struct dma_desc *desc = p;
585df103170SNathan Chancellor 	u64 ns = 0;
586891434b1SRayagond Kokatanur 
587891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
588891434b1SRayagond Kokatanur 		return;
589ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
5907d9e6c5aSJose Abreu 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
59198870943SJose Abreu 		desc = np;
592891434b1SRayagond Kokatanur 
59398870943SJose Abreu 	/* Check if timestamp is available */
59442de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
59542de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
5963600be5fSVoon Weifeng 
5973751c3d3SThomas Gleixner 		ns -= stmmac_cdc_adjust(priv);
5983600be5fSVoon Weifeng 
59933d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
600891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
601891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
602891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
603ba1ffd74SGiuseppe CAVALLARO 	} else  {
60433d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
605ba1ffd74SGiuseppe CAVALLARO 	}
606891434b1SRayagond Kokatanur }
607891434b1SRayagond Kokatanur 
/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  The chosen configuration is cached in priv->tstamp_config so that
 *  stmmac_hwtstamp_get() can report it back later.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	/* Individual PTP_TCR_* bit fields accumulated below and finally
	 * programmed into the timestamp control register.
	 */
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	/* No timestamping capability at all: refuse the request and make
	 * sure both directions stay disabled.
	 */
	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* With the advanced (PTPv2-capable) timestamp unit the requested RX
	 * filter can be honoured (or narrowed) in hardware; otherwise fall
	 * through to the limited PTPv1-over-UDP-only handling below.
	 */
	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			/* Older cores additionally need TSEVNTENA set here */
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	/* Record the effective enable state for the fast paths */
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	/* Program the timestamp control register with the combined flags;
	 * systime_flags is kept so the setup can be restored after resume.
	 */
	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
808d6228b7cSArtem Panfilov 
809d6228b7cSArtem Panfilov /**
810d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_get - read hardware timestamping.
811d6228b7cSArtem Panfilov  *  @dev: device pointer.
812d6228b7cSArtem Panfilov  *  @ifr: An IOCTL specific structure, that can contain a pointer to
813d6228b7cSArtem Panfilov  *  a proprietary structure used to pass information to the driver.
814d6228b7cSArtem Panfilov  *  Description:
815d6228b7cSArtem Panfilov  *  This function obtain the current hardware timestamping settings
816d0ea5cbdSJesse Brandeburg  *  as requested.
817d6228b7cSArtem Panfilov  */
818d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
819d6228b7cSArtem Panfilov {
820d6228b7cSArtem Panfilov 	struct stmmac_priv *priv = netdev_priv(dev);
821d6228b7cSArtem Panfilov 	struct hwtstamp_config *config = &priv->tstamp_config;
822d6228b7cSArtem Panfilov 
823d6228b7cSArtem Panfilov 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
824d6228b7cSArtem Panfilov 		return -EOPNOTSUPP;
825d6228b7cSArtem Panfilov 
826d6228b7cSArtem Panfilov 	return copy_to_user(ifr->ifr_data, config,
827d6228b7cSArtem Panfilov 			    sizeof(*config)) ? -EFAULT : 0;
828891434b1SRayagond Kokatanur }
829891434b1SRayagond Kokatanur 
/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, case in which the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 * Returns 0 on success, -EOPNOTSUPP without timestamp capability, or a
 * negative errno if the PTP reference clock cannot be enabled.
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;
	int ret;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	/* The PTP block is clocked by clk_ptp_ref; it must be running
	 * before any timestamp register is touched.
	 */
	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
	if (ret < 0) {
		netdev_warn(priv->dev,
			    "failed to enable PTP reference clock: %pe\n",
			    ERR_PTR(ret));
		return ret;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
889a6da2bbbSHolger Assmann 
890a6da2bbbSHolger Assmann /**
891732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
89232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
893732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
89432ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
895732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
89632ceabcaSGiuseppe CAVALLARO  */
89792ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
898891434b1SRayagond Kokatanur {
8997d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
900a6da2bbbSHolger Assmann 	int ret;
9017d9e6c5aSJose Abreu 
902a6da2bbbSHolger Assmann 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
903a6da2bbbSHolger Assmann 	if (ret)
904a6da2bbbSHolger Assmann 		return ret;
90592ba6888SRayagond Kokatanur 
906891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
9077d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
9087d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
909be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
910be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
911be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
912891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
9137cd01399SVince Bridgers 
914be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
915be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
9167cd01399SVince Bridgers 
917be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
918be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
919be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
920891434b1SRayagond Kokatanur 
921891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
922891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
92392ba6888SRayagond Kokatanur 
924c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
925c30a70d3SGiuseppe CAVALLARO 
926c30a70d3SGiuseppe CAVALLARO 	return 0;
92792ba6888SRayagond Kokatanur }
92892ba6888SRayagond Kokatanur 
/* Undo stmmac_init_ptp(): stop the PTP reference clock and unregister
 * the PTP clock driver.
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
934891434b1SRayagond Kokatanur 
/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 *  using the user-configured priv->flow_ctrl / priv->pause settings.
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}
94829feff39SJoao Pinto 
/* Phylink .validate callback: compute the link modes this MAC supports
 * for the current platform configuration and intersect them with what
 * phylink proposes in @supported / @state->advertising.
 */
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	/* mac_supported: modes the MAC can do; mask: modes to strip out */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	/* Baseline modes every supported core can handle (10/100/1000) */
	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
	} else if (priv->plat->has_xgmac) {
		/* xgmac cores: enable each speed tier up to max_speed
		 * (max_speed == 0 means no platform limit).
		 */
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	/* supported/advertising = (input & mac_supported) & ~mask */
	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	if (priv->hw->xpcs)
		xpcs_validate(priv->hw->xpcs, supported, state);
}
1049eeef2f6bSJose Abreu 
/* Phylink .mac_config callback: intentionally empty for this driver */
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}
1055eeef2f6bSJose Abreu 
10565a558611SOng Boon Leong static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
10575a558611SOng Boon Leong {
10585a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
10595a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
10605a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
10615a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
10625a558611SOng Boon Leong 
10635a558611SOng Boon Leong 	if (is_up && *hs_enable) {
10645a558611SOng Boon Leong 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
10655a558611SOng Boon Leong 	} else {
10661f7096f0SWong Vee Khee 		*lo_state = FPE_STATE_OFF;
10671f7096f0SWong Vee Khee 		*lp_state = FPE_STATE_OFF;
10685a558611SOng Boon Leong 	}
10695a558611SOng Boon Leong }
10705a558611SOng Boon Leong 
/* Phylink .mac_link_down callback: disable the MAC, tear down EEE state
 * and notify the FPE logic (when supported) that the link went away.
 */
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	/* Stop the MAC first, then wind down EEE/LPI signalling */
	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}
10859ad372fcSJose Abreu 
/* phylink mac_link_up() callback: program MAC_CTRL_REG for the resolved
 * speed/duplex, set up flow control, re-enable the MAC and bring up the
 * link-dependent features (EEE, FPE).
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	/* Read-modify-write: clear the speed field, then OR in the per-core
	 * speed bits looked up from priv->hw->link below.
	 */
	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			/* Unsupported speed: leave the MAC untouched */
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		/* GMII/MII/RGMII and friends */
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	/* Platform hook for glue-layer clock retiming at the new speed */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation: only when pause resolved in both directions */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	/* Re-enable the MAC datapath, then bring EEE up if both the PHY and
	 * the DMA capabilities support it.
	 */
	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}
11849ad372fcSJose Abreu 
/* MAC-side operations handed to phylink_create(); phylink invokes these
 * callbacks to validate link modes and to reconfigure the MAC on link
 * changes.
 */
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};
1191eeef2f6bSJose Abreu 
119229feff39SJoao Pinto /**
1193732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
119432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
119532ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
119632ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
119732ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
119832ceabcaSGiuseppe CAVALLARO  */
1199e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1200e58bb43fSGiuseppe CAVALLARO {
1201e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
1202e58bb43fSGiuseppe CAVALLARO 
1203e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
12040d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
12050d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
12060d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
12070d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
120838ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
12093fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
12100d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
121138ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
12123fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1213e58bb43fSGiuseppe CAVALLARO 		}
1214e58bb43fSGiuseppe CAVALLARO 	}
1215e58bb43fSGiuseppe CAVALLARO }
1216e58bb43fSGiuseppe CAVALLARO 
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	/* Prefer the DT-described PHY when a phylink node exists */
	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	/* NOTE: when node is NULL the short-circuit below never reads the
	 * (still uninitialized) ret; ret is then assigned on both branches.
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	/* Without PMT, Wake-on-LAN capability is taken from the PHY */
	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
	}

	return ret;
}
126174371272SJose Abreu 
/* Create the phylink instance for this device and, when an XPCS is present,
 * attach it as the phylink PCS.  Must run before stmmac_init_phy().
 * Returns 0 on success or the PTR_ERR() of the failed phylink_create().
 */
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	/* mdio_bus_data is optional; only then can in-band AN be overridden */
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	/* Fall back to the device's own fwnode if no phylink node was given */
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	if (priv->hw->xpcs)
		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);

	priv->phylink = phylink;
	return 0;
}
12907ac6653aSJeff Kirsher 
129171fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1292c24602efSGiuseppe CAVALLARO {
129354139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1294bfaf91caSJoakim Zhang 	unsigned int desc_size;
129571fedb01SJoao Pinto 	void *head_rx;
129654139cf3SJoao Pinto 	u32 queue;
129754139cf3SJoao Pinto 
129854139cf3SJoao Pinto 	/* Display RX rings */
129954139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
130054139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
130154139cf3SJoao Pinto 
130254139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1303d0225e7dSAlexandre TORGUE 
1304bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
130554139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
1306bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1307bfaf91caSJoakim Zhang 		} else {
130854139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
1309bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1310bfaf91caSJoakim Zhang 		}
131171fedb01SJoao Pinto 
131271fedb01SJoao Pinto 		/* Display RX ring */
1313bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1314bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
13155bacd778SLABBE Corentin 	}
131654139cf3SJoao Pinto }
1317d0225e7dSAlexandre TORGUE 
131871fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
131971fedb01SJoao Pinto {
1320ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1321bfaf91caSJoakim Zhang 	unsigned int desc_size;
132271fedb01SJoao Pinto 	void *head_tx;
1323ce736788SJoao Pinto 	u32 queue;
1324ce736788SJoao Pinto 
1325ce736788SJoao Pinto 	/* Display TX rings */
1326ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1327ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1328ce736788SJoao Pinto 
1329ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
133071fedb01SJoao Pinto 
1331bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
1332ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
1333bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1334bfaf91caSJoakim Zhang 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1335579a25a8SJose Abreu 			head_tx = (void *)tx_q->dma_entx;
1336bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_edesc);
1337bfaf91caSJoakim Zhang 		} else {
1338ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
1339bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1340bfaf91caSJoakim Zhang 		}
134171fedb01SJoao Pinto 
1342bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1343bfaf91caSJoakim Zhang 				    tx_q->dma_tx_phy, desc_size);
1344c24602efSGiuseppe CAVALLARO 	}
1345ce736788SJoao Pinto }
1346c24602efSGiuseppe CAVALLARO 
/* Dump all RX then all TX descriptor rings. */
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	stmmac_display_rx_rings(priv);
	stmmac_display_tx_rings(priv);
}
135571fedb01SJoao Pinto 
1356286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1357286a8372SGiuseppe CAVALLARO {
1358286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1359286a8372SGiuseppe CAVALLARO 
1360b2f3a481SJose Abreu 	if (mtu >= BUF_SIZE_8KiB)
1361b2f3a481SJose Abreu 		ret = BUF_SIZE_16KiB;
1362b2f3a481SJose Abreu 	else if (mtu >= BUF_SIZE_4KiB)
1363286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1364286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1365286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1366d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1367286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1368286a8372SGiuseppe CAVALLARO 	else
1369d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1370286a8372SGiuseppe CAVALLARO 
1371286a8372SGiuseppe CAVALLARO 	return ret;
1372286a8372SGiuseppe CAVALLARO }
1373286a8372SGiuseppe CAVALLARO 
137432ceabcaSGiuseppe CAVALLARO /**
137571fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
137632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
137754139cf3SJoao Pinto  * @queue: RX queue index
137871fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
137932ceabcaSGiuseppe CAVALLARO  * in case of both basic and extended descriptors are used.
138032ceabcaSGiuseppe CAVALLARO  */
138154139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1382c24602efSGiuseppe CAVALLARO {
138354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
13845bacd778SLABBE Corentin 	int i;
1385c24602efSGiuseppe CAVALLARO 
138671fedb01SJoao Pinto 	/* Clear the RX descriptors */
1387aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_rx_size; i++)
13885bacd778SLABBE Corentin 		if (priv->extend_desc)
138942de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
13905bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1391aa042f60SSong, Yoong Siang 					(i == priv->dma_rx_size - 1),
1392583e6361SAaro Koskinen 					priv->dma_buf_sz);
13935bacd778SLABBE Corentin 		else
139442de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
13955bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1396aa042f60SSong, Yoong Siang 					(i == priv->dma_rx_size - 1),
1397583e6361SAaro Koskinen 					priv->dma_buf_sz);
139871fedb01SJoao Pinto }
139971fedb01SJoao Pinto 
140071fedb01SJoao Pinto /**
140171fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
140271fedb01SJoao Pinto  * @priv: driver private structure
1403ce736788SJoao Pinto  * @queue: TX queue index.
140471fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
140571fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
140671fedb01SJoao Pinto  */
1407ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
140871fedb01SJoao Pinto {
1409ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
141071fedb01SJoao Pinto 	int i;
141171fedb01SJoao Pinto 
141271fedb01SJoao Pinto 	/* Clear the TX descriptors */
1413aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++) {
1414aa042f60SSong, Yoong Siang 		int last = (i == (priv->dma_tx_size - 1));
1415579a25a8SJose Abreu 		struct dma_desc *p;
1416579a25a8SJose Abreu 
14175bacd778SLABBE Corentin 		if (priv->extend_desc)
1418579a25a8SJose Abreu 			p = &tx_q->dma_etx[i].basic;
1419579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1420579a25a8SJose Abreu 			p = &tx_q->dma_entx[i].basic;
14215bacd778SLABBE Corentin 		else
1422579a25a8SJose Abreu 			p = &tx_q->dma_tx[i];
1423579a25a8SJose Abreu 
1424579a25a8SJose Abreu 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1425579a25a8SJose Abreu 	}
1426c24602efSGiuseppe CAVALLARO }
1427c24602efSGiuseppe CAVALLARO 
1428732fdf0eSGiuseppe CAVALLARO /**
142971fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
143071fedb01SJoao Pinto  * @priv: driver private structure
143171fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
143271fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
143371fedb01SJoao Pinto  */
143471fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
143571fedb01SJoao Pinto {
143654139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1437ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
143854139cf3SJoao Pinto 	u32 queue;
143954139cf3SJoao Pinto 
144071fedb01SJoao Pinto 	/* Clear the RX descriptors */
144154139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
144254139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
144371fedb01SJoao Pinto 
144471fedb01SJoao Pinto 	/* Clear the TX descriptors */
1445ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1446ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
144771fedb01SJoao Pinto }
144871fedb01SJoao Pinto 
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	/* Re-use an already-attached page; allocate from the page pool only
	 * when this slot is empty.
	 */
	if (!buf->page) {
		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	/* Split Header (SPH) mode needs a second page for the payload */
	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	/* Jumbo (16KiB) buffers need the extra desc3 setup */
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
1492c24602efSGiuseppe CAVALLARO 
149371fedb01SJoao Pinto /**
149471fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
149571fedb01SJoao Pinto  * @priv: private structure
149654139cf3SJoao Pinto  * @queue: RX queue index
149771fedb01SJoao Pinto  * @i: buffer index.
149871fedb01SJoao Pinto  */
149954139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
150056329137SBartlomiej Zolnierkiewicz {
150154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
15022af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
150354139cf3SJoao Pinto 
15042af6106aSJose Abreu 	if (buf->page)
1505458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
15062af6106aSJose Abreu 	buf->page = NULL;
150767afd6d1SJose Abreu 
150867afd6d1SJose Abreu 	if (buf->sec_page)
1509458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
151067afd6d1SJose Abreu 	buf->sec_page = NULL;
151156329137SBartlomiej Zolnierkiewicz }
151256329137SBartlomiej Zolnierkiewicz 
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	/* Unmap DMA unless the buffer came from the XDP TX path, which owns
	 * its own mapping.
	 */
	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Return XDP frames (both TX and NDO redirect) to their origin */
	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	/* AF_XDP frames are completed later in bulk; just count them */
	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
155671fedb01SJoao Pinto 
155771fedb01SJoao Pinto /**
15584298255fSOng Boon Leong  * dma_free_rx_skbufs - free RX dma buffers
15594298255fSOng Boon Leong  * @priv: private structure
15604298255fSOng Boon Leong  * @queue: RX queue index
15614298255fSOng Boon Leong  */
15624298255fSOng Boon Leong static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
15634298255fSOng Boon Leong {
15644298255fSOng Boon Leong 	int i;
15654298255fSOng Boon Leong 
15664298255fSOng Boon Leong 	for (i = 0; i < priv->dma_rx_size; i++)
15674298255fSOng Boon Leong 		stmmac_free_rx_buffer(priv, queue, i);
15684298255fSOng Boon Leong }
15694298255fSOng Boon Leong 
15704298255fSOng Boon Leong static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
15714298255fSOng Boon Leong 				   gfp_t flags)
15724298255fSOng Boon Leong {
15734298255fSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
15744298255fSOng Boon Leong 	int i;
15754298255fSOng Boon Leong 
15764298255fSOng Boon Leong 	for (i = 0; i < priv->dma_rx_size; i++) {
15774298255fSOng Boon Leong 		struct dma_desc *p;
15784298255fSOng Boon Leong 		int ret;
15794298255fSOng Boon Leong 
15804298255fSOng Boon Leong 		if (priv->extend_desc)
15814298255fSOng Boon Leong 			p = &((rx_q->dma_erx + i)->basic);
15824298255fSOng Boon Leong 		else
15834298255fSOng Boon Leong 			p = rx_q->dma_rx + i;
15844298255fSOng Boon Leong 
15854298255fSOng Boon Leong 		ret = stmmac_init_rx_buffers(priv, p, i, flags,
15864298255fSOng Boon Leong 					     queue);
15874298255fSOng Boon Leong 		if (ret)
15884298255fSOng Boon Leong 			return ret;
1589bba2556eSOng Boon Leong 
1590bba2556eSOng Boon Leong 		rx_q->buf_alloc_num++;
15914298255fSOng Boon Leong 	}
15924298255fSOng Boon Leong 
15934298255fSOng Boon Leong 	return 0;
15944298255fSOng Boon Leong }
15954298255fSOng Boon Leong 
15964298255fSOng Boon Leong /**
1597bba2556eSOng Boon Leong  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1598bba2556eSOng Boon Leong  * @priv: private structure
1599bba2556eSOng Boon Leong  * @queue: RX queue index
1600bba2556eSOng Boon Leong  */
1601bba2556eSOng Boon Leong static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1602bba2556eSOng Boon Leong {
1603bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1604bba2556eSOng Boon Leong 	int i;
1605bba2556eSOng Boon Leong 
1606bba2556eSOng Boon Leong 	for (i = 0; i < priv->dma_rx_size; i++) {
1607bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1608bba2556eSOng Boon Leong 
1609bba2556eSOng Boon Leong 		if (!buf->xdp)
1610bba2556eSOng Boon Leong 			continue;
1611bba2556eSOng Boon Leong 
1612bba2556eSOng Boon Leong 		xsk_buff_free(buf->xdp);
1613bba2556eSOng Boon Leong 		buf->xdp = NULL;
1614bba2556eSOng Boon Leong 	}
1615bba2556eSOng Boon Leong }
1616bba2556eSOng Boon Leong 
1617bba2556eSOng Boon Leong static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1618bba2556eSOng Boon Leong {
1619bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1620bba2556eSOng Boon Leong 	int i;
1621bba2556eSOng Boon Leong 
1622bba2556eSOng Boon Leong 	for (i = 0; i < priv->dma_rx_size; i++) {
1623bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf;
1624bba2556eSOng Boon Leong 		dma_addr_t dma_addr;
1625bba2556eSOng Boon Leong 		struct dma_desc *p;
1626bba2556eSOng Boon Leong 
1627bba2556eSOng Boon Leong 		if (priv->extend_desc)
1628bba2556eSOng Boon Leong 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1629bba2556eSOng Boon Leong 		else
1630bba2556eSOng Boon Leong 			p = rx_q->dma_rx + i;
1631bba2556eSOng Boon Leong 
1632bba2556eSOng Boon Leong 		buf = &rx_q->buf_pool[i];
1633bba2556eSOng Boon Leong 
1634bba2556eSOng Boon Leong 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1635bba2556eSOng Boon Leong 		if (!buf->xdp)
1636bba2556eSOng Boon Leong 			return -ENOMEM;
1637bba2556eSOng Boon Leong 
1638bba2556eSOng Boon Leong 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1639bba2556eSOng Boon Leong 		stmmac_set_desc_addr(priv, p, dma_addr);
1640bba2556eSOng Boon Leong 		rx_q->buf_alloc_num++;
1641bba2556eSOng Boon Leong 	}
1642bba2556eSOng Boon Leong 
1643bba2556eSOng Boon Leong 	return 0;
1644bba2556eSOng Boon Leong }
1645bba2556eSOng Boon Leong 
1646bba2556eSOng Boon Leong static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1647bba2556eSOng Boon Leong {
1648bba2556eSOng Boon Leong 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1649bba2556eSOng Boon Leong 		return NULL;
1650bba2556eSOng Boon Leong 
1651bba2556eSOng Boon Leong 	return xsk_get_pool_from_qid(priv->dev, queue);
1652bba2556eSOng Boon Leong }
1653bba2556eSOng Boon Leong 
/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, queue);

	/* Drop any previous memory model before re-registering below */
	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	/* Register the memory model matching the buffer source: the AF_XDP
	 * pool in zero-copy mode, the driver page pool otherwise.
	 */
	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 0);
	}

	return 0;
}
1723de0b90e5SOng Boon Leong 
1724de0b90e5SOng Boon Leong static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1725de0b90e5SOng Boon Leong {
1726de0b90e5SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
1727de0b90e5SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
1728de0b90e5SOng Boon Leong 	u32 queue;
1729de0b90e5SOng Boon Leong 	int ret;
1730de0b90e5SOng Boon Leong 
1731de0b90e5SOng Boon Leong 	/* RX INITIALIZATION */
1732de0b90e5SOng Boon Leong 	netif_dbg(priv, probe, priv->dev,
1733de0b90e5SOng Boon Leong 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1734de0b90e5SOng Boon Leong 
1735de0b90e5SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++) {
1736de0b90e5SOng Boon Leong 		ret = __init_dma_rx_desc_rings(priv, queue, flags);
1737de0b90e5SOng Boon Leong 		if (ret)
1738de0b90e5SOng Boon Leong 			goto err_init_rx_buffers;
173954139cf3SJoao Pinto 	}
174054139cf3SJoao Pinto 
174171fedb01SJoao Pinto 	return 0;
174254139cf3SJoao Pinto 
174371fedb01SJoao Pinto err_init_rx_buffers:
174454139cf3SJoao Pinto 	while (queue >= 0) {
1745bba2556eSOng Boon Leong 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1746bba2556eSOng Boon Leong 
1747bba2556eSOng Boon Leong 		if (rx_q->xsk_pool)
1748bba2556eSOng Boon Leong 			dma_free_rx_xskbufs(priv, queue);
1749bba2556eSOng Boon Leong 		else
17504298255fSOng Boon Leong 			dma_free_rx_skbufs(priv, queue);
175154139cf3SJoao Pinto 
1752bba2556eSOng Boon Leong 		rx_q->buf_alloc_num = 0;
1753bba2556eSOng Boon Leong 		rx_q->xsk_pool = NULL;
1754bba2556eSOng Boon Leong 
175554139cf3SJoao Pinto 		if (queue == 0)
175654139cf3SJoao Pinto 			break;
175754139cf3SJoao Pinto 
175854139cf3SJoao Pinto 		queue--;
175954139cf3SJoao Pinto 	}
176054139cf3SJoao Pinto 
176171fedb01SJoao Pinto 	return ret;
176271fedb01SJoao Pinto }
176371fedb01SJoao Pinto 
176471fedb01SJoao Pinto /**
1765de0b90e5SOng Boon Leong  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1766de0b90e5SOng Boon Leong  * @priv: driver private structure
1767de0b90e5SOng Boon Leong  * @queue : TX queue index
176871fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors and
176971fedb01SJoao Pinto  * resets the per-queue TX book-keeping (no socket buffers are allocated
177071fedb01SJoao Pinto  * here). It supports the chained and ring modes.
177171fedb01SJoao Pinto  */
1772de0b90e5SOng Boon Leong static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
177371fedb01SJoao Pinto {
1774ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1775de0b90e5SOng Boon Leong 	int i;
1776ce736788SJoao Pinto 
177771fedb01SJoao Pinto 	netif_dbg(priv, probe, priv->dev,
1778ce736788SJoao Pinto 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1779ce736788SJoao Pinto 		  (u32)tx_q->dma_tx_phy);
178071fedb01SJoao Pinto 
178171fedb01SJoao Pinto 	/* Setup the chained descriptor addresses */
178271fedb01SJoao Pinto 	if (priv->mode == STMMAC_CHAIN_MODE) {
178371fedb01SJoao Pinto 		if (priv->extend_desc)
17842c520b1cSJose Abreu 			stmmac_mode_init(priv, tx_q->dma_etx,
1785aa042f60SSong, Yoong Siang 					 tx_q->dma_tx_phy,
1786aa042f60SSong, Yoong Siang 					 priv->dma_tx_size, 1);
		/* When TBS is available the queue uses enhanced descriptors
		 * (dma_entx), which are not chained via stmmac_mode_init().
		 */
1787579a25a8SJose Abreu 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
17882c520b1cSJose Abreu 			stmmac_mode_init(priv, tx_q->dma_tx,
1789aa042f60SSong, Yoong Siang 					 tx_q->dma_tx_phy,
1790aa042f60SSong, Yoong Siang 					 priv->dma_tx_size, 0);
1791c24602efSGiuseppe CAVALLARO 	}
1792286a8372SGiuseppe CAVALLARO 
	/* May be NULL when no XSK buffer pool is bound to this queue */
1793132c32eeSOng Boon Leong 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1794132c32eeSOng Boon Leong 
	/* Clear every descriptor and reset its per-slot DMA bookkeeping */
1795aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++) {
1796c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1797de0b90e5SOng Boon Leong 
1798c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1799ce736788SJoao Pinto 			p = &((tx_q->dma_etx + i)->basic);
1800579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1801579a25a8SJose Abreu 			p = &((tx_q->dma_entx + i)->basic);
1802c24602efSGiuseppe CAVALLARO 		else
1803ce736788SJoao Pinto 			p = tx_q->dma_tx + i;
1804f748be53SAlexandre TORGUE 
180544c67f85SJose Abreu 		stmmac_clear_desc(priv, p);
1806f748be53SAlexandre TORGUE 
1807ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1808ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1809ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].len = 0;
1810ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].last_segment = false;
1811ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
18124a7d666aSGiuseppe CAVALLARO 	}
1813c24602efSGiuseppe CAVALLARO 
1814ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
1815ce736788SJoao Pinto 	tx_q->cur_tx = 0;
18168d212a9eSNiklas Cassel 	tx_q->mss = 0;
1817ce736788SJoao Pinto 
1818c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1819de0b90e5SOng Boon Leong 
1820de0b90e5SOng Boon Leong 	return 0;
1821c22a3f48SJoao Pinto }
18227ac6653aSJeff Kirsher 
1823de0b90e5SOng Boon Leong static int init_dma_tx_desc_rings(struct net_device *dev)
1824de0b90e5SOng Boon Leong {
1825de0b90e5SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
1826de0b90e5SOng Boon Leong 	u32 tx_queue_cnt;
1827de0b90e5SOng Boon Leong 	u32 queue;
1828de0b90e5SOng Boon Leong 
1829de0b90e5SOng Boon Leong 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1830de0b90e5SOng Boon Leong 
1831de0b90e5SOng Boon Leong 	for (queue = 0; queue < tx_queue_cnt; queue++)
1832de0b90e5SOng Boon Leong 		__init_dma_tx_desc_rings(priv, queue);
1833de0b90e5SOng Boon Leong 
183471fedb01SJoao Pinto 	return 0;
183571fedb01SJoao Pinto }
183671fedb01SJoao Pinto 
183771fedb01SJoao Pinto /**
183871fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
183971fedb01SJoao Pinto  * @dev: net device structure
184071fedb01SJoao Pinto  * @flags: gfp flag.
184171fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
184271fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
184371fedb01SJoao Pinto  * modes.
 * Return: 0 on success, a negative error code otherwise.
184471fedb01SJoao Pinto  */
184571fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
184671fedb01SJoao Pinto {
184771fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
184871fedb01SJoao Pinto 	int ret;
184971fedb01SJoao Pinto 
185071fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
185171fedb01SJoao Pinto 	if (ret)
185271fedb01SJoao Pinto 		return ret;
185371fedb01SJoao Pinto 
	/* TX ring init result is propagated after the clear/display below */
185471fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
185571fedb01SJoao Pinto 
18565bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
18577ac6653aSJeff Kirsher 
1858c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1859c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
186056329137SBartlomiej Zolnierkiewicz 
186156329137SBartlomiej Zolnierkiewicz 	return ret;
18627ac6653aSJeff Kirsher }
18637ac6653aSJeff Kirsher 
186471fedb01SJoao Pinto /**
186571fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
186671fedb01SJoao Pinto  * @priv: private structure
1867ce736788SJoao Pinto  * @queue: TX queue index
186871fedb01SJoao Pinto  */
1869ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
18707ac6653aSJeff Kirsher {
1871132c32eeSOng Boon Leong 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
18727ac6653aSJeff Kirsher 	int i;
18737ac6653aSJeff Kirsher 
	/* stmmac_free_tx_buffer() is expected to account freed XSK frames
	 * in tx_q->xsk_frames_done (NOTE(review): confirm against helper).
	 */
1874132c32eeSOng Boon Leong 	tx_q->xsk_frames_done = 0;
1875132c32eeSOng Boon Leong 
1876aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++)
1877ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
1878132c32eeSOng Boon Leong 
	/* Report completed XSK frames back to the pool, then detach it */
1879132c32eeSOng Boon Leong 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1880132c32eeSOng Boon Leong 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1881132c32eeSOng Boon Leong 		tx_q->xsk_frames_done = 0;
1882132c32eeSOng Boon Leong 		tx_q->xsk_pool = NULL;
1883132c32eeSOng Boon Leong 	}
18847ac6653aSJeff Kirsher }
18857ac6653aSJeff Kirsher 
1886732fdf0eSGiuseppe CAVALLARO /**
18874ec236c7SFugang Duan  * stmmac_free_tx_skbufs - free TX skb buffers
18884ec236c7SFugang Duan  * @priv: private structure
18894ec236c7SFugang Duan  */
18904ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
18914ec236c7SFugang Duan {
18924ec236c7SFugang Duan 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
18934ec236c7SFugang Duan 	u32 queue;
18944ec236c7SFugang Duan 
18954ec236c7SFugang Duan 	for (queue = 0; queue < tx_queue_cnt; queue++)
18964ec236c7SFugang Duan 		dma_free_tx_skbufs(priv, queue);
18974ec236c7SFugang Duan }
18984ec236c7SFugang Duan 
18994ec236c7SFugang Duan /**
1900da5ec7f2SOng Boon Leong  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
190154139cf3SJoao Pinto  * @priv: private structure
1902da5ec7f2SOng Boon Leong  * @queue: RX queue index
190354139cf3SJoao Pinto  */
1904da5ec7f2SOng Boon Leong static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
190554139cf3SJoao Pinto {
190654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
190754139cf3SJoao Pinto 
190854139cf3SJoao Pinto 	/* Release the DMA RX socket buffers */
1909bba2556eSOng Boon Leong 	if (rx_q->xsk_pool)
1910bba2556eSOng Boon Leong 		dma_free_rx_xskbufs(priv, queue);
1911bba2556eSOng Boon Leong 	else
191254139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
191354139cf3SJoao Pinto 
1914bba2556eSOng Boon Leong 	rx_q->buf_alloc_num = 0;
1915bba2556eSOng Boon Leong 	rx_q->xsk_pool = NULL;
1916bba2556eSOng Boon Leong 
191754139cf3SJoao Pinto 	/* Free DMA regions of consistent memory previously allocated */
191854139cf3SJoao Pinto 	if (!priv->extend_desc)
1919aa042f60SSong, Yoong Siang 		dma_free_coherent(priv->device, priv->dma_rx_size *
1920aa042f60SSong, Yoong Siang 				  sizeof(struct dma_desc),
192154139cf3SJoao Pinto 				  rx_q->dma_rx, rx_q->dma_rx_phy);
192254139cf3SJoao Pinto 	else
1923aa042f60SSong, Yoong Siang 		dma_free_coherent(priv->device, priv->dma_rx_size *
192454139cf3SJoao Pinto 				  sizeof(struct dma_extended_desc),
192554139cf3SJoao Pinto 				  rx_q->dma_erx, rx_q->dma_rx_phy);
192654139cf3SJoao Pinto 
	/* Unregister XDP RxQ info only if it was registered for this queue */
1927be8b38a7SOng Boon Leong 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1928be8b38a7SOng Boon Leong 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1929be8b38a7SOng Boon Leong 
19302af6106aSJose Abreu 	kfree(rx_q->buf_pool);
1931c3f812ceSJonathan Lemon 	if (rx_q->page_pool)
19322af6106aSJose Abreu 		page_pool_destroy(rx_q->page_pool);
19332af6106aSJose Abreu }
1934da5ec7f2SOng Boon Leong 
1935da5ec7f2SOng Boon Leong static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1936da5ec7f2SOng Boon Leong {
1937da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
1938da5ec7f2SOng Boon Leong 	u32 queue;
1939da5ec7f2SOng Boon Leong 
1940da5ec7f2SOng Boon Leong 	/* Free RX queue resources */
1941da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++)
1942da5ec7f2SOng Boon Leong 		__free_dma_rx_desc_resources(priv, queue);
194354139cf3SJoao Pinto }
194454139cf3SJoao Pinto 
194554139cf3SJoao Pinto /**
1946da5ec7f2SOng Boon Leong  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1947ce736788SJoao Pinto  * @priv: private structure
1948da5ec7f2SOng Boon Leong  * @queue: TX queue index
1949ce736788SJoao Pinto  */
1950da5ec7f2SOng Boon Leong static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1951ce736788SJoao Pinto {
1952ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1953579a25a8SJose Abreu 	size_t size;
1954579a25a8SJose Abreu 	void *addr;
1955ce736788SJoao Pinto 
1956ce736788SJoao Pinto 	/* Release the DMA TX socket buffers */
1957ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, queue);
1958ce736788SJoao Pinto 
	/* Pick the descriptor flavor this queue was allocated with */
1959579a25a8SJose Abreu 	if (priv->extend_desc) {
1960579a25a8SJose Abreu 		size = sizeof(struct dma_extended_desc);
1961579a25a8SJose Abreu 		addr = tx_q->dma_etx;
1962579a25a8SJose Abreu 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1963579a25a8SJose Abreu 		size = sizeof(struct dma_edesc);
1964579a25a8SJose Abreu 		addr = tx_q->dma_entx;
1965579a25a8SJose Abreu 	} else {
1966579a25a8SJose Abreu 		size = sizeof(struct dma_desc);
1967579a25a8SJose Abreu 		addr = tx_q->dma_tx;
1968579a25a8SJose Abreu 	}
1969579a25a8SJose Abreu 
1970aa042f60SSong, Yoong Siang 	size *= priv->dma_tx_size;
1971579a25a8SJose Abreu 
1972579a25a8SJose Abreu 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1973ce736788SJoao Pinto 
1974ce736788SJoao Pinto 	kfree(tx_q->tx_skbuff_dma);
1975ce736788SJoao Pinto 	kfree(tx_q->tx_skbuff);
1976ce736788SJoao Pinto }
1977da5ec7f2SOng Boon Leong 
1978da5ec7f2SOng Boon Leong static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1979da5ec7f2SOng Boon Leong {
1980da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
1981da5ec7f2SOng Boon Leong 	u32 queue;
1982da5ec7f2SOng Boon Leong 
1983da5ec7f2SOng Boon Leong 	/* Free TX queue resources */
1984da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++)
1985da5ec7f2SOng Boon Leong 		__free_dma_tx_desc_resources(priv, queue);
1986ce736788SJoao Pinto }
1987ce736788SJoao Pinto 
1988ce736788SJoao Pinto /**
1989da5ec7f2SOng Boon Leong  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1990732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1991da5ec7f2SOng Boon Leong  * @queue: RX queue index
1992732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extended or
1993732fdf0eSGiuseppe CAVALLARO  * basic) this function allocates the RX resources of one queue: the
1994732fdf0eSGiuseppe CAVALLARO  * page pool, the buffer bookkeeping array, the coherent descriptor
1995732fdf0eSGiuseppe CAVALLARO  * area and the XDP RxQ info registration.
1996732fdf0eSGiuseppe CAVALLARO  */
1997da5ec7f2SOng Boon Leong static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
199809f8d696SSrinivas Kandagatla {
199954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2000be8b38a7SOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
2001da5ec7f2SOng Boon Leong 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
20022af6106aSJose Abreu 	struct page_pool_params pp_params = { 0 };
20034f28bd95SThierry Reding 	unsigned int num_pages;
2004132c32eeSOng Boon Leong 	unsigned int napi_id;
2005be8b38a7SOng Boon Leong 	int ret;
200654139cf3SJoao Pinto 
	/* On any failure below, the caller unwinds all queues via
	 * free_dma_rx_desc_resources().
	 */
200754139cf3SJoao Pinto 	rx_q->queue_index = queue;
200854139cf3SJoao Pinto 	rx_q->priv_data = priv;
200954139cf3SJoao Pinto 
	/* One page pool per RX queue; order sized to cover dma_buf_sz.
	 * NOTE(review): ilog2() rounds down, so a non-power-of-two
	 * num_pages would undersize the pool order — confirm dma_buf_sz
	 * is constrained accordingly.
	 */
20105fabb012SOng Boon Leong 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2011aa042f60SSong, Yoong Siang 	pp_params.pool_size = priv->dma_rx_size;
20124f28bd95SThierry Reding 	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
20134f28bd95SThierry Reding 	pp_params.order = ilog2(num_pages);
20142af6106aSJose Abreu 	pp_params.nid = dev_to_node(priv->device);
20152af6106aSJose Abreu 	pp_params.dev = priv->device;
20165fabb012SOng Boon Leong 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
20175fabb012SOng Boon Leong 	pp_params.offset = stmmac_rx_offset(priv);
20185fabb012SOng Boon Leong 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
20195bacd778SLABBE Corentin 
20202af6106aSJose Abreu 	rx_q->page_pool = page_pool_create(&pp_params);
20212af6106aSJose Abreu 	if (IS_ERR(rx_q->page_pool)) {
20222af6106aSJose Abreu 		ret = PTR_ERR(rx_q->page_pool);
20232af6106aSJose Abreu 		rx_q->page_pool = NULL;
2024da5ec7f2SOng Boon Leong 		return ret;
20252af6106aSJose Abreu 	}
20262af6106aSJose Abreu 
2027aa042f60SSong, Yoong Siang 	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
2028aa042f60SSong, Yoong Siang 				 sizeof(*rx_q->buf_pool),
20295bacd778SLABBE Corentin 				 GFP_KERNEL);
20302af6106aSJose Abreu 	if (!rx_q->buf_pool)
2031da5ec7f2SOng Boon Leong 		return -ENOMEM;
20325bacd778SLABBE Corentin 
20335bacd778SLABBE Corentin 	if (priv->extend_desc) {
2034750afb08SLuis Chamberlain 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2035aa042f60SSong, Yoong Siang 						   priv->dma_rx_size *
2036aa042f60SSong, Yoong Siang 						   sizeof(struct dma_extended_desc),
203754139cf3SJoao Pinto 						   &rx_q->dma_rx_phy,
20385bacd778SLABBE Corentin 						   GFP_KERNEL);
203954139cf3SJoao Pinto 		if (!rx_q->dma_erx)
2040da5ec7f2SOng Boon Leong 			return -ENOMEM;
20415bacd778SLABBE Corentin 
204271fedb01SJoao Pinto 	} else {
2043750afb08SLuis Chamberlain 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2044aa042f60SSong, Yoong Siang 						  priv->dma_rx_size *
2045aa042f60SSong, Yoong Siang 						  sizeof(struct dma_desc),
204654139cf3SJoao Pinto 						  &rx_q->dma_rx_phy,
204771fedb01SJoao Pinto 						  GFP_KERNEL);
204854139cf3SJoao Pinto 		if (!rx_q->dma_rx)
2049da5ec7f2SOng Boon Leong 			return -ENOMEM;
205071fedb01SJoao Pinto 	}
2051be8b38a7SOng Boon Leong 
	/* AF_XDP zero-copy queues are serviced by the rxtx NAPI instance */
2052132c32eeSOng Boon Leong 	if (stmmac_xdp_is_enabled(priv) &&
2053132c32eeSOng Boon Leong 	    test_bit(queue, priv->af_xdp_zc_qps))
2054132c32eeSOng Boon Leong 		napi_id = ch->rxtx_napi.napi_id;
2055132c32eeSOng Boon Leong 	else
2056132c32eeSOng Boon Leong 		napi_id = ch->rx_napi.napi_id;
2057132c32eeSOng Boon Leong 
2058be8b38a7SOng Boon Leong 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2059be8b38a7SOng Boon Leong 			       rx_q->queue_index,
2060132c32eeSOng Boon Leong 			       napi_id);
2061be8b38a7SOng Boon Leong 	if (ret) {
2062be8b38a7SOng Boon Leong 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		/* NOTE(review): the xdp_rxq_info_reg() error code is
		 * discarded and -EINVAL returned instead.
		 */
2063da5ec7f2SOng Boon Leong 		return -EINVAL;
2064be8b38a7SOng Boon Leong 	}
2065da5ec7f2SOng Boon Leong 
2066da5ec7f2SOng Boon Leong 	return 0;
2067da5ec7f2SOng Boon Leong }
2068da5ec7f2SOng Boon Leong 
2069da5ec7f2SOng Boon Leong static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2070da5ec7f2SOng Boon Leong {
2071da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
2072da5ec7f2SOng Boon Leong 	u32 queue;
2073da5ec7f2SOng Boon Leong 	int ret;
2074da5ec7f2SOng Boon Leong 
2075da5ec7f2SOng Boon Leong 	/* RX queues buffers and DMA */
2076da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++) {
2077da5ec7f2SOng Boon Leong 		ret = __alloc_dma_rx_desc_resources(priv, queue);
2078da5ec7f2SOng Boon Leong 		if (ret)
2079da5ec7f2SOng Boon Leong 			goto err_dma;
208054139cf3SJoao Pinto 	}
208171fedb01SJoao Pinto 
208271fedb01SJoao Pinto 	return 0;
208371fedb01SJoao Pinto 
208471fedb01SJoao Pinto err_dma:
208554139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
208654139cf3SJoao Pinto 
208771fedb01SJoao Pinto 	return ret;
208871fedb01SJoao Pinto }
208971fedb01SJoao Pinto 
209071fedb01SJoao Pinto /**
2091da5ec7f2SOng Boon Leong  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
209271fedb01SJoao Pinto  * @priv: private structure
2093da5ec7f2SOng Boon Leong  * @queue: TX queue index
209471fedb01SJoao Pinto  * Description: according to which descriptor can be used (extended, TBS
209571fedb01SJoao Pinto  * enhanced or basic) this function allocates the TX resources of one
209671fedb01SJoao Pinto  * queue: the skbuff bookkeeping arrays and the coherent descriptor
209771fedb01SJoao Pinto  * area.
209871fedb01SJoao Pinto  */
2099da5ec7f2SOng Boon Leong static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
210071fedb01SJoao Pinto {
2101ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2102579a25a8SJose Abreu 	size_t size;
2103579a25a8SJose Abreu 	void *addr;
2104ce736788SJoao Pinto 
	/* On any failure below, the caller frees partial allocations via
	 * free_dma_tx_desc_resources().
	 */
2105ce736788SJoao Pinto 	tx_q->queue_index = queue;
2106ce736788SJoao Pinto 	tx_q->priv_data = priv;
2107ce736788SJoao Pinto 
2108aa042f60SSong, Yoong Siang 	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2109ce736788SJoao Pinto 				      sizeof(*tx_q->tx_skbuff_dma),
211071fedb01SJoao Pinto 				      GFP_KERNEL);
2111ce736788SJoao Pinto 	if (!tx_q->tx_skbuff_dma)
2112da5ec7f2SOng Boon Leong 		return -ENOMEM;
211371fedb01SJoao Pinto 
2114aa042f60SSong, Yoong Siang 	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2115ce736788SJoao Pinto 				  sizeof(struct sk_buff *),
211671fedb01SJoao Pinto 				  GFP_KERNEL);
2117ce736788SJoao Pinto 	if (!tx_q->tx_skbuff)
2118da5ec7f2SOng Boon Leong 		return -ENOMEM;
211971fedb01SJoao Pinto 
	/* Descriptor flavor: extended, TBS enhanced, or basic */
2120579a25a8SJose Abreu 	if (priv->extend_desc)
2121579a25a8SJose Abreu 		size = sizeof(struct dma_extended_desc);
2122579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2123579a25a8SJose Abreu 		size = sizeof(struct dma_edesc);
2124579a25a8SJose Abreu 	else
2125579a25a8SJose Abreu 		size = sizeof(struct dma_desc);
2126579a25a8SJose Abreu 
2127aa042f60SSong, Yoong Siang 	size *= priv->dma_tx_size;
2128579a25a8SJose Abreu 
2129579a25a8SJose Abreu 	addr = dma_alloc_coherent(priv->device, size,
2130579a25a8SJose Abreu 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2131579a25a8SJose Abreu 	if (!addr)
2132da5ec7f2SOng Boon Leong 		return -ENOMEM;
2133579a25a8SJose Abreu 
2134579a25a8SJose Abreu 	if (priv->extend_desc)
2135579a25a8SJose Abreu 		tx_q->dma_etx = addr;
2136579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2137579a25a8SJose Abreu 		tx_q->dma_entx = addr;
2138579a25a8SJose Abreu 	else
2139579a25a8SJose Abreu 		tx_q->dma_tx = addr;
2140da5ec7f2SOng Boon Leong 
2141da5ec7f2SOng Boon Leong 	return 0;
2142da5ec7f2SOng Boon Leong }
2143da5ec7f2SOng Boon Leong 
2144da5ec7f2SOng Boon Leong static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2145da5ec7f2SOng Boon Leong {
2146da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
2147da5ec7f2SOng Boon Leong 	u32 queue;
2148da5ec7f2SOng Boon Leong 	int ret;
2149da5ec7f2SOng Boon Leong 
2150da5ec7f2SOng Boon Leong 	/* TX queues buffers and DMA */
2151da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++) {
2152da5ec7f2SOng Boon Leong 		ret = __alloc_dma_tx_desc_resources(priv, queue);
2153da5ec7f2SOng Boon Leong 		if (ret)
2154da5ec7f2SOng Boon Leong 			goto err_dma;
21555bacd778SLABBE Corentin 	}
21565bacd778SLABBE Corentin 
21575bacd778SLABBE Corentin 	return 0;
21585bacd778SLABBE Corentin 
215962242260SChristophe Jaillet err_dma:
2160ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
216109f8d696SSrinivas Kandagatla 	return ret;
21625bacd778SLABBE Corentin }
216309f8d696SSrinivas Kandagatla 
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: allocates the RX resources first and, only if that
 * succeeded, the TX resources.
 * Return: 0 on success, the first error code otherwise.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret;

	ret = alloc_dma_rx_desc_resources(priv);
	if (ret)
		return ret;

	return alloc_dma_tx_desc_resources(priv);
}
218471fedb01SJoao Pinto 
218571fedb01SJoao Pinto /**
218671fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
218771fedb01SJoao Pinto  * @priv: private structure
 * Description: TX resources are released first, RX last, so that any
 * pending XDP_TX buffers are returned before the RX pools go away.
218871fedb01SJoao Pinto  */
218971fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
219071fedb01SJoao Pinto {
219171fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
219271fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
2193be8b38a7SOng Boon Leong 
2194be8b38a7SOng Boon Leong 	/* Release the DMA RX socket buffers later
2195be8b38a7SOng Boon Leong 	 * to ensure all pending XDP_TX buffers are returned.
2196be8b38a7SOng Boon Leong 	 */
2197be8b38a7SOng Boon Leong 	free_dma_rx_desc_resources(priv);
219871fedb01SJoao Pinto }
219971fedb01SJoao Pinto 
220071fedb01SJoao Pinto /**
22019eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
22029eb12474Sjpinto  *  @priv: driver private structure
22039eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
22049eb12474Sjpinto  */
22059eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
22069eb12474Sjpinto {
22074f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
22084f6046f5SJoao Pinto 	int queue;
22094f6046f5SJoao Pinto 	u8 mode;
22109eb12474Sjpinto 
22114f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
22124f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2213c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
22144f6046f5SJoao Pinto 	}
22159eb12474Sjpinto }
22169eb12474Sjpinto 
22179eb12474Sjpinto /**
2218ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
2219ae4f0d46SJoao Pinto  * @priv: driver private structure
2220ae4f0d46SJoao Pinto  * @chan: RX channel index
2221ae4f0d46SJoao Pinto  * Description:
2222ae4f0d46SJoao Pinto  * This starts an RX DMA channel.
2223ae4f0d46SJoao Pinto  */
2224ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2225ae4f0d46SJoao Pinto {
2226ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2227a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
2228ae4f0d46SJoao Pinto }
2229ae4f0d46SJoao Pinto 
2230ae4f0d46SJoao Pinto /**
2231ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
2232ae4f0d46SJoao Pinto  * @priv: driver private structure
2233ae4f0d46SJoao Pinto  * @chan: TX channel index
2234ae4f0d46SJoao Pinto  * Description:
2235ae4f0d46SJoao Pinto  * This starts a TX DMA channel.
2236ae4f0d46SJoao Pinto  */
2237ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2238ae4f0d46SJoao Pinto {
2239ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2240a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
2241ae4f0d46SJoao Pinto }
2242ae4f0d46SJoao Pinto 
2243ae4f0d46SJoao Pinto /**
2244ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
2245ae4f0d46SJoao Pinto  * @priv: driver private structure
2246ae4f0d46SJoao Pinto  * @chan: RX channel index
2247ae4f0d46SJoao Pinto  * Description:
2248ae4f0d46SJoao Pinto  * This stops an RX DMA channel.
2249ae4f0d46SJoao Pinto  */
2250ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2251ae4f0d46SJoao Pinto {
2252ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2253a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2254ae4f0d46SJoao Pinto }
2255ae4f0d46SJoao Pinto 
2256ae4f0d46SJoao Pinto /**
2257ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
2258ae4f0d46SJoao Pinto  * @priv: driver private structure
2259ae4f0d46SJoao Pinto  * @chan: TX channel index
2260ae4f0d46SJoao Pinto  * Description:
2261ae4f0d46SJoao Pinto  * This stops a TX DMA channel.
2262ae4f0d46SJoao Pinto  */
2263ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2264ae4f0d46SJoao Pinto {
2265ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2266a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2267ae4f0d46SJoao Pinto }
2268ae4f0d46SJoao Pinto 
2269ae4f0d46SJoao Pinto /**
2270ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
2271ae4f0d46SJoao Pinto  * @priv: driver private structure
2272ae4f0d46SJoao Pinto  * Description:
2273ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
2274ae4f0d46SJoao Pinto  */
2275ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
2276ae4f0d46SJoao Pinto {
2277ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2278ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2279ae4f0d46SJoao Pinto 	u32 chan = 0;
2280ae4f0d46SJoao Pinto 
2281ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2282ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
2283ae4f0d46SJoao Pinto 
2284ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2285ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
2286ae4f0d46SJoao Pinto }
2287ae4f0d46SJoao Pinto 
2288ae4f0d46SJoao Pinto /**
2289ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2290ae4f0d46SJoao Pinto  * @priv: driver private structure
2291ae4f0d46SJoao Pinto  * Description:
2292ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
2293ae4f0d46SJoao Pinto  */
2294ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2295ae4f0d46SJoao Pinto {
2296ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2297ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2298ae4f0d46SJoao Pinto 	u32 chan = 0;
2299ae4f0d46SJoao Pinto 
2300ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2301ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
2302ae4f0d46SJoao Pinto 
2303ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2304ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
2305ae4f0d46SJoao Pinto }
2306ae4f0d46SJoao Pinto 
2307ae4f0d46SJoao Pinto /**
23087ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
230932ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
2310732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
2311732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
23127ac6653aSJeff Kirsher  */
23137ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
23147ac6653aSJeff Kirsher {
23156deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
23166deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2317f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
231852a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
23196deee222SJoao Pinto 	u32 txmode = 0;
23206deee222SJoao Pinto 	u32 rxmode = 0;
23216deee222SJoao Pinto 	u32 chan = 0;
2322a0daae13SJose Abreu 	u8 qmode = 0;
2323f88203a2SVince Bridgers 
	/* Fall back to the FIFO sizes reported by the HW capabilities when
	 * the platform does not provide them.
	 */
232411fbf811SThierry Reding 	if (rxfifosz == 0)
232511fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
232652a76235SJose Abreu 	if (txfifosz == 0)
232752a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
232852a76235SJose Abreu 
232952a76235SJose Abreu 	/* Adjust for real per queue fifo size */
	/* NOTE(review): assumes both queue counts are non-zero (guaranteed
	 * by platform setup — confirm), otherwise this divides by zero.
	 */
233052a76235SJose Abreu 	rxfifosz /= rx_channels_count;
233152a76235SJose Abreu 	txfifosz /= tx_channels_count;
233211fbf811SThierry Reding 
	/* Choose threshold mode vs Store-And-Forward per platform flags */
23336deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
23346deee222SJoao Pinto 		txmode = tc;
23356deee222SJoao Pinto 		rxmode = tc;
23366deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
23377ac6653aSJeff Kirsher 		/*
23387ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
23397ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
23407ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
23417ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
23427ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
23437ac6653aSJeff Kirsher 		 */
23446deee222SJoao Pinto 		txmode = SF_DMA_MODE;
23456deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
2346b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
23476deee222SJoao Pinto 	} else {
23486deee222SJoao Pinto 		txmode = tc;
23496deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
23506deee222SJoao Pinto 	}
23516deee222SJoao Pinto 
23526deee222SJoao Pinto 	/* configure all channels */
2353a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
2354bba2556eSOng Boon Leong 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2355bba2556eSOng Boon Leong 		u32 buf_size;
2356bba2556eSOng Boon Leong 
2357a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
23586deee222SJoao Pinto 
2359a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2360a0daae13SJose Abreu 				rxfifosz, qmode);
2361bba2556eSOng Boon Leong 
		/* An attached XSK pool overrides the default DMA buffer size */
2362bba2556eSOng Boon Leong 		if (rx_q->xsk_pool) {
2363bba2556eSOng Boon Leong 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2364bba2556eSOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2365bba2556eSOng Boon Leong 					      buf_size,
23664205c88eSJose Abreu 					      chan);
2367bba2556eSOng Boon Leong 		} else {
2368bba2556eSOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2369bba2556eSOng Boon Leong 					      priv->dma_buf_sz,
2370bba2556eSOng Boon Leong 					      chan);
2371bba2556eSOng Boon Leong 		}
2372a0daae13SJose Abreu 	}
2373a0daae13SJose Abreu 
2374a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
2375a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2376a0daae13SJose Abreu 
2377a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2378a0daae13SJose Abreu 				txfifosz, qmode);
2379a0daae13SJose Abreu 	}
23807ac6653aSJeff Kirsher }
23817ac6653aSJeff Kirsher 
/**
 * stmmac_xdp_xmit_zc - submit AF_XDP (zero-copy) frames on a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit in this call
 *
 * Pulls descriptors from the queue's XSK pool and maps them straight onto
 * the DMA TX ring (no skb is built).  Shares the ring with the regular
 * slow path, so submission stops early when the ring runs low.
 *
 * Return: true when budget is still left AND the XSK TX ring was fully
 * drained (no more pending XSK TX); callers use this to re-enable the IRQ.
 */
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct xsk_buff_pool *pool = tx_q->xsk_pool;
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc = NULL;
	struct xdp_desc xdp_desc;
	bool work_done = true;

	/* Avoids TX time-out as we are sharing with slow path */
	nq->trans_start = jiffies;

	/* Never submit more than the ring currently has room for */
	budget = min(budget, stmmac_tx_avail(priv, queue));

	while (budget-- > 0) {
		dma_addr_t dma_addr;
		bool set_ic;

		/* We are sharing with slow path and stop XSK TX desc submission when
		 * available TX ring is less than threshold.
		 */
		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
		    !netif_carrier_ok(priv->dev)) {
			work_done = false;
			break;
		}

		if (!xsk_tx_peek_desc(pool, &xdp_desc))
			break;

		/* Select the descriptor matching the ring layout in use
		 * (extended, enhanced-TBS or basic).
		 */
		if (likely(priv->extend_desc))
			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			tx_desc = &tx_q->dma_entx[entry].basic;
		else
			tx_desc = tx_q->dma_tx + entry;

		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;

		/* To return XDP buffer to XSK pool, we simple call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
		tx_q->tx_skbuff_dma[entry].buf = 0;
		tx_q->xdpf[entry] = NULL;

		tx_q->tx_skbuff_dma[entry].map_as_page = false;
		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
		tx_q->tx_skbuff_dma[entry].last_segment = true;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		stmmac_set_desc_addr(priv, tx_desc, dma_addr);

		tx_q->tx_count_frames++;

		/* Request a completion interrupt only every
		 * tx_coal_frames[queue] frames (IRQ coalescing).
		 */
		if (!priv->tx_coal_frames[queue])
			set_ic = false;
		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
			set_ic = true;
		else
			set_ic = false;

		if (set_ic) {
			tx_q->tx_count_frames = 0;
			stmmac_set_tx_ic(priv, tx_desc);
			priv->xstats.tx_set_ic_bit++;
		}

		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
				       true, priv->mode, true, true,
				       xdp_desc.len);

		stmmac_enable_dma_transmission(priv, priv->ioaddr);

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
		entry = tx_q->cur_tx;
	}

	/* Only flush/release if at least one descriptor was filled */
	if (tx_desc) {
		stmmac_flush_tx_descriptors(priv, queue);
		xsk_tx_release(pool);
	}

	/* Return true if all of the 3 conditions are met
	 *  a) TX Budget is still available
	 *  b) work_done = true when XSK TX desc peek is empty (no more
	 *     pending XSK TX for transmission)
	 */
	return !!budget && work_done;
}
2476132c32eeSOng Boon Leong 
/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: napi budget limiting this functions packet handling
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 * Return: the larger of the number of reclaimed descriptors and the XSK TX
 * decision (budget / budget - 1), used by NAPI to decide whether to repoll.
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, xmits = 0, count = 0;

	/* Serialize against the xmit path on this queue */
	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	tx_q->xsk_frames_done = 0;

	entry = tx_q->dirty_tx;

	/* Try to clean all TX complete frame in 1 shot */
	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		struct dma_desc *p;
		int status;

		/* Figure out which kind of buffer this ring slot carries */
		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdpf = tx_q->xdpf[entry];
			skb = NULL;
		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			xdpf = NULL;
			skb = tx_q->tx_skbuff[entry];
		} else {
			xdpf = NULL;
			skb = NULL;
		}

		/* Pick the descriptor matching the ring layout in use */
		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[entry].basic;
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
				priv->xstats.txq_stats[queue].tx_pkt_n++;
			}
			if (skb)
				stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		/* Unmap the DMA buffer unless it is XDP_TX memory, which
		 * is recycled via xdp_return_frame_rx_napi() below.
		 */
		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		/* Release XDP frames according to how they were submitted */
		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdp_return_frame(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		/* XSK buffers are returned in bulk via xsk_tx_completed() */
		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
			tx_q->xsk_frames_done++;

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			if (likely(skb)) {
				pkts_compl++;
				bytes_compl += skb->len;
				dev_consume_skb_any(skb);
				tx_q->tx_skbuff[entry] = NULL;
			}
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	/* Report completions to byte queue limits */
	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	/* Restart a stopped queue once enough descriptors are free again */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if (tx_q->xsk_pool) {
		bool work_done;

		if (tx_q->xsk_frames_done)
			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);

		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
			xsk_set_tx_need_wakeup(tx_q->xsk_pool);

		/* For XSK TX, we try to send as many as possible.
		 * If XSK work done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to reenable TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
		 */
		work_done = stmmac_xdp_xmit_zc(priv, queue,
					       STMMAC_XSK_TX_BUDGET_MAX);
		if (work_done)
			xmits = budget - 1;
		else
			xmits = budget;
	}

	/* Re-arm the EEE software timer when LPI entry is timer-driven */
	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
	    priv->eee_sw_timer_en) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		hrtimer_start(&tx_q->txtimer,
			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
			      HRTIMER_MODE_REL);

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	/* Combine decisions from TX clean and XSK TX */
	return max(count, xmits);
}
26547ac6653aSJeff Kirsher 
26557ac6653aSJeff Kirsher /**
2656732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
265732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
26585bacd778SLABBE Corentin  * @chan: channel index
26597ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2660732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
26617ac6653aSJeff Kirsher  */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

	/* Stop the software queue first so no new frames are submitted
	 * while the ring is being torn down.
	 */
	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	/* Halt the DMA, drop in-flight buffers and reinitialize the ring */
	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	stmmac_clear_tx_descriptors(priv, chan);
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	/* Reset byte-queue-limit accounting for the emptied queue */
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	/* Reprogram the channel with the ring base address and restart it */
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
26827ac6653aSJeff Kirsher 
268332ceabcaSGiuseppe CAVALLARO /**
26846deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
26856deee222SJoao Pinto  *  @priv: driver private structure
26866deee222SJoao Pinto  *  @txmode: TX operating mode
26876deee222SJoao Pinto  *  @rxmode: RX operating mode
26886deee222SJoao Pinto  *  @chan: channel index
26896deee222SJoao Pinto  *  Description: it is used for configuring of the DMA operation mode in
26906deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
26916deee222SJoao Pinto  *  mode.
26926deee222SJoao Pinto  */
26936deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
26946deee222SJoao Pinto 					  u32 rxmode, u32 chan)
26956deee222SJoao Pinto {
2696a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2697a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
269852a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
269952a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
27006deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
270152a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
27026deee222SJoao Pinto 
27036deee222SJoao Pinto 	if (rxfifosz == 0)
27046deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
270552a76235SJose Abreu 	if (txfifosz == 0)
270652a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
270752a76235SJose Abreu 
270852a76235SJose Abreu 	/* Adjust for real per queue fifo size */
270952a76235SJose Abreu 	rxfifosz /= rx_channels_count;
271052a76235SJose Abreu 	txfifosz /= tx_channels_count;
27116deee222SJoao Pinto 
2712ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2713ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
27146deee222SJoao Pinto }
27156deee222SJoao Pinto 
27168bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
27178bf993a5SJose Abreu {
271863a550fcSJose Abreu 	int ret;
27198bf993a5SJose Abreu 
2720c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
27218bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2722c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
27238bf993a5SJose Abreu 		stmmac_global_err(priv);
2724c10d4c82SJose Abreu 		return true;
2725c10d4c82SJose Abreu 	}
2726c10d4c82SJose Abreu 
2727c10d4c82SJose Abreu 	return false;
27288bf993a5SJose Abreu }
27298bf993a5SJose Abreu 
27307e1c520cSOng Boon Leong static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
27318fce3331SJose Abreu {
27328fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
27337e1c520cSOng Boon Leong 						 &priv->xstats, chan, dir);
2734132c32eeSOng Boon Leong 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2735132c32eeSOng Boon Leong 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
27368fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
2737132c32eeSOng Boon Leong 	struct napi_struct *rx_napi;
2738132c32eeSOng Boon Leong 	struct napi_struct *tx_napi;
2739021bd5e3SJose Abreu 	unsigned long flags;
27408fce3331SJose Abreu 
2741132c32eeSOng Boon Leong 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2742132c32eeSOng Boon Leong 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2743132c32eeSOng Boon Leong 
27444ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2745132c32eeSOng Boon Leong 		if (napi_schedule_prep(rx_napi)) {
2746021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2747021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2748021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2749132c32eeSOng Boon Leong 			__napi_schedule(rx_napi);
27503ba07debSJose Abreu 		}
27514ccb4585SJose Abreu 	}
27524ccb4585SJose Abreu 
2753021bd5e3SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2754132c32eeSOng Boon Leong 		if (napi_schedule_prep(tx_napi)) {
2755021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2756021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2757021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2758132c32eeSOng Boon Leong 			__napi_schedule(tx_napi);
2759021bd5e3SJose Abreu 		}
2760021bd5e3SJose Abreu 	}
27618fce3331SJose Abreu 
27628fce3331SJose Abreu 	return status;
27638fce3331SJose Abreu }
27648fce3331SJose Abreu 
27656deee222SJoao Pinto /**
2766732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
276732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
276832ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2769732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedule poll method in case of some
2770732fdf0eSGiuseppe CAVALLARO  * work can be done.
277132ceabcaSGiuseppe CAVALLARO  */
27727ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
27737ac6653aSJeff Kirsher {
2774d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
27755a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
27765a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
27775a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2778d62a107aSJoao Pinto 	u32 chan;
27798ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
27808ac60ffbSKees Cook 
27818ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
27828ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
27838ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
278468e5cfafSJoao Pinto 
27855a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
27867e1c520cSOng Boon Leong 		status[chan] = stmmac_napi_check(priv, chan,
27877e1c520cSOng Boon Leong 						 DMA_DIR_RXTX);
2788d62a107aSJoao Pinto 
27895a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
27905a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
27917ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2792b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2793b2dec116SSonic Zhang 			    (tc <= 256)) {
27947ac6653aSJeff Kirsher 				tc += 64;
2795c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2796d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2797d62a107aSJoao Pinto 								      tc,
2798d62a107aSJoao Pinto 								      tc,
2799d62a107aSJoao Pinto 								      chan);
2800c405abe2SSonic Zhang 				else
2801d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2802d62a107aSJoao Pinto 								    tc,
2803d62a107aSJoao Pinto 								    SF_DMA_MODE,
2804d62a107aSJoao Pinto 								    chan);
28057ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
28067ac6653aSJeff Kirsher 			}
28075a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
28084e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
28097ac6653aSJeff Kirsher 		}
2810d62a107aSJoao Pinto 	}
2811d62a107aSJoao Pinto }
28127ac6653aSJeff Kirsher 
281332ceabcaSGiuseppe CAVALLARO /**
281432ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
281532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
281632ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
281732ceabcaSGiuseppe CAVALLARO  */
28181c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
28191c901a46SGiuseppe CAVALLARO {
28201c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
28211c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
28221c901a46SGiuseppe CAVALLARO 
28233b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
28244f795b25SGiuseppe CAVALLARO 
28254f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
28263b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
28271c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
28284f795b25SGiuseppe CAVALLARO 	} else
282938ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
28301c901a46SGiuseppe CAVALLARO }
28311c901a46SGiuseppe CAVALLARO 
2832732fdf0eSGiuseppe CAVALLARO /**
2833732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
283432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
283519e30c14SGiuseppe CAVALLARO  * Description:
283619e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2837e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
283819e30c14SGiuseppe CAVALLARO  *  This can be also used to override the value passed through the
283919e30c14SGiuseppe CAVALLARO  *  platform and necessary for old MAC10/100 and GMAC chips.
2840e7434821SGiuseppe CAVALLARO  */
2841e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2842e7434821SGiuseppe CAVALLARO {
2843a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2844e7434821SGiuseppe CAVALLARO }
2845e7434821SGiuseppe CAVALLARO 
284632ceabcaSGiuseppe CAVALLARO /**
2847732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
284832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
284932ceabcaSGiuseppe CAVALLARO  * Description:
285032ceabcaSGiuseppe CAVALLARO  * it is to verify if the MAC address is valid, in case of failures it
285132ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
285232ceabcaSGiuseppe CAVALLARO  */
2853bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2854bfab27a1SGiuseppe CAVALLARO {
28557f9b8fe5SJakub Kicinski 	u8 addr[ETH_ALEN];
28567f9b8fe5SJakub Kicinski 
2857bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
28587f9b8fe5SJakub Kicinski 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
28597f9b8fe5SJakub Kicinski 		if (is_valid_ether_addr(addr))
28607f9b8fe5SJakub Kicinski 			eth_hw_addr_set(priv->dev, addr);
28617f9b8fe5SJakub Kicinski 		else
2862f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2863af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2864bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2865bfab27a1SGiuseppe CAVALLARO 	}
2866c88460b7SHans de Goede }
2867bfab27a1SGiuseppe CAVALLARO 
286832ceabcaSGiuseppe CAVALLARO /**
2869732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
287032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
287132ceabcaSGiuseppe CAVALLARO  * Description:
287232ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
287332ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
287432ceabcaSGiuseppe CAVALLARO  * in case of these are not passed a default is kept for the MAC or GMAC.
287532ceabcaSGiuseppe CAVALLARO  */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	/* CSR settings are applied to every channel, RX or TX */
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	/* Alternate descriptor size is needed when extended descriptors
	 * are used in ring mode.
	 */
	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	/* Reset the DMA engine before programming it */
	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* Tail pointer sits past the buffers already allocated */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		/* TX tail starts at the ring base (empty ring) */
		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}
29390f1f88a8SGiuseppe CAVALLARO 
29408fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
29418fce3331SJose Abreu {
29428fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
29438fce3331SJose Abreu 
2944db2f2842SOng Boon Leong 	hrtimer_start(&tx_q->txtimer,
2945db2f2842SOng Boon Leong 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2946d5a05e69SVincent Whitchurch 		      HRTIMER_MODE_REL);
29478fce3331SJose Abreu }
29488fce3331SJose Abreu 
/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the expired hrtimer, embedded in a struct stmmac_tx_queue
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean
 * (indirectly, by scheduling the channel's NAPI context).
 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	ch = &priv->channel[tx_q->queue_index];
	/* With an XSK pool attached, completions are handled by the combined
	 * rx/tx NAPI context instead of the TX-only one.
	 */
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		/* Mask this channel's TX DMA interrupt under ch->lock before
		 * handing work to NAPI (presumably re-enabled by the poll
		 * routine — confirm against the NAPI poll implementation).
		 */
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(napi);
	}

	/* One-shot timer: stmmac_tx_timer_arm() re-arms it as needed */
	return HRTIMER_NORESTART;
}
29769125cdd1SGiuseppe CAVALLARO 
29779125cdd1SGiuseppe CAVALLARO /**
2978d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
297932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
29809125cdd1SGiuseppe CAVALLARO  * Description:
2981d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
29829125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
29839125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
29849125cdd1SGiuseppe CAVALLARO  */
2985d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
29869125cdd1SGiuseppe CAVALLARO {
29878fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2988db2f2842SOng Boon Leong 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
29898fce3331SJose Abreu 	u32 chan;
29908fce3331SJose Abreu 
29918fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
29928fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
29938fce3331SJose Abreu 
2994db2f2842SOng Boon Leong 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2995db2f2842SOng Boon Leong 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2996db2f2842SOng Boon Leong 
2997d5a05e69SVincent Whitchurch 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2998d5a05e69SVincent Whitchurch 		tx_q->txtimer.function = stmmac_tx_timer;
29998fce3331SJose Abreu 	}
3000db2f2842SOng Boon Leong 
3001db2f2842SOng Boon Leong 	for (chan = 0; chan < rx_channel_count; chan++)
3002db2f2842SOng Boon Leong 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
30039125cdd1SGiuseppe CAVALLARO }
30049125cdd1SGiuseppe CAVALLARO 
30054854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
30064854ab99SJoao Pinto {
30074854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
30084854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
30094854ab99SJoao Pinto 	u32 chan;
30104854ab99SJoao Pinto 
30114854ab99SJoao Pinto 	/* set TX ring length */
30124854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
3013a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3014aa042f60SSong, Yoong Siang 				       (priv->dma_tx_size - 1), chan);
30154854ab99SJoao Pinto 
30164854ab99SJoao Pinto 	/* set RX ring length */
30174854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
3018a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3019aa042f60SSong, Yoong Siang 				       (priv->dma_rx_size - 1), chan);
30204854ab99SJoao Pinto }
30214854ab99SJoao Pinto 
30229125cdd1SGiuseppe CAVALLARO /**
30236a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
30246a3a7193SJoao Pinto  *  @priv: driver private structure
30256a3a7193SJoao Pinto  *  Description: It is used for setting TX queues weight
30266a3a7193SJoao Pinto  */
30276a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
30286a3a7193SJoao Pinto {
30296a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
30306a3a7193SJoao Pinto 	u32 weight;
30316a3a7193SJoao Pinto 	u32 queue;
30326a3a7193SJoao Pinto 
30336a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
30346a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
3035c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
30366a3a7193SJoao Pinto 	}
30376a3a7193SJoao Pinto }
30386a3a7193SJoao Pinto 
30396a3a7193SJoao Pinto /**
304019d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
304119d91873SJoao Pinto  *  @priv: driver private structure
304219d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
304319d91873SJoao Pinto  */
304419d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
304519d91873SJoao Pinto {
304619d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
304719d91873SJoao Pinto 	u32 mode_to_use;
304819d91873SJoao Pinto 	u32 queue;
304919d91873SJoao Pinto 
305044781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
305144781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
305219d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
305319d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
305419d91873SJoao Pinto 			continue;
305519d91873SJoao Pinto 
3056c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
305719d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
305819d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
305919d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
306019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
306119d91873SJoao Pinto 				queue);
306219d91873SJoao Pinto 	}
306319d91873SJoao Pinto }
306419d91873SJoao Pinto 
306519d91873SJoao Pinto /**
3066d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3067d43042f4SJoao Pinto  *  @priv: driver private structure
3068d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
3069d43042f4SJoao Pinto  */
3070d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3071d43042f4SJoao Pinto {
3072d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3073d43042f4SJoao Pinto 	u32 queue;
3074d43042f4SJoao Pinto 	u32 chan;
3075d43042f4SJoao Pinto 
3076d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3077d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
3078c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3079d43042f4SJoao Pinto 	}
3080d43042f4SJoao Pinto }
3081d43042f4SJoao Pinto 
3082d43042f4SJoao Pinto /**
3083a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3084a8f5102aSJoao Pinto  *  @priv: driver private structure
3085a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
3086a8f5102aSJoao Pinto  */
3087a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3088a8f5102aSJoao Pinto {
3089a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3090a8f5102aSJoao Pinto 	u32 queue;
3091a8f5102aSJoao Pinto 	u32 prio;
3092a8f5102aSJoao Pinto 
3093a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3094a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3095a8f5102aSJoao Pinto 			continue;
3096a8f5102aSJoao Pinto 
3097a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
3098c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3099a8f5102aSJoao Pinto 	}
3100a8f5102aSJoao Pinto }
3101a8f5102aSJoao Pinto 
3102a8f5102aSJoao Pinto /**
3103a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3104a8f5102aSJoao Pinto  *  @priv: driver private structure
3105a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
3106a8f5102aSJoao Pinto  */
3107a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3108a8f5102aSJoao Pinto {
3109a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3110a8f5102aSJoao Pinto 	u32 queue;
3111a8f5102aSJoao Pinto 	u32 prio;
3112a8f5102aSJoao Pinto 
3113a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
3114a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3115a8f5102aSJoao Pinto 			continue;
3116a8f5102aSJoao Pinto 
3117a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
3118c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3119a8f5102aSJoao Pinto 	}
3120a8f5102aSJoao Pinto }
3121a8f5102aSJoao Pinto 
3122a8f5102aSJoao Pinto /**
3123abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3124abe80fdcSJoao Pinto  *  @priv: driver private structure
3125abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
3126abe80fdcSJoao Pinto  */
3127abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3128abe80fdcSJoao Pinto {
3129abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3130abe80fdcSJoao Pinto 	u32 queue;
3131abe80fdcSJoao Pinto 	u8 packet;
3132abe80fdcSJoao Pinto 
3133abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3134abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
3135abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3136abe80fdcSJoao Pinto 			continue;
3137abe80fdcSJoao Pinto 
3138abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3139c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3140abe80fdcSJoao Pinto 	}
3141abe80fdcSJoao Pinto }
3142abe80fdcSJoao Pinto 
314376067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
314476067459SJose Abreu {
314576067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
314676067459SJose Abreu 		priv->rss.enable = false;
314776067459SJose Abreu 		return;
314876067459SJose Abreu 	}
314976067459SJose Abreu 
315076067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
315176067459SJose Abreu 		priv->rss.enable = true;
315276067459SJose Abreu 	else
315376067459SJose Abreu 		priv->rss.enable = false;
315476067459SJose Abreu 
315576067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
315676067459SJose Abreu 			     priv->plat->rx_queues_to_use);
315776067459SJose Abreu }
315876067459SJose Abreu 
/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring MTL (the MAC Transaction Layer):
 *  queue weights, scheduling algorithms, CBS, queue-to-DMA mapping,
 *  priorities, routing and RSS. Most steps only apply when more than one
 *  queue is in use.
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	/* Set TX queue weights (only meaningful with multiple TX queues) */
	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
				priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
				priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues (after the queue-to-channel mapping above) */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}
3208d0a9c9f9SJoao Pinto 
32098bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
32108bf993a5SJose Abreu {
3211c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
32128bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
32135ac712dcSWong Vee Khee 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
32145ac712dcSWong Vee Khee 					  priv->plat->safety_feat_cfg);
32158bf993a5SJose Abreu 	} else {
32168bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
32178bf993a5SJose Abreu 	}
32188bf993a5SJose Abreu }
32198bf993a5SJose Abreu 
32205a558611SOng Boon Leong static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
32215a558611SOng Boon Leong {
32225a558611SOng Boon Leong 	char *name;
32235a558611SOng Boon Leong 
32245a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3225db7c691dSMohammad Athari Bin Ismail 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
32265a558611SOng Boon Leong 
32275a558611SOng Boon Leong 	name = priv->wq_name;
32285a558611SOng Boon Leong 	sprintf(name, "%s-fpe", priv->dev->name);
32295a558611SOng Boon Leong 
32305a558611SOng Boon Leong 	priv->fpe_wq = create_singlethread_workqueue(name);
32315a558611SOng Boon Leong 	if (!priv->fpe_wq) {
32325a558611SOng Boon Leong 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
32335a558611SOng Boon Leong 
32345a558611SOng Boon Leong 		return -ENOMEM;
32355a558611SOng Boon Leong 	}
32365a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue start");
32375a558611SOng Boon Leong 
32385a558611SOng Boon Leong 	return 0;
32395a558611SOng Boon Leong }
32405a558611SOng Boon Leong 
/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state because the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting. The ordering of the steps below matters: DMA first, then
 *  the MAC core and MTL, then the per-channel offload features, and the
 *  DMA engines are started last.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	bool sph_en;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* Try to enable RX checksum offload; on failure fall back to SW
	 * checksumming and record that in rx_coe/rx_csum.
	 */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* PTP failure is non-fatal: only warn */
	if (init_ptp) {
		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

	/* Default EEE TW (wake) timer */
	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;

	/* Convert the timer from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	/* Program the RX interrupt watchdog (coalescing) per queue,
	 * defaulting unset values to DEF_DMA_RIWT.
	 */
	if (priv->use_riwt) {
		u32 queue;

		for (queue = 0; queue < rx_cnt; queue++) {
			if (!priv->rx_riwt[queue])
				priv->rx_riwt[queue] = DEF_DMA_RIWT;

			stmmac_rx_watchdog(priv, priv->ioaddr,
					   priv->rx_riwt[queue], queue);
		}
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++) {
			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

			/* TSO and TBS cannot co-exist */
			if (tx_q->tbs & STMMAC_TBS_AVAIL)
				continue;

			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
		}
	}

	/* Enable Split Header only when RX checksum offload is active */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);


	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS (time-based scheduling), per TX channel where available */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	/* Frame Preemption: start its workqueue and, if enabled, the
	 * handshake with the link partner.
	 */
	if (priv->dma_cap.fpesel) {
		stmmac_fpe_start_wq(priv);

		if (priv->plat->fpe_cfg->enable)
			stmmac_fpe_handshake(priv, true);
	}

	return 0;
}
3390523f11b5SSrinivas Kandagatla 
/* Undo stmmac_hw_setup: release the PTP reference clock (presumably
 * prepared/enabled on the init path — confirm against stmmac_init_ptp).
 */
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
3397c66f6c37SThierry Reding 
/* stmmac_free_irq - free the IRQs requested so far
 * @dev: net device pointer
 * @irq_err: the request stage that failed (REQ_IRQ_ERR_ALL frees everything)
 * @irq_idx: for the TX stage, number of per-queue IRQs already requested
 *
 * The switch cases fall through in reverse order of the request sequence:
 * entering at stage X frees every IRQ that was successfully requested
 * before stage X failed. Note the one-stage offset: e.g. the sfty_ue free
 * lives above the REQ_IRQ_ERR_SFTY_UE label, because reaching that label
 * means the sfty_ue request itself failed and must not be freed.
 */
static void stmmac_free_irq(struct net_device *dev,
			    enum request_irq_err irq_err, int irq_idx)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int j;

	switch (irq_err) {
	case REQ_IRQ_ERR_ALL:
		irq_idx = priv->plat->tx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_TX:
		/* Drop affinity hints before freeing the per-queue TX IRQs */
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->tx_irq[j] > 0) {
				irq_set_affinity_hint(priv->tx_irq[j], NULL);
				free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
			}
		}
		irq_idx = priv->plat->rx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_RX:
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->rx_irq[j] > 0) {
				irq_set_affinity_hint(priv->rx_irq[j], NULL);
				free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
			}
		}

		/* Dedicated lines are only freed when distinct from dev->irq */
		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
			free_irq(priv->sfty_ue_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_UE:
		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
			free_irq(priv->sfty_ce_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_CE:
		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
			free_irq(priv->lpi_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_LPI:
		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
			free_irq(priv->wol_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_WOL:
		free_irq(dev->irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_MAC:
	case REQ_IRQ_ERR_NO:
		/* If MAC IRQ request error, no more IRQ to free */
		break;
	}
}
34498532f613SOng Boon Leong 
34508532f613SOng Boon Leong static int stmmac_request_irq_multi_msi(struct net_device *dev)
34518532f613SOng Boon Leong {
34528532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
34533e6dc7b6SWong Vee Khee 	enum request_irq_err irq_err;
34548deec94cSOng Boon Leong 	cpumask_t cpu_mask;
34558532f613SOng Boon Leong 	int irq_idx = 0;
34568532f613SOng Boon Leong 	char *int_name;
34578532f613SOng Boon Leong 	int ret;
34588532f613SOng Boon Leong 	int i;
34598532f613SOng Boon Leong 
34608532f613SOng Boon Leong 	/* For common interrupt */
34618532f613SOng Boon Leong 	int_name = priv->int_name_mac;
34628532f613SOng Boon Leong 	sprintf(int_name, "%s:%s", dev->name, "mac");
34638532f613SOng Boon Leong 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
34648532f613SOng Boon Leong 			  0, int_name, dev);
34658532f613SOng Boon Leong 	if (unlikely(ret < 0)) {
34668532f613SOng Boon Leong 		netdev_err(priv->dev,
34678532f613SOng Boon Leong 			   "%s: alloc mac MSI %d (error: %d)\n",
34688532f613SOng Boon Leong 			   __func__, dev->irq, ret);
34698532f613SOng Boon Leong 		irq_err = REQ_IRQ_ERR_MAC;
34708532f613SOng Boon Leong 		goto irq_error;
34718532f613SOng Boon Leong 	}
34728532f613SOng Boon Leong 
34738532f613SOng Boon Leong 	/* Request the Wake IRQ in case of another line
34748532f613SOng Boon Leong 	 * is used for WoL
34758532f613SOng Boon Leong 	 */
34768532f613SOng Boon Leong 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
34778532f613SOng Boon Leong 		int_name = priv->int_name_wol;
34788532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "wol");
34798532f613SOng Boon Leong 		ret = request_irq(priv->wol_irq,
34808532f613SOng Boon Leong 				  stmmac_mac_interrupt,
34818532f613SOng Boon Leong 				  0, int_name, dev);
34828532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
34838532f613SOng Boon Leong 			netdev_err(priv->dev,
34848532f613SOng Boon Leong 				   "%s: alloc wol MSI %d (error: %d)\n",
34858532f613SOng Boon Leong 				   __func__, priv->wol_irq, ret);
34868532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_WOL;
34878532f613SOng Boon Leong 			goto irq_error;
34888532f613SOng Boon Leong 		}
34898532f613SOng Boon Leong 	}
34908532f613SOng Boon Leong 
34918532f613SOng Boon Leong 	/* Request the LPI IRQ in case of another line
34928532f613SOng Boon Leong 	 * is used for LPI
34938532f613SOng Boon Leong 	 */
34948532f613SOng Boon Leong 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
34958532f613SOng Boon Leong 		int_name = priv->int_name_lpi;
34968532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "lpi");
34978532f613SOng Boon Leong 		ret = request_irq(priv->lpi_irq,
34988532f613SOng Boon Leong 				  stmmac_mac_interrupt,
34998532f613SOng Boon Leong 				  0, int_name, dev);
35008532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35018532f613SOng Boon Leong 			netdev_err(priv->dev,
35028532f613SOng Boon Leong 				   "%s: alloc lpi MSI %d (error: %d)\n",
35038532f613SOng Boon Leong 				   __func__, priv->lpi_irq, ret);
35048532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_LPI;
35058532f613SOng Boon Leong 			goto irq_error;
35068532f613SOng Boon Leong 		}
35078532f613SOng Boon Leong 	}
35088532f613SOng Boon Leong 
35098532f613SOng Boon Leong 	/* Request the Safety Feature Correctible Error line in
35108532f613SOng Boon Leong 	 * case of another line is used
35118532f613SOng Boon Leong 	 */
35128532f613SOng Boon Leong 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
35138532f613SOng Boon Leong 		int_name = priv->int_name_sfty_ce;
35148532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
35158532f613SOng Boon Leong 		ret = request_irq(priv->sfty_ce_irq,
35168532f613SOng Boon Leong 				  stmmac_safety_interrupt,
35178532f613SOng Boon Leong 				  0, int_name, dev);
35188532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35198532f613SOng Boon Leong 			netdev_err(priv->dev,
35208532f613SOng Boon Leong 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
35218532f613SOng Boon Leong 				   __func__, priv->sfty_ce_irq, ret);
35228532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_SFTY_CE;
35238532f613SOng Boon Leong 			goto irq_error;
35248532f613SOng Boon Leong 		}
35258532f613SOng Boon Leong 	}
35268532f613SOng Boon Leong 
35278532f613SOng Boon Leong 	/* Request the Safety Feature Uncorrectible Error line in
35288532f613SOng Boon Leong 	 * case of another line is used
35298532f613SOng Boon Leong 	 */
35308532f613SOng Boon Leong 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
35318532f613SOng Boon Leong 		int_name = priv->int_name_sfty_ue;
35328532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
35338532f613SOng Boon Leong 		ret = request_irq(priv->sfty_ue_irq,
35348532f613SOng Boon Leong 				  stmmac_safety_interrupt,
35358532f613SOng Boon Leong 				  0, int_name, dev);
35368532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35378532f613SOng Boon Leong 			netdev_err(priv->dev,
35388532f613SOng Boon Leong 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
35398532f613SOng Boon Leong 				   __func__, priv->sfty_ue_irq, ret);
35408532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_SFTY_UE;
35418532f613SOng Boon Leong 			goto irq_error;
35428532f613SOng Boon Leong 		}
35438532f613SOng Boon Leong 	}
35448532f613SOng Boon Leong 
35458532f613SOng Boon Leong 	/* Request Rx MSI irq */
35468532f613SOng Boon Leong 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3547d68c2e1dSArnd Bergmann 		if (i >= MTL_MAX_RX_QUEUES)
35483e0d5699SArnd Bergmann 			break;
35498532f613SOng Boon Leong 		if (priv->rx_irq[i] == 0)
35508532f613SOng Boon Leong 			continue;
35518532f613SOng Boon Leong 
35528532f613SOng Boon Leong 		int_name = priv->int_name_rx_irq[i];
35538532f613SOng Boon Leong 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
35548532f613SOng Boon Leong 		ret = request_irq(priv->rx_irq[i],
35558532f613SOng Boon Leong 				  stmmac_msi_intr_rx,
35568532f613SOng Boon Leong 				  0, int_name, &priv->rx_queue[i]);
35578532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35588532f613SOng Boon Leong 			netdev_err(priv->dev,
35598532f613SOng Boon Leong 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
35608532f613SOng Boon Leong 				   __func__, i, priv->rx_irq[i], ret);
35618532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_RX;
35628532f613SOng Boon Leong 			irq_idx = i;
35638532f613SOng Boon Leong 			goto irq_error;
35648532f613SOng Boon Leong 		}
35658deec94cSOng Boon Leong 		cpumask_clear(&cpu_mask);
35668deec94cSOng Boon Leong 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
35678deec94cSOng Boon Leong 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
35688532f613SOng Boon Leong 	}
35698532f613SOng Boon Leong 
35708532f613SOng Boon Leong 	/* Request Tx MSI irq */
35718532f613SOng Boon Leong 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3572d68c2e1dSArnd Bergmann 		if (i >= MTL_MAX_TX_QUEUES)
35733e0d5699SArnd Bergmann 			break;
35748532f613SOng Boon Leong 		if (priv->tx_irq[i] == 0)
35758532f613SOng Boon Leong 			continue;
35768532f613SOng Boon Leong 
35778532f613SOng Boon Leong 		int_name = priv->int_name_tx_irq[i];
35788532f613SOng Boon Leong 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
35798532f613SOng Boon Leong 		ret = request_irq(priv->tx_irq[i],
35808532f613SOng Boon Leong 				  stmmac_msi_intr_tx,
35818532f613SOng Boon Leong 				  0, int_name, &priv->tx_queue[i]);
35828532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35838532f613SOng Boon Leong 			netdev_err(priv->dev,
35848532f613SOng Boon Leong 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
35858532f613SOng Boon Leong 				   __func__, i, priv->tx_irq[i], ret);
35868532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_TX;
35878532f613SOng Boon Leong 			irq_idx = i;
35888532f613SOng Boon Leong 			goto irq_error;
35898532f613SOng Boon Leong 		}
35908deec94cSOng Boon Leong 		cpumask_clear(&cpu_mask);
35918deec94cSOng Boon Leong 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
35928deec94cSOng Boon Leong 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
35938532f613SOng Boon Leong 	}
35948532f613SOng Boon Leong 
35958532f613SOng Boon Leong 	return 0;
35968532f613SOng Boon Leong 
35978532f613SOng Boon Leong irq_error:
35988532f613SOng Boon Leong 	stmmac_free_irq(dev, irq_err, irq_idx);
35998532f613SOng Boon Leong 	return ret;
36008532f613SOng Boon Leong }
36018532f613SOng Boon Leong 
36028532f613SOng Boon Leong static int stmmac_request_irq_single(struct net_device *dev)
36038532f613SOng Boon Leong {
36048532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
36053e6dc7b6SWong Vee Khee 	enum request_irq_err irq_err;
36068532f613SOng Boon Leong 	int ret;
36078532f613SOng Boon Leong 
36088532f613SOng Boon Leong 	ret = request_irq(dev->irq, stmmac_interrupt,
36098532f613SOng Boon Leong 			  IRQF_SHARED, dev->name, dev);
36108532f613SOng Boon Leong 	if (unlikely(ret < 0)) {
36118532f613SOng Boon Leong 		netdev_err(priv->dev,
36128532f613SOng Boon Leong 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
36138532f613SOng Boon Leong 			   __func__, dev->irq, ret);
36148532f613SOng Boon Leong 		irq_err = REQ_IRQ_ERR_MAC;
36153e6dc7b6SWong Vee Khee 		goto irq_error;
36168532f613SOng Boon Leong 	}
36178532f613SOng Boon Leong 
36188532f613SOng Boon Leong 	/* Request the Wake IRQ in case of another line
36198532f613SOng Boon Leong 	 * is used for WoL
36208532f613SOng Boon Leong 	 */
36218532f613SOng Boon Leong 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
36228532f613SOng Boon Leong 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
36238532f613SOng Boon Leong 				  IRQF_SHARED, dev->name, dev);
36248532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36258532f613SOng Boon Leong 			netdev_err(priv->dev,
36268532f613SOng Boon Leong 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
36278532f613SOng Boon Leong 				   __func__, priv->wol_irq, ret);
36288532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_WOL;
36293e6dc7b6SWong Vee Khee 			goto irq_error;
36308532f613SOng Boon Leong 		}
36318532f613SOng Boon Leong 	}
36328532f613SOng Boon Leong 
36338532f613SOng Boon Leong 	/* Request the IRQ lines */
36348532f613SOng Boon Leong 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
36358532f613SOng Boon Leong 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
36368532f613SOng Boon Leong 				  IRQF_SHARED, dev->name, dev);
36378532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36388532f613SOng Boon Leong 			netdev_err(priv->dev,
36398532f613SOng Boon Leong 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
36408532f613SOng Boon Leong 				   __func__, priv->lpi_irq, ret);
36418532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_LPI;
36428532f613SOng Boon Leong 			goto irq_error;
36438532f613SOng Boon Leong 		}
36448532f613SOng Boon Leong 	}
36458532f613SOng Boon Leong 
36468532f613SOng Boon Leong 	return 0;
36478532f613SOng Boon Leong 
36488532f613SOng Boon Leong irq_error:
36498532f613SOng Boon Leong 	stmmac_free_irq(dev, irq_err, 0);
36508532f613SOng Boon Leong 	return ret;
36518532f613SOng Boon Leong }
36528532f613SOng Boon Leong 
36538532f613SOng Boon Leong static int stmmac_request_irq(struct net_device *dev)
36548532f613SOng Boon Leong {
36558532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
36568532f613SOng Boon Leong 	int ret;
36578532f613SOng Boon Leong 
36588532f613SOng Boon Leong 	/* Request the IRQ lines */
36598532f613SOng Boon Leong 	if (priv->plat->multi_msi_en)
36608532f613SOng Boon Leong 		ret = stmmac_request_irq_multi_msi(dev);
36618532f613SOng Boon Leong 	else
36628532f613SOng Boon Leong 		ret = stmmac_request_irq_single(dev);
36638532f613SOng Boon Leong 
36648532f613SOng Boon Leong 	return ret;
36658532f613SOng Boon Leong }
36668532f613SOng Boon Leong 
3667523f11b5SSrinivas Kandagatla /**
36687ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
36697ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
36707ac6653aSJeff Kirsher  *  Description:
36717ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
36727ac6653aSJeff Kirsher  *  Return value:
36737ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
36747ac6653aSJeff Kirsher  *  file on failure.
36757ac6653aSJeff Kirsher  */
36765fabb012SOng Boon Leong int stmmac_open(struct net_device *dev)
36777ac6653aSJeff Kirsher {
36787ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36799900074eSVladimir Oltean 	int mode = priv->plat->phy_interface;
36805d626c87SJose Abreu 	int bfsize = 0;
36818fce3331SJose Abreu 	u32 chan;
36827ac6653aSJeff Kirsher 	int ret;
36837ac6653aSJeff Kirsher 
36845ec55823SJoakim Zhang 	ret = pm_runtime_get_sync(priv->device);
36855ec55823SJoakim Zhang 	if (ret < 0) {
36865ec55823SJoakim Zhang 		pm_runtime_put_noidle(priv->device);
36875ec55823SJoakim Zhang 		return ret;
36885ec55823SJoakim Zhang 	}
36895ec55823SJoakim Zhang 
3690a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3691f213bbe8SJose Abreu 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
36929900074eSVladimir Oltean 	    (!priv->hw->xpcs ||
369311059740SVladimir Oltean 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
36947ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
3695e58bb43fSGiuseppe CAVALLARO 		if (ret) {
369638ddc59dSLABBE Corentin 			netdev_err(priv->dev,
369738ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
3698e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
36995ec55823SJoakim Zhang 			goto init_phy_error;
37007ac6653aSJeff Kirsher 		}
3701e58bb43fSGiuseppe CAVALLARO 	}
37027ac6653aSJeff Kirsher 
3703523f11b5SSrinivas Kandagatla 	/* Extra statistics */
3704523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3705523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
3706523f11b5SSrinivas Kandagatla 
37075d626c87SJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
37085d626c87SJose Abreu 	if (bfsize < 0)
37095d626c87SJose Abreu 		bfsize = 0;
37105d626c87SJose Abreu 
37115d626c87SJose Abreu 	if (bfsize < BUF_SIZE_16KiB)
37125d626c87SJose Abreu 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
37135d626c87SJose Abreu 
37145d626c87SJose Abreu 	priv->dma_buf_sz = bfsize;
37155d626c87SJose Abreu 	buf_sz = bfsize;
37165d626c87SJose Abreu 
371722ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
371856329137SBartlomiej Zolnierkiewicz 
3719aa042f60SSong, Yoong Siang 	if (!priv->dma_tx_size)
3720aa042f60SSong, Yoong Siang 		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3721aa042f60SSong, Yoong Siang 	if (!priv->dma_rx_size)
3722aa042f60SSong, Yoong Siang 		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3723aa042f60SSong, Yoong Siang 
3724579a25a8SJose Abreu 	/* Earlier check for TBS */
3725579a25a8SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3726579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3727579a25a8SJose Abreu 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3728579a25a8SJose Abreu 
37295e6038b8SOng Boon Leong 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3730579a25a8SJose Abreu 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3731579a25a8SJose Abreu 	}
3732579a25a8SJose Abreu 
37335bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
37345bacd778SLABBE Corentin 	if (ret < 0) {
37355bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
37365bacd778SLABBE Corentin 			   __func__);
37375bacd778SLABBE Corentin 		goto dma_desc_error;
37385bacd778SLABBE Corentin 	}
37395bacd778SLABBE Corentin 
37405bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
37415bacd778SLABBE Corentin 	if (ret < 0) {
37425bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
37435bacd778SLABBE Corentin 			   __func__);
37445bacd778SLABBE Corentin 		goto init_error;
37455bacd778SLABBE Corentin 	}
37465bacd778SLABBE Corentin 
3747fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
374856329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
374938ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3750c9324d18SGiuseppe CAVALLARO 		goto init_error;
37517ac6653aSJeff Kirsher 	}
37527ac6653aSJeff Kirsher 
3753d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
3754777da230SGiuseppe CAVALLARO 
375574371272SJose Abreu 	phylink_start(priv->phylink);
375677b28983SJisheng Zhang 	/* We may have called phylink_speed_down before */
375777b28983SJisheng Zhang 	phylink_speed_up(priv->phylink);
37587ac6653aSJeff Kirsher 
37598532f613SOng Boon Leong 	ret = stmmac_request_irq(dev);
37608532f613SOng Boon Leong 	if (ret)
37616c1e5abeSThierry Reding 		goto irq_error;
3762d765955dSGiuseppe CAVALLARO 
3763c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
37649f19306dSOng Boon Leong 	netif_tx_start_all_queues(priv->dev);
37657ac6653aSJeff Kirsher 
37667ac6653aSJeff Kirsher 	return 0;
37677ac6653aSJeff Kirsher 
37686c1e5abeSThierry Reding irq_error:
376974371272SJose Abreu 	phylink_stop(priv->phylink);
37707a13f8f5SFrancesco Virlinzi 
37718fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3772d5a05e69SVincent Whitchurch 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
37738fce3331SJose Abreu 
3774c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
3775c9324d18SGiuseppe CAVALLARO init_error:
3776c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
37775bacd778SLABBE Corentin dma_desc_error:
377874371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
37795ec55823SJoakim Zhang init_phy_error:
37805ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
37817ac6653aSJeff Kirsher 	return ret;
37827ac6653aSJeff Kirsher }
37837ac6653aSJeff Kirsher 
37845a558611SOng Boon Leong static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
37855a558611SOng Boon Leong {
37865a558611SOng Boon Leong 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
37875a558611SOng Boon Leong 
37885a558611SOng Boon Leong 	if (priv->fpe_wq)
37895a558611SOng Boon Leong 		destroy_workqueue(priv->fpe_wq);
37905a558611SOng Boon Leong 
37915a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue stop");
37925a558611SOng Boon Leong }
37935a558611SOng Boon Leong 
37947ac6653aSJeff Kirsher /**
37957ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
37967ac6653aSJeff Kirsher  *  @dev : device pointer.
37977ac6653aSJeff Kirsher  *  Description:
37987ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
37997ac6653aSJeff Kirsher  */
38005fabb012SOng Boon Leong int stmmac_release(struct net_device *dev)
38017ac6653aSJeff Kirsher {
38027ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
38038fce3331SJose Abreu 	u32 chan;
38047ac6653aSJeff Kirsher 
3805*b270bfe6SYannick Vignon 	netif_tx_disable(dev);
3806*b270bfe6SYannick Vignon 
380777b28983SJisheng Zhang 	if (device_may_wakeup(priv->device))
380877b28983SJisheng Zhang 		phylink_speed_down(priv->phylink, false);
38097ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
381074371272SJose Abreu 	phylink_stop(priv->phylink);
381174371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
38127ac6653aSJeff Kirsher 
3813c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
38147ac6653aSJeff Kirsher 
38158fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3816d5a05e69SVincent Whitchurch 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
38179125cdd1SGiuseppe CAVALLARO 
38187ac6653aSJeff Kirsher 	/* Free the IRQ lines */
38198532f613SOng Boon Leong 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
38207ac6653aSJeff Kirsher 
38215f585913SFugang Duan 	if (priv->eee_enabled) {
38225f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
38235f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
38245f585913SFugang Duan 	}
38255f585913SFugang Duan 
38267ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
3827ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
38287ac6653aSJeff Kirsher 
38297ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
38307ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
38317ac6653aSJeff Kirsher 
38327ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
3833c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
38347ac6653aSJeff Kirsher 
38357ac6653aSJeff Kirsher 	netif_carrier_off(dev);
38367ac6653aSJeff Kirsher 
383792ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
383892ba6888SRayagond Kokatanur 
38395ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
38405ec55823SJoakim Zhang 
38415a558611SOng Boon Leong 	if (priv->dma_cap.fpesel)
38425a558611SOng Boon Leong 		stmmac_fpe_stop_wq(priv);
38435a558611SOng Boon Leong 
38447ac6653aSJeff Kirsher 	return 0;
38457ac6653aSJeff Kirsher }
38467ac6653aSJeff Kirsher 
384730d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
384830d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
384930d93227SJose Abreu {
385030d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
385130d93227SJose Abreu 	u32 inner_type = 0x0;
385230d93227SJose Abreu 	struct dma_desc *p;
385330d93227SJose Abreu 
385430d93227SJose Abreu 	if (!priv->dma_cap.vlins)
385530d93227SJose Abreu 		return false;
385630d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
385730d93227SJose Abreu 		return false;
385830d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
385930d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
386030d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
386130d93227SJose Abreu 	}
386230d93227SJose Abreu 
386330d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
386430d93227SJose Abreu 
3865579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3866579a25a8SJose Abreu 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3867579a25a8SJose Abreu 	else
3868579a25a8SJose Abreu 		p = &tx_q->dma_tx[tx_q->cur_tx];
3869579a25a8SJose Abreu 
387030d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
387130d93227SJose Abreu 		return false;
387230d93227SJose Abreu 
387330d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
3874aa042f60SSong, Yoong Siang 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
387530d93227SJose Abreu 	return true;
387630d93227SJose Abreu }
387730d93227SJose Abreu 
38787ac6653aSJeff Kirsher /**
3879f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - close entry point of the driver
3880f748be53SAlexandre TORGUE  *  @priv: driver private structure
3881f748be53SAlexandre TORGUE  *  @des: buffer start address
3882f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
3883d0ea5cbdSJesse Brandeburg  *  @last_segment: condition for the last descriptor
3884ce736788SJoao Pinto  *  @queue: TX queue index
3885f748be53SAlexandre TORGUE  *  Description:
3886f748be53SAlexandre TORGUE  *  This function fills descriptor and request new descriptors according to
3887f748be53SAlexandre TORGUE  *  buffer length to fill
3888f748be53SAlexandre TORGUE  */
3889a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3890ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
3891f748be53SAlexandre TORGUE {
3892ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3893f748be53SAlexandre TORGUE 	struct dma_desc *desc;
38945bacd778SLABBE Corentin 	u32 buff_size;
3895ce736788SJoao Pinto 	int tmp_len;
3896f748be53SAlexandre TORGUE 
3897f748be53SAlexandre TORGUE 	tmp_len = total_len;
3898f748be53SAlexandre TORGUE 
3899f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
3900a993db88SJose Abreu 		dma_addr_t curr_addr;
3901a993db88SJose Abreu 
3902aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3903aa042f60SSong, Yoong Siang 						priv->dma_tx_size);
3904b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3905579a25a8SJose Abreu 
3906579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3907579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3908579a25a8SJose Abreu 		else
3909579a25a8SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3910f748be53SAlexandre TORGUE 
3911a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
3912a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
3913a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
3914a993db88SJose Abreu 		else
3915a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
3916a993db88SJose Abreu 
3917f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3918f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
3919f748be53SAlexandre TORGUE 
392042de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3921f748be53SAlexandre TORGUE 				0, 1,
3922426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3923f748be53SAlexandre TORGUE 				0, 0);
3924f748be53SAlexandre TORGUE 
3925f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
3926f748be53SAlexandre TORGUE 	}
3927f748be53SAlexandre TORGUE }
3928f748be53SAlexandre TORGUE 
3929d96febedSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3930d96febedSOng Boon Leong {
3931d96febedSOng Boon Leong 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3932d96febedSOng Boon Leong 	int desc_size;
3933d96febedSOng Boon Leong 
3934d96febedSOng Boon Leong 	if (likely(priv->extend_desc))
3935d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_extended_desc);
3936d96febedSOng Boon Leong 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3937d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_edesc);
3938d96febedSOng Boon Leong 	else
3939d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_desc);
3940d96febedSOng Boon Leong 
3941d96febedSOng Boon Leong 	/* The own bit must be the latest setting done when prepare the
3942d96febedSOng Boon Leong 	 * descriptor and then barrier is needed to make sure that
3943d96febedSOng Boon Leong 	 * all is coherent before granting the DMA engine.
3944d96febedSOng Boon Leong 	 */
3945d96febedSOng Boon Leong 	wmb();
3946d96febedSOng Boon Leong 
3947d96febedSOng Boon Leong 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3948d96febedSOng Boon Leong 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3949d96febedSOng Boon Leong }
3950d96febedSOng Boon Leong 
3951f748be53SAlexandre TORGUE /**
3952f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3953f748be53SAlexandre TORGUE  *  @skb : the socket buffer
3954f748be53SAlexandre TORGUE  *  @dev : device pointer
3955f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
3956f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
3957f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
3958f748be53SAlexandre TORGUE  *
3959f748be53SAlexandre TORGUE  *  First Descriptor
3960f748be53SAlexandre TORGUE  *   --------
3961f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
3962f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
3963f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
3964f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3965f748be53SAlexandre TORGUE  *   --------
3966f748be53SAlexandre TORGUE  *	|
3967f748be53SAlexandre TORGUE  *     ...
3968f748be53SAlexandre TORGUE  *	|
3969f748be53SAlexandre TORGUE  *   --------
3970f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3971f748be53SAlexandre TORGUE  *   | DES1 | --|
3972f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
3973f748be53SAlexandre TORGUE  *   | DES3 |
3974f748be53SAlexandre TORGUE  *   --------
3975f748be53SAlexandre TORGUE  *
3976f748be53SAlexandre TORGUE  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
3977f748be53SAlexandre TORGUE  */
3978f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3979f748be53SAlexandre TORGUE {
3980ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
3981f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
3982f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
3983ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
3984c2837423SJose Abreu 	unsigned int first_entry, tx_packets;
3985d96febedSOng Boon Leong 	int tmp_pay_len = 0, first_tx;
3986ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3987c2837423SJose Abreu 	bool has_vlan, set_ic;
3988579a25a8SJose Abreu 	u8 proto_hdr_len, hdr;
3989ce736788SJoao Pinto 	u32 pay_len, mss;
3990a993db88SJose Abreu 	dma_addr_t des;
3991f748be53SAlexandre TORGUE 	int i;
3992f748be53SAlexandre TORGUE 
3993ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3994c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
3995ce736788SJoao Pinto 
3996f748be53SAlexandre TORGUE 	/* Compute header lengths */
3997b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3998b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3999b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
4000b7766206SJose Abreu 	} else {
4001f748be53SAlexandre TORGUE 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4002b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
4003b7766206SJose Abreu 	}
4004f748be53SAlexandre TORGUE 
4005f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
4006ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
4007f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4008c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4009c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4010c22a3f48SJoao Pinto 								queue));
4011f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
401238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
401338ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
401438ddc59dSLABBE Corentin 				   __func__);
4015f748be53SAlexandre TORGUE 		}
4016f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
4017f748be53SAlexandre TORGUE 	}
4018f748be53SAlexandre TORGUE 
4019f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4020f748be53SAlexandre TORGUE 
4021f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
4022f748be53SAlexandre TORGUE 
4023f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
40248d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
4025579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4026579a25a8SJose Abreu 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4027579a25a8SJose Abreu 		else
4028579a25a8SJose Abreu 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4029579a25a8SJose Abreu 
403042de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
40318d212a9eSNiklas Cassel 		tx_q->mss = mss;
4032aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4033aa042f60SSong, Yoong Siang 						priv->dma_tx_size);
4034b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4035f748be53SAlexandre TORGUE 	}
4036f748be53SAlexandre TORGUE 
4037f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
4038b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4039b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
4040f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4041f748be53SAlexandre TORGUE 			skb->data_len);
4042f748be53SAlexandre TORGUE 	}
4043f748be53SAlexandre TORGUE 
404430d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
404530d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
404630d93227SJose Abreu 
4047ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
4048b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4049f748be53SAlexandre TORGUE 
4050579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4051579a25a8SJose Abreu 		desc = &tx_q->dma_entx[first_entry].basic;
4052579a25a8SJose Abreu 	else
4053579a25a8SJose Abreu 		desc = &tx_q->dma_tx[first_entry];
4054f748be53SAlexandre TORGUE 	first = desc;
4055f748be53SAlexandre TORGUE 
405630d93227SJose Abreu 	if (has_vlan)
405730d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
405830d93227SJose Abreu 
4059f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
4060f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4061f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
4062f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
4063f748be53SAlexandre TORGUE 		goto dma_map_err;
4064f748be53SAlexandre TORGUE 
4065ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4066ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4067be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4068be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4069f748be53SAlexandre TORGUE 
4070a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
4071f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
4072f748be53SAlexandre TORGUE 
4073f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
4074f748be53SAlexandre TORGUE 		if (pay_len)
4075f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4076f748be53SAlexandre TORGUE 
4077f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
4078f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4079a993db88SJose Abreu 	} else {
4080a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
4081a993db88SJose Abreu 		tmp_pay_len = pay_len;
408234c15202Syuqi jin 		des += proto_hdr_len;
4083b2f07199SJose Abreu 		pay_len = 0;
4084a993db88SJose Abreu 	}
4085f748be53SAlexandre TORGUE 
4086ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4087f748be53SAlexandre TORGUE 
4088f748be53SAlexandre TORGUE 	/* Prepare fragments */
4089f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
4090f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4091f748be53SAlexandre TORGUE 
4092f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
4093f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
4094f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
4095937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
4096937071c1SThierry Reding 			goto dma_map_err;
4097f748be53SAlexandre TORGUE 
4098f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4099ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
4100f748be53SAlexandre TORGUE 
4101ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4102ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4103ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4104be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4105f748be53SAlexandre TORGUE 	}
4106f748be53SAlexandre TORGUE 
4107ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4108f748be53SAlexandre TORGUE 
410905cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
411005cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4111be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
411205cf0d1bSNiklas Cassel 
41137df4a3a7SJose Abreu 	/* Manage tx mitigation */
4114c2837423SJose Abreu 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4115c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
4116c2837423SJose Abreu 
4117c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4118c2837423SJose Abreu 		set_ic = true;
4119db2f2842SOng Boon Leong 	else if (!priv->tx_coal_frames[queue])
4120c2837423SJose Abreu 		set_ic = false;
4121db2f2842SOng Boon Leong 	else if (tx_packets > priv->tx_coal_frames[queue])
4122c2837423SJose Abreu 		set_ic = true;
4123db2f2842SOng Boon Leong 	else if ((tx_q->tx_count_frames %
4124db2f2842SOng Boon Leong 		  priv->tx_coal_frames[queue]) < tx_packets)
4125c2837423SJose Abreu 		set_ic = true;
4126c2837423SJose Abreu 	else
4127c2837423SJose Abreu 		set_ic = false;
4128c2837423SJose Abreu 
4129c2837423SJose Abreu 	if (set_ic) {
4130579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4131579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4132579a25a8SJose Abreu 		else
41337df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4134579a25a8SJose Abreu 
41357df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
41367df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
41377df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
41387df4a3a7SJose Abreu 	}
41397df4a3a7SJose Abreu 
414005cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
414105cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
414205cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
414305cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
414405cf0d1bSNiklas Cassel 	 */
4145aa042f60SSong, Yoong Siang 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4146f748be53SAlexandre TORGUE 
4147ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4148b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
414938ddc59dSLABBE Corentin 			  __func__);
4150c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4151f748be53SAlexandre TORGUE 	}
4152f748be53SAlexandre TORGUE 
4153f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
4154f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
4155f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
4156f748be53SAlexandre TORGUE 
41578000ddc0SJose Abreu 	if (priv->sarc_type)
41588000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
41598000ddc0SJose Abreu 
4160f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
4161f748be53SAlexandre TORGUE 
4162f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4163f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
4164f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
4165f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
416642de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
4167f748be53SAlexandre TORGUE 	}
4168f748be53SAlexandre TORGUE 
4169f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
417042de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4171f748be53SAlexandre TORGUE 			proto_hdr_len,
4172f748be53SAlexandre TORGUE 			pay_len,
4173ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4174b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
4175f748be53SAlexandre TORGUE 
4176f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
417715d2ee42SNiklas Cassel 	if (mss_desc) {
417815d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
417915d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
418015d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
418115d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
418215d2ee42SNiklas Cassel 		 */
418315d2ee42SNiklas Cassel 		dma_wmb();
418442de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
418515d2ee42SNiklas Cassel 	}
4186f748be53SAlexandre TORGUE 
4187f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
4188f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4189ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4190ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
4191f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
4192f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
4193f748be53SAlexandre TORGUE 	}
4194f748be53SAlexandre TORGUE 
4195c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4196f748be53SAlexandre TORGUE 
4197d96febedSOng Boon Leong 	stmmac_flush_tx_descriptors(priv, queue);
41984772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
4199f748be53SAlexandre TORGUE 
4200f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4201f748be53SAlexandre TORGUE 
4202f748be53SAlexandre TORGUE dma_map_err:
4203f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
4204f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
4205f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
4206f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4207f748be53SAlexandre TORGUE }
4208f748be53SAlexandre TORGUE 
/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 *  Return: NETDEV_TX_OK on success and also on DMA mapping failure (the
 *  skb is dropped and tx_dropped is incremented), or NETDEV_TX_BUSY when
 *  the TX ring does not have enough free descriptors for this skb.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int first_entry, tx_packets, enh_desc;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int gso = skb_shinfo(skb)->gso_type;
	struct dma_edesc *tbs_desc = NULL;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	int entry, first_tx;
	dma_addr_t des;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Leave software-timed LPI (EEE) mode before queueing new frames */
	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device: hand GSO skbs to
	 * the dedicated TSO transmit path.
	 */
	if (skb_is_gso(skb) && priv->tso) {
		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
			return stmmac_tso_xmit(skb, dev);
	}

	/* Worst case: one descriptor per fragment plus one for the head */
	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	/* Pick the descriptor layout in use for this queue */
	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[entry].basic;
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	/* Jumbo frames are handled by the ring/chain mode helper, which
	 * consumes descriptors itself and returns the last entry used.
	 */
	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	/* Map and program one descriptor per skb fragment */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		/* Record unmap info for stmmac_tx_clean */
		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;
	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_packets = (entry + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	/* Decide whether this frame requests a TX-complete interrupt:
	 * always for HW-timestamped frames, otherwise based on the
	 * per-queue frame coalescing threshold.
	 */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (likely(priv->extend_desc))
			desc = &tx_q->dma_etx[entry].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = &tx_q->dma_tx[entry];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	/* Stop the queue early if the next frame might not fit */
	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;
		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 0, last_segment,
				skb->len);
	}

	/* Program the launch time if Time Based Scheduling is enabled */
	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		tbs_desc = &tx_q->dma_entx[first_entry];
		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
	}

	stmmac_set_tx_owner(priv, first);

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
44517ac6653aSJeff Kirsher 
4452b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4453b9381985SVince Bridgers {
4454ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
4455ab188e8fSElad Nachman 	__be16 vlan_proto;
4456b9381985SVince Bridgers 	u16 vlanid;
4457b9381985SVince Bridgers 
4458ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
4459ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
4460ab188e8fSElad Nachman 
4461ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4462ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4463ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
4464ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4465b9381985SVince Bridgers 		/* pop the vlan tag */
4466ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
4467ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4468b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
4469ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4470b9381985SVince Bridgers 	}
4471b9381985SVince Bridgers }
4472b9381985SVince Bridgers 
/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 * For each dirty entry, page-pool pages are (re)allocated as needed, the
 * descriptor buffer addresses are programmed, and ownership is handed
 * back to the hardware. Finally the RX tail pointer is advanced.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* Replenish the primary buffer if it was consumed; on
		 * allocation failure stop refilling and retry later.
		 */
		if (!buf->page) {
			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->page)
				break;
		}

		/* With split header enabled, a second page holds the payload */
		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		/* NOTE(review): the counter is advanced twice here (++ and
		 * then += rx_coal_frames) before the wrap check — looks
		 * suspicious for a per-frame coalesce count; confirm against
		 * the intended RX interrupt coalescing behavior.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Request the RX watchdog only when interrupt-on-completion
		 * is being coalesced and the RIWT feature is in use.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* All descriptor fields must be visible to the device
		 * before ownership is transferred back to hardware.
		 */
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
45397ac6653aSJeff Kirsher 
454088ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
454188ebe2cfSJose Abreu 				       struct dma_desc *p,
454288ebe2cfSJose Abreu 				       int status, unsigned int len)
454388ebe2cfSJose Abreu {
454488ebe2cfSJose Abreu 	unsigned int plen = 0, hlen = 0;
454531f2760eSLuo Jiaxing 	int coe = priv->hw->rx_csum;
454688ebe2cfSJose Abreu 
454788ebe2cfSJose Abreu 	/* Not first descriptor, buffer is always zero */
454888ebe2cfSJose Abreu 	if (priv->sph && len)
454988ebe2cfSJose Abreu 		return 0;
455088ebe2cfSJose Abreu 
455188ebe2cfSJose Abreu 	/* First descriptor, get split header length */
455231f2760eSLuo Jiaxing 	stmmac_get_rx_header_len(priv, p, &hlen);
455388ebe2cfSJose Abreu 	if (priv->sph && hlen) {
455488ebe2cfSJose Abreu 		priv->xstats.rx_split_hdr_pkt_n++;
455588ebe2cfSJose Abreu 		return hlen;
455688ebe2cfSJose Abreu 	}
455788ebe2cfSJose Abreu 
455888ebe2cfSJose Abreu 	/* First descriptor, not last descriptor and not split header */
455988ebe2cfSJose Abreu 	if (status & rx_not_ls)
456088ebe2cfSJose Abreu 		return priv->dma_buf_sz;
456188ebe2cfSJose Abreu 
456288ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
456388ebe2cfSJose Abreu 
456488ebe2cfSJose Abreu 	/* First descriptor and last descriptor and not split header */
456588ebe2cfSJose Abreu 	return min_t(unsigned int, priv->dma_buf_sz, plen);
456688ebe2cfSJose Abreu }
456788ebe2cfSJose Abreu 
456888ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
456988ebe2cfSJose Abreu 				       struct dma_desc *p,
457088ebe2cfSJose Abreu 				       int status, unsigned int len)
457188ebe2cfSJose Abreu {
457288ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
457388ebe2cfSJose Abreu 	unsigned int plen = 0;
457488ebe2cfSJose Abreu 
457588ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
457688ebe2cfSJose Abreu 	if (!priv->sph)
457788ebe2cfSJose Abreu 		return 0;
457888ebe2cfSJose Abreu 
457988ebe2cfSJose Abreu 	/* Not last descriptor */
458088ebe2cfSJose Abreu 	if (status & rx_not_ls)
458188ebe2cfSJose Abreu 		return priv->dma_buf_sz;
458288ebe2cfSJose Abreu 
458388ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
458488ebe2cfSJose Abreu 
458588ebe2cfSJose Abreu 	/* Last descriptor */
458688ebe2cfSJose Abreu 	return plen - len;
458788ebe2cfSJose Abreu }
458888ebe2cfSJose Abreu 
/* stmmac_xdp_xmit_xdpf - queue one XDP frame on TX queue @queue.
 * @dma_map: true for ndo_xdp_xmit (frame memory is foreign, must be DMA
 *           mapped); false for XDP_TX (frame lives in a page-pool page that
 *           is already mapped, only a sync is needed).
 * Returns STMMAC_XDP_TX on success or STMMAC_XDP_CONSUMED when the ring is
 * too full or the DMA mapping fails. Does not ring the doorbell; the caller
 * flushes the descriptors.
 */
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	/* Keep a reserve of descriptors for the normal TX path */
	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		/* ndo_xdp_xmit path: map the frame data for the device */
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		/* XDP_TX path: page is already page-pool mapped, compute the
		 * data offset within it and sync for the device.
		 */
		struct page *page = virt_to_page(xdpf->data);

		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	/* Record unmap/cleanup info for stmmac_tx_clean */
	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	/* Single-segment frame: first and last segment, OWN bit set */
	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       true, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	/* Request a TX-complete interrupt every tx_coal_frames frames */
	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		priv->xstats.tx_set_ic_bit++;
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
4660be8b38a7SOng Boon Leong 
4661be8b38a7SOng Boon Leong static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4662be8b38a7SOng Boon Leong 				   int cpu)
4663be8b38a7SOng Boon Leong {
4664be8b38a7SOng Boon Leong 	int index = cpu;
4665be8b38a7SOng Boon Leong 
4666be8b38a7SOng Boon Leong 	if (unlikely(index < 0))
4667be8b38a7SOng Boon Leong 		index = 0;
4668be8b38a7SOng Boon Leong 
4669be8b38a7SOng Boon Leong 	while (index >= priv->plat->tx_queues_to_use)
4670be8b38a7SOng Boon Leong 		index -= priv->plat->tx_queues_to_use;
4671be8b38a7SOng Boon Leong 
4672be8b38a7SOng Boon Leong 	return index;
4673be8b38a7SOng Boon Leong }
4674be8b38a7SOng Boon Leong 
/* stmmac_xdp_xmit_back - transmit an XDP_TX buffer back out of the device.
 * Converts @xdp to an xdp_frame, selects a TX queue from the current CPU,
 * and queues it under the netdev TX queue lock (the queue is shared with
 * the regular transmit path). Returns the STMMAC_XDP_* verdict.
 */
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	nq->trans_start = jiffies;

	/* dma_map=false: XDP_TX buffers are already page-pool mapped */
	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv, queue);

	__netif_tx_unlock(nq);

	return res;
}
4702be8b38a7SOng Boon Leong 
4703bba71cacSOng Boon Leong static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4704bba71cacSOng Boon Leong 				 struct bpf_prog *prog,
47055fabb012SOng Boon Leong 				 struct xdp_buff *xdp)
47065fabb012SOng Boon Leong {
47075fabb012SOng Boon Leong 	u32 act;
4708bba71cacSOng Boon Leong 	int res;
47095fabb012SOng Boon Leong 
47105fabb012SOng Boon Leong 	act = bpf_prog_run_xdp(prog, xdp);
47115fabb012SOng Boon Leong 	switch (act) {
47125fabb012SOng Boon Leong 	case XDP_PASS:
47135fabb012SOng Boon Leong 		res = STMMAC_XDP_PASS;
47145fabb012SOng Boon Leong 		break;
4715be8b38a7SOng Boon Leong 	case XDP_TX:
4716be8b38a7SOng Boon Leong 		res = stmmac_xdp_xmit_back(priv, xdp);
4717be8b38a7SOng Boon Leong 		break;
47188b278a5bSOng Boon Leong 	case XDP_REDIRECT:
47198b278a5bSOng Boon Leong 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
47208b278a5bSOng Boon Leong 			res = STMMAC_XDP_CONSUMED;
47218b278a5bSOng Boon Leong 		else
47228b278a5bSOng Boon Leong 			res = STMMAC_XDP_REDIRECT;
47238b278a5bSOng Boon Leong 		break;
47245fabb012SOng Boon Leong 	default:
47255fabb012SOng Boon Leong 		bpf_warn_invalid_xdp_action(act);
47265fabb012SOng Boon Leong 		fallthrough;
47275fabb012SOng Boon Leong 	case XDP_ABORTED:
47285fabb012SOng Boon Leong 		trace_xdp_exception(priv->dev, prog, act);
47295fabb012SOng Boon Leong 		fallthrough;
47305fabb012SOng Boon Leong 	case XDP_DROP:
47315fabb012SOng Boon Leong 		res = STMMAC_XDP_CONSUMED;
47325fabb012SOng Boon Leong 		break;
47335fabb012SOng Boon Leong 	}
47345fabb012SOng Boon Leong 
4735bba71cacSOng Boon Leong 	return res;
4736bba71cacSOng Boon Leong }
4737bba71cacSOng Boon Leong 
4738bba71cacSOng Boon Leong static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4739bba71cacSOng Boon Leong 					   struct xdp_buff *xdp)
4740bba71cacSOng Boon Leong {
4741bba71cacSOng Boon Leong 	struct bpf_prog *prog;
4742bba71cacSOng Boon Leong 	int res;
4743bba71cacSOng Boon Leong 
4744bba71cacSOng Boon Leong 	prog = READ_ONCE(priv->xdp_prog);
4745bba71cacSOng Boon Leong 	if (!prog) {
4746bba71cacSOng Boon Leong 		res = STMMAC_XDP_PASS;
47472f1e432dSToke Høiland-Jørgensen 		goto out;
4748bba71cacSOng Boon Leong 	}
4749bba71cacSOng Boon Leong 
4750bba71cacSOng Boon Leong 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
47512f1e432dSToke Høiland-Jørgensen out:
47525fabb012SOng Boon Leong 	return ERR_PTR(-res);
47535fabb012SOng Boon Leong }
47545fabb012SOng Boon Leong 
4755be8b38a7SOng Boon Leong static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4756be8b38a7SOng Boon Leong 				   int xdp_status)
4757be8b38a7SOng Boon Leong {
4758be8b38a7SOng Boon Leong 	int cpu = smp_processor_id();
4759be8b38a7SOng Boon Leong 	int queue;
4760be8b38a7SOng Boon Leong 
4761be8b38a7SOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4762be8b38a7SOng Boon Leong 
4763be8b38a7SOng Boon Leong 	if (xdp_status & STMMAC_XDP_TX)
4764be8b38a7SOng Boon Leong 		stmmac_tx_timer_arm(priv, queue);
47658b278a5bSOng Boon Leong 
47668b278a5bSOng Boon Leong 	if (xdp_status & STMMAC_XDP_REDIRECT)
47678b278a5bSOng Boon Leong 		xdp_do_flush();
4768be8b38a7SOng Boon Leong }
4769be8b38a7SOng Boon Leong 
4770bba2556eSOng Boon Leong static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4771bba2556eSOng Boon Leong 					       struct xdp_buff *xdp)
4772bba2556eSOng Boon Leong {
4773bba2556eSOng Boon Leong 	unsigned int metasize = xdp->data - xdp->data_meta;
4774bba2556eSOng Boon Leong 	unsigned int datasize = xdp->data_end - xdp->data;
4775bba2556eSOng Boon Leong 	struct sk_buff *skb;
4776bba2556eSOng Boon Leong 
4777132c32eeSOng Boon Leong 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4778bba2556eSOng Boon Leong 			       xdp->data_end - xdp->data_hard_start,
4779bba2556eSOng Boon Leong 			       GFP_ATOMIC | __GFP_NOWARN);
4780bba2556eSOng Boon Leong 	if (unlikely(!skb))
4781bba2556eSOng Boon Leong 		return NULL;
4782bba2556eSOng Boon Leong 
4783bba2556eSOng Boon Leong 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4784bba2556eSOng Boon Leong 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4785bba2556eSOng Boon Leong 	if (metasize)
4786bba2556eSOng Boon Leong 		skb_metadata_set(skb, metasize);
4787bba2556eSOng Boon Leong 
4788bba2556eSOng Boon Leong 	return skb;
4789bba2556eSOng Boon Leong }
4790bba2556eSOng Boon Leong 
4791bba2556eSOng Boon Leong static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4792bba2556eSOng Boon Leong 				   struct dma_desc *p, struct dma_desc *np,
4793bba2556eSOng Boon Leong 				   struct xdp_buff *xdp)
4794bba2556eSOng Boon Leong {
4795bba2556eSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
4796bba2556eSOng Boon Leong 	unsigned int len = xdp->data_end - xdp->data;
4797bba2556eSOng Boon Leong 	enum pkt_hash_types hash_type;
4798bba2556eSOng Boon Leong 	int coe = priv->hw->rx_csum;
4799bba2556eSOng Boon Leong 	struct sk_buff *skb;
4800bba2556eSOng Boon Leong 	u32 hash;
4801bba2556eSOng Boon Leong 
4802bba2556eSOng Boon Leong 	skb = stmmac_construct_skb_zc(ch, xdp);
4803bba2556eSOng Boon Leong 	if (!skb) {
4804bba2556eSOng Boon Leong 		priv->dev->stats.rx_dropped++;
4805bba2556eSOng Boon Leong 		return;
4806bba2556eSOng Boon Leong 	}
4807bba2556eSOng Boon Leong 
4808bba2556eSOng Boon Leong 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4809bba2556eSOng Boon Leong 	stmmac_rx_vlan(priv->dev, skb);
4810bba2556eSOng Boon Leong 	skb->protocol = eth_type_trans(skb, priv->dev);
4811bba2556eSOng Boon Leong 
4812bba2556eSOng Boon Leong 	if (unlikely(!coe))
4813bba2556eSOng Boon Leong 		skb_checksum_none_assert(skb);
4814bba2556eSOng Boon Leong 	else
4815bba2556eSOng Boon Leong 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4816bba2556eSOng Boon Leong 
4817bba2556eSOng Boon Leong 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4818bba2556eSOng Boon Leong 		skb_set_hash(skb, hash, hash_type);
4819bba2556eSOng Boon Leong 
4820bba2556eSOng Boon Leong 	skb_record_rx_queue(skb, queue);
4821132c32eeSOng Boon Leong 	napi_gro_receive(&ch->rxtx_napi, skb);
4822bba2556eSOng Boon Leong 
4823bba2556eSOng Boon Leong 	priv->dev->stats.rx_packets++;
4824bba2556eSOng Boon Leong 	priv->dev->stats.rx_bytes += len;
4825bba2556eSOng Boon Leong }
4826bba2556eSOng Boon Leong 
/* Refill the RX ring of @queue with up to @budget descriptors backed by
 * buffers from the queue's XSK pool.  Returns false if the pool ran out
 * of buffers before the requested refill completed, true otherwise.
 */
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	/* Never refill more slots than are actually dirty */
	budget = min(budget, stmmac_rx_dirty(priv, queue));

	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		/* Slot has no XSK buffer: grab a fresh one from the pool */
		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				/* Pool exhausted: report partial refill */
				ret = false;
				break;
			}
		}

		if (priv->extend_desc)
			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			rx_desc = rx_q->dma_rx + entry;

		/* Program the descriptor with the buffer's DMA address */
		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		/* RX interrupt coalescing counter.  NOTE(review): the counter
		 * is bumped by rx_coal_frames + 1 per descriptor and then
		 * reset whenever it exceeds rx_coal_frames, so it resets on
		 * every iteration when coalescing is enabled — this mirrors
		 * the non-ZC refill path; confirm it is the intended
		 * watchdog behaviour.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Decide whether to request the RX watchdog on this
		 * descriptor; only meaningful when RIWT is in use.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Make sure all descriptor fields are visible before
		 * handing ownership of the descriptor to the DMA engine.
		 */
		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}

	/* Ring the RX tail pointer only if at least one descriptor was
	 * actually refilled.
	 */
	if (rx_desc) {
		rx_q->dirty_rx = entry;
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->dirty_rx * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
	}

	return ret;
}
4884bba2556eSOng Boon Leong 
/* stmmac_rx_zc - zero-copy (AF_XDP / XSK) variant of the RX poll loop.
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index
 *
 * Consumes up to @limit completed descriptors, runs the XDP program on
 * each frame and either passes it up the stack as an SKB, transmits or
 * redirects it, or drops it.  Returns the number of frames processed;
 * when the XSK pool ran dry and need-wakeup mode is off, @limit is
 * returned instead so NAPI keeps polling.
 */
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	unsigned int count = 0, error = 0, len = 0;
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int next_entry = rx_q->cur_rx;
	unsigned int desc_size;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
	int status = 0;

	/* Optional debug dump of the descriptor ring */
	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		struct stmmac_rx_buffer *buf;
		unsigned int buf1_len = 0;
		struct dma_desc *np, *p;
		int entry;
		int res;

		/* On the first iteration, resume a frame whose processing
		 * was cut short by the previous NAPI budget.
		 */
		if (!count && rx_q->state_saved) {
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
		buf1_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		/* Refill in batches to amortize tail-pointer updates */
		if (dirty >= STMMAC_RX_FILL_BATCH) {
			failure = failure ||
				  !stmmac_rx_refill_zc(priv, queue, dirty);
			dirty = 0;
		}

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
					  &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		/* Prefetch the next RX descriptor */
		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		/* Ensure a valid XSK buffer before proceed */
		if (!buf->xdp)
			break;

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
						  &priv->xstats,
						  rx_q->dma_erx + entry);
		/* Errored frame: release the XSK buffer back to the pool */
		if (unlikely(status == discard_frame)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		/* Skip remaining descriptors of an errored multi-desc frame */
		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			count++;
			continue;
		}

		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
		if (likely(status & rx_not_ls)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			count++;
			goto read_again;
		}

		/* XDP ZC Frame only support primary buffers for now */
		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;

		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
		 * Type frames (LLC/LLC-SNAP)
		 *
		 * llc_snap is never checked in GMAC >= 4, so this ACS
		 * feature is always disabled and packets need to be
		 * stripped manually.
		 */
		if (likely(!(status & rx_not_ls)) &&
		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
		     unlikely(status != llc_snap))) {
			buf1_len -= ETH_FCS_LEN;
			len -= ETH_FCS_LEN;
		}

		/* RX buffer is good and fit into a XSK pool buffer */
		buf->xdp->data_end = buf->xdp->data + buf1_len;
		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);

		/* NOTE(review): prog is passed unchecked; confirm an XDP
		 * program is guaranteed to be attached whenever an XSK
		 * pool is active, or that the callee tolerates NULL.
		 */
		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

		switch (res) {
		case STMMAC_XDP_PASS:
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
			priv->dev->stats.rx_dropped++;
			break;
		case STMMAC_XDP_TX:
		case STMMAC_XDP_REDIRECT:
			/* Buffer ownership moved to the TX/redirect path */
			xdp_status |= res;
			break;
		}

		buf->xdp = NULL;
		dirty++;
		count++;
	}

	/* Budget exhausted mid-frame: remember where we stopped so the
	 * next poll can resume this frame.
	 */
	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	priv->xstats.rx_pkt_n += count;
	priv->xstats.rxq_stats[queue].rx_pkt_n += count;

	/* In need-wakeup mode, tell user space whether it must kick the
	 * kernel to get the pool refilled.
	 */
	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);

		return (int)count;
	}

	return failure ? limit : (int)count;
}
5066bba2556eSOng Boon Leong 
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
507554139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
50767ac6653aSJeff Kirsher {
507754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
50788fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
5079ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
5080ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
508107b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
50825fabb012SOng Boon Leong 	enum dma_data_direction dma_dir;
5083bfaf91caSJoakim Zhang 	unsigned int desc_size;
5084ec222003SJose Abreu 	struct sk_buff *skb = NULL;
50855fabb012SOng Boon Leong 	struct xdp_buff xdp;
5086be8b38a7SOng Boon Leong 	int xdp_status = 0;
50875fabb012SOng Boon Leong 	int buf_sz;
50885fabb012SOng Boon Leong 
50895fabb012SOng Boon Leong 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
50905fabb012SOng Boon Leong 	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
50917ac6653aSJeff Kirsher 
509283d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
5093d0225e7dSAlexandre TORGUE 		void *rx_head;
5094d0225e7dSAlexandre TORGUE 
509538ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5096bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
509754139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
5098bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
5099bfaf91caSJoakim Zhang 		} else {
510054139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
5101bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
5102bfaf91caSJoakim Zhang 		}
5103d0225e7dSAlexandre TORGUE 
5104bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5105bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
51067ac6653aSJeff Kirsher 	}
5107c24602efSGiuseppe CAVALLARO 	while (count < limit) {
510888ebe2cfSJose Abreu 		unsigned int buf1_len = 0, buf2_len = 0;
5109ec222003SJose Abreu 		enum pkt_hash_types hash_type;
51102af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
51112af6106aSJose Abreu 		struct dma_desc *np, *p;
5112ec222003SJose Abreu 		int entry;
5113ec222003SJose Abreu 		u32 hash;
51147ac6653aSJeff Kirsher 
5115ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
5116ec222003SJose Abreu 			skb = rx_q->state.skb;
5117ec222003SJose Abreu 			error = rx_q->state.error;
5118ec222003SJose Abreu 			len = rx_q->state.len;
5119ec222003SJose Abreu 		} else {
5120ec222003SJose Abreu 			rx_q->state_saved = false;
5121ec222003SJose Abreu 			skb = NULL;
5122ec222003SJose Abreu 			error = 0;
5123ec222003SJose Abreu 			len = 0;
5124ec222003SJose Abreu 		}
5125ec222003SJose Abreu 
5126ec222003SJose Abreu 		if (count >= limit)
5127ec222003SJose Abreu 			break;
5128ec222003SJose Abreu 
5129ec222003SJose Abreu read_again:
513088ebe2cfSJose Abreu 		buf1_len = 0;
513188ebe2cfSJose Abreu 		buf2_len = 0;
513207b39753SAaro Koskinen 		entry = next_entry;
51332af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
513407b39753SAaro Koskinen 
5135c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
513654139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5137c24602efSGiuseppe CAVALLARO 		else
513854139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
5139c24602efSGiuseppe CAVALLARO 
5140c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
514142de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
5142c1fa3212SFabrice Gasnier 				&priv->xstats, p);
5143c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
5144c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
51457ac6653aSJeff Kirsher 			break;
51467ac6653aSJeff Kirsher 
5147aa042f60SSong, Yoong Siang 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5148aa042f60SSong, Yoong Siang 						priv->dma_rx_size);
514954139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
5150e3ad57c9SGiuseppe Cavallaro 
5151c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
515254139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5153c24602efSGiuseppe CAVALLARO 		else
515454139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
5155ba1ffd74SGiuseppe CAVALLARO 
5156ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
51577ac6653aSJeff Kirsher 
515842de047dSJose Abreu 		if (priv->extend_desc)
515942de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
516042de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
5161891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
51622af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
51632af6106aSJose Abreu 			buf->page = NULL;
5164ec222003SJose Abreu 			error = 1;
51650b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
51660b273ca4SJose Abreu 				priv->dev->stats.rx_errors++;
5167ec222003SJose Abreu 		}
5168f748be53SAlexandre TORGUE 
5169ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
5170ec222003SJose Abreu 			goto read_again;
5171ec222003SJose Abreu 		if (unlikely(error)) {
5172ec222003SJose Abreu 			dev_kfree_skb(skb);
517388ebe2cfSJose Abreu 			skb = NULL;
5174cda4985aSJose Abreu 			count++;
517507b39753SAaro Koskinen 			continue;
5176e527c4a7SGiuseppe CAVALLARO 		}
5177e527c4a7SGiuseppe CAVALLARO 
5178ec222003SJose Abreu 		/* Buffer is good. Go on. */
5179ec222003SJose Abreu 
51804744bf07SMatteo Croce 		prefetch(page_address(buf->page) + buf->page_offset);
518188ebe2cfSJose Abreu 		if (buf->sec_page)
518288ebe2cfSJose Abreu 			prefetch(page_address(buf->sec_page));
518388ebe2cfSJose Abreu 
518488ebe2cfSJose Abreu 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
518588ebe2cfSJose Abreu 		len += buf1_len;
518688ebe2cfSJose Abreu 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
518788ebe2cfSJose Abreu 		len += buf2_len;
5188ec222003SJose Abreu 
51897ac6653aSJeff Kirsher 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5190ceb69499SGiuseppe CAVALLARO 		 * Type frames (LLC/LLC-SNAP)
5191565020aaSJose Abreu 		 *
5192565020aaSJose Abreu 		 * llc_snap is never checked in GMAC >= 4, so this ACS
5193565020aaSJose Abreu 		 * feature is always disabled and packets need to be
5194565020aaSJose Abreu 		 * stripped manually.
5195ceb69499SGiuseppe CAVALLARO 		 */
519693b5dce4SJose Abreu 		if (likely(!(status & rx_not_ls)) &&
519793b5dce4SJose Abreu 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
519893b5dce4SJose Abreu 		     unlikely(status != llc_snap))) {
51990f296e78SZekun Shen 			if (buf2_len) {
520088ebe2cfSJose Abreu 				buf2_len -= ETH_FCS_LEN;
5201ec222003SJose Abreu 				len -= ETH_FCS_LEN;
52020f296e78SZekun Shen 			} else if (buf1_len) {
52030f296e78SZekun Shen 				buf1_len -= ETH_FCS_LEN;
52040f296e78SZekun Shen 				len -= ETH_FCS_LEN;
52050f296e78SZekun Shen 			}
520683d7af64SGiuseppe CAVALLARO 		}
520722ad3838SGiuseppe Cavallaro 
5208ec222003SJose Abreu 		if (!skb) {
5209be8b38a7SOng Boon Leong 			unsigned int pre_len, sync_len;
5210be8b38a7SOng Boon Leong 
52115fabb012SOng Boon Leong 			dma_sync_single_for_cpu(priv->device, buf->addr,
52125fabb012SOng Boon Leong 						buf1_len, dma_dir);
52135fabb012SOng Boon Leong 
5214d172268fSMatteo Croce 			xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5215d172268fSMatteo Croce 			xdp_prepare_buff(&xdp, page_address(buf->page),
5216d172268fSMatteo Croce 					 buf->page_offset, buf1_len, false);
52175fabb012SOng Boon Leong 
5218be8b38a7SOng Boon Leong 			pre_len = xdp.data_end - xdp.data_hard_start -
5219be8b38a7SOng Boon Leong 				  buf->page_offset;
52205fabb012SOng Boon Leong 			skb = stmmac_xdp_run_prog(priv, &xdp);
5221be8b38a7SOng Boon Leong 			/* Due xdp_adjust_tail: DMA sync for_device
5222be8b38a7SOng Boon Leong 			 * cover max len CPU touch
5223be8b38a7SOng Boon Leong 			 */
5224be8b38a7SOng Boon Leong 			sync_len = xdp.data_end - xdp.data_hard_start -
5225be8b38a7SOng Boon Leong 				   buf->page_offset;
5226be8b38a7SOng Boon Leong 			sync_len = max(sync_len, pre_len);
52275fabb012SOng Boon Leong 
52285fabb012SOng Boon Leong 			/* For Not XDP_PASS verdict */
52295fabb012SOng Boon Leong 			if (IS_ERR(skb)) {
52305fabb012SOng Boon Leong 				unsigned int xdp_res = -PTR_ERR(skb);
52315fabb012SOng Boon Leong 
52325fabb012SOng Boon Leong 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5233be8b38a7SOng Boon Leong 					page_pool_put_page(rx_q->page_pool,
5234be8b38a7SOng Boon Leong 							   virt_to_head_page(xdp.data),
5235be8b38a7SOng Boon Leong 							   sync_len, true);
52365fabb012SOng Boon Leong 					buf->page = NULL;
52375fabb012SOng Boon Leong 					priv->dev->stats.rx_dropped++;
52385fabb012SOng Boon Leong 
52395fabb012SOng Boon Leong 					/* Clear skb as it was set as
52405fabb012SOng Boon Leong 					 * status by XDP program.
52415fabb012SOng Boon Leong 					 */
52425fabb012SOng Boon Leong 					skb = NULL;
52435fabb012SOng Boon Leong 
52445fabb012SOng Boon Leong 					if (unlikely((status & rx_not_ls)))
52455fabb012SOng Boon Leong 						goto read_again;
52465fabb012SOng Boon Leong 
52475fabb012SOng Boon Leong 					count++;
52485fabb012SOng Boon Leong 					continue;
52498b278a5bSOng Boon Leong 				} else if (xdp_res & (STMMAC_XDP_TX |
52508b278a5bSOng Boon Leong 						      STMMAC_XDP_REDIRECT)) {
5251be8b38a7SOng Boon Leong 					xdp_status |= xdp_res;
5252be8b38a7SOng Boon Leong 					buf->page = NULL;
5253be8b38a7SOng Boon Leong 					skb = NULL;
5254be8b38a7SOng Boon Leong 					count++;
5255be8b38a7SOng Boon Leong 					continue;
52565fabb012SOng Boon Leong 				}
52575fabb012SOng Boon Leong 			}
52585fabb012SOng Boon Leong 		}
52595fabb012SOng Boon Leong 
52605fabb012SOng Boon Leong 		if (!skb) {
52615fabb012SOng Boon Leong 			/* XDP program may expand or reduce tail */
52625fabb012SOng Boon Leong 			buf1_len = xdp.data_end - xdp.data;
52635fabb012SOng Boon Leong 
526488ebe2cfSJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5265ec222003SJose Abreu 			if (!skb) {
526622ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
5267cda4985aSJose Abreu 				count++;
526888ebe2cfSJose Abreu 				goto drain_data;
526922ad3838SGiuseppe Cavallaro 			}
527022ad3838SGiuseppe Cavallaro 
52715fabb012SOng Boon Leong 			/* XDP program may adjust header */
52725fabb012SOng Boon Leong 			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
527388ebe2cfSJose Abreu 			skb_put(skb, buf1_len);
527422ad3838SGiuseppe Cavallaro 
5275ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
5276ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5277ec222003SJose Abreu 			buf->page = NULL;
527888ebe2cfSJose Abreu 		} else if (buf1_len) {
5279ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
52805fabb012SOng Boon Leong 						buf1_len, dma_dir);
5281ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
52825fabb012SOng Boon Leong 					buf->page, buf->page_offset, buf1_len,
5283ec222003SJose Abreu 					priv->dma_buf_sz);
5284ec222003SJose Abreu 
5285ec222003SJose Abreu 			/* Data payload appended into SKB */
5286ec222003SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->page);
5287ec222003SJose Abreu 			buf->page = NULL;
52887ac6653aSJeff Kirsher 		}
528983d7af64SGiuseppe CAVALLARO 
529088ebe2cfSJose Abreu 		if (buf2_len) {
529167afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
52925fabb012SOng Boon Leong 						buf2_len, dma_dir);
529367afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
529488ebe2cfSJose Abreu 					buf->sec_page, 0, buf2_len,
529567afd6d1SJose Abreu 					priv->dma_buf_sz);
529667afd6d1SJose Abreu 
529767afd6d1SJose Abreu 			/* Data payload appended into SKB */
529867afd6d1SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
529967afd6d1SJose Abreu 			buf->sec_page = NULL;
530067afd6d1SJose Abreu 		}
530167afd6d1SJose Abreu 
530288ebe2cfSJose Abreu drain_data:
5303ec222003SJose Abreu 		if (likely(status & rx_not_ls))
5304ec222003SJose Abreu 			goto read_again;
530588ebe2cfSJose Abreu 		if (!skb)
530688ebe2cfSJose Abreu 			continue;
5307ec222003SJose Abreu 
5308ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
5309ec222003SJose Abreu 
5310ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5311b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
53127ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
53137ac6653aSJeff Kirsher 
5314ceb69499SGiuseppe CAVALLARO 		if (unlikely(!coe))
53157ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
531662a2ab93SGiuseppe CAVALLARO 		else
53177ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
531862a2ab93SGiuseppe CAVALLARO 
531976067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
532076067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
532176067459SJose Abreu 
532276067459SJose Abreu 		skb_record_rx_queue(skb, queue);
53234ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
532488ebe2cfSJose Abreu 		skb = NULL;
53257ac6653aSJeff Kirsher 
53267ac6653aSJeff Kirsher 		priv->dev->stats.rx_packets++;
5327ec222003SJose Abreu 		priv->dev->stats.rx_bytes += len;
5328cda4985aSJose Abreu 		count++;
53297ac6653aSJeff Kirsher 	}
5330ec222003SJose Abreu 
533188ebe2cfSJose Abreu 	if (status & rx_not_ls || skb) {
5332ec222003SJose Abreu 		rx_q->state_saved = true;
5333ec222003SJose Abreu 		rx_q->state.skb = skb;
5334ec222003SJose Abreu 		rx_q->state.error = error;
5335ec222003SJose Abreu 		rx_q->state.len = len;
53367ac6653aSJeff Kirsher 	}
53377ac6653aSJeff Kirsher 
5338be8b38a7SOng Boon Leong 	stmmac_finalize_xdp_rx(priv, xdp_status);
5339be8b38a7SOng Boon Leong 
534054139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
53417ac6653aSJeff Kirsher 
53427ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
534368e9c5deSVijayakannan Ayyathurai 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
53447ac6653aSJeff Kirsher 
53457ac6653aSJeff Kirsher 	return count;
53467ac6653aSJeff Kirsher }
53477ac6653aSJeff Kirsher 
53484ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
53497ac6653aSJeff Kirsher {
53508fce3331SJose Abreu 	struct stmmac_channel *ch =
53514ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
53528fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
53538fce3331SJose Abreu 	u32 chan = ch->index;
53544ccb4585SJose Abreu 	int work_done;
53557ac6653aSJeff Kirsher 
53569125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
5357ce736788SJoao Pinto 
5358132c32eeSOng Boon Leong 	work_done = stmmac_rx(priv, budget, chan);
5359021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5360021bd5e3SJose Abreu 		unsigned long flags;
5361021bd5e3SJose Abreu 
5362021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5363021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5364021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5365021bd5e3SJose Abreu 	}
5366021bd5e3SJose Abreu 
53674ccb4585SJose Abreu 	return work_done;
53684ccb4585SJose Abreu }
5369ce736788SJoao Pinto 
53704ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
53714ccb4585SJose Abreu {
53724ccb4585SJose Abreu 	struct stmmac_channel *ch =
53734ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
53744ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
53754ccb4585SJose Abreu 	u32 chan = ch->index;
53764ccb4585SJose Abreu 	int work_done;
53774ccb4585SJose Abreu 
53784ccb4585SJose Abreu 	priv->xstats.napi_poll++;
53794ccb4585SJose Abreu 
5380132c32eeSOng Boon Leong 	work_done = stmmac_tx_clean(priv, budget, chan);
5381fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
53828fce3331SJose Abreu 
5383021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5384021bd5e3SJose Abreu 		unsigned long flags;
53854ccb4585SJose Abreu 
5386021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5387021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5388021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5389fa0be0a4SJose Abreu 	}
53908fce3331SJose Abreu 
53917ac6653aSJeff Kirsher 	return work_done;
53927ac6653aSJeff Kirsher }
53937ac6653aSJeff Kirsher 
5394132c32eeSOng Boon Leong static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5395132c32eeSOng Boon Leong {
5396132c32eeSOng Boon Leong 	struct stmmac_channel *ch =
5397132c32eeSOng Boon Leong 		container_of(napi, struct stmmac_channel, rxtx_napi);
5398132c32eeSOng Boon Leong 	struct stmmac_priv *priv = ch->priv_data;
539981d0885dSSong Yoong Siang 	int rx_done, tx_done, rxtx_done;
5400132c32eeSOng Boon Leong 	u32 chan = ch->index;
5401132c32eeSOng Boon Leong 
5402132c32eeSOng Boon Leong 	priv->xstats.napi_poll++;
5403132c32eeSOng Boon Leong 
5404132c32eeSOng Boon Leong 	tx_done = stmmac_tx_clean(priv, budget, chan);
5405132c32eeSOng Boon Leong 	tx_done = min(tx_done, budget);
5406132c32eeSOng Boon Leong 
5407132c32eeSOng Boon Leong 	rx_done = stmmac_rx_zc(priv, budget, chan);
5408132c32eeSOng Boon Leong 
540981d0885dSSong Yoong Siang 	rxtx_done = max(tx_done, rx_done);
541081d0885dSSong Yoong Siang 
5411132c32eeSOng Boon Leong 	/* If either TX or RX work is not complete, return budget
5412132c32eeSOng Boon Leong 	 * and keep pooling
5413132c32eeSOng Boon Leong 	 */
541481d0885dSSong Yoong Siang 	if (rxtx_done >= budget)
5415132c32eeSOng Boon Leong 		return budget;
5416132c32eeSOng Boon Leong 
5417132c32eeSOng Boon Leong 	/* all work done, exit the polling mode */
541881d0885dSSong Yoong Siang 	if (napi_complete_done(napi, rxtx_done)) {
5419132c32eeSOng Boon Leong 		unsigned long flags;
5420132c32eeSOng Boon Leong 
5421132c32eeSOng Boon Leong 		spin_lock_irqsave(&ch->lock, flags);
5422132c32eeSOng Boon Leong 		/* Both RX and TX work done are compelte,
5423132c32eeSOng Boon Leong 		 * so enable both RX & TX IRQs.
5424132c32eeSOng Boon Leong 		 */
5425132c32eeSOng Boon Leong 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5426132c32eeSOng Boon Leong 		spin_unlock_irqrestore(&ch->lock, flags);
5427132c32eeSOng Boon Leong 	}
5428132c32eeSOng Boon Leong 
542981d0885dSSong Yoong Siang 	return min(rxtx_done, budget - 1);
5430132c32eeSOng Boon Leong }
5431132c32eeSOng Boon Leong 
54327ac6653aSJeff Kirsher /**
54337ac6653aSJeff Kirsher  *  stmmac_tx_timeout
54347ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
5435d0ea5cbdSJesse Brandeburg  *  @txqueue: the index of the hanging transmit queue
54367ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
54377284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
54387ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
54397ac6653aSJeff Kirsher  *   in order to transmit a new packet.
54407ac6653aSJeff Kirsher  */
54410290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
54427ac6653aSJeff Kirsher {
54437ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
54447ac6653aSJeff Kirsher 
544534877a15SJose Abreu 	stmmac_global_err(priv);
54467ac6653aSJeff Kirsher }
54477ac6653aSJeff Kirsher 
54487ac6653aSJeff Kirsher /**
544901789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
54507ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
54517ac6653aSJeff Kirsher  *  Description:
54527ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
54537ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
54547ac6653aSJeff Kirsher  *  Return value:
54557ac6653aSJeff Kirsher  *  void.
54567ac6653aSJeff Kirsher  */
545701789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
54587ac6653aSJeff Kirsher {
54597ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
54607ac6653aSJeff Kirsher 
5461c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
54627ac6653aSJeff Kirsher }
54637ac6653aSJeff Kirsher 
54647ac6653aSJeff Kirsher /**
54657ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
54667ac6653aSJeff Kirsher  *  @dev : device pointer.
54677ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
54687ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
54697ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
54707ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
54717ac6653aSJeff Kirsher  *  Return value:
54727ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
54737ac6653aSJeff Kirsher  *  file on failure.
54747ac6653aSJeff Kirsher  */
54757ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
54767ac6653aSJeff Kirsher {
547738ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
5478eaf4fac4SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
54795b55299eSDavid Wu 	const int mtu = new_mtu;
5480eaf4fac4SJose Abreu 
5481eaf4fac4SJose Abreu 	if (txfifosz == 0)
5482eaf4fac4SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
5483eaf4fac4SJose Abreu 
5484eaf4fac4SJose Abreu 	txfifosz /= priv->plat->tx_queues_to_use;
548538ddc59dSLABBE Corentin 
54867ac6653aSJeff Kirsher 	if (netif_running(dev)) {
548738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
54887ac6653aSJeff Kirsher 		return -EBUSY;
54897ac6653aSJeff Kirsher 	}
54907ac6653aSJeff Kirsher 
54915fabb012SOng Boon Leong 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
54925fabb012SOng Boon Leong 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
54935fabb012SOng Boon Leong 		return -EINVAL;
54945fabb012SOng Boon Leong 	}
54955fabb012SOng Boon Leong 
5496eaf4fac4SJose Abreu 	new_mtu = STMMAC_ALIGN(new_mtu);
5497eaf4fac4SJose Abreu 
5498eaf4fac4SJose Abreu 	/* If condition true, FIFO is too small or MTU too large */
5499eaf4fac4SJose Abreu 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5500eaf4fac4SJose Abreu 		return -EINVAL;
5501eaf4fac4SJose Abreu 
55025b55299eSDavid Wu 	dev->mtu = mtu;
5503f748be53SAlexandre TORGUE 
55047ac6653aSJeff Kirsher 	netdev_update_features(dev);
55057ac6653aSJeff Kirsher 
55067ac6653aSJeff Kirsher 	return 0;
55077ac6653aSJeff Kirsher }
55087ac6653aSJeff Kirsher 
5509c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
5510c8f44affSMichał Mirosław 					     netdev_features_t features)
55117ac6653aSJeff Kirsher {
55127ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
55137ac6653aSJeff Kirsher 
551438912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
55157ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
5516d2afb5bdSGiuseppe CAVALLARO 
55177ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
5518a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
55197ac6653aSJeff Kirsher 
55207ac6653aSJeff Kirsher 	/* Some GMAC devices have a bugged Jumbo frame support that
55217ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
55227ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
5523ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and not use SF.
5524ceb69499SGiuseppe CAVALLARO 	 */
55257ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5526a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
55277ac6653aSJeff Kirsher 
5528f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
5529f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5530f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
5531f748be53SAlexandre TORGUE 			priv->tso = true;
5532f748be53SAlexandre TORGUE 		else
5533f748be53SAlexandre TORGUE 			priv->tso = false;
5534f748be53SAlexandre TORGUE 	}
5535f748be53SAlexandre TORGUE 
55367ac6653aSJeff Kirsher 	return features;
55377ac6653aSJeff Kirsher }
55387ac6653aSJeff Kirsher 
5539d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
5540d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
5541d2afb5bdSGiuseppe CAVALLARO {
5542d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
554367afd6d1SJose Abreu 	bool sph_en;
554467afd6d1SJose Abreu 	u32 chan;
5545d2afb5bdSGiuseppe CAVALLARO 
5546d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
5547d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
5548d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
5549d2afb5bdSGiuseppe CAVALLARO 	else
5550d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
5551d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
5552d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of issue.
5553d2afb5bdSGiuseppe CAVALLARO 	 */
5554c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
5555d2afb5bdSGiuseppe CAVALLARO 
555667afd6d1SJose Abreu 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
55575fabb012SOng Boon Leong 
555867afd6d1SJose Abreu 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
555967afd6d1SJose Abreu 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
556067afd6d1SJose Abreu 
5561d2afb5bdSGiuseppe CAVALLARO 	return 0;
5562d2afb5bdSGiuseppe CAVALLARO }
5563d2afb5bdSGiuseppe CAVALLARO 
55645a558611SOng Boon Leong static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
55655a558611SOng Boon Leong {
55665a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
55675a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
55685a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
55695a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
55705a558611SOng Boon Leong 
55715a558611SOng Boon Leong 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
55725a558611SOng Boon Leong 		return;
55735a558611SOng Boon Leong 
55745a558611SOng Boon Leong 	/* If LP has sent verify mPacket, LP is FPE capable */
55755a558611SOng Boon Leong 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
55765a558611SOng Boon Leong 		if (*lp_state < FPE_STATE_CAPABLE)
55775a558611SOng Boon Leong 			*lp_state = FPE_STATE_CAPABLE;
55785a558611SOng Boon Leong 
55795a558611SOng Boon Leong 		/* If user has requested FPE enable, quickly response */
55805a558611SOng Boon Leong 		if (*hs_enable)
55815a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
55825a558611SOng Boon Leong 						MPACKET_RESPONSE);
55835a558611SOng Boon Leong 	}
55845a558611SOng Boon Leong 
55855a558611SOng Boon Leong 	/* If Local has sent verify mPacket, Local is FPE capable */
55865a558611SOng Boon Leong 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
55875a558611SOng Boon Leong 		if (*lo_state < FPE_STATE_CAPABLE)
55885a558611SOng Boon Leong 			*lo_state = FPE_STATE_CAPABLE;
55895a558611SOng Boon Leong 	}
55905a558611SOng Boon Leong 
55915a558611SOng Boon Leong 	/* If LP has sent response mPacket, LP is entering FPE ON */
55925a558611SOng Boon Leong 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
55935a558611SOng Boon Leong 		*lp_state = FPE_STATE_ENTERING_ON;
55945a558611SOng Boon Leong 
55955a558611SOng Boon Leong 	/* If Local has sent response mPacket, Local is entering FPE ON */
55965a558611SOng Boon Leong 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
55975a558611SOng Boon Leong 		*lo_state = FPE_STATE_ENTERING_ON;
55985a558611SOng Boon Leong 
55995a558611SOng Boon Leong 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
56005a558611SOng Boon Leong 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
56015a558611SOng Boon Leong 	    priv->fpe_wq) {
56025a558611SOng Boon Leong 		queue_work(priv->fpe_wq, &priv->fpe_task);
56035a558611SOng Boon Leong 	}
56045a558611SOng Boon Leong }
56055a558611SOng Boon Leong 
560629e6573cSOng Boon Leong static void stmmac_common_interrupt(struct stmmac_priv *priv)
56077ac6653aSJeff Kirsher {
56087bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
56097bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
56107bac4e1eSJoao Pinto 	u32 queues_count;
56117bac4e1eSJoao Pinto 	u32 queue;
56127d9e6c5aSJose Abreu 	bool xmac;
56137bac4e1eSJoao Pinto 
56147d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
56157bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
56167ac6653aSJeff Kirsher 
561789f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
561889f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
561989f7f2cfSSrinivas Kandagatla 
5620e49aa315SVoon Weifeng 	if (priv->dma_cap.estsel)
56219f298959SOng Boon Leong 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
56229f298959SOng Boon Leong 				      &priv->xstats, tx_cnt);
5623e49aa315SVoon Weifeng 
56245a558611SOng Boon Leong 	if (priv->dma_cap.fpesel) {
56255a558611SOng Boon Leong 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
56265a558611SOng Boon Leong 						   priv->dev);
56275a558611SOng Boon Leong 
56285a558611SOng Boon Leong 		stmmac_fpe_event_status(priv, status);
56295a558611SOng Boon Leong 	}
56305a558611SOng Boon Leong 
56317ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
56327d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
5633c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
56348f71a88dSJoao Pinto 
5635d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
5636d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
56370982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5638d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
56390982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5640d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
56417bac4e1eSJoao Pinto 		}
56427bac4e1eSJoao Pinto 
56437bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
56448a7cb245SYannick Vignon 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
56457bac4e1eSJoao Pinto 							    queue);
56467bac4e1eSJoao Pinto 		}
564770523e63SGiuseppe CAVALLARO 
564870523e63SGiuseppe CAVALLARO 		/* PCS link status */
56493fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
565070523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
565129e6573cSOng Boon Leong 				netif_carrier_on(priv->dev);
565270523e63SGiuseppe CAVALLARO 			else
565329e6573cSOng Boon Leong 				netif_carrier_off(priv->dev);
565470523e63SGiuseppe CAVALLARO 		}
5655f4da5652STan Tee Min 
5656f4da5652STan Tee Min 		stmmac_timestamp_interrupt(priv, priv);
5657d765955dSGiuseppe CAVALLARO 	}
565829e6573cSOng Boon Leong }
565929e6573cSOng Boon Leong 
566029e6573cSOng Boon Leong /**
566129e6573cSOng Boon Leong  *  stmmac_interrupt - main ISR
566229e6573cSOng Boon Leong  *  @irq: interrupt number.
566329e6573cSOng Boon Leong  *  @dev_id: to pass the net device pointer.
566429e6573cSOng Boon Leong  *  Description: this is the main driver interrupt service routine.
566529e6573cSOng Boon Leong  *  It can call:
566629e6573cSOng Boon Leong  *  o DMA service routine (to manage incoming frame reception and transmission
566729e6573cSOng Boon Leong  *    status)
566829e6573cSOng Boon Leong  *  o Core interrupts to manage: remote wake-up, management counter, LPI
566929e6573cSOng Boon Leong  *    interrupts.
567029e6573cSOng Boon Leong  */
567129e6573cSOng Boon Leong static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
567229e6573cSOng Boon Leong {
567329e6573cSOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
567429e6573cSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
567529e6573cSOng Boon Leong 
567629e6573cSOng Boon Leong 	/* Check if adapter is up */
567729e6573cSOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
567829e6573cSOng Boon Leong 		return IRQ_HANDLED;
567929e6573cSOng Boon Leong 
568029e6573cSOng Boon Leong 	/* Check if a fatal error happened */
568129e6573cSOng Boon Leong 	if (stmmac_safety_feat_interrupt(priv))
568229e6573cSOng Boon Leong 		return IRQ_HANDLED;
568329e6573cSOng Boon Leong 
568429e6573cSOng Boon Leong 	/* To handle Common interrupts */
568529e6573cSOng Boon Leong 	stmmac_common_interrupt(priv);
5686d765955dSGiuseppe CAVALLARO 
5687d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
56887ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
56897ac6653aSJeff Kirsher 
56907ac6653aSJeff Kirsher 	return IRQ_HANDLED;
56917ac6653aSJeff Kirsher }
56927ac6653aSJeff Kirsher 
56938532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
56948532f613SOng Boon Leong {
56958532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
56968532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
56978532f613SOng Boon Leong 
56988532f613SOng Boon Leong 	if (unlikely(!dev)) {
56998532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
57008532f613SOng Boon Leong 		return IRQ_NONE;
57018532f613SOng Boon Leong 	}
57028532f613SOng Boon Leong 
57038532f613SOng Boon Leong 	/* Check if adapter is up */
57048532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
57058532f613SOng Boon Leong 		return IRQ_HANDLED;
57068532f613SOng Boon Leong 
57078532f613SOng Boon Leong 	/* To handle Common interrupts */
57088532f613SOng Boon Leong 	stmmac_common_interrupt(priv);
57098532f613SOng Boon Leong 
57108532f613SOng Boon Leong 	return IRQ_HANDLED;
57118532f613SOng Boon Leong }
57128532f613SOng Boon Leong 
57138532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
57148532f613SOng Boon Leong {
57158532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
57168532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
57178532f613SOng Boon Leong 
57188532f613SOng Boon Leong 	if (unlikely(!dev)) {
57198532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
57208532f613SOng Boon Leong 		return IRQ_NONE;
57218532f613SOng Boon Leong 	}
57228532f613SOng Boon Leong 
57238532f613SOng Boon Leong 	/* Check if adapter is up */
57248532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
57258532f613SOng Boon Leong 		return IRQ_HANDLED;
57268532f613SOng Boon Leong 
57278532f613SOng Boon Leong 	/* Check if a fatal error happened */
57288532f613SOng Boon Leong 	stmmac_safety_feat_interrupt(priv);
57298532f613SOng Boon Leong 
57308532f613SOng Boon Leong 	return IRQ_HANDLED;
57318532f613SOng Boon Leong }
57328532f613SOng Boon Leong 
57338532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
57348532f613SOng Boon Leong {
57358532f613SOng Boon Leong 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
57368532f613SOng Boon Leong 	int chan = tx_q->queue_index;
57378532f613SOng Boon Leong 	struct stmmac_priv *priv;
57388532f613SOng Boon Leong 	int status;
57398532f613SOng Boon Leong 
57408532f613SOng Boon Leong 	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
57418532f613SOng Boon Leong 
57428532f613SOng Boon Leong 	if (unlikely(!data)) {
57438532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
57448532f613SOng Boon Leong 		return IRQ_NONE;
57458532f613SOng Boon Leong 	}
57468532f613SOng Boon Leong 
57478532f613SOng Boon Leong 	/* Check if adapter is up */
57488532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
57498532f613SOng Boon Leong 		return IRQ_HANDLED;
57508532f613SOng Boon Leong 
57518532f613SOng Boon Leong 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
57528532f613SOng Boon Leong 
57538532f613SOng Boon Leong 	if (unlikely(status & tx_hard_error_bump_tc)) {
57548532f613SOng Boon Leong 		/* Try to bump up the dma threshold on this failure */
57558532f613SOng Boon Leong 		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
57568532f613SOng Boon Leong 		    tc <= 256) {
57578532f613SOng Boon Leong 			tc += 64;
57588532f613SOng Boon Leong 			if (priv->plat->force_thresh_dma_mode)
57598532f613SOng Boon Leong 				stmmac_set_dma_operation_mode(priv,
57608532f613SOng Boon Leong 							      tc,
57618532f613SOng Boon Leong 							      tc,
57628532f613SOng Boon Leong 							      chan);
57638532f613SOng Boon Leong 			else
57648532f613SOng Boon Leong 				stmmac_set_dma_operation_mode(priv,
57658532f613SOng Boon Leong 							      tc,
57668532f613SOng Boon Leong 							      SF_DMA_MODE,
57678532f613SOng Boon Leong 							      chan);
57688532f613SOng Boon Leong 			priv->xstats.threshold = tc;
57698532f613SOng Boon Leong 		}
57708532f613SOng Boon Leong 	} else if (unlikely(status == tx_hard_error)) {
57718532f613SOng Boon Leong 		stmmac_tx_err(priv, chan);
57728532f613SOng Boon Leong 	}
57738532f613SOng Boon Leong 
57748532f613SOng Boon Leong 	return IRQ_HANDLED;
57758532f613SOng Boon Leong }
57768532f613SOng Boon Leong 
57778532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
57788532f613SOng Boon Leong {
57798532f613SOng Boon Leong 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
57808532f613SOng Boon Leong 	int chan = rx_q->queue_index;
57818532f613SOng Boon Leong 	struct stmmac_priv *priv;
57828532f613SOng Boon Leong 
57838532f613SOng Boon Leong 	priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
57848532f613SOng Boon Leong 
57858532f613SOng Boon Leong 	if (unlikely(!data)) {
57868532f613SOng Boon Leong 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
57878532f613SOng Boon Leong 		return IRQ_NONE;
57888532f613SOng Boon Leong 	}
57898532f613SOng Boon Leong 
57908532f613SOng Boon Leong 	/* Check if adapter is up */
57918532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
57928532f613SOng Boon Leong 		return IRQ_HANDLED;
57938532f613SOng Boon Leong 
57948532f613SOng Boon Leong 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
57958532f613SOng Boon Leong 
57968532f613SOng Boon Leong 	return IRQ_HANDLED;
57978532f613SOng Boon Leong }
57988532f613SOng Boon Leong 
57997ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
58007ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
5801ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
5802ceb69499SGiuseppe CAVALLARO  */
58037ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
58047ac6653aSJeff Kirsher {
58058532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
58068532f613SOng Boon Leong 	int i;
58078532f613SOng Boon Leong 
58088532f613SOng Boon Leong 	/* If adapter is down, do nothing */
58098532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
58108532f613SOng Boon Leong 		return;
58118532f613SOng Boon Leong 
58128532f613SOng Boon Leong 	if (priv->plat->multi_msi_en) {
58138532f613SOng Boon Leong 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
58148532f613SOng Boon Leong 			stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
58158532f613SOng Boon Leong 
58168532f613SOng Boon Leong 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
58178532f613SOng Boon Leong 			stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
58188532f613SOng Boon Leong 	} else {
58197ac6653aSJeff Kirsher 		disable_irq(dev->irq);
58207ac6653aSJeff Kirsher 		stmmac_interrupt(dev->irq, dev);
58217ac6653aSJeff Kirsher 		enable_irq(dev->irq);
58227ac6653aSJeff Kirsher 	}
58238532f613SOng Boon Leong }
58247ac6653aSJeff Kirsher #endif
58257ac6653aSJeff Kirsher 
58267ac6653aSJeff Kirsher /**
58277ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
58287ac6653aSJeff Kirsher  *  @dev: Device pointer.
58297ac6653aSJeff Kirsher  *  @rq: An IOCTL specefic structure, that can contain a pointer to
58307ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
58317ac6653aSJeff Kirsher  *  @cmd: IOCTL command
58327ac6653aSJeff Kirsher  *  Description:
583332ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
58347ac6653aSJeff Kirsher  */
58357ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
58367ac6653aSJeff Kirsher {
583774371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv (dev);
5838891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
58397ac6653aSJeff Kirsher 
58407ac6653aSJeff Kirsher 	if (!netif_running(dev))
58417ac6653aSJeff Kirsher 		return -EINVAL;
58427ac6653aSJeff Kirsher 
5843891434b1SRayagond Kokatanur 	switch (cmd) {
5844891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
5845891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
5846891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
584774371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5848891434b1SRayagond Kokatanur 		break;
5849891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
5850d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
5851d6228b7cSArtem Panfilov 		break;
5852d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
5853d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
5854891434b1SRayagond Kokatanur 		break;
5855891434b1SRayagond Kokatanur 	default:
5856891434b1SRayagond Kokatanur 		break;
5857891434b1SRayagond Kokatanur 	}
58587ac6653aSJeff Kirsher 
58597ac6653aSJeff Kirsher 	return ret;
58607ac6653aSJeff Kirsher }
58617ac6653aSJeff Kirsher 
58624dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
58634dbbe8ddSJose Abreu 				    void *cb_priv)
58644dbbe8ddSJose Abreu {
58654dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
58664dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
58674dbbe8ddSJose Abreu 
5868425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5869425eabddSJose Abreu 		return ret;
5870425eabddSJose Abreu 
5871bba2556eSOng Boon Leong 	__stmmac_disable_all_queues(priv);
58724dbbe8ddSJose Abreu 
58734dbbe8ddSJose Abreu 	switch (type) {
58744dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
58754dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
58764dbbe8ddSJose Abreu 		break;
5877425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
5878425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
5879425eabddSJose Abreu 		break;
58804dbbe8ddSJose Abreu 	default:
58814dbbe8ddSJose Abreu 		break;
58824dbbe8ddSJose Abreu 	}
58834dbbe8ddSJose Abreu 
58844dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
58854dbbe8ddSJose Abreu 	return ret;
58864dbbe8ddSJose Abreu }
58874dbbe8ddSJose Abreu 
5888955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
5889955bcb6eSPablo Neira Ayuso 
58904dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
58914dbbe8ddSJose Abreu 			   void *type_data)
58924dbbe8ddSJose Abreu {
58934dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
58944dbbe8ddSJose Abreu 
58954dbbe8ddSJose Abreu 	switch (type) {
58964dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
5897955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
5898955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
58994e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
59004e95bc26SPablo Neira Ayuso 						  priv, priv, true);
59011f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
59021f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
5903b60189e0SJose Abreu 	case TC_SETUP_QDISC_TAPRIO:
5904b60189e0SJose Abreu 		return stmmac_tc_setup_taprio(priv, priv, type_data);
5905430b383cSJose Abreu 	case TC_SETUP_QDISC_ETF:
5906430b383cSJose Abreu 		return stmmac_tc_setup_etf(priv, priv, type_data);
59074dbbe8ddSJose Abreu 	default:
59084dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
59094dbbe8ddSJose Abreu 	}
59104dbbe8ddSJose Abreu }
59114dbbe8ddSJose Abreu 
59124993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
59134993e5b3SJose Abreu 			       struct net_device *sb_dev)
59144993e5b3SJose Abreu {
5915b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
5916b7766206SJose Abreu 
5917b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
59184993e5b3SJose Abreu 		/*
5919b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
59204993e5b3SJose Abreu 		 * capable Queues. Let's use always the Queue 0
5921b7766206SJose Abreu 		 * because if TSO/USO is supported then at least this
59224993e5b3SJose Abreu 		 * one will be capable.
59234993e5b3SJose Abreu 		 */
59244993e5b3SJose Abreu 		return 0;
59254993e5b3SJose Abreu 	}
59264993e5b3SJose Abreu 
59274993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
59284993e5b3SJose Abreu }
59294993e5b3SJose Abreu 
/* stmmac_set_mac_address - ndo_set_mac_address callback.
 * @ndev: network device
 * @addr: struct sockaddr carrying the new MAC address
 *
 * Updates both the netdev's software address and the MAC's unicast address
 * register 0.  The device is resumed via pm_runtime around the register
 * write so the MAC is accessible even when runtime-suspended.
 *
 * Returns 0 on success or a negative error.
 */
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}
5952a830405eSBhadram Varka 
595350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
/* Root debugfs directory shared by all stmmac netdevs */
static struct dentry *stmmac_fs_dir;

/* sysfs_display_ring - dump a DMA descriptor ring into a seq_file.
 * @head: CPU pointer to the first descriptor of the ring
 * @size: number of descriptors in the ring
 * @extend_desc: non-zero if the ring uses extended descriptors
 * @seq: seq_file to print into
 * @dma_phy_addr: DMA (bus) address of the first descriptor, used so the
 *		  printed address per entry matches what the hardware sees
 */
static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	dma_addr_t dma_addr;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			/* Only the basic part of the extended descriptor
			 * is dumped; des0..des3 are little-endian in memory.
			 */
			dma_addr = dma_phy_addr + i * sizeof(*ep);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			dma_addr = dma_phy_addr + i * sizeof(*p);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}
59857ac29055SGiuseppe CAVALLARO 
/* stmmac_rings_status_show - debugfs "descriptors_status" show callback.
 *
 * Dumps every RX and TX DMA descriptor ring of the device.  Nothing is
 * printed while the interface is down, since the rings are only allocated
 * while the device is up.
 */
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			/* TBS rings use a different descriptor layout and
			 * are not dumped here - presumably because this
			 * dumper cannot render them; confirm upstream.
			 */
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
6031fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
60327ac29055SGiuseppe CAVALLARO 
/* stmmac_dma_cap_show - debugfs "dma_cap" show callback.
 *
 * Pretty-prints the DMA hardware feature set (priv->dma_cap) previously
 * read from the controller.  If the core does not expose the HW capability
 * register, a single "not supported" line is printed instead.
 */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* DWMAC4+ reports a single RX COE bit; older cores report two
	 * type-specific bits.
	 */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	return 0;
}
6131fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6132e7434821SGiuseppe CAVALLARO 
/* stmmac_device_event - netdev notifier to keep debugfs names in sync.
 *
 * When a stmmac interface is renamed (NETDEV_CHANGENAME), rename its
 * per-device debugfs directory to match the new interface name.
 * Events for non-stmmac devices are ignored.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Only react to devices driven by this driver */
	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}
6156481a7d15SJiping Ma 
/* Notifier block registered to receive netdev events for debugfs renames */
static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};
6160481a7d15SJiping Ma 
/* stmmac_init_fs - create the per-netdev debugfs directory and entries.
 * @dev: network device
 *
 * Creates <stmmac_fs_dir>/<ifname> with "descriptors_status" and "dma_cap"
 * read-only files.  Held under rtnl_lock - presumably to serialize against
 * concurrent interface renames handled by stmmac_device_event(); confirm.
 */
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}
61807ac29055SGiuseppe CAVALLARO 
/* stmmac_exit_fs - remove the per-netdev debugfs directory and its files */
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
618750fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
61887ac29055SGiuseppe CAVALLARO 
61893cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
61903cd1cfcbSJose Abreu {
61913cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
61923cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
61933cd1cfcbSJose Abreu 	u32 crc = ~0x0;
61943cd1cfcbSJose Abreu 	u32 temp = 0;
61953cd1cfcbSJose Abreu 	int i, bits;
61963cd1cfcbSJose Abreu 
61973cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
61983cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
61993cd1cfcbSJose Abreu 		if ((i % 8) == 0)
62003cd1cfcbSJose Abreu 			data_byte = data[i / 8];
62013cd1cfcbSJose Abreu 
62023cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
62033cd1cfcbSJose Abreu 		crc >>= 1;
62043cd1cfcbSJose Abreu 		data_byte >>= 1;
62053cd1cfcbSJose Abreu 
62063cd1cfcbSJose Abreu 		if (temp)
62073cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
62083cd1cfcbSJose Abreu 	}
62093cd1cfcbSJose Abreu 
62103cd1cfcbSJose Abreu 	return crc;
62113cd1cfcbSJose Abreu }
62123cd1cfcbSJose Abreu 
/* stmmac_vlan_update - reprogram the MAC VLAN filter from active_vlans.
 * @priv: driver private data
 * @is_double: true to filter on the inner tag of double-tagged frames
 *
 * Builds a 16-bin hash from the CRC of each active VID.  If the hardware
 * has no VLAN hash support, falls back to perfect filtering on a single
 * VID (at most one VID besides VID 0, which always passes the filter);
 * more than that returns -EOPNOTSUPP.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		/* Top 4 bits of the bit-reversed CRC select the hash bin */
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		/* vid holds the last VID visited by the loop above */
		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
62373cd1cfcbSJose Abreu 
/* stmmac_vlan_rx_add_vid - ndo_vlan_rx_add_vid callback.
 * @ndev: network device
 * @proto: VLAN protocol (802.1Q or 802.1AD)
 * @vid: VLAN ID being added
 *
 * Tracks the VID in active_vlans, refreshes the hash/perfect filter, and,
 * when the MAC has dedicated VLAN filter registers (num_vlan), programs
 * one of those as well.  On hash-update failure the VID is rolled back
 * out of active_vlans.
 */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	/* 802.1AD outer tag implies double-VLAN filtering */
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		return ret;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return 0;
}
6262dd6a4998SJose Abreu 
/* stmmac_vlan_rx_kill_vid - ndo_vlan_rx_kill_vid callback.
 * @ndev: network device
 * @proto: VLAN protocol (802.1Q or 802.1AD)
 * @vid: VLAN ID being removed
 *
 * Mirror of stmmac_vlan_rx_add_vid(): clears the VID, removes any
 * dedicated HW filter entry and refreshes the hash filter.  Wrapped in
 * pm_runtime get/put since filter registers are touched.
 */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto del_vlan_error;
	}

	ret = stmmac_vlan_update(priv, is_double);

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}
62933cd1cfcbSJose Abreu 
/* stmmac_bpf - ndo_bpf callback dispatching XDP commands.
 * @dev: network device
 * @bpf: XDP command descriptor
 *
 * Supports attaching/detaching an XDP program and configuring an AF_XDP
 * socket pool on a queue; other commands return -EOPNOTSUPP.
 */
static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
					     bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}
63085fabb012SOng Boon Leong 
/* stmmac_xdp_xmit - ndo_xdp_xmit callback for XDP_REDIRECT targets.
 * @dev: network device
 * @num_frames: number of frames in @frames
 * @frames: XDP frames to transmit
 * @flags: XDP_XMIT_* flags (only XDP_XMIT_FLUSH is meaningful here)
 *
 * Transmits frames on the per-CPU XDP TX queue, stopping at the first
 * frame the ring cannot accept.  Returns the number of frames queued,
 * or -ENETDOWN/-EINVAL on error.
 */
static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* XDP TX queue is chosen per-CPU to avoid cross-CPU contention */
	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	nq->trans_start = jiffies;

	for (i = 0; i < num_frames; i++) {
		int res;

		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}
63508b278a5bSOng Boon Leong 
/* stmmac_disable_rx_queue - stop one RX queue and release its resources.
 * @priv: driver private data
 * @queue: RX queue index
 *
 * Masks the queue's RX DMA interrupt (under the channel lock), stops the
 * RX DMA engine and frees the descriptor ring.  Counterpart of
 * stmmac_enable_rx_queue(); used e.g. when reconfiguring an XSK pool.
 */
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, queue);
}
6363bba2556eSOng Boon Leong 
/* stmmac_enable_rx_queue - (re)allocate and start one RX queue.
 * @priv: driver private data
 * @queue: RX queue index
 *
 * Allocates and initializes the RX descriptor ring, programs the DMA
 * channel base/tail pointers and buffer size (XSK frame size when an
 * AF_XDP pool is attached), starts RX DMA and unmasks the queue's RX
 * interrupt.  Errors are logged and the queue is left disabled.
 */
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	u32 buf_size;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_clear_rx_descriptors(priv, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, rx_q->queue_index);

	/* Tail pointer points past the last pre-filled descriptor */
	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
			     sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
			       rx_q->rx_tail_addr, rx_q->queue_index);

	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
		/* Buffers come from the XSK pool; use its frame size */
		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      buf_size,
				      rx_q->queue_index);
	} else {
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      priv->dma_buf_sz,
				      rx_q->queue_index);
	}

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
6412bba2556eSOng Boon Leong 
/* stmmac_disable_tx_queue - stop one TX queue and release its resources.
 * @priv: driver private data
 * @queue: TX queue index
 *
 * Masks the queue's TX DMA interrupt (under the channel lock), stops the
 * TX DMA engine and frees the descriptor ring.  Counterpart of
 * stmmac_enable_tx_queue().
 */
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, queue);
}
6425132c32eeSOng Boon Leong 
/* stmmac_enable_tx_queue - (re)allocate and start one TX queue.
 * @priv: driver private data
 * @queue: TX queue index
 *
 * Allocates and initializes the TX descriptor ring, programs the DMA
 * channel, enables TBS on the channel when the ring supports it, starts
 * TX DMA and unmasks the queue's TX interrupt.  Errors are logged and
 * the queue is left disabled.
 */
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_clear_tx_descriptors(priv, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, tx_q->queue_index);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);

	/* Empty ring: tail pointer starts at the ring base */
	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
			       tx_q->tx_tail_addr, tx_q->queue_index);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
6464132c32eeSOng Boon Leong 
/* stmmac_xsk_wakeup - ndo_xsk_wakeup callback for AF_XDP sockets.
 * @dev: network device
 * @queue: queue index the socket is bound to
 * @flags: XDP_WAKEUP_* flags (unused; RX and TX share one NAPI here)
 *
 * Schedules the combined rx/tx NAPI for the channel so pending XSK
 * descriptors get processed.  Returns -ENETDOWN, -ENXIO or -EINVAL when
 * the device/queue is not in a state where a wakeup makes sense.
 */
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	struct stmmac_channel *ch;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -ENXIO;

	/* The queue index must be valid for both RX and TX */
	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->rx_queue[queue];
	tx_q = &priv->tx_queue[queue];
	ch = &priv->channel[queue];

	/* Nothing to wake if neither direction has an XSK pool attached */
	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -ENXIO;

	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}
6500bba2556eSOng Boon Leong 
/* Netdevice callbacks shared by all stmmac-based MACs; includes the
 * XDP/AF_XDP entry points (ndo_bpf, ndo_xdp_xmit, ndo_xsk_wakeup).
 */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
};
65237ac6653aSJeff Kirsher 
/* If a reset was requested (STMMAC_RESET_REQUESTED) and the interface is
 * not already going down, bounce the device by closing and re-opening it
 * under the RTNL lock.
 */
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	/* Consume the pending request atomically; bail if none was set */
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	/* Don't reset while the interface is being taken down */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	/* Wait until we can claim the STMMAC_RESETING flag for ourselves,
	 * i.e. no other reset is currently in flight.
	 */
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	/* Mark the device down so concurrent paths back off, then bounce it */
	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
654534877a15SJose Abreu 
/* Deferred service work: runs the reset subtask, then clears
 * STMMAC_SERVICE_SCHED so the task may be scheduled again.
 */
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
			service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
655434877a15SJose Abreu 
/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function is to configure the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to setup either enhanced or
 *  normal descriptors.
 *  Return: 0 on success, a negative errno from stmmac_hwif_init() or the
 *  HW quirks callback otherwise.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		/* PHY-based WoL takes precedence over MAC PMT wake-up */
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!priv->plat->use_phy_wol;
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			/* hash_tb_sz encodes the table size as a power of
			 * two; translate it to bins and log2(bins).
			 */
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		/* No capability register: rely purely on platform data */
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}
6657cf3f047bSGiuseppe CAVALLARO 
66580366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev)
66590366f7e0SOng Boon Leong {
66600366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
66610366f7e0SOng Boon Leong 	u32 queue, maxq;
66620366f7e0SOng Boon Leong 
66630366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
66640366f7e0SOng Boon Leong 
66650366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
66660366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
66670366f7e0SOng Boon Leong 
66680366f7e0SOng Boon Leong 		ch->priv_data = priv;
66690366f7e0SOng Boon Leong 		ch->index = queue;
66702b94f526SMarek Szyprowski 		spin_lock_init(&ch->lock);
66710366f7e0SOng Boon Leong 
66720366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use) {
66730366f7e0SOng Boon Leong 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
66740366f7e0SOng Boon Leong 				       NAPI_POLL_WEIGHT);
66750366f7e0SOng Boon Leong 		}
66760366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use) {
66770366f7e0SOng Boon Leong 			netif_tx_napi_add(dev, &ch->tx_napi,
66780366f7e0SOng Boon Leong 					  stmmac_napi_poll_tx,
66790366f7e0SOng Boon Leong 					  NAPI_POLL_WEIGHT);
66800366f7e0SOng Boon Leong 		}
6681132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
6682132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
6683132c32eeSOng Boon Leong 			netif_napi_add(dev, &ch->rxtx_napi,
6684132c32eeSOng Boon Leong 				       stmmac_napi_poll_rxtx,
6685132c32eeSOng Boon Leong 				       NAPI_POLL_WEIGHT);
6686132c32eeSOng Boon Leong 		}
66870366f7e0SOng Boon Leong 	}
66880366f7e0SOng Boon Leong }
66890366f7e0SOng Boon Leong 
66900366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev)
66910366f7e0SOng Boon Leong {
66920366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
66930366f7e0SOng Boon Leong 	u32 queue, maxq;
66940366f7e0SOng Boon Leong 
66950366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
66960366f7e0SOng Boon Leong 
66970366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
66980366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
66990366f7e0SOng Boon Leong 
67000366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use)
67010366f7e0SOng Boon Leong 			netif_napi_del(&ch->rx_napi);
67020366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use)
67030366f7e0SOng Boon Leong 			netif_napi_del(&ch->tx_napi);
6704132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
6705132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
6706132c32eeSOng Boon Leong 			netif_napi_del(&ch->rxtx_napi);
6707132c32eeSOng Boon Leong 		}
67080366f7e0SOng Boon Leong 	}
67090366f7e0SOng Boon Leong }
67100366f7e0SOng Boon Leong 
/* Runtime reconfiguration of the RX/TX queue counts: tear the interface
 * down (if running), rebuild the per-channel NAPI instances for the new
 * counts, then bring it back up.
 * Return: 0 on success or the error from stmmac_open().
 */
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	/* NAPI contexts are per-channel: re-create them for the new counts */
	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
67310366f7e0SOng Boon Leong 
/* Runtime reconfiguration of the DMA descriptor ring sizes: the new
 * sizes take effect when the rings are re-allocated on re-open.
 * Return: 0 on success or the error from stmmac_open().
 */
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_rx_size = rx_size;
	priv->dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
6748aa042f60SSong, Yoong Siang 
67495a558611SOng Boon Leong #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
67505a558611SOng Boon Leong static void stmmac_fpe_lp_task(struct work_struct *work)
67515a558611SOng Boon Leong {
67525a558611SOng Boon Leong 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
67535a558611SOng Boon Leong 						fpe_task);
67545a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
67555a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
67565a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
67575a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
67585a558611SOng Boon Leong 	bool *enable = &fpe_cfg->enable;
67595a558611SOng Boon Leong 	int retries = 20;
67605a558611SOng Boon Leong 
67615a558611SOng Boon Leong 	while (retries-- > 0) {
67625a558611SOng Boon Leong 		/* Bail out immediately if FPE handshake is OFF */
67635a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
67645a558611SOng Boon Leong 			break;
67655a558611SOng Boon Leong 
67665a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_ENTERING_ON &&
67675a558611SOng Boon Leong 		    *lp_state == FPE_STATE_ENTERING_ON) {
67685a558611SOng Boon Leong 			stmmac_fpe_configure(priv, priv->ioaddr,
67695a558611SOng Boon Leong 					     priv->plat->tx_queues_to_use,
67705a558611SOng Boon Leong 					     priv->plat->rx_queues_to_use,
67715a558611SOng Boon Leong 					     *enable);
67725a558611SOng Boon Leong 
67735a558611SOng Boon Leong 			netdev_info(priv->dev, "configured FPE\n");
67745a558611SOng Boon Leong 
67755a558611SOng Boon Leong 			*lo_state = FPE_STATE_ON;
67765a558611SOng Boon Leong 			*lp_state = FPE_STATE_ON;
67775a558611SOng Boon Leong 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
67785a558611SOng Boon Leong 			break;
67795a558611SOng Boon Leong 		}
67805a558611SOng Boon Leong 
67815a558611SOng Boon Leong 		if ((*lo_state == FPE_STATE_CAPABLE ||
67825a558611SOng Boon Leong 		     *lo_state == FPE_STATE_ENTERING_ON) &&
67835a558611SOng Boon Leong 		     *lp_state != FPE_STATE_ON) {
67845a558611SOng Boon Leong 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
67855a558611SOng Boon Leong 				    *lo_state, *lp_state);
67865a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
67875a558611SOng Boon Leong 						MPACKET_VERIFY);
67885a558611SOng Boon Leong 		}
67895a558611SOng Boon Leong 		/* Sleep then retry */
67905a558611SOng Boon Leong 		msleep(500);
67915a558611SOng Boon Leong 	}
67925a558611SOng Boon Leong 
67935a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
67945a558611SOng Boon Leong }
67955a558611SOng Boon Leong 
67965a558611SOng Boon Leong void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
67975a558611SOng Boon Leong {
67985a558611SOng Boon Leong 	if (priv->plat->fpe_cfg->hs_enable != enable) {
67995a558611SOng Boon Leong 		if (enable) {
68005a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
68015a558611SOng Boon Leong 						MPACKET_VERIFY);
68025a558611SOng Boon Leong 		} else {
68035a558611SOng Boon Leong 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
68045a558611SOng Boon Leong 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
68055a558611SOng Boon Leong 		}
68065a558611SOng Boon Leong 
68075a558611SOng Boon Leong 		priv->plat->fpe_cfg->hs_enable = enable;
68085a558611SOng Boon Leong 	}
68095a558611SOng Boon Leong }
68105a558611SOng Boon Leong 
6811cf3f047bSGiuseppe CAVALLARO /**
6812bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
6813bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
6814ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
6815e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
 * Description: this is the main probe function, used to allocate the
 * network device (alloc_etherdev) and set up the private structure.
68189afec6efSAndy Shevchenko  * Return:
681915ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
68207ac6653aSJeff Kirsher  */
682115ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
6822cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
6823e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
68247ac6653aSJeff Kirsher {
6825bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
6826bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
68270366f7e0SOng Boon Leong 	u32 rxq;
682876067459SJose Abreu 	int i, ret = 0;
68297ac6653aSJeff Kirsher 
68309737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
68319737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
683241de8d4cSJoe Perches 	if (!ndev)
683315ffac73SJoachim Eastwood 		return -ENOMEM;
68347ac6653aSJeff Kirsher 
6835bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
68367ac6653aSJeff Kirsher 
6837bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
6838bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
6839bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
6840bfab27a1SGiuseppe CAVALLARO 
6841bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
6842cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
6843cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
6844e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
6845e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
68466ccf12aeSWong, Vee Khee 	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
6847e56788cfSJoachim Eastwood 
6848e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
6849e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
6850e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
68518532f613SOng Boon Leong 	priv->sfty_ce_irq = res->sfty_ce_irq;
68528532f613SOng Boon Leong 	priv->sfty_ue_irq = res->sfty_ue_irq;
68538532f613SOng Boon Leong 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
68548532f613SOng Boon Leong 		priv->rx_irq[i] = res->rx_irq[i];
68558532f613SOng Boon Leong 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
68568532f613SOng Boon Leong 		priv->tx_irq[i] = res->tx_irq[i];
6857e56788cfSJoachim Eastwood 
685883216e39SMichael Walle 	if (!is_zero_ether_addr(res->mac))
6859a96d317fSJakub Kicinski 		eth_hw_addr_set(priv->dev, res->mac);
6860bfab27a1SGiuseppe CAVALLARO 
6861a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
6862803f8fc4SJoachim Eastwood 
6863cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
6864cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
6865cf3f047bSGiuseppe CAVALLARO 
6866bba2556eSOng Boon Leong 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
6867bba2556eSOng Boon Leong 	if (!priv->af_xdp_zc_qps)
6868bba2556eSOng Boon Leong 		return -ENOMEM;
6869bba2556eSOng Boon Leong 
687034877a15SJose Abreu 	/* Allocate workqueue */
687134877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
687234877a15SJose Abreu 	if (!priv->wq) {
687334877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
68749737070cSJisheng Zhang 		return -ENOMEM;
687534877a15SJose Abreu 	}
687634877a15SJose Abreu 
687734877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
687834877a15SJose Abreu 
68795a558611SOng Boon Leong 	/* Initialize Link Partner FPE workqueue */
68805a558611SOng Boon Leong 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
68815a558611SOng Boon Leong 
6882cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
6883ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
6884ceb69499SGiuseppe CAVALLARO 	 */
6885cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
6886cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
6887cf3f047bSGiuseppe CAVALLARO 
688890f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
688990f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
6890f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
689190f522a2SEugeniy Paltsev 		/* Some reset controllers have only reset callback instead of
689290f522a2SEugeniy Paltsev 		 * assert + deassert callbacks pair.
689390f522a2SEugeniy Paltsev 		 */
689490f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
689590f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
689690f522a2SEugeniy Paltsev 	}
6897c5e4ddbdSChen-Yu Tsai 
6898e67f325eSMatthew Hagan 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
6899e67f325eSMatthew Hagan 	if (ret == -ENOTSUPP)
6900e67f325eSMatthew Hagan 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
6901e67f325eSMatthew Hagan 			ERR_PTR(ret));
6902e67f325eSMatthew Hagan 
6903cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
6904c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
6905c24602efSGiuseppe CAVALLARO 	if (ret)
690662866e98SChen-Yu Tsai 		goto error_hw_init;
6907cf3f047bSGiuseppe CAVALLARO 
690896874c61SMohammad Athari Bin Ismail 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
690996874c61SMohammad Athari Bin Ismail 	 */
691096874c61SMohammad Athari Bin Ismail 	if (priv->synopsys_id < DWMAC_CORE_5_20)
691196874c61SMohammad Athari Bin Ismail 		priv->plat->dma_cfg->dche = false;
691296874c61SMohammad Athari Bin Ismail 
6913b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
6914b561af36SVinod Koul 
6915cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
6916cf3f047bSGiuseppe CAVALLARO 
6917cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6918cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
6919f748be53SAlexandre TORGUE 
69204dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
69214dbbe8ddSJose Abreu 	if (!ret) {
69224dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
69234dbbe8ddSJose Abreu 	}
69244dbbe8ddSJose Abreu 
6925f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
69269edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
6927b7766206SJose Abreu 		if (priv->plat->has_gmac4)
6928b7766206SJose Abreu 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
6929f748be53SAlexandre TORGUE 		priv->tso = true;
693038ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
6931f748be53SAlexandre TORGUE 	}
6932a993db88SJose Abreu 
693367afd6d1SJose Abreu 	if (priv->dma_cap.sphen) {
693467afd6d1SJose Abreu 		ndev->hw_features |= NETIF_F_GRO;
6935d08d32d1SOng Boon Leong 		priv->sph_cap = true;
6936d08d32d1SOng Boon Leong 		priv->sph = priv->sph_cap;
693767afd6d1SJose Abreu 		dev_info(priv->device, "SPH feature enabled\n");
693867afd6d1SJose Abreu 	}
693967afd6d1SJose Abreu 
6940f119cc98SFugang Duan 	/* The current IP register MAC_HW_Feature1[ADDR64] only define
6941f119cc98SFugang Duan 	 * 32/40/64 bit width, but some SOC support others like i.MX8MP
6942f119cc98SFugang Duan 	 * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
6943f119cc98SFugang Duan 	 * So overwrite dma_cap.addr64 according to HW real design.
6944f119cc98SFugang Duan 	 */
6945f119cc98SFugang Duan 	if (priv->plat->addr64)
6946f119cc98SFugang Duan 		priv->dma_cap.addr64 = priv->plat->addr64;
6947f119cc98SFugang Duan 
6948a993db88SJose Abreu 	if (priv->dma_cap.addr64) {
6949a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
6950a993db88SJose Abreu 				DMA_BIT_MASK(priv->dma_cap.addr64));
6951a993db88SJose Abreu 		if (!ret) {
6952a993db88SJose Abreu 			dev_info(priv->device, "Using %d bits DMA width\n",
6953a993db88SJose Abreu 				 priv->dma_cap.addr64);
6954968a2978SThierry Reding 
6955968a2978SThierry Reding 			/*
6956968a2978SThierry Reding 			 * If more than 32 bits can be addressed, make sure to
6957968a2978SThierry Reding 			 * enable enhanced addressing mode.
6958968a2978SThierry Reding 			 */
6959968a2978SThierry Reding 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
6960968a2978SThierry Reding 				priv->plat->dma_cfg->eame = true;
6961a993db88SJose Abreu 		} else {
6962a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
6963a993db88SJose Abreu 			if (ret) {
6964a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
6965a993db88SJose Abreu 				goto error_hw_init;
6966a993db88SJose Abreu 			}
6967a993db88SJose Abreu 
6968a993db88SJose Abreu 			priv->dma_cap.addr64 = 32;
6969a993db88SJose Abreu 		}
6970a993db88SJose Abreu 	}
6971a993db88SJose Abreu 
6972bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
6973bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
69747ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
69757ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
6976ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
69773cd1cfcbSJose Abreu 	if (priv->dma_cap.vlhash) {
69783cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
69793cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
69803cd1cfcbSJose Abreu 	}
698130d93227SJose Abreu 	if (priv->dma_cap.vlins) {
698230d93227SJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
698330d93227SJose Abreu 		if (priv->dma_cap.dvlan)
698430d93227SJose Abreu 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
698530d93227SJose Abreu 	}
69867ac6653aSJeff Kirsher #endif
69877ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
69887ac6653aSJeff Kirsher 
698976067459SJose Abreu 	/* Initialize RSS */
699076067459SJose Abreu 	rxq = priv->plat->rx_queues_to_use;
699176067459SJose Abreu 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
699276067459SJose Abreu 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
699376067459SJose Abreu 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
699476067459SJose Abreu 
699576067459SJose Abreu 	if (priv->dma_cap.rssen && priv->plat->rss_en)
699676067459SJose Abreu 		ndev->features |= NETIF_F_RXHASH;
699776067459SJose Abreu 
699844770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
699944770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
700056bcd591SJose Abreu 	if (priv->plat->has_xgmac)
70017d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
700256bcd591SJose Abreu 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
700356bcd591SJose Abreu 		ndev->max_mtu = JUMBO_LEN;
700444770e11SJarod Wilson 	else
700544770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7006a2cd64f3SKweh, Hock Leong 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7007a2cd64f3SKweh, Hock Leong 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7008a2cd64f3SKweh, Hock Leong 	 */
7009a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7010a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
701144770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
7012a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
7013b618ab45SHeiner Kallweit 		dev_warn(priv->device,
7014a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
7015a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
701644770e11SJarod Wilson 
70177ac6653aSJeff Kirsher 	if (flow_ctrl)
70187ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
70197ac6653aSJeff Kirsher 
70208fce3331SJose Abreu 	/* Setup channels NAPI */
70210366f7e0SOng Boon Leong 	stmmac_napi_add(ndev);
70227ac6653aSJeff Kirsher 
702329555fa3SThierry Reding 	mutex_init(&priv->lock);
70247ac6653aSJeff Kirsher 
7025cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
7026cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
7027cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7028cd7201f4SGiuseppe CAVALLARO 	 * set the MDC clock dynamically according to the csr actual
7029cd7201f4SGiuseppe CAVALLARO 	 * clock input.
7030cd7201f4SGiuseppe CAVALLARO 	 */
70315e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
7032cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
70335e7f7fc5SBiao Huang 	else
70345e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
7035cd7201f4SGiuseppe CAVALLARO 
7036e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
7037e58bb43fSGiuseppe CAVALLARO 
70385ec55823SJoakim Zhang 	pm_runtime_get_noresume(device);
70395ec55823SJoakim Zhang 	pm_runtime_set_active(device);
70405ec55823SJoakim Zhang 	pm_runtime_enable(device);
70415ec55823SJoakim Zhang 
7042a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
70433fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
70444bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
70454bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
70464bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
7047b618ab45SHeiner Kallweit 			dev_err(priv->device,
704838ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
70494bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
70506a81c26fSViresh Kumar 			goto error_mdio_register;
70514bfcbd7aSFrancesco Virlinzi 		}
7052e58bb43fSGiuseppe CAVALLARO 	}
70534bfcbd7aSFrancesco Virlinzi 
705446682cb8SVoon Weifeng 	if (priv->plat->speed_mode_2500)
705546682cb8SVoon Weifeng 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
705646682cb8SVoon Weifeng 
70577413f9a6SVladimir Oltean 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7058597a68ceSVoon Weifeng 		ret = stmmac_xpcs_setup(priv->mii);
7059597a68ceSVoon Weifeng 		if (ret)
7060597a68ceSVoon Weifeng 			goto error_xpcs_setup;
7061597a68ceSVoon Weifeng 	}
7062597a68ceSVoon Weifeng 
706374371272SJose Abreu 	ret = stmmac_phy_setup(priv);
706474371272SJose Abreu 	if (ret) {
706574371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
706674371272SJose Abreu 		goto error_phy_setup;
706774371272SJose Abreu 	}
706874371272SJose Abreu 
706957016590SFlorian Fainelli 	ret = register_netdev(ndev);
7070b2eb09afSFlorian Fainelli 	if (ret) {
7071b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
707257016590SFlorian Fainelli 			__func__, ret);
7073b2eb09afSFlorian Fainelli 		goto error_netdev_register;
7074b2eb09afSFlorian Fainelli 	}
70757ac6653aSJeff Kirsher 
7076b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerup) {
7077b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
7078b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
7079b9663b7cSVoon Weifeng 
7080b9663b7cSVoon Weifeng 		if (ret < 0)
7081801eb050SAndy Shevchenko 			goto error_serdes_powerup;
7082b9663b7cSVoon Weifeng 	}
7083b9663b7cSVoon Weifeng 
70845f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
70858d72ab11SGreg Kroah-Hartman 	stmmac_init_fs(ndev);
70865f2b8b62SThierry Reding #endif
70875f2b8b62SThierry Reding 
70885ec55823SJoakim Zhang 	/* Let pm_runtime_put() disable the clocks.
70895ec55823SJoakim Zhang 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
70905ec55823SJoakim Zhang 	 */
70915ec55823SJoakim Zhang 	pm_runtime_put(device);
70925ec55823SJoakim Zhang 
709357016590SFlorian Fainelli 	return ret;
70947ac6653aSJeff Kirsher 
7095801eb050SAndy Shevchenko error_serdes_powerup:
7096801eb050SAndy Shevchenko 	unregister_netdev(ndev);
70976a81c26fSViresh Kumar error_netdev_register:
709874371272SJose Abreu 	phylink_destroy(priv->phylink);
7099597a68ceSVoon Weifeng error_xpcs_setup:
710074371272SJose Abreu error_phy_setup:
7101a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7102b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7103b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
71047ac6653aSJeff Kirsher error_mdio_register:
71050366f7e0SOng Boon Leong 	stmmac_napi_del(ndev);
710662866e98SChen-Yu Tsai error_hw_init:
710734877a15SJose Abreu 	destroy_workqueue(priv->wq);
7108d7f576dcSWong Vee Khee 	bitmap_free(priv->af_xdp_zc_qps);
71097ac6653aSJeff Kirsher 
711015ffac73SJoachim Eastwood 	return ret;
71117ac6653aSJeff Kirsher }
7112b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
71137ac6653aSJeff Kirsher 
71147ac6653aSJeff Kirsher /**
71157ac6653aSJeff Kirsher  * stmmac_dvr_remove
7116f4e7bd81SJoachim Eastwood  * @dev: device pointer
71177ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7118bfab27a1SGiuseppe CAVALLARO  * changes the link status, releases the DMA descriptor rings.
71197ac6653aSJeff Kirsher  */
7120f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
71217ac6653aSJeff Kirsher {
7122f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
71237ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
71247ac6653aSJeff Kirsher 
712538ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
71267ac6653aSJeff Kirsher 
7127ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
7128c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
71297ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
71307ac6653aSJeff Kirsher 	unregister_netdev(ndev);
71319a7b3950SOng Boon Leong 
71329a7b3950SOng Boon Leong 	/* Serdes power down needs to happen after VLAN filter
71339a7b3950SOng Boon Leong 	 * is deleted that is triggered by unregister_netdev().
71349a7b3950SOng Boon Leong 	 */
71359a7b3950SOng Boon Leong 	if (priv->plat->serdes_powerdown)
71369a7b3950SOng Boon Leong 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
71379a7b3950SOng Boon Leong 
7138474a31e1SAaro Koskinen #ifdef CONFIG_DEBUG_FS
7139474a31e1SAaro Koskinen 	stmmac_exit_fs(ndev);
7140474a31e1SAaro Koskinen #endif
714174371272SJose Abreu 	phylink_destroy(priv->phylink);
7142f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
7143f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
7144e67f325eSMatthew Hagan 	reset_control_assert(priv->plat->stmmac_ahb_rst);
71455ec55823SJoakim Zhang 	pm_runtime_put(dev);
71465ec55823SJoakim Zhang 	pm_runtime_disable(dev);
7147a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
71483fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7149e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
715034877a15SJose Abreu 	destroy_workqueue(priv->wq);
715129555fa3SThierry Reding 	mutex_destroy(&priv->lock);
7152d7f576dcSWong Vee Khee 	bitmap_free(priv->af_xdp_zc_qps);
71537ac6653aSJeff Kirsher 
71547ac6653aSJeff Kirsher 	return 0;
71557ac6653aSJeff Kirsher }
7156b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
71577ac6653aSJeff Kirsher 
7158732fdf0eSGiuseppe CAVALLARO /**
7159732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
7160f4e7bd81SJoachim Eastwood  * @dev: device pointer
7161732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device and it is called
7162732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queue, release the resources,
7163732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL), clean and release driver resources.
 * Return: always 0.
7164732fdf0eSGiuseppe CAVALLARO  */
7165f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
71667ac6653aSJeff Kirsher {
7167f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
71687ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
716914b41a29SNicolin Chen 	u32 chan;
71707ac6653aSJeff Kirsher 
	/* Nothing to do if the interface was never brought up */
71717ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
71727ac6653aSJeff Kirsher 		return 0;
71737ac6653aSJeff Kirsher 
7174134cc4ceSThierry Reding 	mutex_lock(&priv->lock);
717519e13cb2SJose Abreu 
71767ac6653aSJeff Kirsher 	netif_device_detach(ndev);
71777ac6653aSJeff Kirsher 
7178c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
71797ac6653aSJeff Kirsher 
	/* Cancel the per-channel TX coalescing timers so none can fire
	 * after the DMA is stopped below.
	 */
718014b41a29SNicolin Chen 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7181d5a05e69SVincent Whitchurch 		hrtimer_cancel(&priv->tx_queue[chan].txtimer)
71825f585913SFugang Duan 	if (priv->eee_enabled) {
71835f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
71845f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
71855f585913SFugang Duan 	}
71865f585913SFugang Duan 
71877ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
7188ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
7189c24602efSGiuseppe CAVALLARO 
7190b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerdown)
7191b9663b7cSVoon Weifeng 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7192b9663b7cSVoon Weifeng 
71937ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
7194e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		/* Wake-on-LAN via the MAC's PMT block: arm it and remember
		 * the IRQ must stay enabled across suspend.
		 */
7195c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
719689f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
719789f7f2cfSSrinivas Kandagatla 	} else {
		/* No WoL: gate the MAC off and move pins to sleep state */
7198c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
7199db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
720030f347aeSYang Yingliang 	}
72015a558611SOng Boon Leong 
720229555fa3SThierry Reding 	mutex_unlock(&priv->lock);
72032d871aa0SVince Bridgers 
	/* phylink state changes require the RTNL lock; keep the link up
	 * (mac_wol=true) only when PMT-based wakeup is armed.
	 */
720490702dcdSJoakim Zhang 	rtnl_lock();
720590702dcdSJoakim Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
720690702dcdSJoakim Zhang 		phylink_suspend(priv->phylink, true);
720790702dcdSJoakim Zhang 	} else {
720890702dcdSJoakim Zhang 		if (device_may_wakeup(priv->device))
720990702dcdSJoakim Zhang 			phylink_speed_down(priv->phylink, false);
721090702dcdSJoakim Zhang 		phylink_suspend(priv->phylink, false);
721190702dcdSJoakim Zhang 	}
721290702dcdSJoakim Zhang 	rtnl_unlock();
721390702dcdSJoakim Zhang 
	/* If hardware frame preemption is present, disable it and stop the
	 * FPE handshake workqueue before going down.
	 */
72145a558611SOng Boon Leong 	if (priv->dma_cap.fpesel) {
72155a558611SOng Boon Leong 		/* Disable FPE */
72165a558611SOng Boon Leong 		stmmac_fpe_configure(priv, priv->ioaddr,
72175a558611SOng Boon Leong 				     priv->plat->tx_queues_to_use,
72185a558611SOng Boon Leong 				     priv->plat->rx_queues_to_use, false);
72195a558611SOng Boon Leong 
72205a558611SOng Boon Leong 		stmmac_fpe_handshake(priv, false);
72216b28a86dSMohammad Athari Bin Ismail 		stmmac_fpe_stop_wq(priv);
72225a558611SOng Boon Leong 	}
72235a558611SOng Boon Leong 
	/* Force a fresh link-speed evaluation on resume */
7224bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
72257ac6653aSJeff Kirsher 	return 0;
72267ac6653aSJeff Kirsher }
7227b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
72297ac6653aSJeff Kirsher 
7230732fdf0eSGiuseppe CAVALLARO /**
723154139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
7232d0ea5cbdSJesse Brandeburg  * @priv: device pointer
 * Description: reset the software state of every RX and TX ring: the
 * producer/consumer indices, the cached TSO MSS, and the BQL accounting
 * of each TX netdev queue, so the rings restart from a clean state.
723354139cf3SJoao Pinto  */
723454139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
723554139cf3SJoao Pinto {
723654139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7237ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
723854139cf3SJoao Pinto 	u32 queue;
723954139cf3SJoao Pinto 
724054139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
724154139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
724254139cf3SJoao Pinto 
724354139cf3SJoao Pinto 		rx_q->cur_rx = 0;
724454139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
724554139cf3SJoao Pinto 	}
724654139cf3SJoao Pinto 
7247ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
7248ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7249ce736788SJoao Pinto 
7250ce736788SJoao Pinto 		tx_q->cur_tx = 0;
7251ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
		/* Invalidate cached TSO MSS so it is reprogrammed on next use */
72528d212a9eSNiklas Cassel 		tx_q->mss = 0;
7253c511819dSJoakim Zhang 
		/* Reset byte-queue-limit state to match the emptied ring */
7254c511819dSJoakim Zhang 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7255ce736788SJoao Pinto 	}
725654139cf3SJoao Pinto }
725854139cf3SJoao Pinto 
7259732fdf0eSGiuseppe CAVALLARO /**
7260732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
7261f4e7bd81SJoachim Eastwood  * @dev: device pointer
7262732fdf0eSGiuseppe CAVALLARO  * Description: when resume this function is invoked to setup the DMA and CORE
7263732fdf0eSGiuseppe CAVALLARO  * in a usable state.
 * Return: 0 on success, a negative errno if serdes power-up fails.
7264732fdf0eSGiuseppe CAVALLARO  */
7265f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
72667ac6653aSJeff Kirsher {
7267f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
72687ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
7269b9663b7cSVoon Weifeng 	int ret;
72707ac6653aSJeff Kirsher 
72717ac6653aSJeff Kirsher 	if (!netif_running(ndev))
72727ac6653aSJeff Kirsher 		return 0;
72737ac6653aSJeff Kirsher 
72747ac6653aSJeff Kirsher 	/* Power Down bit, into the PM register, is cleared
72757ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
72767ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
72777ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
7278ceb69499SGiuseppe CAVALLARO 	 * from another devices (e.g. serial console).
7279ceb69499SGiuseppe CAVALLARO 	 */
7280e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
728129555fa3SThierry Reding 		mutex_lock(&priv->lock);
7282c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
728329555fa3SThierry Reding 		mutex_unlock(&priv->lock);
728489f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
7285623997fbSSrinivas Kandagatla 	} else {
		/* No WoL was armed: restore pin state and reset the PHY */
7286db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
7287623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
7288623997fbSSrinivas Kandagatla 		if (priv->mii)
7289623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
7290623997fbSSrinivas Kandagatla 	}
72917ac6653aSJeff Kirsher 
	/* NOTE(review): a serdes_powerup() failure returns here with the
	 * device still detached — presumably acceptable since resume cannot
	 * proceed without the serdes; confirm against platform callers.
	 */
7292b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerup) {
7293b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
7294b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
7295b9663b7cSVoon Weifeng 
7296b9663b7cSVoon Weifeng 		if (ret < 0)
7297b9663b7cSVoon Weifeng 			return ret;
7298b9663b7cSVoon Weifeng 	}
7299b9663b7cSVoon Weifeng 
	/* phylink operations require RTNL. NOTE(review): both branches call
	 * phylink_resume() identically and the lock is dropped and retaken
	 * just below — looks simplifiable, but kept as-is here.
	 */
730036d18b56SFugang Duan 	rtnl_lock();
730190702dcdSJoakim Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
730290702dcdSJoakim Zhang 		phylink_resume(priv->phylink);
730390702dcdSJoakim Zhang 	} else {
730490702dcdSJoakim Zhang 		phylink_resume(priv->phylink);
730590702dcdSJoakim Zhang 		if (device_may_wakeup(priv->device))
730636d18b56SFugang Duan 			phylink_speed_up(priv->phylink);
730736d18b56SFugang Duan 	}
730890702dcdSJoakim Zhang 	rtnl_unlock();
730936d18b56SFugang Duan 
73108e5debedSWong Vee Khee 	rtnl_lock();
731129555fa3SThierry Reding 	mutex_lock(&priv->lock);
7312f55d84b0SVincent Palatin 
	/* Rebuild ring software state and reprogram the hardware from
	 * scratch: indices, descriptors, MAC/DMA setup, coalescing, RX
	 * filters and VLAN filter table.
	 */
731354139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
731400423969SThierry Reding 
73154ec236c7SFugang Duan 	stmmac_free_tx_skbufs(priv);
7316ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
7317ae79a639SGiuseppe CAVALLARO 
7318fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
7319d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
7320ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
73217ac6653aSJeff Kirsher 
7322ed64639bSWong Vee Khee 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7323ed64639bSWong Vee Khee 
7324c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
73257ac6653aSJeff Kirsher 
7326134cc4ceSThierry Reding 	mutex_unlock(&priv->lock);
73278e5debedSWong Vee Khee 	rtnl_unlock();
7328134cc4ceSThierry Reding 
	/* Re-attach last, once the device is fully operational again */
732931096c3eSLeon Yu 	netif_device_attach(ndev);
733031096c3eSLeon Yu 
73317ac6653aSJeff Kirsher 	return 0;
73327ac6653aSJeff Kirsher }
7333ba27ec66SGiuseppe CAVALLARO 
73347ac6653aSJeff Kirsher #ifndef MODULE
73357ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
73367ac6653aSJeff Kirsher {
73377ac6653aSJeff Kirsher 	char *opt;
73387ac6653aSJeff Kirsher 
73397ac6653aSJeff Kirsher 	if (!str || !*str)
73407ac6653aSJeff Kirsher 		return -EINVAL;
73417ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
73427ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
7343ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
73447ac6653aSJeff Kirsher 				goto err;
73457ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7346ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
73477ac6653aSJeff Kirsher 				goto err;
73487ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7349ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
73507ac6653aSJeff Kirsher 				goto err;
73517ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
7352ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
73537ac6653aSJeff Kirsher 				goto err;
73547ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
7355ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
73567ac6653aSJeff Kirsher 				goto err;
73577ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7358ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
73597ac6653aSJeff Kirsher 				goto err;
73607ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
7361ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
73627ac6653aSJeff Kirsher 				goto err;
7363506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7364d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
7365d765955dSGiuseppe CAVALLARO 				goto err;
73664a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
73674a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
73684a7d666aSGiuseppe CAVALLARO 				goto err;
73697ac6653aSJeff Kirsher 		}
73707ac6653aSJeff Kirsher 	}
73717ac6653aSJeff Kirsher 	return 0;
73727ac6653aSJeff Kirsher 
73737ac6653aSJeff Kirsher err:
73747ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
73757ac6653aSJeff Kirsher 	return -EINVAL;
73767ac6653aSJeff Kirsher }
73777ac6653aSJeff Kirsher 
73787ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
7379ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
73806fc0d0f2SGiuseppe Cavallaro 
/* Module init: set up the shared debugfs root and the netdevice notifier
 * used to keep per-device debugfs entries in sync. Always succeeds.
 */
7381466c5ac8SMathieu Olivari static int __init stmmac_init(void)
7382466c5ac8SMathieu Olivari {
7383466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
7384466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
73858d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
7386466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	/* Track renames/unregisters to maintain the debugfs hierarchy */
7387474a31e1SAaro Koskinen 	register_netdevice_notifier(&stmmac_notifier);
7388466c5ac8SMathieu Olivari #endif
7389466c5ac8SMathieu Olivari 
7390466c5ac8SMathieu Olivari 	return 0;
7391466c5ac8SMathieu Olivari }
7392466c5ac8SMathieu Olivari 
/* Module exit: undo stmmac_init() — drop the notifier first, then remove
 * the whole debugfs tree.
 */
7393466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
7394466c5ac8SMathieu Olivari {
7395466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
7396474a31e1SAaro Koskinen 	unregister_netdevice_notifier(&stmmac_notifier);
7397466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
7398466c5ac8SMathieu Olivari #endif
7399466c5ac8SMathieu Olivari }
7400466c5ac8SMathieu Olivari 
/* Module entry/exit hooks and metadata */
7401466c5ac8SMathieu Olivari module_init(stmmac_init)
7402466c5ac8SMathieu Olivari module_exit(stmmac_exit)
7403466c5ac8SMathieu Olivari 
74046fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
74056fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
74066fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
7407