xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 060f35a317ef09101b128f399dce7ed13d019461)
// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
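/* Worked example (assuming SMP_CACHE_BYTES == 64 on this platform):
 * STMMAC_ALIGN(1500) = ALIGN(ALIGN(1500, 64), 16) = ALIGN(1536, 16) = 1536,
 * i.e. a buffer size is first rounded up to a full cache line and the
 * result is then kept 16-byte aligned for the DMA engine.
 */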

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but allows the user to force the use of chain mode instead of ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
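/* Usage sketch (hypothetical values): these parameters can be set at module
 * load time, e.g. "modprobe stmmac eee_timer=2000 chain_mode=1", or, when the
 * driver is built in, on the kernel command line as "stmmac.buf_sz=4096".
 */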

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
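/* Usage sketch (hypothetical caller): glue layers typically bracket a
 * suspended region with this helper, e.g.
 *
 *	ret = stmmac_bus_clks_config(priv, false);	// gate bus clocks
 *	...
 *	ret = stmmac_bus_clks_config(priv, true);	// ungate on resume
 *
 * Note that a failure on the enable path unwinds and leaves all clocks
 * disabled again.
 */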

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->dma_conf.rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider, as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
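/* Worked example (assuming a hypothetical 125 MHz CSR clock): clk_rate =
 * 125000000 falls in the [CSR_F_100M, CSR_F_150M) range, so priv->clk_csr
 * becomes STMMAC_CSR_100_150M; that register field selects the divider that
 * keeps MDC within the 2.5 MHz maximum specified by IEEE 802.3.
 */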

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
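/* Worked example (hypothetical ring state): with dma_tx_size = 512,
 * cur_tx = 10 and dirty_tx = 5, the producer index has not wrapped, so
 * avail = 512 - 10 + 5 - 1 = 506 free descriptors; one slot is always
 * kept unused so a full ring can be told apart from an empty one.
 */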

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all TX queues are idle and, if
 * so, enters LPI mode for EEE.
 */
static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return -EBUSY; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
	return 0;
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state is
 * true. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* When using a PCS we cannot access the PHY registers at this stage,
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		ns -= priv->plat->cdc_error_adj;

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
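/* Userspace usage sketch (standard SIOCSHWTSTAMP API; the socket fd and
 * interface name below are hypothetical):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The ioctl is dispatched to stmmac_hwtstamp_set(); on return, cfg.rx_filter
 * holds the filter actually programmed, which may be broader than requested.
 */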

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, in which case the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * addend = 2^32 / freq_div_ratio,
	 * where freq_div_ratio = clk_ptp_rate / (1e9 ns / sec_inc),
	 * i.e. addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate
	 */
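	/* Worked example (hypothetical rates): with sec_inc = 20 ns and
	 * clk_ptp_rate = 62500000 Hz, 1e9 / 20 = 50000000, so
	 * addend = 2^32 * 50000000 / 62500000 = 0.8 * 2^32 = 0xCCCCCCCC;
	 * the accumulator then overflows (and the counter ticks) on 4 out
	 * of every 5 reference cycles, giving the requested 20 ns resolution.
	 */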
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this verifies whether the HW supports PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode passed to the flow control helper
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	if (priv->hw->xpcs)
		return &priv->hw->xpcs->pcs;

	if (priv->hw->lynx_pcs)
		return priv->hw->lynx_pcs;

	return NULL;
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}
958eeef2f6bSJose Abreu 
9595a558611SOng Boon Leong static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
9605a558611SOng Boon Leong {
9615a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
9625a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
9635a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
9645a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
9655a558611SOng Boon Leong 
9665a558611SOng Boon Leong 	if (is_up && *hs_enable) {
967e1fbdef9SJianheng Zhang 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968e1fbdef9SJianheng Zhang 					MPACKET_VERIFY);
9695a558611SOng Boon Leong 	} else {
9701f7096f0SWong Vee Khee 		*lo_state = FPE_STATE_OFF;
9711f7096f0SWong Vee Khee 		*lp_state = FPE_STATE_OFF;
9725a558611SOng Boon Leong 	}
9735a558611SOng Boon Leong }
9745a558611SOng Boon Leong 
97574371272SJose Abreu static void stmmac_mac_link_down(struct phylink_config *config,
97674371272SJose Abreu 				 unsigned int mode, phy_interface_t interface)
9779ad372fcSJose Abreu {
97874371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
9799ad372fcSJose Abreu 
9809ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
98174371272SJose Abreu 	priv->eee_active = false;
982388e201dSVineetha G. Jaya Kumaran 	priv->tx_lpi_enabled = false;
983d4aeaed8SWong Vee Khee 	priv->eee_enabled = stmmac_eee_init(priv);
98474371272SJose Abreu 	stmmac_set_eee_pls(priv, priv->hw, false);
9855a558611SOng Boon Leong 
98663c173ffSMohammad Athari Bin Ismail 	if (priv->dma_cap.fpesel)
9875a558611SOng Boon Leong 		stmmac_fpe_link_state_handle(priv, false);
9889ad372fcSJose Abreu }
9899ad372fcSJose Abreu 
99074371272SJose Abreu static void stmmac_mac_link_up(struct phylink_config *config,
99191a208f2SRussell King 			       struct phy_device *phy,
99274371272SJose Abreu 			       unsigned int mode, phy_interface_t interface,
99391a208f2SRussell King 			       int speed, int duplex,
99491a208f2SRussell King 			       bool tx_pause, bool rx_pause)
9959ad372fcSJose Abreu {
99674371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997a3a57bf0SHeiner Kallweit 	u32 old_ctrl, ctrl;
99846f69dedSJose Abreu 
999efe92571SBartosz Golaszewski 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000efe92571SBartosz Golaszewski 	    priv->plat->serdes_powerup)
1001a46e9010SRevanth Kumar Uppala 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002a46e9010SRevanth Kumar Uppala 
1003a3a57bf0SHeiner Kallweit 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004a3a57bf0SHeiner Kallweit 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
100546f69dedSJose Abreu 
100646f69dedSJose Abreu 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
100746f69dedSJose Abreu 		switch (speed) {
100846f69dedSJose Abreu 		case SPEED_10000:
100946f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed10000;
101046f69dedSJose Abreu 			break;
101146f69dedSJose Abreu 		case SPEED_5000:
101246f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed5000;
101346f69dedSJose Abreu 			break;
101446f69dedSJose Abreu 		case SPEED_2500:
101546f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed2500;
101646f69dedSJose Abreu 			break;
101746f69dedSJose Abreu 		default:
101846f69dedSJose Abreu 			return;
101946f69dedSJose Abreu 		}
10208a880936SJose Abreu 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
10218a880936SJose Abreu 		switch (speed) {
10228a880936SJose Abreu 		case SPEED_100000:
10238a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed100000;
10248a880936SJose Abreu 			break;
10258a880936SJose Abreu 		case SPEED_50000:
10268a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed50000;
10278a880936SJose Abreu 			break;
10288a880936SJose Abreu 		case SPEED_40000:
10298a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed40000;
10308a880936SJose Abreu 			break;
10318a880936SJose Abreu 		case SPEED_25000:
10328a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed25000;
10338a880936SJose Abreu 			break;
10348a880936SJose Abreu 		case SPEED_10000:
10358a880936SJose Abreu 			ctrl |= priv->hw->link.xgmii.speed10000;
10368a880936SJose Abreu 			break;
10378a880936SJose Abreu 		case SPEED_2500:
10388a880936SJose Abreu 			ctrl |= priv->hw->link.speed2500;
10398a880936SJose Abreu 			break;
10408a880936SJose Abreu 		case SPEED_1000:
10418a880936SJose Abreu 			ctrl |= priv->hw->link.speed1000;
10428a880936SJose Abreu 			break;
10438a880936SJose Abreu 		default:
10448a880936SJose Abreu 			return;
10458a880936SJose Abreu 		}
104646f69dedSJose Abreu 	} else {
104746f69dedSJose Abreu 		switch (speed) {
104846f69dedSJose Abreu 		case SPEED_2500:
104946f69dedSJose Abreu 			ctrl |= priv->hw->link.speed2500;
105046f69dedSJose Abreu 			break;
105146f69dedSJose Abreu 		case SPEED_1000:
105246f69dedSJose Abreu 			ctrl |= priv->hw->link.speed1000;
105346f69dedSJose Abreu 			break;
105446f69dedSJose Abreu 		case SPEED_100:
105546f69dedSJose Abreu 			ctrl |= priv->hw->link.speed100;
105646f69dedSJose Abreu 			break;
105746f69dedSJose Abreu 		case SPEED_10:
105846f69dedSJose Abreu 			ctrl |= priv->hw->link.speed10;
105946f69dedSJose Abreu 			break;
106046f69dedSJose Abreu 		default:
106146f69dedSJose Abreu 			return;
106246f69dedSJose Abreu 		}
106346f69dedSJose Abreu 	}
106446f69dedSJose Abreu 
106546f69dedSJose Abreu 	priv->speed = speed;
106646f69dedSJose Abreu 
106746f69dedSJose Abreu 	if (priv->plat->fix_mac_speed)
10681fc04a0bSShenwei Wang 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
106946f69dedSJose Abreu 
107046f69dedSJose Abreu 	if (!duplex)
107146f69dedSJose Abreu 		ctrl &= ~priv->hw->link.duplex;
107246f69dedSJose Abreu 	else
107346f69dedSJose Abreu 		ctrl |= priv->hw->link.duplex;
107446f69dedSJose Abreu 
107546f69dedSJose Abreu 	/* Flow Control operation */
1076cc3d2b5fSGoh, Wei Sheng 	if (rx_pause && tx_pause)
1077cc3d2b5fSGoh, Wei Sheng 		priv->flow_ctrl = FLOW_AUTO;
1078cc3d2b5fSGoh, Wei Sheng 	else if (rx_pause && !tx_pause)
1079cc3d2b5fSGoh, Wei Sheng 		priv->flow_ctrl = FLOW_RX;
1080cc3d2b5fSGoh, Wei Sheng 	else if (!rx_pause && tx_pause)
1081cc3d2b5fSGoh, Wei Sheng 		priv->flow_ctrl = FLOW_TX;
1082cc3d2b5fSGoh, Wei Sheng 	else
1083cc3d2b5fSGoh, Wei Sheng 		priv->flow_ctrl = FLOW_OFF;
1084cc3d2b5fSGoh, Wei Sheng 
108546f69dedSJose Abreu 	stmmac_mac_flow_ctrl(priv, duplex);
108646f69dedSJose Abreu 
1087a3a57bf0SHeiner Kallweit 	if (ctrl != old_ctrl)
108846f69dedSJose Abreu 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
10899ad372fcSJose Abreu 
10909ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
10915b111770SJose Abreu 	if (phy && priv->dma_cap.eee) {
109254aa39a5SAndrey Konovalov 		priv->eee_active =
1093743dd1dbSBartosz Golaszewski 			phy_init_eee(phy, !(priv->plat->flags &
1094743dd1dbSBartosz Golaszewski 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
109574371272SJose Abreu 		priv->eee_enabled = stmmac_eee_init(priv);
1096388e201dSVineetha G. Jaya Kumaran 		priv->tx_lpi_enabled = priv->eee_enabled;
109774371272SJose Abreu 		stmmac_set_eee_pls(priv, priv->hw, true);
109874371272SJose Abreu 	}
10995a558611SOng Boon Leong 
110063c173ffSMohammad Athari Bin Ismail 	if (priv->dma_cap.fpesel)
11015a558611SOng Boon Leong 		stmmac_fpe_link_state_handle(priv, true);
110226cfb838SJohannes Zink 
110326cfb838SJohannes Zink 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
110426cfb838SJohannes Zink 		stmmac_hwtstamp_correct_latency(priv, priv);
11059ad372fcSJose Abreu }
11069ad372fcSJose Abreu 
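/* Worked example (illustrative): on a 1 Gb/s full-duplex RGMII link
 * with both pause directions negotiated, the function above reduces to
 *
 *	ctrl |= priv->hw->link.speed1000;
 *	ctrl |= priv->hw->link.duplex;
 *	priv->flow_ctrl = FLOW_AUTO;
 *
 * and MAC_CTRL_REG is only rewritten when ctrl differs from the value
 * read at entry, which avoids a redundant register write when the link
 * renegotiates to the same speed and duplex.
 */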
110774371272SJose Abreu static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
110872e94511SRussell King (Oracle) 	.mac_select_pcs = stmmac_mac_select_pcs,
110974371272SJose Abreu 	.mac_config = stmmac_mac_config,
111074371272SJose Abreu 	.mac_link_down = stmmac_mac_link_down,
111174371272SJose Abreu 	.mac_link_up = stmmac_mac_link_up,
1112eeef2f6bSJose Abreu };
1113eeef2f6bSJose Abreu 
111429feff39SJoao Pinto /**
1115732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
111632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
111732ceabcaSGiuseppe CAVALLARO  * Description: this verifies whether the HW supports the Physical Coding
111832ceabcaSGiuseppe CAVALLARO  * Sublayer (PCS), an interface that can be used when the MAC is
111932ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
112032ceabcaSGiuseppe CAVALLARO  */
1121e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122e58bb43fSGiuseppe CAVALLARO {
1123a014c355SRussell King (Oracle) 	int interface = priv->plat->mac_interface;
1124e58bb43fSGiuseppe CAVALLARO 
1125e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
11260d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
11270d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
11280d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
11290d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
113038ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
11313fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
11320d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
113338ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
11343fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135e58bb43fSGiuseppe CAVALLARO 		}
1136e58bb43fSGiuseppe CAVALLARO 	}
1137e58bb43fSGiuseppe CAVALLARO }
1138e58bb43fSGiuseppe CAVALLARO 
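/* Mapping recap (illustrative): with dma_cap.pcs set, the function
 * above resolves the MAC interface as
 *
 *	PHY_INTERFACE_MODE_RGMII / _ID / _RXID / _TXID -> STMMAC_PCS_RGMII
 *	PHY_INTERFACE_MODE_SGMII                       -> STMMAC_PCS_SGMII
 *
 * Any other interface mode, or a core without the PCS capability,
 * leaves priv->hw->pcs unchanged.
 */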
11397ac6653aSJeff Kirsher /**
11407ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
11417ac6653aSJeff Kirsher  * @dev: net device structure
11427ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
11437ac6653aSJeff Kirsher  * to the mac driver.
11447ac6653aSJeff Kirsher  *  Return value:
11457ac6653aSJeff Kirsher  *  0 on success
11467ac6653aSJeff Kirsher  */
11477ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
11487ac6653aSJeff Kirsher {
11497ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
11508fbc10b9SMichael Sit Wei Hong 	struct fwnode_handle *phy_fwnode;
1151ab21cf92SOng Boon Leong 	struct fwnode_handle *fwnode;
115274371272SJose Abreu 	int ret;
11537ac6653aSJeff Kirsher 
11548fbc10b9SMichael Sit Wei Hong 	if (!phylink_expects_phy(priv->phylink))
11558fbc10b9SMichael Sit Wei Hong 		return 0;
11568fbc10b9SMichael Sit Wei Hong 
1157e80af2acSRussell King (Oracle) 	fwnode = priv->plat->port_node;
1158ab21cf92SOng Boon Leong 	if (!fwnode)
1159ab21cf92SOng Boon Leong 		fwnode = dev_fwnode(priv->device);
116074371272SJose Abreu 
1161ab21cf92SOng Boon Leong 	if (fwnode)
11628fbc10b9SMichael Sit Wei Hong 		phy_fwnode = fwnode_get_phy_node(fwnode);
11638fbc10b9SMichael Sit Wei Hong 	else
11648fbc10b9SMichael Sit Wei Hong 		phy_fwnode = NULL;
116542e87024SJose Abreu 
116642e87024SJose Abreu 	/* Some DT bindings do not set up the PHY handle. Let's try to
116742e87024SJose Abreu 	 * manually parse it
116842e87024SJose Abreu 	 */
11698fbc10b9SMichael Sit Wei Hong 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
117074371272SJose Abreu 		int addr = priv->plat->phy_addr;
117174371272SJose Abreu 		struct phy_device *phydev;
1172f142af2eSSrinivas Kandagatla 
11731f3bd64aSHeiner Kallweit 		if (addr < 0) {
11741f3bd64aSHeiner Kallweit 			netdev_err(priv->dev, "no phy found\n");
11751f3bd64aSHeiner Kallweit 			return -ENODEV;
11761f3bd64aSHeiner Kallweit 		}
11771f3bd64aSHeiner Kallweit 
117874371272SJose Abreu 		phydev = mdiobus_get_phy(priv->mii, addr);
117974371272SJose Abreu 		if (!phydev) {
118074371272SJose Abreu 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
11817ac6653aSJeff Kirsher 			return -ENODEV;
11827ac6653aSJeff Kirsher 		}
11838e99fc5fSGiuseppe Cavallaro 
118474371272SJose Abreu 		ret = phylink_connect_phy(priv->phylink, phydev);
11858fbc10b9SMichael Sit Wei Hong 	} else {
11868fbc10b9SMichael Sit Wei Hong 		fwnode_handle_put(phy_fwnode);
11878fbc10b9SMichael Sit Wei Hong 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
118874371272SJose Abreu 	}
1189c51e424dSFlorian Fainelli 
1190576f9eacSJoakim Zhang 	if (!priv->plat->pmt) {
1191576f9eacSJoakim Zhang 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192576f9eacSJoakim Zhang 
11931d8e5b0fSJisheng Zhang 		phylink_ethtool_get_wol(priv->phylink, &wol);
11941d8e5b0fSJisheng Zhang 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195a9334b70SRongguang Wei 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196576f9eacSJoakim Zhang 	}
11971d8e5b0fSJisheng Zhang 
119874371272SJose Abreu 	return ret;
119974371272SJose Abreu }
120074371272SJose Abreu 
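/* Lookup order above, summarized (illustrative sketch): a PHY node
 * described by firmware wins, and only without one does the driver
 * fall back to the platform-provided MDIO address:
 *
 *	if (phy_fwnode)
 *		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
 *	else if (priv->plat->phy_addr >= 0)
 *		ret = phylink_connect_phy(priv->phylink,
 *					  mdiobus_get_phy(priv->mii,
 *							  priv->plat->phy_addr));
 *	else
 *		ret = -ENODEV;
 *
 * The real code additionally checks that a PHY actually exists at the
 * given address before connecting.
 */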
120174371272SJose Abreu static int stmmac_phy_setup(struct stmmac_priv *priv)
120274371272SJose Abreu {
12032b070cddSRussell King (Oracle) 	struct stmmac_mdio_bus_data *mdio_bus_data;
12040060c878SAlexandru Ardelean 	int mode = priv->plat->phy_interface;
12051a37c1c1SRussell King (Oracle) 	struct fwnode_handle *fwnode;
120674371272SJose Abreu 	struct phylink *phylink;
1207a4ac612bSRussell King (Oracle) 	int max_speed;
120874371272SJose Abreu 
120974371272SJose Abreu 	priv->phylink_config.dev = &priv->dev->dev;
121074371272SJose Abreu 	priv->phylink_config.type = PHYLINK_NETDEV;
121164961f1bSRussell King (Oracle) 	priv->phylink_config.mac_managed_pm = true;
12122b070cddSRussell King (Oracle) 
12132b070cddSRussell King (Oracle) 	mdio_bus_data = priv->plat->mdio_bus_data;
12142b070cddSRussell King (Oracle) 	if (mdio_bus_data)
1215e5e5b771SOng Boon Leong 		priv->phylink_config.ovr_an_inband =
121612628565SDavid S. Miller 			mdio_bus_data->xpcs_an_inband;
121774371272SJose Abreu 
1218a014c355SRussell King (Oracle) 	/* Set the platform/firmware specified interface mode. Note, phylink
1219a014c355SRussell King (Oracle) 	 * deals with the PHY interface mode, not the MAC interface mode.
1220a014c355SRussell King (Oracle) 	 */
1221d194923dSRussell King (Oracle) 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1222d194923dSRussell King (Oracle) 
1223d194923dSRussell King (Oracle) 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1224d194923dSRussell King (Oracle) 	if (priv->hw->xpcs)
1225d194923dSRussell King (Oracle) 		xpcs_get_interfaces(priv->hw->xpcs,
1226d194923dSRussell King (Oracle) 				    priv->phylink_config.supported_interfaces);
1227d194923dSRussell King (Oracle) 
1228d42ca04eSRussell King (Oracle) 	/* Get the MAC specific capabilities */
1229d42ca04eSRussell King (Oracle) 	stmmac_mac_phylink_get_caps(priv);
1230d42ca04eSRussell King (Oracle) 
123193d565ebSSerge Semin 	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
123293d565ebSSerge Semin 
1233a4ac612bSRussell King (Oracle) 	max_speed = priv->plat->max_speed;
1234a4ac612bSRussell King (Oracle) 	if (max_speed)
1235a4ac612bSRussell King (Oracle) 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1236a4ac612bSRussell King (Oracle) 
12371a37c1c1SRussell King (Oracle) 	fwnode = priv->plat->port_node;
12381a37c1c1SRussell King (Oracle) 	if (!fwnode)
12391a37c1c1SRussell King (Oracle) 		fwnode = dev_fwnode(priv->device);
12401a37c1c1SRussell King (Oracle) 
1241c63d1e5cSArnd Bergmann 	phylink = phylink_create(&priv->phylink_config, fwnode,
124274371272SJose Abreu 				 mode, &stmmac_phylink_mac_ops);
124374371272SJose Abreu 	if (IS_ERR(phylink))
124474371272SJose Abreu 		return PTR_ERR(phylink);
124574371272SJose Abreu 
124674371272SJose Abreu 	priv->phylink = phylink;
12477ac6653aSJeff Kirsher 	return 0;
12487ac6653aSJeff Kirsher }
12497ac6653aSJeff Kirsher 
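/* Setup order recap (illustrative): the platform interface mode is
 * always marked supported, an XPCS (when present) supplies the full
 * set of supported interfaces, and a platform speed cap is applied
 * last, before phylink_create() binds the config to
 * stmmac_phylink_mac_ops, e.g.
 *
 *	priv->plat->max_speed = 100;
 *	phylink_limit_mac_speed(&priv->phylink_config, 100);
 *
 * clamps a gigabit-capable MAC down to 10/100 operation.
 */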
1250ba39b344SChristian Marangi static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1251ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf)
1252c24602efSGiuseppe CAVALLARO {
125354139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1254bfaf91caSJoakim Zhang 	unsigned int desc_size;
125571fedb01SJoao Pinto 	void *head_rx;
125654139cf3SJoao Pinto 	u32 queue;
125754139cf3SJoao Pinto 
125854139cf3SJoao Pinto 	/* Display RX rings */
125954139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
1260ba39b344SChristian Marangi 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
126154139cf3SJoao Pinto 
126254139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1263d0225e7dSAlexandre TORGUE 
1264bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
126554139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
1266bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1267bfaf91caSJoakim Zhang 		} else {
126854139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
1269bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1270bfaf91caSJoakim Zhang 		}
127171fedb01SJoao Pinto 
127271fedb01SJoao Pinto 		/* Display RX ring */
1273ba39b344SChristian Marangi 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1274bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
12755bacd778SLABBE Corentin 	}
127654139cf3SJoao Pinto }
1277d0225e7dSAlexandre TORGUE 
1278ba39b344SChristian Marangi static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1279ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf)
128071fedb01SJoao Pinto {
1281ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1282bfaf91caSJoakim Zhang 	unsigned int desc_size;
128371fedb01SJoao Pinto 	void *head_tx;
1284ce736788SJoao Pinto 	u32 queue;
1285ce736788SJoao Pinto 
1286ce736788SJoao Pinto 	/* Display TX rings */
1287ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1288ba39b344SChristian Marangi 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1289ce736788SJoao Pinto 
1290ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
129171fedb01SJoao Pinto 
1292bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
1293ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
1294bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1295bfaf91caSJoakim Zhang 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1296579a25a8SJose Abreu 			head_tx = (void *)tx_q->dma_entx;
1297bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_edesc);
1298bfaf91caSJoakim Zhang 		} else {
1299ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
1300bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1301bfaf91caSJoakim Zhang 		}
130271fedb01SJoao Pinto 
1303ba39b344SChristian Marangi 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1304bfaf91caSJoakim Zhang 				    tx_q->dma_tx_phy, desc_size);
1305c24602efSGiuseppe CAVALLARO 	}
1306ce736788SJoao Pinto }
1307c24602efSGiuseppe CAVALLARO 
1308ba39b344SChristian Marangi static void stmmac_display_rings(struct stmmac_priv *priv,
1309ba39b344SChristian Marangi 				 struct stmmac_dma_conf *dma_conf)
131071fedb01SJoao Pinto {
131171fedb01SJoao Pinto 	/* Display RX ring */
1312ba39b344SChristian Marangi 	stmmac_display_rx_rings(priv, dma_conf);
131371fedb01SJoao Pinto 
131471fedb01SJoao Pinto 	/* Display TX ring */
1315ba39b344SChristian Marangi 	stmmac_display_tx_rings(priv, dma_conf);
131671fedb01SJoao Pinto }
131771fedb01SJoao Pinto 
1318286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1319286a8372SGiuseppe CAVALLARO {
1320286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1321286a8372SGiuseppe CAVALLARO 
1322b2f3a481SJose Abreu 	if (mtu >= BUF_SIZE_8KiB)
1323b2f3a481SJose Abreu 		ret = BUF_SIZE_16KiB;
1324b2f3a481SJose Abreu 	else if (mtu >= BUF_SIZE_4KiB)
1325286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1326286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1327286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1328d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1329286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1330286a8372SGiuseppe CAVALLARO 	else
1331d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1332286a8372SGiuseppe CAVALLARO 
1333286a8372SGiuseppe CAVALLARO 	return ret;
1334286a8372SGiuseppe CAVALLARO }
1335286a8372SGiuseppe CAVALLARO 
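/* Worked example (illustrative, assuming the BUF_SIZE_* constants and
 * the 1536-byte DEFAULT_BUFSIZE from the stmmac headers):
 *
 *	stmmac_set_bfsize(1500, 0)	returns DEFAULT_BUFSIZE
 *	stmmac_set_bfsize(3000, 0)	returns BUF_SIZE_4KiB
 *	stmmac_set_bfsize(9000, 0)	returns BUF_SIZE_16KiB
 *
 * Note that every branch of the if/else chain assigns ret, so the
 * bufsize argument only provides the initial value and never reaches
 * the caller.
 */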
133632ceabcaSGiuseppe CAVALLARO /**
133771fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
133832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
1339ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
134054139cf3SJoao Pinto  * @queue: RX queue index
134171fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
134232ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are in use.
134332ceabcaSGiuseppe CAVALLARO  */
1344ba39b344SChristian Marangi static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1345ba39b344SChristian Marangi 					struct stmmac_dma_conf *dma_conf,
1346ba39b344SChristian Marangi 					u32 queue)
1347c24602efSGiuseppe CAVALLARO {
1348ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
13495bacd778SLABBE Corentin 	int i;
1350c24602efSGiuseppe CAVALLARO 
135171fedb01SJoao Pinto 	/* Clear the RX descriptors */
1352ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++)
13535bacd778SLABBE Corentin 		if (priv->extend_desc)
135442de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
13555bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1356ba39b344SChristian Marangi 					(i == dma_conf->dma_rx_size - 1),
1357ba39b344SChristian Marangi 					dma_conf->dma_buf_sz);
13585bacd778SLABBE Corentin 		else
135942de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
13605bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1361ba39b344SChristian Marangi 					(i == dma_conf->dma_rx_size - 1),
1362ba39b344SChristian Marangi 					dma_conf->dma_buf_sz);
136371fedb01SJoao Pinto }
136471fedb01SJoao Pinto 
136571fedb01SJoao Pinto /**
136671fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
136771fedb01SJoao Pinto  * @priv: driver private structure
1368ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1369ce736788SJoao Pinto  * @queue: TX queue index.
137071fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
137171fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
137271fedb01SJoao Pinto  */
1373ba39b344SChristian Marangi static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1374ba39b344SChristian Marangi 					struct stmmac_dma_conf *dma_conf,
1375ba39b344SChristian Marangi 					u32 queue)
137671fedb01SJoao Pinto {
1377ba39b344SChristian Marangi 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
137871fedb01SJoao Pinto 	int i;
137971fedb01SJoao Pinto 
138071fedb01SJoao Pinto 	/* Clear the TX descriptors */
1381ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1382ba39b344SChristian Marangi 		int last = (i == (dma_conf->dma_tx_size - 1));
1383579a25a8SJose Abreu 		struct dma_desc *p;
1384579a25a8SJose Abreu 
13855bacd778SLABBE Corentin 		if (priv->extend_desc)
1386579a25a8SJose Abreu 			p = &tx_q->dma_etx[i].basic;
1387579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1388579a25a8SJose Abreu 			p = &tx_q->dma_entx[i].basic;
13895bacd778SLABBE Corentin 		else
1390579a25a8SJose Abreu 			p = &tx_q->dma_tx[i];
1391579a25a8SJose Abreu 
1392579a25a8SJose Abreu 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1393579a25a8SJose Abreu 	}
1394c24602efSGiuseppe CAVALLARO }
1395c24602efSGiuseppe CAVALLARO 
1396732fdf0eSGiuseppe CAVALLARO /**
139771fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
139871fedb01SJoao Pinto  * @priv: driver private structure
1399ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
140071fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
140171fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
140271fedb01SJoao Pinto  */
1403ba39b344SChristian Marangi static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1404ba39b344SChristian Marangi 				     struct stmmac_dma_conf *dma_conf)
140571fedb01SJoao Pinto {
140654139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1407ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
140854139cf3SJoao Pinto 	u32 queue;
140954139cf3SJoao Pinto 
141071fedb01SJoao Pinto 	/* Clear the RX descriptors */
141154139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
1412ba39b344SChristian Marangi 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
141371fedb01SJoao Pinto 
141471fedb01SJoao Pinto 	/* Clear the TX descriptors */
1415ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1416ba39b344SChristian Marangi 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
141771fedb01SJoao Pinto }
141871fedb01SJoao Pinto 
141971fedb01SJoao Pinto /**
1420732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1421732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1422ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1423732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1424732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
142554139cf3SJoao Pinto  * @flags: gfp flag
142654139cf3SJoao Pinto  * @queue: RX queue index
1427732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1428732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1429732fdf0eSGiuseppe CAVALLARO  */
1430ba39b344SChristian Marangi static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1431ba39b344SChristian Marangi 				  struct stmmac_dma_conf *dma_conf,
1432ba39b344SChristian Marangi 				  struct dma_desc *p,
143354139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1434c24602efSGiuseppe CAVALLARO {
1435ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
14362af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1437884d2b84SDavid Wu 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1438884d2b84SDavid Wu 
1439070246e4SJochen Henneberg 	if (priv->dma_cap.host_dma_width <= 32)
1440884d2b84SDavid Wu 		gfp |= GFP_DMA32;
1441c24602efSGiuseppe CAVALLARO 
1442da5ec7f2SOng Boon Leong 	if (!buf->page) {
1443884d2b84SDavid Wu 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
14442af6106aSJose Abreu 		if (!buf->page)
144556329137SBartlomiej Zolnierkiewicz 			return -ENOMEM;
14465fabb012SOng Boon Leong 		buf->page_offset = stmmac_rx_offset(priv);
1447da5ec7f2SOng Boon Leong 	}
1448c24602efSGiuseppe CAVALLARO 
1449da5ec7f2SOng Boon Leong 	if (priv->sph && !buf->sec_page) {
1450884d2b84SDavid Wu 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
145167afd6d1SJose Abreu 		if (!buf->sec_page)
145267afd6d1SJose Abreu 			return -ENOMEM;
145367afd6d1SJose Abreu 
145467afd6d1SJose Abreu 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1455396e13e1SJoakim Zhang 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
145667afd6d1SJose Abreu 	} else {
145767afd6d1SJose Abreu 		buf->sec_page = NULL;
1458396e13e1SJoakim Zhang 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
145967afd6d1SJose Abreu 	}
146067afd6d1SJose Abreu 
14615fabb012SOng Boon Leong 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
14625fabb012SOng Boon Leong 
14632af6106aSJose Abreu 	stmmac_set_desc_addr(priv, p, buf->addr);
1464ba39b344SChristian Marangi 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
14652c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1466c24602efSGiuseppe CAVALLARO 
1467c24602efSGiuseppe CAVALLARO 	return 0;
1468c24602efSGiuseppe CAVALLARO }
1469c24602efSGiuseppe CAVALLARO 
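/* Allocation notes (illustrative): buffers come from the queue's
 * page_pool, and GFP_DMA32 is forced whenever the host DMA width is
 * 32 bits or less so the pages stay DMA-addressable.  The
 * buf->page_offset set above reserves headroom in front of the packet
 * (assumption: stmmac_rx_offset() returns XDP headroom when an XDP
 * program is attached and zero otherwise), so
 *
 *	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
 *
 * is the DMA address the descriptor actually points at.
 */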
147071fedb01SJoao Pinto /**
147171fedb01SJoao Pinto  * stmmac_free_rx_buffer - free an RX dma buffer
147271fedb01SJoao Pinto  * @priv: private structure
1473ba39b344SChristian Marangi  * @rx_q: RX queue
147471fedb01SJoao Pinto  * @i: buffer index.
147571fedb01SJoao Pinto  */
1476ba39b344SChristian Marangi static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1477ba39b344SChristian Marangi 				  struct stmmac_rx_queue *rx_q,
1478ba39b344SChristian Marangi 				  int i)
147956329137SBartlomiej Zolnierkiewicz {
14802af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
148154139cf3SJoao Pinto 
14822af6106aSJose Abreu 	if (buf->page)
1483458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
14842af6106aSJose Abreu 	buf->page = NULL;
148567afd6d1SJose Abreu 
148667afd6d1SJose Abreu 	if (buf->sec_page)
1487458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
148867afd6d1SJose Abreu 	buf->sec_page = NULL;
148956329137SBartlomiej Zolnierkiewicz }
149056329137SBartlomiej Zolnierkiewicz 
14917ac6653aSJeff Kirsher /**
149271fedb01SJoao Pinto  * stmmac_free_tx_buffer - free TX dma buffers
149371fedb01SJoao Pinto  * @priv: private structure
1494ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1495ce736788SJoao Pinto  * @queue: TX queue index
149671fedb01SJoao Pinto  * @i: buffer index.
149771fedb01SJoao Pinto  */
1498ba39b344SChristian Marangi static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1499ba39b344SChristian Marangi 				  struct stmmac_dma_conf *dma_conf,
1500ba39b344SChristian Marangi 				  u32 queue, int i)
150171fedb01SJoao Pinto {
1502ba39b344SChristian Marangi 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1503ce736788SJoao Pinto 
1504be8b38a7SOng Boon Leong 	if (tx_q->tx_skbuff_dma[i].buf &&
1505be8b38a7SOng Boon Leong 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1506ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
150771fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1508ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1509ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
151071fedb01SJoao Pinto 				       DMA_TO_DEVICE);
151171fedb01SJoao Pinto 		else
151271fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1513ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1514ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
151571fedb01SJoao Pinto 					 DMA_TO_DEVICE);
151671fedb01SJoao Pinto 	}
151771fedb01SJoao Pinto 
1518be8b38a7SOng Boon Leong 	if (tx_q->xdpf[i] &&
15198b278a5bSOng Boon Leong 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
15208b278a5bSOng Boon Leong 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1521be8b38a7SOng Boon Leong 		xdp_return_frame(tx_q->xdpf[i]);
1522be8b38a7SOng Boon Leong 		tx_q->xdpf[i] = NULL;
1523be8b38a7SOng Boon Leong 	}
1524be8b38a7SOng Boon Leong 
1525132c32eeSOng Boon Leong 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1526132c32eeSOng Boon Leong 		tx_q->xsk_frames_done++;
1527132c32eeSOng Boon Leong 
1528be8b38a7SOng Boon Leong 	if (tx_q->tx_skbuff[i] &&
1529be8b38a7SOng Boon Leong 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1530ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1531ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1532be8b38a7SOng Boon Leong 	}
1533be8b38a7SOng Boon Leong 
1534ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[i].buf = 0;
1535ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[i].map_as_page = false;
153671fedb01SJoao Pinto }
153771fedb01SJoao Pinto 
153871fedb01SJoao Pinto /**
15394298255fSOng Boon Leong  * dma_free_rx_skbufs - free RX dma buffers
15404298255fSOng Boon Leong  * @priv: private structure
1541ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
15424298255fSOng Boon Leong  * @queue: RX queue index
15434298255fSOng Boon Leong  */
1544ba39b344SChristian Marangi static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1545ba39b344SChristian Marangi 			       struct stmmac_dma_conf *dma_conf,
1546ba39b344SChristian Marangi 			       u32 queue)
15474298255fSOng Boon Leong {
1548ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
15494298255fSOng Boon Leong 	int i;
15504298255fSOng Boon Leong 
1551ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1552ba39b344SChristian Marangi 		stmmac_free_rx_buffer(priv, rx_q, i);
15534298255fSOng Boon Leong }
15544298255fSOng Boon Leong 
1555ba39b344SChristian Marangi static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1556ba39b344SChristian Marangi 				   struct stmmac_dma_conf *dma_conf,
1557ba39b344SChristian Marangi 				   u32 queue, gfp_t flags)
15584298255fSOng Boon Leong {
1559ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
15604298255fSOng Boon Leong 	int i;
15614298255fSOng Boon Leong 
1562ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
15634298255fSOng Boon Leong 		struct dma_desc *p;
15644298255fSOng Boon Leong 		int ret;
15654298255fSOng Boon Leong 
15664298255fSOng Boon Leong 		if (priv->extend_desc)
15674298255fSOng Boon Leong 			p = &((rx_q->dma_erx + i)->basic);
15684298255fSOng Boon Leong 		else
15694298255fSOng Boon Leong 			p = rx_q->dma_rx + i;
15704298255fSOng Boon Leong 
1571ba39b344SChristian Marangi 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
15724298255fSOng Boon Leong 					     queue);
15734298255fSOng Boon Leong 		if (ret)
15744298255fSOng Boon Leong 			return ret;
1575bba2556eSOng Boon Leong 
1576bba2556eSOng Boon Leong 		rx_q->buf_alloc_num++;
15774298255fSOng Boon Leong 	}
15784298255fSOng Boon Leong 
15794298255fSOng Boon Leong 	return 0;
15804298255fSOng Boon Leong }
15814298255fSOng Boon Leong 
15824298255fSOng Boon Leong /**
1583bba2556eSOng Boon Leong  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1584bba2556eSOng Boon Leong  * @priv: private structure
1585ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1586bba2556eSOng Boon Leong  * @queue: RX queue index
1587bba2556eSOng Boon Leong  */
1588ba39b344SChristian Marangi static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1589ba39b344SChristian Marangi 				struct stmmac_dma_conf *dma_conf,
1590ba39b344SChristian Marangi 				u32 queue)
1591bba2556eSOng Boon Leong {
1592ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1593bba2556eSOng Boon Leong 	int i;
1594bba2556eSOng Boon Leong 
1595ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1596bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1597bba2556eSOng Boon Leong 
1598bba2556eSOng Boon Leong 		if (!buf->xdp)
1599bba2556eSOng Boon Leong 			continue;
1600bba2556eSOng Boon Leong 
1601bba2556eSOng Boon Leong 		xsk_buff_free(buf->xdp);
1602bba2556eSOng Boon Leong 		buf->xdp = NULL;
1603bba2556eSOng Boon Leong 	}
1604bba2556eSOng Boon Leong }
1605bba2556eSOng Boon Leong 
1606ba39b344SChristian Marangi static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1607ba39b344SChristian Marangi 				      struct stmmac_dma_conf *dma_conf,
1608ba39b344SChristian Marangi 				      u32 queue)
1609bba2556eSOng Boon Leong {
1610ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611bba2556eSOng Boon Leong 	int i;
1612bba2556eSOng Boon Leong 
16139570df35SSong Yoong Siang 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
16149570df35SSong Yoong Siang 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
16159570df35SSong Yoong Siang 	 * use this macro to make sure there are no size violations.
16169570df35SSong Yoong Siang 	 */
16179570df35SSong Yoong Siang 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
16189570df35SSong Yoong Siang 
1619ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1620bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf;
1621bba2556eSOng Boon Leong 		dma_addr_t dma_addr;
1622bba2556eSOng Boon Leong 		struct dma_desc *p;
1623bba2556eSOng Boon Leong 
1624bba2556eSOng Boon Leong 		if (priv->extend_desc)
1625bba2556eSOng Boon Leong 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1626bba2556eSOng Boon Leong 		else
1627bba2556eSOng Boon Leong 			p = rx_q->dma_rx + i;
1628bba2556eSOng Boon Leong 
1629bba2556eSOng Boon Leong 		buf = &rx_q->buf_pool[i];
1630bba2556eSOng Boon Leong 
1631bba2556eSOng Boon Leong 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1632bba2556eSOng Boon Leong 		if (!buf->xdp)
1633bba2556eSOng Boon Leong 			return -ENOMEM;
1634bba2556eSOng Boon Leong 
1635bba2556eSOng Boon Leong 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1636bba2556eSOng Boon Leong 		stmmac_set_desc_addr(priv, p, dma_addr);
1637bba2556eSOng Boon Leong 		rx_q->buf_alloc_num++;
1638bba2556eSOng Boon Leong 	}
1639bba2556eSOng Boon Leong 
1640bba2556eSOng Boon Leong 	return 0;
1641bba2556eSOng Boon Leong }
1642bba2556eSOng Boon Leong 
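/* About XSK_CHECK_PRIV_TYPE() above (illustrative): struct
 * xdp_buff_xsk reserves a small cb[] scratch area (24 bytes), and the
 * driver stashes its per-buffer context there.  A sketch of the
 * stashed type (assumption: field list per this kernel's stmmac.h):
 *
 *	struct stmmac_xdp_buff {
 *		struct xdp_buff xdp;
 *		struct stmmac_priv *priv;
 *		struct dma_desc *desc;
 *		struct dma_desc *ndesc;
 *	};
 *
 * Growing it past the cb[] size would trip the macro at build time.
 */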
1643bba2556eSOng Boon Leong static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1644bba2556eSOng Boon Leong {
1645bba2556eSOng Boon Leong 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1646bba2556eSOng Boon Leong 		return NULL;
1647bba2556eSOng Boon Leong 
1648bba2556eSOng Boon Leong 	return xsk_get_pool_from_qid(priv->dev, queue);
1649bba2556eSOng Boon Leong }
1650bba2556eSOng Boon Leong 
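/* Usage sketch (illustrative): both the RX and TX ring-init paths call
 * this helper per queue, and a NULL return simply selects the regular
 * page_pool / skb path.  A pool is only returned when XDP is enabled
 * and the queue's bit is set in priv->af_xdp_zc_qps:
 *
 *	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
 */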
16519c63faaaSJoakim Zhang /**
1652de0b90e5SOng Boon Leong  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1653de0b90e5SOng Boon Leong  * @priv: driver private structure
1654ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1655de0b90e5SOng Boon Leong  * @queue: RX queue index
16565bacd778SLABBE Corentin  * @flags: gfp flag.
165771fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
16585bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1659286a8372SGiuseppe CAVALLARO  * modes.
16607ac6653aSJeff Kirsher  */
1661ba39b344SChristian Marangi static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1662ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf,
1663ba39b344SChristian Marangi 				    u32 queue, gfp_t flags)
16647ac6653aSJeff Kirsher {
1665ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1666de0b90e5SOng Boon Leong 	int ret;
166754139cf3SJoao Pinto 
166854139cf3SJoao Pinto 	netif_dbg(priv, probe, priv->dev,
166954139cf3SJoao Pinto 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
167054139cf3SJoao Pinto 		  (u32)rx_q->dma_rx_phy);
167154139cf3SJoao Pinto 
1672ba39b344SChristian Marangi 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1673cbcf0999SJose Abreu 
1674bba2556eSOng Boon Leong 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1675bba2556eSOng Boon Leong 
1676bba2556eSOng Boon Leong 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1677bba2556eSOng Boon Leong 
1678bba2556eSOng Boon Leong 	if (rx_q->xsk_pool) {
1679bba2556eSOng Boon Leong 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680bba2556eSOng Boon Leong 						   MEM_TYPE_XSK_BUFF_POOL,
1681bba2556eSOng Boon Leong 						   NULL));
1682bba2556eSOng Boon Leong 		netdev_info(priv->dev,
1683bba2556eSOng Boon Leong 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1684bba2556eSOng Boon Leong 			    rx_q->queue_index);
1685bba2556eSOng Boon Leong 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1686bba2556eSOng Boon Leong 	} else {
1687be8b38a7SOng Boon Leong 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1688be8b38a7SOng Boon Leong 						   MEM_TYPE_PAGE_POOL,
1689be8b38a7SOng Boon Leong 						   rx_q->page_pool));
1690be8b38a7SOng Boon Leong 		netdev_info(priv->dev,
1691be8b38a7SOng Boon Leong 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1692be8b38a7SOng Boon Leong 			    rx_q->queue_index);
1693bba2556eSOng Boon Leong 	}
1694be8b38a7SOng Boon Leong 
1695bba2556eSOng Boon Leong 	if (rx_q->xsk_pool) {
1696bba2556eSOng Boon Leong 		/* RX XDP ZC buffer pool may not be populated, e.g.
1697bba2556eSOng Boon Leong 		 * xdpsock TX-only.
1698bba2556eSOng Boon Leong 		 */
1699ba39b344SChristian Marangi 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1700bba2556eSOng Boon Leong 	} else {
1701ba39b344SChristian Marangi 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
17024298255fSOng Boon Leong 		if (ret < 0)
1703de0b90e5SOng Boon Leong 			return -ENOMEM;
1704bba2556eSOng Boon Leong 	}
170554139cf3SJoao Pinto 
1706c24602efSGiuseppe CAVALLARO 	/* Setup the chained descriptor addresses */
1707c24602efSGiuseppe CAVALLARO 	if (priv->mode == STMMAC_CHAIN_MODE) {
170871fedb01SJoao Pinto 		if (priv->extend_desc)
17092c520b1cSJose Abreu 			stmmac_mode_init(priv, rx_q->dma_erx,
1710aa042f60SSong, Yoong Siang 					 rx_q->dma_rx_phy,
1711ba39b344SChristian Marangi 					 dma_conf->dma_rx_size, 1);
171271fedb01SJoao Pinto 		else
17132c520b1cSJose Abreu 			stmmac_mode_init(priv, rx_q->dma_rx,
1714aa042f60SSong, Yoong Siang 					 rx_q->dma_rx_phy,
1715ba39b344SChristian Marangi 					 dma_conf->dma_rx_size, 0);
171671fedb01SJoao Pinto 	}
1717de0b90e5SOng Boon Leong 
1718de0b90e5SOng Boon Leong 	return 0;
1719de0b90e5SOng Boon Leong }
1720de0b90e5SOng Boon Leong 
1721ba39b344SChristian Marangi static int init_dma_rx_desc_rings(struct net_device *dev,
1722ba39b344SChristian Marangi 				  struct stmmac_dma_conf *dma_conf,
1723ba39b344SChristian Marangi 				  gfp_t flags)
1724de0b90e5SOng Boon Leong {
1725de0b90e5SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
1726de0b90e5SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
172758e06d05SDan Carpenter 	int queue;
1728de0b90e5SOng Boon Leong 	int ret;
1729de0b90e5SOng Boon Leong 
1730de0b90e5SOng Boon Leong 	/* RX INITIALIZATION */
1731de0b90e5SOng Boon Leong 	netif_dbg(priv, probe, priv->dev,
1732de0b90e5SOng Boon Leong 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1733de0b90e5SOng Boon Leong 
1734de0b90e5SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++) {
1735ba39b344SChristian Marangi 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1736de0b90e5SOng Boon Leong 		if (ret)
1737de0b90e5SOng Boon Leong 			goto err_init_rx_buffers;
173854139cf3SJoao Pinto 	}
173954139cf3SJoao Pinto 
174071fedb01SJoao Pinto 	return 0;
174154139cf3SJoao Pinto 
174271fedb01SJoao Pinto err_init_rx_buffers:
174354139cf3SJoao Pinto 	while (queue >= 0) {
1744ba39b344SChristian Marangi 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1745bba2556eSOng Boon Leong 
1746bba2556eSOng Boon Leong 		if (rx_q->xsk_pool)
1747ba39b344SChristian Marangi 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1748bba2556eSOng Boon Leong 		else
1749ba39b344SChristian Marangi 			dma_free_rx_skbufs(priv, dma_conf, queue);
175054139cf3SJoao Pinto 
1751bba2556eSOng Boon Leong 		rx_q->buf_alloc_num = 0;
1752bba2556eSOng Boon Leong 		rx_q->xsk_pool = NULL;
1753bba2556eSOng Boon Leong 
175454139cf3SJoao Pinto 		queue--;
175554139cf3SJoao Pinto 	}
175654139cf3SJoao Pinto 
175771fedb01SJoao Pinto 	return ret;
175871fedb01SJoao Pinto }
175971fedb01SJoao Pinto 
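/* Error-path note (illustrative): "queue" is deliberately a signed
 * int.  The unwind loop above frees the queue that failed part-way
 * through as well as every queue initialized before it, and the
 * decrement must be able to go below zero for the "while (queue >= 0)"
 * test to terminate; an unsigned counter would wrap to 0xffffffff and
 * loop forever.
 */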
176071fedb01SJoao Pinto /**
1761de0b90e5SOng Boon Leong  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1762de0b90e5SOng Boon Leong  * @priv: driver private structure
1763ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1764de0b90e5SOng Boon Leong  * @queue: TX queue index
176571fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
176671fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
176771fedb01SJoao Pinto  * modes.
176871fedb01SJoao Pinto  */
1769ba39b344SChristian Marangi static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1770ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf,
1771ba39b344SChristian Marangi 				    u32 queue)
177271fedb01SJoao Pinto {
1773ba39b344SChristian Marangi 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1774de0b90e5SOng Boon Leong 	int i;
1775ce736788SJoao Pinto 
177671fedb01SJoao Pinto 	netif_dbg(priv, probe, priv->dev,
1777ce736788SJoao Pinto 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1778ce736788SJoao Pinto 		  (u32)tx_q->dma_tx_phy);
177971fedb01SJoao Pinto 
178071fedb01SJoao Pinto 	/* Setup the chained descriptor addresses */
178171fedb01SJoao Pinto 	if (priv->mode == STMMAC_CHAIN_MODE) {
178271fedb01SJoao Pinto 		if (priv->extend_desc)
17832c520b1cSJose Abreu 			stmmac_mode_init(priv, tx_q->dma_etx,
1784aa042f60SSong, Yoong Siang 					 tx_q->dma_tx_phy,
1785ba39b344SChristian Marangi 					 dma_conf->dma_tx_size, 1);
1786579a25a8SJose Abreu 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
17872c520b1cSJose Abreu 			stmmac_mode_init(priv, tx_q->dma_tx,
1788aa042f60SSong, Yoong Siang 					 tx_q->dma_tx_phy,
1789ba39b344SChristian Marangi 					 dma_conf->dma_tx_size, 0);
1790c24602efSGiuseppe CAVALLARO 	}
1791286a8372SGiuseppe CAVALLARO 
1792132c32eeSOng Boon Leong 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1793132c32eeSOng Boon Leong 
1794ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1795c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1796de0b90e5SOng Boon Leong 
1797c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1798ce736788SJoao Pinto 			p = &((tx_q->dma_etx + i)->basic);
1799579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1800579a25a8SJose Abreu 			p = &((tx_q->dma_entx + i)->basic);
1801c24602efSGiuseppe CAVALLARO 		else
1802ce736788SJoao Pinto 			p = tx_q->dma_tx + i;
1803f748be53SAlexandre TORGUE 
180444c67f85SJose Abreu 		stmmac_clear_desc(priv, p);
1805f748be53SAlexandre TORGUE 
1806ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1807ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1808ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].len = 0;
1809ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].last_segment = false;
1810ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
18114a7d666aSGiuseppe CAVALLARO 	}
1812c24602efSGiuseppe CAVALLARO 
1813de0b90e5SOng Boon Leong 	return 0;
1814c22a3f48SJoao Pinto }
18157ac6653aSJeff Kirsher 
1816ba39b344SChristian Marangi static int init_dma_tx_desc_rings(struct net_device *dev,
1817ba39b344SChristian Marangi 				  struct stmmac_dma_conf *dma_conf)
1818de0b90e5SOng Boon Leong {
1819de0b90e5SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
1820de0b90e5SOng Boon Leong 	u32 tx_queue_cnt;
1821de0b90e5SOng Boon Leong 	u32 queue;
1822de0b90e5SOng Boon Leong 
1823de0b90e5SOng Boon Leong 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1824de0b90e5SOng Boon Leong 
1825de0b90e5SOng Boon Leong 	for (queue = 0; queue < tx_queue_cnt; queue++)
1826ba39b344SChristian Marangi 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1827de0b90e5SOng Boon Leong 
182871fedb01SJoao Pinto 	return 0;
182971fedb01SJoao Pinto }
183071fedb01SJoao Pinto 
183171fedb01SJoao Pinto /**
183271fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
183371fedb01SJoao Pinto  * @dev: net device structure
1834ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
183571fedb01SJoao Pinto  * @flags: gfp flag.
183671fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
183771fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
183871fedb01SJoao Pinto  * modes.
183971fedb01SJoao Pinto  */
1840ba39b344SChristian Marangi static int init_dma_desc_rings(struct net_device *dev,
1841ba39b344SChristian Marangi 			       struct stmmac_dma_conf *dma_conf,
1842ba39b344SChristian Marangi 			       gfp_t flags)
184371fedb01SJoao Pinto {
184471fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
184571fedb01SJoao Pinto 	int ret;
184671fedb01SJoao Pinto 
1847ba39b344SChristian Marangi 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
184871fedb01SJoao Pinto 	if (ret)
184971fedb01SJoao Pinto 		return ret;
185071fedb01SJoao Pinto 
1851ba39b344SChristian Marangi 	ret = init_dma_tx_desc_rings(dev, dma_conf);
185271fedb01SJoao Pinto 
1853ba39b344SChristian Marangi 	stmmac_clear_descriptors(priv, dma_conf);
18547ac6653aSJeff Kirsher 
1855c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1856ba39b344SChristian Marangi 		stmmac_display_rings(priv, dma_conf);
185756329137SBartlomiej Zolnierkiewicz 
185856329137SBartlomiej Zolnierkiewicz 	return ret;
18597ac6653aSJeff Kirsher }
18607ac6653aSJeff Kirsher 
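/* Call-order sketch (illustrative; the alloc helper name is an
 * assumption from elsewhere in this file): the open path allocates the
 * rings first and then runs this initializer, roughly
 *
 *	ret = alloc_dma_desc_resources(priv, dma_conf);
 *	if (!ret)
 *		ret = init_dma_desc_rings(dev, dma_conf, GFP_KERNEL);
 *
 * Only RX init can fail (buffer allocation); TX init always returns 0,
 * which is why just the RX return value gets an early-exit check
 * above.
 */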
186171fedb01SJoao Pinto /**
186271fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
186371fedb01SJoao Pinto  * @priv: private structure
1864ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1865ce736788SJoao Pinto  * @queue: TX queue index
186671fedb01SJoao Pinto  */
1867ba39b344SChristian Marangi static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1868ba39b344SChristian Marangi 			       struct stmmac_dma_conf *dma_conf,
1869ba39b344SChristian Marangi 			       u32 queue)
18707ac6653aSJeff Kirsher {
1871ba39b344SChristian Marangi 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
18727ac6653aSJeff Kirsher 	int i;
18737ac6653aSJeff Kirsher 
1874132c32eeSOng Boon Leong 	tx_q->xsk_frames_done = 0;
1875132c32eeSOng Boon Leong 
1876ba39b344SChristian Marangi 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1877ba39b344SChristian Marangi 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1878132c32eeSOng Boon Leong 
1879132c32eeSOng Boon Leong 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1880132c32eeSOng Boon Leong 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1881132c32eeSOng Boon Leong 		tx_q->xsk_frames_done = 0;
1882132c32eeSOng Boon Leong 		tx_q->xsk_pool = NULL;
1883132c32eeSOng Boon Leong 	}
18847ac6653aSJeff Kirsher }
18857ac6653aSJeff Kirsher 
1886732fdf0eSGiuseppe CAVALLARO /**
18874ec236c7SFugang Duan  * stmmac_free_tx_skbufs - free TX skb buffers
18884ec236c7SFugang Duan  * @priv: private structure
18894ec236c7SFugang Duan  */
18904ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
18914ec236c7SFugang Duan {
18924ec236c7SFugang Duan 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
18934ec236c7SFugang Duan 	u32 queue;
18944ec236c7SFugang Duan 
18954ec236c7SFugang Duan 	for (queue = 0; queue < tx_queue_cnt; queue++)
1896ba39b344SChristian Marangi 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
18974ec236c7SFugang Duan }
18984ec236c7SFugang Duan 
18994ec236c7SFugang Duan /**
1900da5ec7f2SOng Boon Leong  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
190154139cf3SJoao Pinto  * @priv: private structure
1902ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1903da5ec7f2SOng Boon Leong  * @queue: RX queue index
190454139cf3SJoao Pinto  */
1905ba39b344SChristian Marangi static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1906ba39b344SChristian Marangi 					 struct stmmac_dma_conf *dma_conf,
1907ba39b344SChristian Marangi 					 u32 queue)
190854139cf3SJoao Pinto {
1909ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
191054139cf3SJoao Pinto 
191154139cf3SJoao Pinto 	/* Release the DMA RX socket buffers */
1912bba2556eSOng Boon Leong 	if (rx_q->xsk_pool)
1913ba39b344SChristian Marangi 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1914bba2556eSOng Boon Leong 	else
1915ba39b344SChristian Marangi 		dma_free_rx_skbufs(priv, dma_conf, queue);
191654139cf3SJoao Pinto 
1917bba2556eSOng Boon Leong 	rx_q->buf_alloc_num = 0;
1918bba2556eSOng Boon Leong 	rx_q->xsk_pool = NULL;
1919bba2556eSOng Boon Leong 
192054139cf3SJoao Pinto 	/* Free DMA regions of consistent memory previously allocated */
192154139cf3SJoao Pinto 	if (!priv->extend_desc)
1922ba39b344SChristian Marangi 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1923aa042f60SSong, Yoong Siang 				  sizeof(struct dma_desc),
192454139cf3SJoao Pinto 				  rx_q->dma_rx, rx_q->dma_rx_phy);
192554139cf3SJoao Pinto 	else
1926ba39b344SChristian Marangi 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
192754139cf3SJoao Pinto 				  sizeof(struct dma_extended_desc),
192854139cf3SJoao Pinto 				  rx_q->dma_erx, rx_q->dma_rx_phy);
192954139cf3SJoao Pinto 
1930be8b38a7SOng Boon Leong 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1931be8b38a7SOng Boon Leong 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1932be8b38a7SOng Boon Leong 
19332af6106aSJose Abreu 	kfree(rx_q->buf_pool);
1934c3f812ceSJonathan Lemon 	if (rx_q->page_pool)
19352af6106aSJose Abreu 		page_pool_destroy(rx_q->page_pool);
19362af6106aSJose Abreu }
1937da5ec7f2SOng Boon Leong 
1938ba39b344SChristian Marangi static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1939ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
1940da5ec7f2SOng Boon Leong {
1941da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
1942da5ec7f2SOng Boon Leong 	u32 queue;
1943da5ec7f2SOng Boon Leong 
1944da5ec7f2SOng Boon Leong 	/* Free RX queue resources */
1945da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++)
1946ba39b344SChristian Marangi 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
194754139cf3SJoao Pinto }
194854139cf3SJoao Pinto 
194954139cf3SJoao Pinto /**
1950da5ec7f2SOng Boon Leong  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1951ce736788SJoao Pinto  * @priv: private structure
1952ba39b344SChristian Marangi  * @dma_conf: structure to take the dma data
1953da5ec7f2SOng Boon Leong  * @queue: TX queue index
1954ce736788SJoao Pinto  */
1955ba39b344SChristian Marangi static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1956ba39b344SChristian Marangi 					 struct stmmac_dma_conf *dma_conf,
1957ba39b344SChristian Marangi 					 u32 queue)
1958ce736788SJoao Pinto {
1959ba39b344SChristian Marangi 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1960579a25a8SJose Abreu 	size_t size;
1961579a25a8SJose Abreu 	void *addr;
1962ce736788SJoao Pinto 
1963ce736788SJoao Pinto 	/* Release the DMA TX socket buffers */
1964ba39b344SChristian Marangi 	dma_free_tx_skbufs(priv, dma_conf, queue);
1965ce736788SJoao Pinto 
1966579a25a8SJose Abreu 	if (priv->extend_desc) {
1967579a25a8SJose Abreu 		size = sizeof(struct dma_extended_desc);
1968579a25a8SJose Abreu 		addr = tx_q->dma_etx;
1969579a25a8SJose Abreu 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1970579a25a8SJose Abreu 		size = sizeof(struct dma_edesc);
1971579a25a8SJose Abreu 		addr = tx_q->dma_entx;
1972579a25a8SJose Abreu 	} else {
1973579a25a8SJose Abreu 		size = sizeof(struct dma_desc);
1974579a25a8SJose Abreu 		addr = tx_q->dma_tx;
1975579a25a8SJose Abreu 	}
1976579a25a8SJose Abreu 
1977ba39b344SChristian Marangi 	size *= dma_conf->dma_tx_size;
1978579a25a8SJose Abreu 
1979579a25a8SJose Abreu 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1980ce736788SJoao Pinto 
1981ce736788SJoao Pinto 	kfree(tx_q->tx_skbuff_dma);
1982ce736788SJoao Pinto 	kfree(tx_q->tx_skbuff);
1983ce736788SJoao Pinto }
1984da5ec7f2SOng Boon Leong 
1985ba39b344SChristian Marangi static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1986ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
1987da5ec7f2SOng Boon Leong {
1988da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
1989da5ec7f2SOng Boon Leong 	u32 queue;
1990da5ec7f2SOng Boon Leong 
1991da5ec7f2SOng Boon Leong 	/* Free TX queue resources */
1992da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++)
1993ba39b344SChristian Marangi 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1994ce736788SJoao Pinto }
1995ce736788SJoao Pinto 
1996ce736788SJoao Pinto /**
1997da5ec7f2SOng Boon Leong  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1998732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1999ba39b344SChristian Marangi  * @dma_conf: structure holding the DMA data
2000da5ec7f2SOng Boon Leong  * @queue: RX queue index
2001732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extended or basic),
2002732fdf0eSGiuseppe CAVALLARO  * this function allocates the RX resources for a single queue. It creates
2003732fdf0eSGiuseppe CAVALLARO  * the page pool used to pre-allocate the RX buffers in order to allow the
2004732fdf0eSGiuseppe CAVALLARO  * zero-copy mechanism.
2005732fdf0eSGiuseppe CAVALLARO  */
2006ba39b344SChristian Marangi static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2007ba39b344SChristian Marangi 					 struct stmmac_dma_conf *dma_conf,
2008ba39b344SChristian Marangi 					 u32 queue)
200909f8d696SSrinivas Kandagatla {
2010ba39b344SChristian Marangi 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2011be8b38a7SOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
2012da5ec7f2SOng Boon Leong 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
20132af6106aSJose Abreu 	struct page_pool_params pp_params = { 0 };
20144f28bd95SThierry Reding 	unsigned int num_pages;
2015132c32eeSOng Boon Leong 	unsigned int napi_id;
2016be8b38a7SOng Boon Leong 	int ret;
201754139cf3SJoao Pinto 
201854139cf3SJoao Pinto 	rx_q->queue_index = queue;
201954139cf3SJoao Pinto 	rx_q->priv_data = priv;
202054139cf3SJoao Pinto 
2021826e9b74SJakub Kicinski 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2022ba39b344SChristian Marangi 	pp_params.pool_size = dma_conf->dma_rx_size;
2023ba39b344SChristian Marangi 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
20244f28bd95SThierry Reding 	pp_params.order = ilog2(num_pages);
20252af6106aSJose Abreu 	pp_params.nid = dev_to_node(priv->device);
20262af6106aSJose Abreu 	pp_params.dev = priv->device;
20275fabb012SOng Boon Leong 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
20285fabb012SOng Boon Leong 	pp_params.offset = stmmac_rx_offset(priv);
20295fabb012SOng Boon Leong 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
20305bacd778SLABBE Corentin 
20312af6106aSJose Abreu 	rx_q->page_pool = page_pool_create(&pp_params);
20322af6106aSJose Abreu 	if (IS_ERR(rx_q->page_pool)) {
20332af6106aSJose Abreu 		ret = PTR_ERR(rx_q->page_pool);
20342af6106aSJose Abreu 		rx_q->page_pool = NULL;
2035da5ec7f2SOng Boon Leong 		return ret;
20362af6106aSJose Abreu 	}
20372af6106aSJose Abreu 
2038ba39b344SChristian Marangi 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2039aa042f60SSong, Yoong Siang 				 sizeof(*rx_q->buf_pool),
20405bacd778SLABBE Corentin 				 GFP_KERNEL);
20412af6106aSJose Abreu 	if (!rx_q->buf_pool)
2042da5ec7f2SOng Boon Leong 		return -ENOMEM;
20435bacd778SLABBE Corentin 
20445bacd778SLABBE Corentin 	if (priv->extend_desc) {
2045750afb08SLuis Chamberlain 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2046ba39b344SChristian Marangi 						   dma_conf->dma_rx_size *
2047aa042f60SSong, Yoong Siang 						   sizeof(struct dma_extended_desc),
204854139cf3SJoao Pinto 						   &rx_q->dma_rx_phy,
20495bacd778SLABBE Corentin 						   GFP_KERNEL);
205054139cf3SJoao Pinto 		if (!rx_q->dma_erx)
2051da5ec7f2SOng Boon Leong 			return -ENOMEM;
20525bacd778SLABBE Corentin 
205371fedb01SJoao Pinto 	} else {
2054750afb08SLuis Chamberlain 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2055ba39b344SChristian Marangi 						  dma_conf->dma_rx_size *
2056aa042f60SSong, Yoong Siang 						  sizeof(struct dma_desc),
205754139cf3SJoao Pinto 						  &rx_q->dma_rx_phy,
205871fedb01SJoao Pinto 						  GFP_KERNEL);
205954139cf3SJoao Pinto 		if (!rx_q->dma_rx)
2060da5ec7f2SOng Boon Leong 			return -ENOMEM;
206171fedb01SJoao Pinto 	}
2062be8b38a7SOng Boon Leong 
2063132c32eeSOng Boon Leong 	if (stmmac_xdp_is_enabled(priv) &&
2064132c32eeSOng Boon Leong 	    test_bit(queue, priv->af_xdp_zc_qps))
2065132c32eeSOng Boon Leong 		napi_id = ch->rxtx_napi.napi_id;
2066132c32eeSOng Boon Leong 	else
2067132c32eeSOng Boon Leong 		napi_id = ch->rx_napi.napi_id;
2068132c32eeSOng Boon Leong 
2069be8b38a7SOng Boon Leong 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2070be8b38a7SOng Boon Leong 			       rx_q->queue_index,
2071132c32eeSOng Boon Leong 			       napi_id);
2072be8b38a7SOng Boon Leong 	if (ret) {
2073be8b38a7SOng Boon Leong 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2074da5ec7f2SOng Boon Leong 		return -EINVAL;
2075be8b38a7SOng Boon Leong 	}
2076da5ec7f2SOng Boon Leong 
2077da5ec7f2SOng Boon Leong 	return 0;
2078da5ec7f2SOng Boon Leong }
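/* Illustrative example (not part of the driver): a self-contained model of
 * the page-pool sizing math in __alloc_dma_rx_desc_resources() above.
 * DIV_ROUND_UP() and ilog2() are the kernel helpers used there; they are
 * re-derived in plain C here so the arithmetic can be checked standalone.
 * A 4096-byte page is assumed for the example.
 */
#include <stdio.h>

#define EX_PAGE_SIZE 4096u

static unsigned int ex_div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;		/* same rounding as DIV_ROUND_UP() */
}

static unsigned int ex_ilog2(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)			/* floor(log2(n)), like ilog2() */
		log++;
	return log;
}

int main(void)
{
	unsigned int buf_sz = 1536;	/* typical dma_buf_sz for a 1500 MTU */
	unsigned int num_pages = ex_div_round_up(buf_sz, EX_PAGE_SIZE);

	/* pp_params.order selects a power-of-two page allocation per buffer */
	printf("num_pages=%u order=%u\n", num_pages, ex_ilog2(num_pages));
	return 0;
}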
2079da5ec7f2SOng Boon Leong 
2080ba39b344SChristian Marangi static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2081ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
2082da5ec7f2SOng Boon Leong {
2083da5ec7f2SOng Boon Leong 	u32 rx_count = priv->plat->rx_queues_to_use;
2084da5ec7f2SOng Boon Leong 	u32 queue;
2085da5ec7f2SOng Boon Leong 	int ret;
2086da5ec7f2SOng Boon Leong 
2087da5ec7f2SOng Boon Leong 	/* RX queues buffers and DMA */
2088da5ec7f2SOng Boon Leong 	for (queue = 0; queue < rx_count; queue++) {
2089ba39b344SChristian Marangi 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2090da5ec7f2SOng Boon Leong 		if (ret)
2091da5ec7f2SOng Boon Leong 			goto err_dma;
209254139cf3SJoao Pinto 	}
209371fedb01SJoao Pinto 
209471fedb01SJoao Pinto 	return 0;
209571fedb01SJoao Pinto 
209671fedb01SJoao Pinto err_dma:
2097ba39b344SChristian Marangi 	free_dma_rx_desc_resources(priv, dma_conf);
209854139cf3SJoao Pinto 
209971fedb01SJoao Pinto 	return ret;
210071fedb01SJoao Pinto }
210171fedb01SJoao Pinto 
210271fedb01SJoao Pinto /**
2103da5ec7f2SOng Boon Leong  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
210471fedb01SJoao Pinto  * @priv: private structure
2105ba39b344SChristian Marangi  * @dma_conf: structure holding the DMA data
2106da5ec7f2SOng Boon Leong  * @queue: TX queue index
210771fedb01SJoao Pinto  * Description: according to which descriptor can be used (extended, TBS or
210871fedb01SJoao Pinto  * basic), this function allocates the TX resources for a single queue: the
210971fedb01SJoao Pinto  * descriptor ring plus the tx_skbuff and tx_skbuff_dma bookkeeping
211071fedb01SJoao Pinto  * arrays.
211171fedb01SJoao Pinto  */
2112ba39b344SChristian Marangi static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2113ba39b344SChristian Marangi 					 struct stmmac_dma_conf *dma_conf,
2114ba39b344SChristian Marangi 					 u32 queue)
211571fedb01SJoao Pinto {
2116ba39b344SChristian Marangi 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2117579a25a8SJose Abreu 	size_t size;
2118579a25a8SJose Abreu 	void *addr;
2119ce736788SJoao Pinto 
2120ce736788SJoao Pinto 	tx_q->queue_index = queue;
2121ce736788SJoao Pinto 	tx_q->priv_data = priv;
2122ce736788SJoao Pinto 
2123ba39b344SChristian Marangi 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2124ce736788SJoao Pinto 				      sizeof(*tx_q->tx_skbuff_dma),
212571fedb01SJoao Pinto 				      GFP_KERNEL);
2126ce736788SJoao Pinto 	if (!tx_q->tx_skbuff_dma)
2127da5ec7f2SOng Boon Leong 		return -ENOMEM;
212871fedb01SJoao Pinto 
2129ba39b344SChristian Marangi 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2130ce736788SJoao Pinto 				  sizeof(struct sk_buff *),
213171fedb01SJoao Pinto 				  GFP_KERNEL);
2132ce736788SJoao Pinto 	if (!tx_q->tx_skbuff)
2133da5ec7f2SOng Boon Leong 		return -ENOMEM;
213471fedb01SJoao Pinto 
2135579a25a8SJose Abreu 	if (priv->extend_desc)
2136579a25a8SJose Abreu 		size = sizeof(struct dma_extended_desc);
2137579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2138579a25a8SJose Abreu 		size = sizeof(struct dma_edesc);
2139579a25a8SJose Abreu 	else
2140579a25a8SJose Abreu 		size = sizeof(struct dma_desc);
2141579a25a8SJose Abreu 
2142ba39b344SChristian Marangi 	size *= dma_conf->dma_tx_size;
2143579a25a8SJose Abreu 
2144579a25a8SJose Abreu 	addr = dma_alloc_coherent(priv->device, size,
2145579a25a8SJose Abreu 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2146579a25a8SJose Abreu 	if (!addr)
2147da5ec7f2SOng Boon Leong 		return -ENOMEM;
2148579a25a8SJose Abreu 
2149579a25a8SJose Abreu 	if (priv->extend_desc)
2150579a25a8SJose Abreu 		tx_q->dma_etx = addr;
2151579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152579a25a8SJose Abreu 		tx_q->dma_entx = addr;
2153579a25a8SJose Abreu 	else
2154579a25a8SJose Abreu 		tx_q->dma_tx = addr;
2155da5ec7f2SOng Boon Leong 
2156da5ec7f2SOng Boon Leong 	return 0;
2157da5ec7f2SOng Boon Leong }
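/* Illustrative example (not part of the driver): the TX alloc and free
 * paths must agree on the per-descriptor size, so both repeat the same
 * three-way layout selection seen above. A minimal standalone model; the
 * struct definitions are placeholders standing in for the driver's real
 * dma_extended_desc, dma_edesc and dma_desc layouts.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ex_desc { unsigned int des[4]; };			/* stand-in: basic */
struct ex_edesc { struct ex_desc basic; unsigned int et[4]; };	/* stand-in: TBS */
struct ex_xdesc { struct ex_desc basic; unsigned int ext[4]; };	/* stand-in: extended */

static size_t ex_tx_desc_size(bool extend_desc, bool tbs_avail)
{
	if (extend_desc)
		return sizeof(struct ex_xdesc);	/* extended layout wins */
	if (tbs_avail)
		return sizeof(struct ex_edesc);	/* then the TBS layout */
	return sizeof(struct ex_desc);		/* otherwise the basic one */
}

int main(void)
{
	/* total coherent allocation: one descriptor per ring entry */
	printf("%zu bytes for a 512-entry basic ring\n",
	       ex_tx_desc_size(false, false) * 512);
	return 0;
}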
2158da5ec7f2SOng Boon Leong 
2159ba39b344SChristian Marangi static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2160ba39b344SChristian Marangi 				       struct stmmac_dma_conf *dma_conf)
2161da5ec7f2SOng Boon Leong {
2162da5ec7f2SOng Boon Leong 	u32 tx_count = priv->plat->tx_queues_to_use;
2163da5ec7f2SOng Boon Leong 	u32 queue;
2164da5ec7f2SOng Boon Leong 	int ret;
2165da5ec7f2SOng Boon Leong 
2166da5ec7f2SOng Boon Leong 	/* TX queues buffers and DMA */
2167da5ec7f2SOng Boon Leong 	for (queue = 0; queue < tx_count; queue++) {
2168ba39b344SChristian Marangi 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2169da5ec7f2SOng Boon Leong 		if (ret)
2170da5ec7f2SOng Boon Leong 			goto err_dma;
21715bacd778SLABBE Corentin 	}
21725bacd778SLABBE Corentin 
21735bacd778SLABBE Corentin 	return 0;
21745bacd778SLABBE Corentin 
217562242260SChristophe Jaillet err_dma:
2176ba39b344SChristian Marangi 	free_dma_tx_desc_resources(priv, dma_conf);
217709f8d696SSrinivas Kandagatla 	return ret;
21785bacd778SLABBE Corentin }
217909f8d696SSrinivas Kandagatla 
218071fedb01SJoao Pinto /**
218171fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
218271fedb01SJoao Pinto  * @priv: private structure
2183ba39b344SChristian Marangi  * @dma_conf: structure holding the DMA data
218471fedb01SJoao Pinto  * Description: according to which descriptor can be used (extended or basic)
218571fedb01SJoao Pinto  * this function allocates the resources for the TX and RX paths. In case of
218671fedb01SJoao Pinto  * reception, for example, it pre-allocates the RX socket buffers in order to
218771fedb01SJoao Pinto  * allow the zero-copy mechanism.
218871fedb01SJoao Pinto  */
2189ba39b344SChristian Marangi static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2190ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf)
21915bacd778SLABBE Corentin {
219254139cf3SJoao Pinto 	/* RX Allocation */
2193ba39b344SChristian Marangi 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
219471fedb01SJoao Pinto 
219571fedb01SJoao Pinto 	if (ret)
219671fedb01SJoao Pinto 		return ret;
219771fedb01SJoao Pinto 
2198ba39b344SChristian Marangi 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
219971fedb01SJoao Pinto 
220071fedb01SJoao Pinto 	return ret;
220171fedb01SJoao Pinto }
220271fedb01SJoao Pinto 
220371fedb01SJoao Pinto /**
220471fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
220571fedb01SJoao Pinto  * @priv: private structure
2206ba39b344SChristian Marangi  * @dma_conf: structure holding the DMA data
220771fedb01SJoao Pinto  */
2208ba39b344SChristian Marangi static void free_dma_desc_resources(struct stmmac_priv *priv,
2209ba39b344SChristian Marangi 				    struct stmmac_dma_conf *dma_conf)
221071fedb01SJoao Pinto {
221171fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
2212ba39b344SChristian Marangi 	free_dma_tx_desc_resources(priv, dma_conf);
2213be8b38a7SOng Boon Leong 
2214be8b38a7SOng Boon Leong 	/* Release the DMA RX socket buffers later
2215be8b38a7SOng Boon Leong 	 * to ensure all pending XDP_TX buffers are returned.
2216be8b38a7SOng Boon Leong 	 */
2217ba39b344SChristian Marangi 	free_dma_rx_desc_resources(priv, dma_conf);
221871fedb01SJoao Pinto }
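/* Illustrative sketch (not part of the driver): how the two entry points
 * above are expected to pair up. The alloc/free function names are the
 * driver's; the surrounding caller is hypothetical. The free path
 * deliberately releases TX first, so XDP_TX frames still held by the TX
 * ring are returned to the RX page pool before that pool is destroyed.
 */
static int example_ring_lifecycle(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf)
{
	int ret = alloc_dma_desc_resources(priv, dma_conf);

	if (ret)
		return ret;	/* the per-queue loops unwind on failure */

	/* ... rings are initialized and used here ... */

	free_dma_desc_resources(priv, dma_conf);	/* TX first, then RX */
	return 0;
}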
221971fedb01SJoao Pinto 
222071fedb01SJoao Pinto /**
22219eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
22229eb12474Sjpinto  *  @priv: driver private structure
22239eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
22249eb12474Sjpinto  */
22259eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
22269eb12474Sjpinto {
22274f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
22284f6046f5SJoao Pinto 	int queue;
22294f6046f5SJoao Pinto 	u8 mode;
22309eb12474Sjpinto 
22314f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
22324f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2233c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
22344f6046f5SJoao Pinto 	}
22359eb12474Sjpinto }
22369eb12474Sjpinto 
22379eb12474Sjpinto /**
2238ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
2239ae4f0d46SJoao Pinto  * @priv: driver private structure
2240ae4f0d46SJoao Pinto  * @chan: RX channel index
2241ae4f0d46SJoao Pinto  * Description:
2242ae4f0d46SJoao Pinto  * This starts an RX DMA channel
2243ae4f0d46SJoao Pinto  */
2244ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2245ae4f0d46SJoao Pinto {
2246ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2247a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
2248ae4f0d46SJoao Pinto }
2249ae4f0d46SJoao Pinto 
2250ae4f0d46SJoao Pinto /**
2251ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
2252ae4f0d46SJoao Pinto  * @priv: driver private structure
2253ae4f0d46SJoao Pinto  * @chan: TX channel index
2254ae4f0d46SJoao Pinto  * Description:
2255ae4f0d46SJoao Pinto  * This starts a TX DMA channel
2256ae4f0d46SJoao Pinto  */
2257ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2258ae4f0d46SJoao Pinto {
2259ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2260a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
2261ae4f0d46SJoao Pinto }
2262ae4f0d46SJoao Pinto 
2263ae4f0d46SJoao Pinto /**
2264ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
2265ae4f0d46SJoao Pinto  * @priv: driver private structure
2266ae4f0d46SJoao Pinto  * @chan: RX channel index
2267ae4f0d46SJoao Pinto  * Description:
2268ae4f0d46SJoao Pinto  * This stops an RX DMA channel
2269ae4f0d46SJoao Pinto  */
2270ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2271ae4f0d46SJoao Pinto {
2272ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2273a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2274ae4f0d46SJoao Pinto }
2275ae4f0d46SJoao Pinto 
2276ae4f0d46SJoao Pinto /**
2277ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
2278ae4f0d46SJoao Pinto  * @priv: driver private structure
2279ae4f0d46SJoao Pinto  * @chan: TX channel index
2280ae4f0d46SJoao Pinto  * Description:
2281ae4f0d46SJoao Pinto  * This stops a TX DMA channel
2282ae4f0d46SJoao Pinto  */
2283ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2284ae4f0d46SJoao Pinto {
2285ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2286a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2287ae4f0d46SJoao Pinto }
2288ae4f0d46SJoao Pinto 
2289087a7b94SVincent Whitchurch static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2290087a7b94SVincent Whitchurch {
2291087a7b94SVincent Whitchurch 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2292087a7b94SVincent Whitchurch 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2293087a7b94SVincent Whitchurch 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2294087a7b94SVincent Whitchurch 	u32 chan;
2295087a7b94SVincent Whitchurch 
2296087a7b94SVincent Whitchurch 	for (chan = 0; chan < dma_csr_ch; chan++) {
2297087a7b94SVincent Whitchurch 		struct stmmac_channel *ch = &priv->channel[chan];
2298087a7b94SVincent Whitchurch 		unsigned long flags;
2299087a7b94SVincent Whitchurch 
2300087a7b94SVincent Whitchurch 		spin_lock_irqsave(&ch->lock, flags);
2301087a7b94SVincent Whitchurch 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2302087a7b94SVincent Whitchurch 		spin_unlock_irqrestore(&ch->lock, flags);
2303087a7b94SVincent Whitchurch 	}
2304087a7b94SVincent Whitchurch }
2305087a7b94SVincent Whitchurch 
2306ae4f0d46SJoao Pinto /**
2307ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
2308ae4f0d46SJoao Pinto  * @priv: driver private structure
2309ae4f0d46SJoao Pinto  * Description:
2310ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
2311ae4f0d46SJoao Pinto  */
2312ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
2313ae4f0d46SJoao Pinto {
2314ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2315ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2316ae4f0d46SJoao Pinto 	u32 chan = 0;
2317ae4f0d46SJoao Pinto 
2318ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2319ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
2320ae4f0d46SJoao Pinto 
2321ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2322ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
2323ae4f0d46SJoao Pinto }
2324ae4f0d46SJoao Pinto 
2325ae4f0d46SJoao Pinto /**
2326ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2327ae4f0d46SJoao Pinto  * @priv: driver private structure
2328ae4f0d46SJoao Pinto  * Description:
2329ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
2330ae4f0d46SJoao Pinto  */
2331ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2332ae4f0d46SJoao Pinto {
2333ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2334ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2335ae4f0d46SJoao Pinto 	u32 chan = 0;
2336ae4f0d46SJoao Pinto 
2337ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2338ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
2339ae4f0d46SJoao Pinto 
2340ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2341ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
2342ae4f0d46SJoao Pinto }
2343ae4f0d46SJoao Pinto 
2344ae4f0d46SJoao Pinto /**
23457ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
234632ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
2347732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
2348732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
23497ac6653aSJeff Kirsher  */
23507ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
23517ac6653aSJeff Kirsher {
23526deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
23536deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2354f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
235552a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
23566deee222SJoao Pinto 	u32 txmode = 0;
23576deee222SJoao Pinto 	u32 rxmode = 0;
23586deee222SJoao Pinto 	u32 chan = 0;
2359a0daae13SJose Abreu 	u8 qmode = 0;
2360f88203a2SVince Bridgers 
236111fbf811SThierry Reding 	if (rxfifosz == 0)
236211fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
236352a76235SJose Abreu 	if (txfifosz == 0)
236452a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
236552a76235SJose Abreu 
236652a76235SJose Abreu 	/* Adjust for real per queue fifo size */
236752a76235SJose Abreu 	rxfifosz /= rx_channels_count;
236852a76235SJose Abreu 	txfifosz /= tx_channels_count;
236911fbf811SThierry Reding 
23706deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
23716deee222SJoao Pinto 		txmode = tc;
23726deee222SJoao Pinto 		rxmode = tc;
23736deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
23747ac6653aSJeff Kirsher 		/*
23757ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
23767ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
23777ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
23787ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
23797ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
23807ac6653aSJeff Kirsher 		 */
23816deee222SJoao Pinto 		txmode = SF_DMA_MODE;
23826deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
2383b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
23846deee222SJoao Pinto 	} else {
23856deee222SJoao Pinto 		txmode = tc;
23866deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
23876deee222SJoao Pinto 	}
23886deee222SJoao Pinto 
23896deee222SJoao Pinto 	/* configure all channels */
2390a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
23918531c808SChristian Marangi 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2392bba2556eSOng Boon Leong 		u32 buf_size;
2393bba2556eSOng Boon Leong 
2394a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
23956deee222SJoao Pinto 
2396a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2397a0daae13SJose Abreu 				rxfifosz, qmode);
2398bba2556eSOng Boon Leong 
2399bba2556eSOng Boon Leong 		if (rx_q->xsk_pool) {
2400bba2556eSOng Boon Leong 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2401bba2556eSOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2402bba2556eSOng Boon Leong 					      buf_size,
24034205c88eSJose Abreu 					      chan);
2404bba2556eSOng Boon Leong 		} else {
2405bba2556eSOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
24068531c808SChristian Marangi 					      priv->dma_conf.dma_buf_sz,
2407bba2556eSOng Boon Leong 					      chan);
2408bba2556eSOng Boon Leong 		}
2409a0daae13SJose Abreu 	}
2410a0daae13SJose Abreu 
2411a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
2412a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2413a0daae13SJose Abreu 
2414a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2415a0daae13SJose Abreu 				txfifosz, qmode);
2416a0daae13SJose Abreu 	}
24177ac6653aSJeff Kirsher }
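/* Illustrative example (not part of the driver): self-contained model of
 * the per-queue FIFO split above. A platform-provided FIFO size of zero
 * means "use the size reported in dma_cap", and the resulting total is
 * divided evenly across the active queues. Sizes are in bytes.
 */
#include <stdio.h>

static int ex_per_queue_fifo(int plat_fifosz, int cap_fifosz, int queues)
{
	int fifosz = plat_fifosz ? plat_fifosz : cap_fifosz; /* 0 -> HW default */

	return fifosz / queues;		/* even split across queues */
}

int main(void)
{
	/* e.g. a 16 KiB RX FIFO shared by 4 RX queues -> 4 KiB each */
	printf("%d bytes per queue\n", ex_per_queue_fifo(0, 16384, 4));
	return 0;
}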
24187ac6653aSJeff Kirsher 
2419132c32eeSOng Boon Leong static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2420132c32eeSOng Boon Leong {
2421132c32eeSOng Boon Leong 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
24228531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
24238070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2424132c32eeSOng Boon Leong 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2425132c32eeSOng Boon Leong 	unsigned int entry = tx_q->cur_tx;
2426132c32eeSOng Boon Leong 	struct dma_desc *tx_desc = NULL;
2427132c32eeSOng Boon Leong 	struct xdp_desc xdp_desc;
2428132c32eeSOng Boon Leong 	bool work_done = true;
2429133466c3SJisheng Zhang 	u32 tx_set_ic_bit = 0;
2430132c32eeSOng Boon Leong 
2431132c32eeSOng Boon Leong 	/* Avoids TX time-out as we are sharing with the slow path */
2432e92af33eSAlexander Lobakin 	txq_trans_cond_update(nq);
2433132c32eeSOng Boon Leong 
2434132c32eeSOng Boon Leong 	budget = min(budget, stmmac_tx_avail(priv, queue));
2435132c32eeSOng Boon Leong 
2436132c32eeSOng Boon Leong 	while (budget-- > 0) {
2437132c32eeSOng Boon Leong 		dma_addr_t dma_addr;
2438132c32eeSOng Boon Leong 		bool set_ic;
2439132c32eeSOng Boon Leong 
2440132c32eeSOng Boon Leong 		/* We are sharing with the slow path and stop XSK TX desc submission
2441132c32eeSOng Boon Leong 		 * when the available TX ring space is less than the threshold.
2442132c32eeSOng Boon Leong 		 */
2443132c32eeSOng Boon Leong 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2444132c32eeSOng Boon Leong 		    !netif_carrier_ok(priv->dev)) {
2445132c32eeSOng Boon Leong 			work_done = false;
2446132c32eeSOng Boon Leong 			break;
2447132c32eeSOng Boon Leong 		}
2448132c32eeSOng Boon Leong 
2449132c32eeSOng Boon Leong 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2450132c32eeSOng Boon Leong 			break;
2451132c32eeSOng Boon Leong 
2452132c32eeSOng Boon Leong 		if (likely(priv->extend_desc))
2453132c32eeSOng Boon Leong 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2454132c32eeSOng Boon Leong 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2455132c32eeSOng Boon Leong 			tx_desc = &tx_q->dma_entx[entry].basic;
2456132c32eeSOng Boon Leong 		else
2457132c32eeSOng Boon Leong 			tx_desc = tx_q->dma_tx + entry;
2458132c32eeSOng Boon Leong 
2459132c32eeSOng Boon Leong 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2460132c32eeSOng Boon Leong 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2461132c32eeSOng Boon Leong 
2462132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2463132c32eeSOng Boon Leong 
2464132c32eeSOng Boon Leong 		/* To return an XDP buffer to the XSK pool, we simply call
2465132c32eeSOng Boon Leong 		 * xsk_tx_completed(), so we don't need to fill up
2466132c32eeSOng Boon Leong 		 * 'buf' and 'xdpf'.
2467132c32eeSOng Boon Leong 		 */
2468132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf = 0;
2469132c32eeSOng Boon Leong 		tx_q->xdpf[entry] = NULL;
2470132c32eeSOng Boon Leong 
2471132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2472132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2473132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2474132c32eeSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2475132c32eeSOng Boon Leong 
2476132c32eeSOng Boon Leong 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2477132c32eeSOng Boon Leong 
2478132c32eeSOng Boon Leong 		tx_q->tx_count_frames++;
2479132c32eeSOng Boon Leong 
2480132c32eeSOng Boon Leong 		if (!priv->tx_coal_frames[queue])
2481132c32eeSOng Boon Leong 			set_ic = false;
2482132c32eeSOng Boon Leong 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2483132c32eeSOng Boon Leong 			set_ic = true;
2484132c32eeSOng Boon Leong 		else
2485132c32eeSOng Boon Leong 			set_ic = false;
2486132c32eeSOng Boon Leong 
2487132c32eeSOng Boon Leong 		if (set_ic) {
2488132c32eeSOng Boon Leong 			tx_q->tx_count_frames = 0;
2489132c32eeSOng Boon Leong 			stmmac_set_tx_ic(priv, tx_desc);
2490133466c3SJisheng Zhang 			tx_set_ic_bit++;
2491132c32eeSOng Boon Leong 		}
2492132c32eeSOng Boon Leong 
2493132c32eeSOng Boon Leong 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2494132c32eeSOng Boon Leong 				       true, priv->mode, true, true,
2495132c32eeSOng Boon Leong 				       xdp_desc.len);
2496132c32eeSOng Boon Leong 
2497132c32eeSOng Boon Leong 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2498132c32eeSOng Boon Leong 
24998531c808SChristian Marangi 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2500132c32eeSOng Boon Leong 		entry = tx_q->cur_tx;
2501132c32eeSOng Boon Leong 	}
25029680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->napi_syncp);
25039680b2abSPetr Tesarik 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
25049680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->napi_syncp);
2505132c32eeSOng Boon Leong 
2506132c32eeSOng Boon Leong 	if (tx_desc) {
2507132c32eeSOng Boon Leong 		stmmac_flush_tx_descriptors(priv, queue);
2508132c32eeSOng Boon Leong 		xsk_tx_release(pool);
2509132c32eeSOng Boon Leong 	}
2510132c32eeSOng Boon Leong 
2511132c32eeSOng Boon Leong 	/* Return true if both of these conditions are met:
2512132c32eeSOng Boon Leong 	 *  a) TX budget is still available
2513132c32eeSOng Boon Leong 	 *  b) work_done = true when the XSK TX desc peek is empty (no more
2514132c32eeSOng Boon Leong 	 *     pending XSK TX for transmission)
2515132c32eeSOng Boon Leong 	 */
2516132c32eeSOng Boon Leong 	return !!budget && work_done;
2517132c32eeSOng Boon Leong }
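/* Illustrative example (not part of the driver): self-contained model of
 * the "set_ic" decision in the zero-copy transmit loop above. An
 * Interrupt-on-Completion bit is requested on every Nth descriptor
 * (N = tx_coal_frames); N == 0 disables frame-count coalescing, leaving
 * completions to the coalescing timer instead.
 */
#include <stdbool.h>
#include <stdio.h>

static bool ex_want_completion_irq(unsigned int frames_since_irq,
				   unsigned int tx_coal_frames)
{
	if (!tx_coal_frames)
		return false;		/* frame-count coalescing disabled */
	return (frames_since_irq % tx_coal_frames) == 0;
}

int main(void)
{
	unsigned int count = 0, n;

	for (n = 1; n <= 8; n++) {	/* request an IRQ every 4th frame */
		if (ex_want_completion_irq(++count, 4)) {
			printf("IC set on frame %u\n", n);
			count = 0;	/* reset after requesting an IRQ */
		}
	}
	return 0;
}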
2518132c32eeSOng Boon Leong 
25193a6c12a0SXiaoliang Yang static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
25203a6c12a0SXiaoliang Yang {
25213a6c12a0SXiaoliang Yang 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
25223a6c12a0SXiaoliang Yang 		tc += 64;
25233a6c12a0SXiaoliang Yang 
25243a6c12a0SXiaoliang Yang 		if (priv->plat->force_thresh_dma_mode)
25253a6c12a0SXiaoliang Yang 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
25263a6c12a0SXiaoliang Yang 		else
25273a6c12a0SXiaoliang Yang 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
25283a6c12a0SXiaoliang Yang 						      chan);
25293a6c12a0SXiaoliang Yang 
25303a6c12a0SXiaoliang Yang 		priv->xstats.threshold = tc;
25313a6c12a0SXiaoliang Yang 	}
25323a6c12a0SXiaoliang Yang }
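/* Illustrative example (not part of the driver): the escalation that
 * stmmac_bump_dma_threshold() implements, modeled standalone. Each bump
 * raises the threshold-mode watermark by 64 bytes until it passes 256,
 * and bumps are skipped once the DMA runs in store-and-forward mode. The
 * sentinel below is a placeholder; the driver's SF_DMA_MODE value differs.
 */
#include <stdio.h>

#define EX_SF_MODE -1		/* placeholder for the SF_DMA_MODE sentinel */

static void ex_bump(int *tc, int threshold)
{
	if (threshold != EX_SF_MODE && *tc <= 256)
		*tc += 64;	/* 64 -> 128 -> 192 -> 256 -> 320, then stop */
}

int main(void)
{
	int tc = 64, i;

	for (i = 0; i < 6; i++) {
		ex_bump(&tc, tc);	/* threshold tracks tc, as in xstats */
		printf("tc=%d\n", tc);
	}
	return 0;
}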
25333a6c12a0SXiaoliang Yang 
25347ac6653aSJeff Kirsher /**
2535732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
253632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
2537d0ea5cbdSJesse Brandeburg  * @budget: napi budget limiting this function's packet handling
2538ce736788SJoao Pinto  * @queue: TX queue index
2539732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
25407ac6653aSJeff Kirsher  */
25418fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
25427ac6653aSJeff Kirsher {
25438531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
25448070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
254538979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
2546132c32eeSOng Boon Leong 	unsigned int entry, xmits = 0, count = 0;
2547133466c3SJisheng Zhang 	u32 tx_packets = 0, tx_errors = 0;
25487ac6653aSJeff Kirsher 
25498fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2550a9097a96SGiuseppe CAVALLARO 
2551132c32eeSOng Boon Leong 	tx_q->xsk_frames_done = 0;
2552132c32eeSOng Boon Leong 
25538d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
2554132c32eeSOng Boon Leong 
2555132c32eeSOng Boon Leong 	/* Try to clean all TX completed frames in one shot */
25568531c808SChristian Marangi 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2557be8b38a7SOng Boon Leong 		struct xdp_frame *xdpf;
2558be8b38a7SOng Boon Leong 		struct sk_buff *skb;
2559c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
2560c363b658SFabrice Gasnier 		int status;
2561c24602efSGiuseppe CAVALLARO 
25628b278a5bSOng Boon Leong 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
25638b278a5bSOng Boon Leong 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2564be8b38a7SOng Boon Leong 			xdpf = tx_q->xdpf[entry];
2565be8b38a7SOng Boon Leong 			skb = NULL;
2566be8b38a7SOng Boon Leong 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2567be8b38a7SOng Boon Leong 			xdpf = NULL;
2568be8b38a7SOng Boon Leong 			skb = tx_q->tx_skbuff[entry];
2569be8b38a7SOng Boon Leong 		} else {
2570be8b38a7SOng Boon Leong 			xdpf = NULL;
2571be8b38a7SOng Boon Leong 			skb = NULL;
2572be8b38a7SOng Boon Leong 		}
2573be8b38a7SOng Boon Leong 
2574c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
2575ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2576579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2577579a25a8SJose Abreu 			p = &tx_q->dma_entx[entry].basic;
2578c24602efSGiuseppe CAVALLARO 		else
2579ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
25807ac6653aSJeff Kirsher 
2581133466c3SJisheng Zhang 		status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2582c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
2583c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
2584c363b658SFabrice Gasnier 			break;
2585c363b658SFabrice Gasnier 
25868fce3331SJose Abreu 		count++;
25878fce3331SJose Abreu 
2588a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
2589a6b25da5SNiklas Cassel 		 * the own bit.
2590a6b25da5SNiklas Cassel 		 */
2591a6b25da5SNiklas Cassel 		dma_rmb();
2592a6b25da5SNiklas Cassel 
2593c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
2594c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
2595c363b658SFabrice Gasnier 			/* ... verify the status error condition */
2596c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
2597133466c3SJisheng Zhang 				tx_errors++;
25983a6c12a0SXiaoliang Yang 				if (unlikely(status & tx_err_bump_tc))
25993a6c12a0SXiaoliang Yang 					stmmac_bump_dma_threshold(priv, queue);
2600c363b658SFabrice Gasnier 			} else {
2601133466c3SJisheng Zhang 				tx_packets++;
2602c363b658SFabrice Gasnier 			}
2603be8b38a7SOng Boon Leong 			if (skb)
2604ba1ffd74SGiuseppe CAVALLARO 				stmmac_get_tx_hwtstamp(priv, p, skb);
26057ac6653aSJeff Kirsher 		}
26067ac6653aSJeff Kirsher 
2607be8b38a7SOng Boon Leong 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2608be8b38a7SOng Boon Leong 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2609ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2610362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
2611ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
2612ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
26137ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
2614362b37beSGiuseppe CAVALLARO 			else
2615362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
2616ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
2617ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
2618362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
2619ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
2620ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
2621ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2622cf32deecSRayagond Kokatanur 		}
2623f748be53SAlexandre TORGUE 
26242c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
2625f748be53SAlexandre TORGUE 
2626ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2627ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
26287ac6653aSJeff Kirsher 
2629be8b38a7SOng Boon Leong 		if (xdpf &&
2630be8b38a7SOng Boon Leong 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2631be8b38a7SOng Boon Leong 			xdp_return_frame_rx_napi(xdpf);
2632be8b38a7SOng Boon Leong 			tx_q->xdpf[entry] = NULL;
2633be8b38a7SOng Boon Leong 		}
2634be8b38a7SOng Boon Leong 
26358b278a5bSOng Boon Leong 		if (xdpf &&
26368b278a5bSOng Boon Leong 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
26378b278a5bSOng Boon Leong 			xdp_return_frame(xdpf);
26388b278a5bSOng Boon Leong 			tx_q->xdpf[entry] = NULL;
26398b278a5bSOng Boon Leong 		}
26408b278a5bSOng Boon Leong 
2641132c32eeSOng Boon Leong 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2642132c32eeSOng Boon Leong 			tx_q->xsk_frames_done++;
2643132c32eeSOng Boon Leong 
2644be8b38a7SOng Boon Leong 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2645be8b38a7SOng Boon Leong 			if (likely(skb)) {
264638979574SBeniamino Galvani 				pkts_compl++;
264738979574SBeniamino Galvani 				bytes_compl += skb->len;
26487c565c33SEric W. Biederman 				dev_consume_skb_any(skb);
2649ce736788SJoao Pinto 				tx_q->tx_skbuff[entry] = NULL;
26507ac6653aSJeff Kirsher 			}
2651be8b38a7SOng Boon Leong 		}
26527ac6653aSJeff Kirsher 
265342de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
26547ac6653aSJeff Kirsher 
26558531c808SChristian Marangi 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
26567ac6653aSJeff Kirsher 	}
2657ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
265838979574SBeniamino Galvani 
2659c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2660c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
266138979574SBeniamino Galvani 
2662c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2663c22a3f48SJoao Pinto 								queue))) &&
2664aa042f60SSong, Yoong Siang 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2665c22a3f48SJoao Pinto 
2666b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
2667b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
2668c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
26697ac6653aSJeff Kirsher 	}
2670d765955dSGiuseppe CAVALLARO 
2671132c32eeSOng Boon Leong 	if (tx_q->xsk_pool) {
2672132c32eeSOng Boon Leong 		bool work_done;
2673132c32eeSOng Boon Leong 
2674132c32eeSOng Boon Leong 		if (tx_q->xsk_frames_done)
2675132c32eeSOng Boon Leong 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2676132c32eeSOng Boon Leong 
2677132c32eeSOng Boon Leong 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2678132c32eeSOng Boon Leong 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2679132c32eeSOng Boon Leong 
2680132c32eeSOng Boon Leong 		/* For XSK TX, we try to send as many as possible.
2681132c32eeSOng Boon Leong 		 * If XSK work is done (XSK TX desc empty and budget still
2682132c32eeSOng Boon Leong 		 * available), return "budget - 1" to re-enable the TX IRQ.
2683132c32eeSOng Boon Leong 		 * Else, return "budget" to make NAPI continue polling.
2684132c32eeSOng Boon Leong 		 */
2685132c32eeSOng Boon Leong 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2686132c32eeSOng Boon Leong 					       STMMAC_XSK_TX_BUDGET_MAX);
2687132c32eeSOng Boon Leong 		if (work_done)
2688132c32eeSOng Boon Leong 			xmits = budget - 1;
2689132c32eeSOng Boon Leong 		else
2690132c32eeSOng Boon Leong 			xmits = budget;
2691132c32eeSOng Boon Leong 	}
2692132c32eeSOng Boon Leong 
2693be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2694be1c7eaeSVineetha G. Jaya Kumaran 	    priv->eee_sw_timer_en) {
2695c74ead22SJisheng Zhang 		if (stmmac_enable_eee_mode(priv))
2696388e201dSVineetha G. Jaya Kumaran 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2697d765955dSGiuseppe CAVALLARO 	}
26988fce3331SJose Abreu 
26994ccb4585SJose Abreu 	/* We still have pending packets, let's call for a new scheduling */
27004ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
2701fa60b816SVincent Whitchurch 		stmmac_tx_timer_arm(priv, queue);
27024ccb4585SJose Abreu 
27039680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->napi_syncp);
27049680b2abSPetr Tesarik 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
27059680b2abSPetr Tesarik 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
27069680b2abSPetr Tesarik 	u64_stats_inc(&txq_stats->napi.tx_clean);
27079680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->napi_syncp);
2708133466c3SJisheng Zhang 
2709133466c3SJisheng Zhang 	priv->xstats.tx_errors += tx_errors;
2710133466c3SJisheng Zhang 
27118fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
27128fce3331SJose Abreu 
2713132c32eeSOng Boon Leong 	/* Combine decisions from TX clean and XSK TX */
2714132c32eeSOng Boon Leong 	return max(count, xmits);
27157ac6653aSJeff Kirsher }
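/* Illustrative example (not part of the driver): self-contained model of
 * the dirty/cur ring walk that stmmac_tx_clean() performs above. dirty_tx
 * chases cur_tx around a power-of-two ring; STMMAC_GET_ENTRY() in the
 * driver is assumed to be the usual "(x + 1) & (size - 1)" wrap, which is
 * re-derived here.
 */
#include <stdio.h>

#define EX_RING_SIZE 8u			/* must be a power of two */

static unsigned int ex_next(unsigned int entry)
{
	return (entry + 1) & (EX_RING_SIZE - 1);	/* wrap at ring end */
}

int main(void)
{
	unsigned int dirty = 6, cur = 2;	/* producer already wrapped */
	unsigned int cleaned = 0;

	while (dirty != cur) {			/* reclaim completed entries */
		dirty = ex_next(dirty);
		cleaned++;
	}
	printf("reclaimed %u entries, dirty_tx=%u\n", cleaned, dirty);
	return 0;
}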
27167ac6653aSJeff Kirsher 
27177ac6653aSJeff Kirsher /**
2718732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
271932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
27205bacd778SLABBE Corentin  * @chan: channel index
27217ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2722732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
27237ac6653aSJeff Kirsher  */
27245bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
27257ac6653aSJeff Kirsher {
27268531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2727ce736788SJoao Pinto 
2728c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
27297ac6653aSJeff Kirsher 
2730ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
2731ba39b344SChristian Marangi 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2732ba39b344SChristian Marangi 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2733f9ec5723SChristian Marangi 	stmmac_reset_tx_queue(priv, chan);
2734f421031eSJongsung Kim 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2735f421031eSJongsung Kim 			    tx_q->dma_tx_phy, chan);
2736ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
27377ac6653aSJeff Kirsher 
2738133466c3SJisheng Zhang 	priv->xstats.tx_errors++;
2739c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
27407ac6653aSJeff Kirsher }
27417ac6653aSJeff Kirsher 
274232ceabcaSGiuseppe CAVALLARO /**
27436deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
27446deee222SJoao Pinto  *  @priv: driver private structure
27456deee222SJoao Pinto  *  @txmode: TX operating mode
27466deee222SJoao Pinto  *  @rxmode: RX operating mode
27476deee222SJoao Pinto  *  @chan: channel index
27486deee222SJoao Pinto  *  Description: it is used for configuring the DMA operation mode at
27496deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
27506deee222SJoao Pinto  *  mode.
27516deee222SJoao Pinto  */
27526deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
27536deee222SJoao Pinto 					  u32 rxmode, u32 chan)
27546deee222SJoao Pinto {
2755a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2756a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
275752a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
275852a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
27596deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
276052a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
27616deee222SJoao Pinto 
27626deee222SJoao Pinto 	if (rxfifosz == 0)
27636deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
276452a76235SJose Abreu 	if (txfifosz == 0)
276552a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
276652a76235SJose Abreu 
276752a76235SJose Abreu 	/* Adjust for real per queue fifo size */
276852a76235SJose Abreu 	rxfifosz /= rx_channels_count;
276952a76235SJose Abreu 	txfifosz /= tx_channels_count;
27706deee222SJoao Pinto 
2771ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2772ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
27736deee222SJoao Pinto }
27746deee222SJoao Pinto 
27758bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
27768bf993a5SJose Abreu {
277763a550fcSJose Abreu 	int ret;
27788bf993a5SJose Abreu 
2779c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
27808bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2781c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
27828bf993a5SJose Abreu 		stmmac_global_err(priv);
2783c10d4c82SJose Abreu 		return true;
2784c10d4c82SJose Abreu 	}
2785c10d4c82SJose Abreu 
2786c10d4c82SJose Abreu 	return false;
27878bf993a5SJose Abreu }
27888bf993a5SJose Abreu 
27897e1c520cSOng Boon Leong static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
27908fce3331SJose Abreu {
27918fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
27927e1c520cSOng Boon Leong 						 &priv->xstats, chan, dir);
27938531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
27948531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
27958fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
2796132c32eeSOng Boon Leong 	struct napi_struct *rx_napi;
2797132c32eeSOng Boon Leong 	struct napi_struct *tx_napi;
2798021bd5e3SJose Abreu 	unsigned long flags;
27998fce3331SJose Abreu 
2800132c32eeSOng Boon Leong 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2801132c32eeSOng Boon Leong 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2802132c32eeSOng Boon Leong 
28034ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2804132c32eeSOng Boon Leong 		if (napi_schedule_prep(rx_napi)) {
2805021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2806021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2807021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2808132c32eeSOng Boon Leong 			__napi_schedule(rx_napi);
28093ba07debSJose Abreu 		}
28104ccb4585SJose Abreu 	}
28114ccb4585SJose Abreu 
2812021bd5e3SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2813132c32eeSOng Boon Leong 		if (napi_schedule_prep(tx_napi)) {
2814021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2815021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2816021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2817132c32eeSOng Boon Leong 			__napi_schedule(tx_napi);
2818021bd5e3SJose Abreu 		}
2819021bd5e3SJose Abreu 	}
28208fce3331SJose Abreu 
28218fce3331SJose Abreu 	return status;
28228fce3331SJose Abreu }
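/* Illustrative sketch (not part of the driver): the generic shape of the
 * napi_schedule_prep()/__napi_schedule() pattern used above. Masking the
 * per-channel DMA interrupt only after napi_schedule_prep() succeeds
 * guarantees that exactly one path masks the IRQ; the poll routine is
 * then responsible for re-enabling it on completion. Everything except
 * the NAPI and driver helpers shown above is hypothetical.
 */
static void example_schedule_rx(struct stmmac_priv *priv,
				struct stmmac_channel *ch, u32 chan)
{
	unsigned long flags;

	if (napi_schedule_prep(&ch->rx_napi)) {
		spin_lock_irqsave(&ch->lock, flags);
		/* mask the RX IRQ for this channel; poll re-enables it */
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(&ch->rx_napi);
	}
}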
28238fce3331SJose Abreu 
28246deee222SJoao Pinto /**
2825732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
282632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
282732ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2828732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedules the poll method in case
2829732fdf0eSGiuseppe CAVALLARO  * some work can be done.
283032ceabcaSGiuseppe CAVALLARO  */
28317ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
28327ac6653aSJeff Kirsher {
2833d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
28345a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
28355a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
28365a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2837d62a107aSJoao Pinto 	u32 chan;
28388ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
28398ac60ffbSKees Cook 
28408ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
28418ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
28428ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
284368e5cfafSJoao Pinto 
28445a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
28457e1c520cSOng Boon Leong 		status[chan] = stmmac_napi_check(priv, chan,
28467e1c520cSOng Boon Leong 						 DMA_DIR_RXTX);
2847d62a107aSJoao Pinto 
28485a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
28495a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
28507ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
28513a6c12a0SXiaoliang Yang 			stmmac_bump_dma_threshold(priv, chan);
28525a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
28534e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
28547ac6653aSJeff Kirsher 		}
2855d62a107aSJoao Pinto 	}
2856d62a107aSJoao Pinto }
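/* Illustrative example (not part of the driver): self-contained model of
 * the defensive clamp above. The on-stack status[] array is sized for the
 * worst case of both queue directions, and channels_to_check is clamped
 * to the array size so a misconfigured platform can never overrun it.
 */
#include <stdio.h>

#define EX_MAX_TX_QUEUES 8u
#define EX_MAX_RX_QUEUES 8u
#define EX_MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int status[EX_MAX(EX_MAX_TX_QUEUES, EX_MAX_RX_QUEUES)];
	unsigned int channels_to_check = 12;	/* bogus platform value */
	unsigned int limit = sizeof(status) / sizeof(status[0]);

	if (channels_to_check > limit)		/* mirrors the WARN_ON_ONCE() */
		channels_to_check = limit;
	printf("checking %u of %u channels\n", channels_to_check, limit);
	return 0;
}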
28577ac6653aSJeff Kirsher 
285832ceabcaSGiuseppe CAVALLARO /**
285932ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
286032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
286132ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
286232ceabcaSGiuseppe CAVALLARO  */
28631c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
28641c901a46SGiuseppe CAVALLARO {
28651c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
28661c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
28671c901a46SGiuseppe CAVALLARO 
28683b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
28694f795b25SGiuseppe CAVALLARO 
28704f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
28713b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
28721c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
28734f795b25SGiuseppe CAVALLARO 	} else
287438ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
28751c901a46SGiuseppe CAVALLARO }
28761c901a46SGiuseppe CAVALLARO 
2877732fdf0eSGiuseppe CAVALLARO /**
2878732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
287932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
288019e30c14SGiuseppe CAVALLARO  * Description:
288119e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2882e7434821SGiuseppe CAVALLARO  *  presence of the optional features/functions.
288319e30c14SGiuseppe CAVALLARO  *  This can also be used to override the value passed through the
288419e30c14SGiuseppe CAVALLARO  *  platform, and is necessary for old MAC10/100 and GMAC chips.
2885e7434821SGiuseppe CAVALLARO  */
2886e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2887e7434821SGiuseppe CAVALLARO {
2888a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2889e7434821SGiuseppe CAVALLARO }
2890e7434821SGiuseppe CAVALLARO 
289132ceabcaSGiuseppe CAVALLARO /**
2892732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
289332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
289432ceabcaSGiuseppe CAVALLARO  * Description:
289532ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; in case of failure it
289632ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
289732ceabcaSGiuseppe CAVALLARO  */
2898bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2899bfab27a1SGiuseppe CAVALLARO {
29007f9b8fe5SJakub Kicinski 	u8 addr[ETH_ALEN];
29017f9b8fe5SJakub Kicinski 
2902bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
29037f9b8fe5SJakub Kicinski 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
29047f9b8fe5SJakub Kicinski 		if (is_valid_ether_addr(addr))
29057f9b8fe5SJakub Kicinski 			eth_hw_addr_set(priv->dev, addr);
29067f9b8fe5SJakub Kicinski 		else
2907f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2908af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2909bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2910bfab27a1SGiuseppe CAVALLARO 	}
2911c88460b7SHans de Goede }
2912bfab27a1SGiuseppe CAVALLARO 
291332ceabcaSGiuseppe CAVALLARO /**
2914732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
291532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
291632ceabcaSGiuseppe CAVALLARO  * Description:
291732ceabcaSGiuseppe CAVALLARO  * It initializes the DMA by invoking the specific MAC/GMAC callback.
291832ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
291932ceabcaSGiuseppe CAVALLARO  * in case these are not passed, a default is kept for the MAC or GMAC.
292032ceabcaSGiuseppe CAVALLARO  */
29210f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
29220f1f88a8SGiuseppe CAVALLARO {
292347f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
292447f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
292524aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
292654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2927ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
292847f2a9ceSJoao Pinto 	u32 chan = 0;
2929c24602efSGiuseppe CAVALLARO 	int atds = 0;
2930495db273SGiuseppe Cavallaro 	int ret = 0;
29310f1f88a8SGiuseppe CAVALLARO 
2932a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2933a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
293489ab75bfSNiklas Cassel 		return -EINVAL;
29350f1f88a8SGiuseppe CAVALLARO 	}
29360f1f88a8SGiuseppe CAVALLARO 
2937c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2938c24602efSGiuseppe CAVALLARO 		atds = 1;
2939c24602efSGiuseppe CAVALLARO 
2940a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2941495db273SGiuseppe Cavallaro 	if (ret) {
2942495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2943495db273SGiuseppe Cavallaro 		return ret;
2944495db273SGiuseppe Cavallaro 	}
2945495db273SGiuseppe Cavallaro 
29467d9e6c5aSJose Abreu 	/* DMA Configuration */
29477d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
29487d9e6c5aSJose Abreu 
29497d9e6c5aSJose Abreu 	if (priv->plat->axi)
29507d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
29517d9e6c5aSJose Abreu 
2952af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2953087a7b94SVincent Whitchurch 	for (chan = 0; chan < dma_csr_ch; chan++) {
2954af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2955087a7b94SVincent Whitchurch 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2956087a7b94SVincent Whitchurch 	}
2957af8f3fb7SWeifeng Voon 
295847f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
295947f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
29608531c808SChristian Marangi 		rx_q = &priv->dma_conf.rx_queue[chan];
296154139cf3SJoao Pinto 
296224aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
296324aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
296447f2a9ceSJoao Pinto 
296554139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2966bba2556eSOng Boon Leong 				     (rx_q->buf_alloc_num *
2967aa042f60SSong, Yoong Siang 				      sizeof(struct dma_desc));
2968a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2969a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
297047f2a9ceSJoao Pinto 	}
297147f2a9ceSJoao Pinto 
297247f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
297347f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
29748531c808SChristian Marangi 		tx_q = &priv->dma_conf.tx_queue[chan];
2975ce736788SJoao Pinto 
297624aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
297724aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2978f748be53SAlexandre TORGUE 
29790431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2980a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2981a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
298247f2a9ceSJoao Pinto 	}
298324aaed0cSJose Abreu 
2984495db273SGiuseppe Cavallaro 	return ret;
29850f1f88a8SGiuseppe CAVALLARO }
29860f1f88a8SGiuseppe CAVALLARO 
29878fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
29888fce3331SJose Abreu {
29898531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2990fa60b816SVincent Whitchurch 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
2991fa60b816SVincent Whitchurch 
2992fa60b816SVincent Whitchurch 	if (!tx_coal_timer)
2993fa60b816SVincent Whitchurch 		return;
29948fce3331SJose Abreu 
2995db2f2842SOng Boon Leong 	hrtimer_start(&tx_q->txtimer,
2996fa60b816SVincent Whitchurch 		      STMMAC_COAL_TIMER(tx_coal_timer),
2997d5a05e69SVincent Whitchurch 		      HRTIMER_MODE_REL);
29988fce3331SJose Abreu }
29998fce3331SJose Abreu 
3000bfab27a1SGiuseppe CAVALLARO /**
3001732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
3002d0ea5cbdSJesse Brandeburg  * @t: hrtimer pointer
30039125cdd1SGiuseppe CAVALLARO  * Description:
30049125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
30059125cdd1SGiuseppe CAVALLARO  */
3006d5a05e69SVincent Whitchurch static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
30079125cdd1SGiuseppe CAVALLARO {
3008d5a05e69SVincent Whitchurch 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
30098fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
30108fce3331SJose Abreu 	struct stmmac_channel *ch;
3011132c32eeSOng Boon Leong 	struct napi_struct *napi;
30129125cdd1SGiuseppe CAVALLARO 
30138fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
3014132c32eeSOng Boon Leong 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
30158fce3331SJose Abreu 
3016132c32eeSOng Boon Leong 	if (likely(napi_schedule_prep(napi))) {
3017021bd5e3SJose Abreu 		unsigned long flags;
3018021bd5e3SJose Abreu 
3019021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
3020021bd5e3SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3021021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
3022132c32eeSOng Boon Leong 		__napi_schedule(napi);
3023021bd5e3SJose Abreu 	}
3024d5a05e69SVincent Whitchurch 
3025d5a05e69SVincent Whitchurch 	return HRTIMER_NORESTART;
30269125cdd1SGiuseppe CAVALLARO }
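
/* Editor's note: a condensed, self-contained rendering of the mitigation
 * pattern in stmmac_tx_timer() above: claim the NAPI schedule slot first,
 * mask the per-channel DMA interrupt, then schedule the poll. All
 * example_* types and helpers are stubs standing in for the kernel
 * primitives (napi_schedule_prep(), stmmac_disable_dma_irq(), ...).
 */
#include <stdbool.h>

struct example_chan {
	bool irq_enabled;	/* stands in for the per-channel DMA irq mask */
	bool napi_scheduled;	/* stands in for the NAPI state bits */
};

/* stand-in for napi_schedule_prep(): claim the schedule slot exactly once */
static bool example_napi_schedule_prep(struct example_chan *ch)
{
	if (ch->napi_scheduled)
		return false;
	ch->napi_scheduled = true;
	return true;
}

static void example_timer_fired(struct example_chan *ch)
{
	if (example_napi_schedule_prep(ch)) {
		/* mask the channel interrupt before polling so the
		 * hardware cannot re-raise it while the poll runs; the
		 * real handler does this under ch->lock with irqs saved
		 */
		ch->irq_enabled = false;
		/* __napi_schedule() would raise the softirq here; the
		 * poll routine re-enables the interrupt when done
		 */
	}
}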
30279125cdd1SGiuseppe CAVALLARO 
30289125cdd1SGiuseppe CAVALLARO /**
3029d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
303032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
30319125cdd1SGiuseppe CAVALLARO  * Description:
3032d429b66eSJose Abreu  * This initializes the coalesce parameters: i.e. the timer rate,
30339125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
30349125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
30359125cdd1SGiuseppe CAVALLARO  */
3036d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
30379125cdd1SGiuseppe CAVALLARO {
30388fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3039db2f2842SOng Boon Leong 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
30408fce3331SJose Abreu 	u32 chan;
30418fce3331SJose Abreu 
30428fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
30438531c808SChristian Marangi 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
30448fce3331SJose Abreu 
3045db2f2842SOng Boon Leong 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3046db2f2842SOng Boon Leong 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3047db2f2842SOng Boon Leong 
3048d5a05e69SVincent Whitchurch 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3049d5a05e69SVincent Whitchurch 		tx_q->txtimer.function = stmmac_tx_timer;
30508fce3331SJose Abreu 	}
3051db2f2842SOng Boon Leong 
3052db2f2842SOng Boon Leong 	for (chan = 0; chan < rx_channel_count; chan++)
3053db2f2842SOng Boon Leong 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
30549125cdd1SGiuseppe CAVALLARO }
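
/* Editor's note: a standalone sketch of the per-channel coalesce layout
 * initialized above: a frame-count threshold for setting the interrupt-
 * on-completion bit, plus a backstop timer that fires if the threshold
 * is not reached. The numeric defaults are illustrative placeholders,
 * not the driver's STMMAC_* constants.
 */
#include <stdint.h>

#define EXAMPLE_MAX_CH 8

struct example_coalesce {
	uint32_t tx_frames[EXAMPLE_MAX_CH];	/* IC-bit threshold per TX chan */
	uint32_t tx_timer_us[EXAMPLE_MAX_CH];	/* backstop timer per TX chan */
	uint32_t rx_frames[EXAMPLE_MAX_CH];	/* frame threshold per RX chan */
};

static void example_init_coalesce(struct example_coalesce *c,
				  unsigned int tx_cnt, unsigned int rx_cnt)
{
	for (unsigned int ch = 0; ch < tx_cnt; ch++) {
		c->tx_frames[ch] = 25;		/* raise a TX-done IRQ every N frames... */
		c->tx_timer_us[ch] = 1000;	/* ...or after this timeout elapses */
	}
	for (unsigned int ch = 0; ch < rx_cnt; ch++)
		c->rx_frames[ch] = 25;
}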
30559125cdd1SGiuseppe CAVALLARO 
30564854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
30574854ab99SJoao Pinto {
30584854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
30594854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
30604854ab99SJoao Pinto 	u32 chan;
30614854ab99SJoao Pinto 
30624854ab99SJoao Pinto 	/* set TX ring length */
30634854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
3064a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
30658531c808SChristian Marangi 				       (priv->dma_conf.dma_tx_size - 1), chan);
30664854ab99SJoao Pinto 
30674854ab99SJoao Pinto 	/* set RX ring length */
30684854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
3069a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
30708531c808SChristian Marangi 				       (priv->dma_conf.dma_rx_size - 1), chan);
30714854ab99SJoao Pinto }
30724854ab99SJoao Pinto 
30739125cdd1SGiuseppe CAVALLARO /**
30746a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
30756a3a7193SJoao Pinto  *  @priv: driver private structure
30766a3a7193SJoao Pinto  *  Description: It is used for setting the TX queues' weights
30776a3a7193SJoao Pinto  */
30786a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
30796a3a7193SJoao Pinto {
30806a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
30816a3a7193SJoao Pinto 	u32 weight;
30826a3a7193SJoao Pinto 	u32 queue;
30836a3a7193SJoao Pinto 
30846a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
30856a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
3086c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
30876a3a7193SJoao Pinto 	}
30886a3a7193SJoao Pinto }
30896a3a7193SJoao Pinto 
30906a3a7193SJoao Pinto /**
309119d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
309219d91873SJoao Pinto  *  @priv: driver private structure
309319d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
309419d91873SJoao Pinto  */
309519d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
309619d91873SJoao Pinto {
309719d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
309819d91873SJoao Pinto 	u32 mode_to_use;
309919d91873SJoao Pinto 	u32 queue;
310019d91873SJoao Pinto 
310144781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
310244781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
310319d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
310419d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
310519d91873SJoao Pinto 			continue;
310619d91873SJoao Pinto 
3107c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
310819d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
310919d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
311019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
311119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
311219d91873SJoao Pinto 				queue);
311319d91873SJoao Pinto 	}
311419d91873SJoao Pinto }
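
/* Editor's note: the CBS values programmed above come straight from
 * platform data. For orientation, this worked example shows how such
 * values relate in 802.1Qav terms: idleSlope is the bandwidth reserved
 * for the queue, and sendSlope = idleSlope - portTransmitRate (hence
 * negative). This is the standard's arithmetic, not the driver's
 * register encoding.
 */
#include <stdint.h>

struct example_cbs {
	int64_t idle_slope_bps;	/* bandwidth reserved for the queue */
	int64_t send_slope_bps;	/* idleSlope - portTransmitRate (negative) */
};

static struct example_cbs example_cbs_for(int64_t port_rate_bps,
					  unsigned int reserved_pct)
{
	struct example_cbs cbs;

	cbs.idle_slope_bps = port_rate_bps * reserved_pct / 100;
	cbs.send_slope_bps = cbs.idle_slope_bps - port_rate_bps;
	return cbs;
}

/* e.g. example_cbs_for(1000000000LL, 25) on a 1G port reserves 250 Mb/s:
 * idleSlope = 250 Mb/s, sendSlope = -750 Mb/s
 */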
311519d91873SJoao Pinto 
311619d91873SJoao Pinto /**
3117d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX DMA channel
3118d43042f4SJoao Pinto  *  @priv: driver private structure
3119d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX DMA channels
3120d43042f4SJoao Pinto  */
3121d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3122d43042f4SJoao Pinto {
3123d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3124d43042f4SJoao Pinto 	u32 queue;
3125d43042f4SJoao Pinto 	u32 chan;
3126d43042f4SJoao Pinto 
3127d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3128d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
3129c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3130d43042f4SJoao Pinto 	}
3131d43042f4SJoao Pinto }
3132d43042f4SJoao Pinto 
3133d43042f4SJoao Pinto /**
3134a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3135a8f5102aSJoao Pinto  *  @priv: driver private structure
3136a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
3137a8f5102aSJoao Pinto  */
3138a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3139a8f5102aSJoao Pinto {
3140a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3141a8f5102aSJoao Pinto 	u32 queue;
3142a8f5102aSJoao Pinto 	u32 prio;
3143a8f5102aSJoao Pinto 
3144a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3145a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3146a8f5102aSJoao Pinto 			continue;
3147a8f5102aSJoao Pinto 
3148a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
3149c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3150a8f5102aSJoao Pinto 	}
3151a8f5102aSJoao Pinto }
3152a8f5102aSJoao Pinto 
3153a8f5102aSJoao Pinto /**
3154a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3155a8f5102aSJoao Pinto  *  @priv: driver private structure
3156a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
3157a8f5102aSJoao Pinto  */
3158a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3159a8f5102aSJoao Pinto {
3160a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3161a8f5102aSJoao Pinto 	u32 queue;
3162a8f5102aSJoao Pinto 	u32 prio;
3163a8f5102aSJoao Pinto 
3164a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
3165a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3166a8f5102aSJoao Pinto 			continue;
3167a8f5102aSJoao Pinto 
3168a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
3169c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3170a8f5102aSJoao Pinto 	}
3171a8f5102aSJoao Pinto }
3172a8f5102aSJoao Pinto 
3173a8f5102aSJoao Pinto /**
3174abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3175abe80fdcSJoao Pinto  *  @priv: driver private structure
3176abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
3177abe80fdcSJoao Pinto  */
3178abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3179abe80fdcSJoao Pinto {
3180abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3181abe80fdcSJoao Pinto 	u32 queue;
3182abe80fdcSJoao Pinto 	u8 packet;
3183abe80fdcSJoao Pinto 
3184abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
3185abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
3186abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3187abe80fdcSJoao Pinto 			continue;
3188abe80fdcSJoao Pinto 
3189abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3190c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3191abe80fdcSJoao Pinto 	}
3192abe80fdcSJoao Pinto }
3193abe80fdcSJoao Pinto 
319476067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
319576067459SJose Abreu {
319676067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
319776067459SJose Abreu 		priv->rss.enable = false;
319876067459SJose Abreu 		return;
319976067459SJose Abreu 	}
320076067459SJose Abreu 
320176067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
320276067459SJose Abreu 		priv->rss.enable = true;
320376067459SJose Abreu 	else
320476067459SJose Abreu 		priv->rss.enable = false;
320576067459SJose Abreu 
320676067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
320776067459SJose Abreu 			     priv->plat->rx_queues_to_use);
320876067459SJose Abreu }
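
/* Editor's note: the RSS decision above reduces to three conditions; a
 * compact, illustrative restatement (the parameters mirror
 * dma_cap.rssen, plat->rss_en and NETIF_F_RXHASH respectively):
 */
#include <stdbool.h>

static bool example_rss_should_enable(bool hw_has_rss, bool plat_rss_en,
				      bool rxhash_feature_on)
{
	/* hardware support, platform opt-in and the netdev feature flag
	 * must all be present; otherwise RSS stays disabled
	 */
	return hw_has_rss && plat_rss_en && rxhash_feature_on;
}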
320976067459SJose Abreu 
3210abe80fdcSJoao Pinto /**
3211d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
3212d0a9c9f9SJoao Pinto  *  @priv: driver private structure
3213d0a9c9f9SJoao Pinto  *  Description: It is used for configuring the MTL
3214d0a9c9f9SJoao Pinto  */
3215d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3216d0a9c9f9SJoao Pinto {
3217d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3218d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3219d0a9c9f9SJoao Pinto 
3220c10d4c82SJose Abreu 	if (tx_queues_count > 1)
32216a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
32226a3a7193SJoao Pinto 
3223d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
3224c10d4c82SJose Abreu 	if (rx_queues_count > 1)
3225c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3226d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
3227d0a9c9f9SJoao Pinto 
3228d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
3229c10d4c82SJose Abreu 	if (tx_queues_count > 1)
3230c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3231d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
3232d0a9c9f9SJoao Pinto 
323319d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
3234c10d4c82SJose Abreu 	if (tx_queues_count > 1)
323519d91873SJoao Pinto 		stmmac_configure_cbs(priv);
323619d91873SJoao Pinto 
3237d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
3238d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
3239d43042f4SJoao Pinto 
3240d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
3241d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
32426deee222SJoao Pinto 
3243a8f5102aSJoao Pinto 	/* Set RX priorities */
3244c10d4c82SJose Abreu 	if (rx_queues_count > 1)
3245a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
3246a8f5102aSJoao Pinto 
3247a8f5102aSJoao Pinto 	/* Set TX priorities */
3248c10d4c82SJose Abreu 	if (tx_queues_count > 1)
3249a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
3250abe80fdcSJoao Pinto 
3251abe80fdcSJoao Pinto 	/* Set RX routing */
3252c10d4c82SJose Abreu 	if (rx_queues_count > 1)
3253abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
325476067459SJose Abreu 
325576067459SJose Abreu 	/* Receive Side Scaling */
325676067459SJose Abreu 	if (rx_queues_count > 1)
325776067459SJose Abreu 		stmmac_mac_config_rss(priv);
3258d0a9c9f9SJoao Pinto }
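
/* Editor's note: most branches in stmmac_mtl_configuration() above only
 * run with more than one queue. A hypothetical platform layout that
 * would exercise the weight and CBS paths; the field names loosely
 * mirror the tx_queues_cfg entries, while the enum and every number are
 * invented purely for illustration, not the driver's MTL_QUEUE_*
 * encoding.
 */
enum example_txq_mode { EXAMPLE_QUEUE_DCB, EXAMPLE_QUEUE_AVB };

struct example_txq_cfg {
	unsigned int weight;
	enum example_txq_mode mode_to_use;
	int send_slope, idle_slope, high_credit, low_credit;
};

static const struct example_txq_cfg example_tx_cfg[2] = {
	/* queue 0 carries legacy traffic; stmmac_configure_cbs() skips it */
	{ .weight = 10, .mode_to_use = EXAMPLE_QUEUE_DCB },
	/* an AVB class queue with CBS credits (illustrative values) */
	{ .weight = 11, .mode_to_use = EXAMPLE_QUEUE_AVB,
	  .send_slope = -750, .idle_slope = 250,
	  .high_credit = 500, .low_credit = -500 },
};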
3259d0a9c9f9SJoao Pinto 
32608bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
32618bf993a5SJose Abreu {
3262c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
32638bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
32645ac712dcSWong Vee Khee 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
32655ac712dcSWong Vee Khee 					  priv->plat->safety_feat_cfg);
32668bf993a5SJose Abreu 	} else {
32678bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
32688bf993a5SJose Abreu 	}
32698bf993a5SJose Abreu }
32708bf993a5SJose Abreu 
32715a558611SOng Boon Leong static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
32725a558611SOng Boon Leong {
32735a558611SOng Boon Leong 	char *name;
32745a558611SOng Boon Leong 
32755a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3276db7c691dSMohammad Athari Bin Ismail 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
32775a558611SOng Boon Leong 
32785a558611SOng Boon Leong 	name = priv->wq_name;
32795a558611SOng Boon Leong 	sprintf(name, "%s-fpe", priv->dev->name);
32805a558611SOng Boon Leong 
32815a558611SOng Boon Leong 	priv->fpe_wq = create_singlethread_workqueue(name);
32825a558611SOng Boon Leong 	if (!priv->fpe_wq) {
32835a558611SOng Boon Leong 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
32845a558611SOng Boon Leong 
32855a558611SOng Boon Leong 		return -ENOMEM;
32865a558611SOng Boon Leong 	}
32875a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue start\n");
32885a558611SOng Boon Leong 
32895a558611SOng Boon Leong 	return 0;
32905a558611SOng Boon Leong }
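
/* Editor's note: a small defensive variant of the name formatting above.
 * snprintf() bounds the write to the destination, which is the safer
 * idiom whenever buffer and name sizing are not as tightly coupled as
 * here (wq_name is presumably sized for the IFNAMSIZ-bounded interface
 * name plus the "-fpe" suffix, so the sprintf() above stays in bounds).
 */
#include <stdio.h>

static void example_fpe_wq_name(char *dst, size_t dst_len,
				const char *netdev_name)
{
	/* truncates rather than overflows if dst is too small */
	snprintf(dst, dst_len, "%s-fpe", netdev_name);
}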
32915a558611SOng Boon Leong 
3292d0a9c9f9SJoao Pinto /**
3293732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
3294523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
32950735e639SMohammad Athari Bin Ismail  *  @ptp_register: register PTP if set
3296523f11b5SSrinivas Kandagatla  *  Description:
3297732fdf0eSGiuseppe CAVALLARO  *  this is the main function to set up the HW in a usable state: the
3298732fdf0eSGiuseppe CAVALLARO  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3299732fdf0eSGiuseppe CAVALLARO  *  checksum features, timers) and the DMA is ready to start receiving
3300732fdf0eSGiuseppe CAVALLARO  *  and transmitting.
3301523f11b5SSrinivas Kandagatla  *  Return value:
3302523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3303523f11b5SSrinivas Kandagatla  *  file on failure.
3304523f11b5SSrinivas Kandagatla  */
33050735e639SMohammad Athari Bin Ismail static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3306523f11b5SSrinivas Kandagatla {
3307523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
33083c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3309146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3310d08d32d1SOng Boon Leong 	bool sph_en;
3311146617b8SJoao Pinto 	u32 chan;
3312523f11b5SSrinivas Kandagatla 	int ret;
3313523f11b5SSrinivas Kandagatla 
3314523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
3315523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
3316523f11b5SSrinivas Kandagatla 	if (ret < 0) {
331738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
331838ddc59dSLABBE Corentin 			   __func__);
3319523f11b5SSrinivas Kandagatla 		return ret;
3320523f11b5SSrinivas Kandagatla 	}
3321523f11b5SSrinivas Kandagatla 
3322523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
3323c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3324523f11b5SSrinivas Kandagatla 
332502e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
332602e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
332702e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
332802e57b9dSGiuseppe CAVALLARO 
332902e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
333002e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
333102e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
333202e57b9dSGiuseppe CAVALLARO 		} else {
333302e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
333402e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
333502e57b9dSGiuseppe CAVALLARO 		}
333602e57b9dSGiuseppe CAVALLARO 	}
333702e57b9dSGiuseppe CAVALLARO 
3338523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
3339c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
3340523f11b5SSrinivas Kandagatla 
3341d0a9c9f9SJoao Pinto 	/* Initialize MTL*/
3342d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
33439eb12474Sjpinto 
33448bf993a5SJose Abreu 	/* Initialize Safety Features */
33458bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
33468bf993a5SJose Abreu 
3347c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
3348978aded4SGiuseppe CAVALLARO 	if (!ret) {
334938ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3350978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3351d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3352978aded4SGiuseppe CAVALLARO 	}
3353978aded4SGiuseppe CAVALLARO 
3354523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
3355c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
3356523f11b5SSrinivas Kandagatla 
3357b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
3358b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
3359b4f0a661SJoao Pinto 
3360523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
3361523f11b5SSrinivas Kandagatla 
3362f4c7d894SBiao Huang 	if (ptp_register) {
3363f4c7d894SBiao Huang 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3364f4c7d894SBiao Huang 		if (ret < 0)
3365f4c7d894SBiao Huang 			netdev_warn(priv->dev,
3366f4c7d894SBiao Huang 				    "failed to enable PTP reference clock: %pe\n",
3367f4c7d894SBiao Huang 				    ERR_PTR(ret));
3368f4c7d894SBiao Huang 	}
3369f4c7d894SBiao Huang 
3370523f11b5SSrinivas Kandagatla 	ret = stmmac_init_ptp(priv);
3371722eef28SHeiner Kallweit 	if (ret == -EOPNOTSUPP)
33721a212771SHeiner Kallweit 		netdev_info(priv->dev, "PTP not supported by HW\n");
3373722eef28SHeiner Kallweit 	else if (ret)
3374722eef28SHeiner Kallweit 		netdev_warn(priv->dev, "PTP init failed\n");
33750735e639SMohammad Athari Bin Ismail 	else if (ptp_register)
33760735e639SMohammad Athari Bin Ismail 		stmmac_ptp_register(priv);
3377523f11b5SSrinivas Kandagatla 
3378388e201dSVineetha G. Jaya Kumaran 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3379388e201dSVineetha G. Jaya Kumaran 
3380388e201dSVineetha G. Jaya Kumaran 	/* Convert the timer from msec to usec */
3381388e201dSVineetha G. Jaya Kumaran 	if (!priv->tx_lpi_timer)
3382388e201dSVineetha G. Jaya Kumaran 		priv->tx_lpi_timer = eee_timer * 1000;
3383523f11b5SSrinivas Kandagatla 
3384a4e887faSJose Abreu 	if (priv->use_riwt) {
3385db2f2842SOng Boon Leong 		u32 queue;
33864e4337ccSJose Abreu 
3387db2f2842SOng Boon Leong 		for (queue = 0; queue < rx_cnt; queue++) {
3388db2f2842SOng Boon Leong 			if (!priv->rx_riwt[queue])
3389db2f2842SOng Boon Leong 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3390db2f2842SOng Boon Leong 
3391db2f2842SOng Boon Leong 			stmmac_rx_watchdog(priv, priv->ioaddr,
3392db2f2842SOng Boon Leong 					   priv->rx_riwt[queue], queue);
3393db2f2842SOng Boon Leong 		}
3394523f11b5SSrinivas Kandagatla 	}
3395523f11b5SSrinivas Kandagatla 
3396c10d4c82SJose Abreu 	if (priv->hw->pcs)
3397c9ad4c10SBen Dooks (Codethink) 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3398523f11b5SSrinivas Kandagatla 
33994854ab99SJoao Pinto 	/* set TX and RX rings length */
34004854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
34014854ab99SJoao Pinto 
3402f748be53SAlexandre TORGUE 	/* Enable TSO */
3403146617b8SJoao Pinto 	if (priv->tso) {
34045e6038b8SOng Boon Leong 		for (chan = 0; chan < tx_cnt; chan++) {
34058531c808SChristian Marangi 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
34065e6038b8SOng Boon Leong 
34075e6038b8SOng Boon Leong 			/* TSO and TBS cannot co-exist */
34085e6038b8SOng Boon Leong 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
34095e6038b8SOng Boon Leong 				continue;
34105e6038b8SOng Boon Leong 
3411a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3412146617b8SJoao Pinto 		}
34135e6038b8SOng Boon Leong 	}
3414f748be53SAlexandre TORGUE 
341567afd6d1SJose Abreu 	/* Enable Split Header */
3416d08d32d1SOng Boon Leong 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
341767afd6d1SJose Abreu 	for (chan = 0; chan < rx_cnt; chan++)
3418d08d32d1SOng Boon Leong 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3419d08d32d1SOng Boon Leong 
342130d93227SJose Abreu 	/* VLAN Tag Insertion */
342230d93227SJose Abreu 	if (priv->dma_cap.vlins)
342330d93227SJose Abreu 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
342430d93227SJose Abreu 
3425579a25a8SJose Abreu 	/* TBS */
3426579a25a8SJose Abreu 	for (chan = 0; chan < tx_cnt; chan++) {
34278531c808SChristian Marangi 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3428579a25a8SJose Abreu 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3429579a25a8SJose Abreu 
3430579a25a8SJose Abreu 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3431579a25a8SJose Abreu 	}
3432579a25a8SJose Abreu 
3433686cff3dSAashish Verma 	/* Configure real RX and TX queues */
3434686cff3dSAashish Verma 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3435686cff3dSAashish Verma 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3436686cff3dSAashish Verma 
34377d9e6c5aSJose Abreu 	/* Start the ball rolling... */
34387d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
34397d9e6c5aSJose Abreu 
34405a558611SOng Boon Leong 	if (priv->dma_cap.fpesel) {
34415a558611SOng Boon Leong 		stmmac_fpe_start_wq(priv);
34425a558611SOng Boon Leong 
34435a558611SOng Boon Leong 		if (priv->plat->fpe_cfg->enable)
34445a558611SOng Boon Leong 			stmmac_fpe_handshake(priv, true);
34455a558611SOng Boon Leong 	}
34465a558611SOng Boon Leong 
3447523f11b5SSrinivas Kandagatla 	return 0;
3448523f11b5SSrinivas Kandagatla }
3449523f11b5SSrinivas Kandagatla 
3450c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
3451c66f6c37SThierry Reding {
3452c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
3453c66f6c37SThierry Reding 
3454c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3455c66f6c37SThierry Reding }
3456c66f6c37SThierry Reding 
34578532f613SOng Boon Leong static void stmmac_free_irq(struct net_device *dev,
34588532f613SOng Boon Leong 			    enum request_irq_err irq_err, int irq_idx)
34598532f613SOng Boon Leong {
34608532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
34618532f613SOng Boon Leong 	int j;
34628532f613SOng Boon Leong 
34638532f613SOng Boon Leong 	switch (irq_err) {
34648532f613SOng Boon Leong 	case REQ_IRQ_ERR_ALL:
34658532f613SOng Boon Leong 		irq_idx = priv->plat->tx_queues_to_use;
34668532f613SOng Boon Leong 		fallthrough;
34678532f613SOng Boon Leong 	case REQ_IRQ_ERR_TX:
34688532f613SOng Boon Leong 		for (j = irq_idx - 1; j >= 0; j--) {
34698deec94cSOng Boon Leong 			if (priv->tx_irq[j] > 0) {
34708deec94cSOng Boon Leong 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
34718531c808SChristian Marangi 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
34728532f613SOng Boon Leong 			}
34738deec94cSOng Boon Leong 		}
34748532f613SOng Boon Leong 		irq_idx = priv->plat->rx_queues_to_use;
34758532f613SOng Boon Leong 		fallthrough;
34768532f613SOng Boon Leong 	case REQ_IRQ_ERR_RX:
34778532f613SOng Boon Leong 		for (j = irq_idx - 1; j >= 0; j--) {
34788deec94cSOng Boon Leong 			if (priv->rx_irq[j] > 0) {
34798deec94cSOng Boon Leong 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
34808531c808SChristian Marangi 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
34818532f613SOng Boon Leong 			}
34828deec94cSOng Boon Leong 		}
34838532f613SOng Boon Leong 
34848532f613SOng Boon Leong 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
34858532f613SOng Boon Leong 			free_irq(priv->sfty_ue_irq, dev);
34868532f613SOng Boon Leong 		fallthrough;
34878532f613SOng Boon Leong 	case REQ_IRQ_ERR_SFTY_UE:
34888532f613SOng Boon Leong 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
34898532f613SOng Boon Leong 			free_irq(priv->sfty_ce_irq, dev);
34908532f613SOng Boon Leong 		fallthrough;
34918532f613SOng Boon Leong 	case REQ_IRQ_ERR_SFTY_CE:
34928532f613SOng Boon Leong 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
34938532f613SOng Boon Leong 			free_irq(priv->lpi_irq, dev);
34948532f613SOng Boon Leong 		fallthrough;
34958532f613SOng Boon Leong 	case REQ_IRQ_ERR_LPI:
34968532f613SOng Boon Leong 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
34978532f613SOng Boon Leong 			free_irq(priv->wol_irq, dev);
34988532f613SOng Boon Leong 		fallthrough;
34998532f613SOng Boon Leong 	case REQ_IRQ_ERR_WOL:
35008532f613SOng Boon Leong 		free_irq(dev->irq, dev);
35018532f613SOng Boon Leong 		fallthrough;
35028532f613SOng Boon Leong 	case REQ_IRQ_ERR_MAC:
35038532f613SOng Boon Leong 	case REQ_IRQ_ERR_NO:
35048532f613SOng Boon Leong 		/* If the MAC IRQ request failed, there are no more IRQs to free */
35058532f613SOng Boon Leong 		break;
35068532f613SOng Boon Leong 	}
35078532f613SOng Boon Leong }
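
/* Editor's note: stmmac_free_irq() above leans on switch fallthrough so
 * that entering at the first failed stage releases everything acquired
 * before it, in reverse order. A self-contained miniature of the same
 * idiom with three stubbed resources:
 */
#include <stdio.h>

enum example_err { EXAMPLE_ERR_ALL, EXAMPLE_ERR_C, EXAMPLE_ERR_B, EXAMPLE_ERR_A };

static void example_unwind(enum example_err failed)
{
	switch (failed) {
	case EXAMPLE_ERR_ALL:	/* full teardown: everything was acquired */
	case EXAMPLE_ERR_C:	/* stage C failed: B and A still held */
		puts("free B");
		/* fall through */
	case EXAMPLE_ERR_B:	/* stage B failed: only A still held */
		puts("free A");
		/* fall through */
	case EXAMPLE_ERR_A:	/* the first stage failed: nothing to free */
		break;
	}
}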
35088532f613SOng Boon Leong 
35098532f613SOng Boon Leong static int stmmac_request_irq_multi_msi(struct net_device *dev)
35108532f613SOng Boon Leong {
35118532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
35123e6dc7b6SWong Vee Khee 	enum request_irq_err irq_err;
35138deec94cSOng Boon Leong 	cpumask_t cpu_mask;
35148532f613SOng Boon Leong 	int irq_idx = 0;
35158532f613SOng Boon Leong 	char *int_name;
35168532f613SOng Boon Leong 	int ret;
35178532f613SOng Boon Leong 	int i;
35188532f613SOng Boon Leong 
35198532f613SOng Boon Leong 	/* For common interrupt */
35208532f613SOng Boon Leong 	int_name = priv->int_name_mac;
35218532f613SOng Boon Leong 	sprintf(int_name, "%s:%s", dev->name, "mac");
35228532f613SOng Boon Leong 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
35238532f613SOng Boon Leong 			  0, int_name, dev);
35248532f613SOng Boon Leong 	if (unlikely(ret < 0)) {
35258532f613SOng Boon Leong 		netdev_err(priv->dev,
35268532f613SOng Boon Leong 			   "%s: alloc mac MSI %d (error: %d)\n",
35278532f613SOng Boon Leong 			   __func__, dev->irq, ret);
35288532f613SOng Boon Leong 		irq_err = REQ_IRQ_ERR_MAC;
35298532f613SOng Boon Leong 		goto irq_error;
35308532f613SOng Boon Leong 	}
35318532f613SOng Boon Leong 
35328532f613SOng Boon Leong 	/* Request the Wake IRQ in case another line
35338532f613SOng Boon Leong 	 * is used for WoL
35348532f613SOng Boon Leong 	 */
3535fed034d2SQiang Ma 	priv->wol_irq_disabled = true;
35368532f613SOng Boon Leong 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
35378532f613SOng Boon Leong 		int_name = priv->int_name_wol;
35388532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "wol");
35398532f613SOng Boon Leong 		ret = request_irq(priv->wol_irq,
35408532f613SOng Boon Leong 				  stmmac_mac_interrupt,
35418532f613SOng Boon Leong 				  0, int_name, dev);
35428532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35438532f613SOng Boon Leong 			netdev_err(priv->dev,
35448532f613SOng Boon Leong 				   "%s: alloc wol MSI %d (error: %d)\n",
35458532f613SOng Boon Leong 				   __func__, priv->wol_irq, ret);
35468532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_WOL;
35478532f613SOng Boon Leong 			goto irq_error;
35488532f613SOng Boon Leong 		}
35498532f613SOng Boon Leong 	}
35508532f613SOng Boon Leong 
35518532f613SOng Boon Leong 	/* Request the LPI IRQ in case another line
35528532f613SOng Boon Leong 	 * is used for LPI
35538532f613SOng Boon Leong 	 */
35548532f613SOng Boon Leong 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
35558532f613SOng Boon Leong 		int_name = priv->int_name_lpi;
35568532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "lpi");
35578532f613SOng Boon Leong 		ret = request_irq(priv->lpi_irq,
35588532f613SOng Boon Leong 				  stmmac_mac_interrupt,
35598532f613SOng Boon Leong 				  0, int_name, dev);
35608532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35618532f613SOng Boon Leong 			netdev_err(priv->dev,
35628532f613SOng Boon Leong 				   "%s: alloc lpi MSI %d (error: %d)\n",
35638532f613SOng Boon Leong 				   __func__, priv->lpi_irq, ret);
35648532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_LPI;
35658532f613SOng Boon Leong 			goto irq_error;
35668532f613SOng Boon Leong 		}
35678532f613SOng Boon Leong 	}
35688532f613SOng Boon Leong 
35698532f613SOng Boon Leong 	/* Request the Safety Feature Correctable Error line in
35708532f613SOng Boon Leong 	 * case another line is used
35718532f613SOng Boon Leong 	 */
35728532f613SOng Boon Leong 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
35738532f613SOng Boon Leong 		int_name = priv->int_name_sfty_ce;
35748532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
35758532f613SOng Boon Leong 		ret = request_irq(priv->sfty_ce_irq,
35768532f613SOng Boon Leong 				  stmmac_safety_interrupt,
35778532f613SOng Boon Leong 				  0, int_name, dev);
35788532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35798532f613SOng Boon Leong 			netdev_err(priv->dev,
35808532f613SOng Boon Leong 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
35818532f613SOng Boon Leong 				   __func__, priv->sfty_ce_irq, ret);
35828532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_SFTY_CE;
35838532f613SOng Boon Leong 			goto irq_error;
35848532f613SOng Boon Leong 		}
35858532f613SOng Boon Leong 	}
35868532f613SOng Boon Leong 
35878532f613SOng Boon Leong 	/* Request the Safety Feature Uncorrectable Error line in
35888532f613SOng Boon Leong 	 * case another line is used
35898532f613SOng Boon Leong 	 */
35908532f613SOng Boon Leong 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
35918532f613SOng Boon Leong 		int_name = priv->int_name_sfty_ue;
35928532f613SOng Boon Leong 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
35938532f613SOng Boon Leong 		ret = request_irq(priv->sfty_ue_irq,
35948532f613SOng Boon Leong 				  stmmac_safety_interrupt,
35958532f613SOng Boon Leong 				  0, int_name, dev);
35968532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
35978532f613SOng Boon Leong 			netdev_err(priv->dev,
35988532f613SOng Boon Leong 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
35998532f613SOng Boon Leong 				   __func__, priv->sfty_ue_irq, ret);
36008532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_SFTY_UE;
36018532f613SOng Boon Leong 			goto irq_error;
36028532f613SOng Boon Leong 		}
36038532f613SOng Boon Leong 	}
36048532f613SOng Boon Leong 
36058532f613SOng Boon Leong 	/* Request Rx MSI irq */
36068532f613SOng Boon Leong 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3607d68c2e1dSArnd Bergmann 		if (i >= MTL_MAX_RX_QUEUES)
36083e0d5699SArnd Bergmann 			break;
36098532f613SOng Boon Leong 		if (priv->rx_irq[i] == 0)
36108532f613SOng Boon Leong 			continue;
36118532f613SOng Boon Leong 
36128532f613SOng Boon Leong 		int_name = priv->int_name_rx_irq[i];
36138532f613SOng Boon Leong 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
36148532f613SOng Boon Leong 		ret = request_irq(priv->rx_irq[i],
36158532f613SOng Boon Leong 				  stmmac_msi_intr_rx,
36168531c808SChristian Marangi 				  0, int_name, &priv->dma_conf.rx_queue[i]);
36178532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36188532f613SOng Boon Leong 			netdev_err(priv->dev,
36198532f613SOng Boon Leong 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
36208532f613SOng Boon Leong 				   __func__, i, priv->rx_irq[i], ret);
36218532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_RX;
36228532f613SOng Boon Leong 			irq_idx = i;
36238532f613SOng Boon Leong 			goto irq_error;
36248532f613SOng Boon Leong 		}
36258deec94cSOng Boon Leong 		cpumask_clear(&cpu_mask);
36268deec94cSOng Boon Leong 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
36278deec94cSOng Boon Leong 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
36288532f613SOng Boon Leong 	}
36298532f613SOng Boon Leong 
36308532f613SOng Boon Leong 	/* Request Tx MSI irq */
36318532f613SOng Boon Leong 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3632d68c2e1dSArnd Bergmann 		if (i >= MTL_MAX_TX_QUEUES)
36333e0d5699SArnd Bergmann 			break;
36348532f613SOng Boon Leong 		if (priv->tx_irq[i] == 0)
36358532f613SOng Boon Leong 			continue;
36368532f613SOng Boon Leong 
36378532f613SOng Boon Leong 		int_name = priv->int_name_tx_irq[i];
36388532f613SOng Boon Leong 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
36398532f613SOng Boon Leong 		ret = request_irq(priv->tx_irq[i],
36408532f613SOng Boon Leong 				  stmmac_msi_intr_tx,
36418531c808SChristian Marangi 				  0, int_name, &priv->dma_conf.tx_queue[i]);
36428532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36438532f613SOng Boon Leong 			netdev_err(priv->dev,
36448532f613SOng Boon Leong 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
36458532f613SOng Boon Leong 				   __func__, i, priv->tx_irq[i], ret);
36468532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_TX;
36478532f613SOng Boon Leong 			irq_idx = i;
36488532f613SOng Boon Leong 			goto irq_error;
36498532f613SOng Boon Leong 		}
36508deec94cSOng Boon Leong 		cpumask_clear(&cpu_mask);
36518deec94cSOng Boon Leong 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
36528deec94cSOng Boon Leong 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
36538532f613SOng Boon Leong 	}
36548532f613SOng Boon Leong 
36558532f613SOng Boon Leong 	return 0;
36568532f613SOng Boon Leong 
36578532f613SOng Boon Leong irq_error:
36588532f613SOng Boon Leong 	stmmac_free_irq(dev, irq_err, irq_idx);
36598532f613SOng Boon Leong 	return ret;
36608532f613SOng Boon Leong }
36618532f613SOng Boon Leong 
36628532f613SOng Boon Leong static int stmmac_request_irq_single(struct net_device *dev)
36638532f613SOng Boon Leong {
36648532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
36653e6dc7b6SWong Vee Khee 	enum request_irq_err irq_err;
36668532f613SOng Boon Leong 	int ret;
36678532f613SOng Boon Leong 
36688532f613SOng Boon Leong 	ret = request_irq(dev->irq, stmmac_interrupt,
36698532f613SOng Boon Leong 			  IRQF_SHARED, dev->name, dev);
36708532f613SOng Boon Leong 	if (unlikely(ret < 0)) {
36718532f613SOng Boon Leong 		netdev_err(priv->dev,
36728532f613SOng Boon Leong 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
36738532f613SOng Boon Leong 			   __func__, dev->irq, ret);
36748532f613SOng Boon Leong 		irq_err = REQ_IRQ_ERR_MAC;
36753e6dc7b6SWong Vee Khee 		goto irq_error;
36768532f613SOng Boon Leong 	}
36778532f613SOng Boon Leong 
36788532f613SOng Boon Leong 	/* Request the Wake IRQ in case another line
36798532f613SOng Boon Leong 	 * is used for WoL
36808532f613SOng Boon Leong 	 */
3681b0a3c915SNícolas F. R. A. Prado 	priv->wol_irq_disabled = true;
36828532f613SOng Boon Leong 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
36838532f613SOng Boon Leong 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
36848532f613SOng Boon Leong 				  IRQF_SHARED, dev->name, dev);
36858532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36868532f613SOng Boon Leong 			netdev_err(priv->dev,
36878532f613SOng Boon Leong 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
36888532f613SOng Boon Leong 				   __func__, priv->wol_irq, ret);
36898532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_WOL;
36903e6dc7b6SWong Vee Khee 			goto irq_error;
36918532f613SOng Boon Leong 		}
36928532f613SOng Boon Leong 	}
36938532f613SOng Boon Leong 
36948532f613SOng Boon Leong 	/* Request the IRQ lines */
36958532f613SOng Boon Leong 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
36968532f613SOng Boon Leong 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
36978532f613SOng Boon Leong 				  IRQF_SHARED, dev->name, dev);
36988532f613SOng Boon Leong 		if (unlikely(ret < 0)) {
36998532f613SOng Boon Leong 			netdev_err(priv->dev,
37008532f613SOng Boon Leong 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
37018532f613SOng Boon Leong 				   __func__, priv->lpi_irq, ret);
37028532f613SOng Boon Leong 			irq_err = REQ_IRQ_ERR_LPI;
37038532f613SOng Boon Leong 			goto irq_error;
37048532f613SOng Boon Leong 		}
37058532f613SOng Boon Leong 	}
37068532f613SOng Boon Leong 
37078532f613SOng Boon Leong 	return 0;
37088532f613SOng Boon Leong 
37098532f613SOng Boon Leong irq_error:
37108532f613SOng Boon Leong 	stmmac_free_irq(dev, irq_err, 0);
37118532f613SOng Boon Leong 	return ret;
37128532f613SOng Boon Leong }
37138532f613SOng Boon Leong 
37148532f613SOng Boon Leong static int stmmac_request_irq(struct net_device *dev)
37158532f613SOng Boon Leong {
37168532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
37178532f613SOng Boon Leong 	int ret;
37188532f613SOng Boon Leong 
37198532f613SOng Boon Leong 	/* Request the IRQ lines */
3720956c3f09SBartosz Golaszewski 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
37218532f613SOng Boon Leong 		ret = stmmac_request_irq_multi_msi(dev);
37228532f613SOng Boon Leong 	else
37238532f613SOng Boon Leong 		ret = stmmac_request_irq_single(dev);
37248532f613SOng Boon Leong 
37258532f613SOng Boon Leong 	return ret;
37268532f613SOng Boon Leong }
37278532f613SOng Boon Leong 
3728523f11b5SSrinivas Kandagatla /**
3729ba39b344SChristian Marangi  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3730ba39b344SChristian Marangi  *  @priv: driver private structure
3731ba39b344SChristian Marangi  *  @mtu: MTU to set up the DMA queues and buffers with
3732ba39b344SChristian Marangi  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3733ba39b344SChristian Marangi  *  Allocate the Tx/Rx DMA queues and initialize them.
3734ba39b344SChristian Marangi  *  Return value:
3735ba39b344SChristian Marangi  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3736ba39b344SChristian Marangi  */
3737ba39b344SChristian Marangi static struct stmmac_dma_conf *
3738ba39b344SChristian Marangi stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3739ba39b344SChristian Marangi {
3740ba39b344SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
3741ba39b344SChristian Marangi 	int chan, bfsize, ret;
3742ba39b344SChristian Marangi 
3743ba39b344SChristian Marangi 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3744ba39b344SChristian Marangi 	if (!dma_conf) {
3745ba39b344SChristian Marangi 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3746ba39b344SChristian Marangi 			   __func__);
3747ba39b344SChristian Marangi 		return ERR_PTR(-ENOMEM);
3748ba39b344SChristian Marangi 	}
3749ba39b344SChristian Marangi 
3750ba39b344SChristian Marangi 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3751ba39b344SChristian Marangi 	if (bfsize < 0)
3752ba39b344SChristian Marangi 		bfsize = 0;
3753ba39b344SChristian Marangi 
3754ba39b344SChristian Marangi 	if (bfsize < BUF_SIZE_16KiB)
3755ba39b344SChristian Marangi 		bfsize = stmmac_set_bfsize(mtu, 0);
3756ba39b344SChristian Marangi 
3757ba39b344SChristian Marangi 	dma_conf->dma_buf_sz = bfsize;
3758ba39b344SChristian Marangi 	/* Choose the tx/rx size from the one already defined in the
3759ba39b344SChristian Marangi 	 * priv struct (if defined).
3760ba39b344SChristian Marangi 	 */
3761ba39b344SChristian Marangi 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3762ba39b344SChristian Marangi 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3763ba39b344SChristian Marangi 
3764ba39b344SChristian Marangi 	if (!dma_conf->dma_tx_size)
3765ba39b344SChristian Marangi 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3766ba39b344SChristian Marangi 	if (!dma_conf->dma_rx_size)
3767ba39b344SChristian Marangi 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3768ba39b344SChristian Marangi 
3769ba39b344SChristian Marangi 	/* Earlier check for TBS */
3770ba39b344SChristian Marangi 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3771ba39b344SChristian Marangi 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3772ba39b344SChristian Marangi 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3773ba39b344SChristian Marangi 
3774ba39b344SChristian Marangi 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3775ba39b344SChristian Marangi 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3776ba39b344SChristian Marangi 	}
3777ba39b344SChristian Marangi 
3778ba39b344SChristian Marangi 	ret = alloc_dma_desc_resources(priv, dma_conf);
3779ba39b344SChristian Marangi 	if (ret < 0) {
3780ba39b344SChristian Marangi 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3781ba39b344SChristian Marangi 			   __func__);
3782ba39b344SChristian Marangi 		goto alloc_error;
3783ba39b344SChristian Marangi 	}
3784ba39b344SChristian Marangi 
3785ba39b344SChristian Marangi 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3786ba39b344SChristian Marangi 	if (ret < 0) {
3787ba39b344SChristian Marangi 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3788ba39b344SChristian Marangi 			   __func__);
3789ba39b344SChristian Marangi 		goto init_error;
3790ba39b344SChristian Marangi 	}
3791ba39b344SChristian Marangi 
3792ba39b344SChristian Marangi 	return dma_conf;
3793ba39b344SChristian Marangi 
3794ba39b344SChristian Marangi init_error:
3795ba39b344SChristian Marangi 	free_dma_desc_resources(priv, dma_conf);
3796ba39b344SChristian Marangi alloc_error:
3797ba39b344SChristian Marangi 	kfree(dma_conf);
3798ba39b344SChristian Marangi 	return ERR_PTR(ret);
3799ba39b344SChristian Marangi }
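
/* Editor's note: a sketch of the buffer sizing performed above through
 * stmmac_set_16kib_bfsize()/stmmac_set_bfsize(): round the RX buffer up
 * to the smallest supported bucket that can hold the MTU plus Ethernet
 * overhead. The thresholds here are illustrative, not the driver's
 * exact BUF_SIZE_* ladder.
 */
static unsigned int example_bfsize_for_mtu(unsigned int mtu)
{
	if (mtu >= 8192)
		return 16384;
	if (mtu > 4096)
		return 8192;
	if (mtu > 2048)
		return 4096;
	if (mtu > 1536)
		return 2048;
	return 1536;	/* default buffer for standard 1500-byte frames */
}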
3800ba39b344SChristian Marangi 
3801ba39b344SChristian Marangi /**
3802ba39b344SChristian Marangi  *  __stmmac_open - open entry point of the driver
38037ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
3804ba39b344SChristian Marangi  *  @dma_conf: structure holding the DMA configuration
38057ac6653aSJeff Kirsher  *  Description:
38067ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
38077ac6653aSJeff Kirsher  *  Return value:
38087ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
38097ac6653aSJeff Kirsher  *  file on failure.
38107ac6653aSJeff Kirsher  */
3811ba39b344SChristian Marangi static int __stmmac_open(struct net_device *dev,
3812ba39b344SChristian Marangi 			 struct stmmac_dma_conf *dma_conf)
38137ac6653aSJeff Kirsher {
38147ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
38159900074eSVladimir Oltean 	int mode = priv->plat->phy_interface;
38168fce3331SJose Abreu 	u32 chan;
38177ac6653aSJeff Kirsher 	int ret;
38187ac6653aSJeff Kirsher 
381985648865SMinghao Chi 	ret = pm_runtime_resume_and_get(priv->device);
382085648865SMinghao Chi 	if (ret < 0)
38215ec55823SJoakim Zhang 		return ret;
38225ec55823SJoakim Zhang 
3823a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3824f213bbe8SJose Abreu 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
38259900074eSVladimir Oltean 	    (!priv->hw->xpcs ||
38265d1f3fe7SMaxime Chevallier 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
38275d1f3fe7SMaxime Chevallier 	    !priv->hw->lynx_pcs) {
38287ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
3829e58bb43fSGiuseppe CAVALLARO 		if (ret) {
383038ddc59dSLABBE Corentin 			netdev_err(priv->dev,
383138ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
3832e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
38335ec55823SJoakim Zhang 			goto init_phy_error;
38347ac6653aSJeff Kirsher 		}
3835e58bb43fSGiuseppe CAVALLARO 	}
38367ac6653aSJeff Kirsher 
383722ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
383856329137SBartlomiej Zolnierkiewicz 
3839ba39b344SChristian Marangi 	buf_sz = dma_conf->dma_buf_sz;
38402524299bSEsben Haabendal 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
38412524299bSEsben Haabendal 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
38422524299bSEsben Haabendal 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3843ba39b344SChristian Marangi 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
38445bacd778SLABBE Corentin 
3845f9ec5723SChristian Marangi 	stmmac_reset_queues_param(priv);
3846f9ec5723SChristian Marangi 
3847efe92571SBartosz Golaszewski 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3848efe92571SBartosz Golaszewski 	    priv->plat->serdes_powerup) {
384949725ffcSJunxiao Chang 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
385049725ffcSJunxiao Chang 		if (ret < 0) {
385149725ffcSJunxiao Chang 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
385249725ffcSJunxiao Chang 				   __func__);
385349725ffcSJunxiao Chang 			goto init_error;
385449725ffcSJunxiao Chang 		}
385549725ffcSJunxiao Chang 	}
385649725ffcSJunxiao Chang 
3857fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
385856329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
385938ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3860c9324d18SGiuseppe CAVALLARO 		goto init_error;
38617ac6653aSJeff Kirsher 	}
38627ac6653aSJeff Kirsher 
3863d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
3864777da230SGiuseppe CAVALLARO 
386574371272SJose Abreu 	phylink_start(priv->phylink);
386677b28983SJisheng Zhang 	/* We may have called phylink_speed_down before */
386777b28983SJisheng Zhang 	phylink_speed_up(priv->phylink);
38687ac6653aSJeff Kirsher 
38698532f613SOng Boon Leong 	ret = stmmac_request_irq(dev);
38708532f613SOng Boon Leong 	if (ret)
38716c1e5abeSThierry Reding 		goto irq_error;
3872d765955dSGiuseppe CAVALLARO 
3873c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
38749f19306dSOng Boon Leong 	netif_tx_start_all_queues(priv->dev);
3875087a7b94SVincent Whitchurch 	stmmac_enable_all_dma_irq(priv);
38767ac6653aSJeff Kirsher 
38777ac6653aSJeff Kirsher 	return 0;
38787ac6653aSJeff Kirsher 
38796c1e5abeSThierry Reding irq_error:
388074371272SJose Abreu 	phylink_stop(priv->phylink);
38817a13f8f5SFrancesco Virlinzi 
38828fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
38838531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
38848fce3331SJose Abreu 
3885c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
3886c9324d18SGiuseppe CAVALLARO init_error:
388774371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
38885ec55823SJoakim Zhang init_phy_error:
38895ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
38907ac6653aSJeff Kirsher 	return ret;
38917ac6653aSJeff Kirsher }
38927ac6653aSJeff Kirsher 
3893ba39b344SChristian Marangi static int stmmac_open(struct net_device *dev)
3894ba39b344SChristian Marangi {
3895ba39b344SChristian Marangi 	struct stmmac_priv *priv = netdev_priv(dev);
3896ba39b344SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
3897ba39b344SChristian Marangi 	int ret;
3898ba39b344SChristian Marangi 
3899ba39b344SChristian Marangi 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3900ba39b344SChristian Marangi 	if (IS_ERR(dma_conf))
3901ba39b344SChristian Marangi 		return PTR_ERR(dma_conf);
3902ba39b344SChristian Marangi 
3903ba39b344SChristian Marangi 	ret = __stmmac_open(dev, dma_conf);
390430134b7cSChristian Marangi 	if (ret)
390530134b7cSChristian Marangi 		free_dma_desc_resources(priv, dma_conf);
390630134b7cSChristian Marangi 
3907ba39b344SChristian Marangi 	kfree(dma_conf);
3908ba39b344SChristian Marangi 	return ret;
3909ba39b344SChristian Marangi }
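
/* Editor's note: stmmac_open() above builds the complete DMA
 * configuration off to the side and only commits it once __stmmac_open()
 * succeeds, freeing the candidate on any failure. A generic, standalone
 * rendering of that prepare/commit pattern (ERR_PTR()-style encoding
 * replaced by a plain NULL check; all names are invented):
 */
#include <stdlib.h>

struct example_conf {
	int ring_size;
};

static struct example_conf *example_prepare(int mtu)
{
	struct example_conf *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	c->ring_size = mtu > 1500 ? 1024 : 512;	/* illustrative sizing */
	return c;
}

static int example_commit(const struct example_conf *c)
{
	(void)c;	/* the real code copies *c into the live state here */
	return 0;
}

static int example_open(int mtu)
{
	struct example_conf *c = example_prepare(mtu);
	int ret;

	if (!c)
		return -1;
	ret = example_commit(c);
	free(c);	/* candidate no longer needed once applied (or failed) */
	return ret;
}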
3910ba39b344SChristian Marangi 
39115a558611SOng Boon Leong static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
39125a558611SOng Boon Leong {
39135a558611SOng Boon Leong 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
39145a558611SOng Boon Leong 
3915699b103eSJakub Raczynski 	if (priv->fpe_wq) {
39165a558611SOng Boon Leong 		destroy_workqueue(priv->fpe_wq);
3917699b103eSJakub Raczynski 		priv->fpe_wq = NULL;
3918699b103eSJakub Raczynski 	}
39195a558611SOng Boon Leong 
39205a558611SOng Boon Leong 	netdev_info(priv->dev, "FPE workqueue stop\n");
39215a558611SOng Boon Leong }
39225a558611SOng Boon Leong 
39237ac6653aSJeff Kirsher /**
39247ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
39257ac6653aSJeff Kirsher  *  @dev : device pointer.
39267ac6653aSJeff Kirsher  *  Description:
39277ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
39287ac6653aSJeff Kirsher  */
3929ac746c85SOng Boon Leong static int stmmac_release(struct net_device *dev)
39307ac6653aSJeff Kirsher {
39317ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
39328fce3331SJose Abreu 	u32 chan;
39337ac6653aSJeff Kirsher 
393477b28983SJisheng Zhang 	if (device_may_wakeup(priv->device))
393577b28983SJisheng Zhang 		phylink_speed_down(priv->phylink, false);
39367ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
393774371272SJose Abreu 	phylink_stop(priv->phylink);
393874371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
39397ac6653aSJeff Kirsher 
3940c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
39417ac6653aSJeff Kirsher 
39428fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
39438531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
39449125cdd1SGiuseppe CAVALLARO 
39457028471eSChristian Marangi 	netif_tx_disable(dev);
39467028471eSChristian Marangi 
39477ac6653aSJeff Kirsher 	/* Free the IRQ lines */
39488532f613SOng Boon Leong 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
39497ac6653aSJeff Kirsher 
39505f585913SFugang Duan 	if (priv->eee_enabled) {
39515f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
39525f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
39535f585913SFugang Duan 	}
39545f585913SFugang Duan 
39557ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
3956ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
39577ac6653aSJeff Kirsher 
39587ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
3959ba39b344SChristian Marangi 	free_dma_desc_resources(priv, &priv->dma_conf);
39607ac6653aSJeff Kirsher 
39617ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
3962c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
39637ac6653aSJeff Kirsher 
396449725ffcSJunxiao Chang 	/* Powerdown Serdes if there is */
396549725ffcSJunxiao Chang 	/* Power down the SerDes if present */
396649725ffcSJunxiao Chang 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
396749725ffcSJunxiao Chang 
39687ac6653aSJeff Kirsher 	netif_carrier_off(dev);
39697ac6653aSJeff Kirsher 
397092ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
397192ba6888SRayagond Kokatanur 
39725ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
39735ec55823SJoakim Zhang 
39745a558611SOng Boon Leong 	if (priv->dma_cap.fpesel)
39755a558611SOng Boon Leong 		stmmac_fpe_stop_wq(priv);
39765a558611SOng Boon Leong 
39777ac6653aSJeff Kirsher 	return 0;
39787ac6653aSJeff Kirsher }
39797ac6653aSJeff Kirsher 
398030d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
398130d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
398230d93227SJose Abreu {
398330d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
398430d93227SJose Abreu 	u32 inner_type = 0x0;
398530d93227SJose Abreu 	struct dma_desc *p;
398630d93227SJose Abreu 
398730d93227SJose Abreu 	if (!priv->dma_cap.vlins)
398830d93227SJose Abreu 		return false;
398930d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
399030d93227SJose Abreu 		return false;
399130d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
399230d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
399330d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
399430d93227SJose Abreu 	}
399530d93227SJose Abreu 
399630d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
399730d93227SJose Abreu 
3998579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3999579a25a8SJose Abreu 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4000579a25a8SJose Abreu 	else
4001579a25a8SJose Abreu 		p = &tx_q->dma_tx[tx_q->cur_tx];
4002579a25a8SJose Abreu 
400330d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
400430d93227SJose Abreu 		return false;
400530d93227SJose Abreu 
400630d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
40078531c808SChristian Marangi 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
400830d93227SJose Abreu 	return true;
400930d93227SJose Abreu }
401030d93227SJose Abreu 
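/*
 * Editor's sketch (not part of the driver): the tag-selection logic of
 * stmmac_vlan_insert() reduced to a pure, standalone function. All names and
 * the SKETCH_VLAN_INSERT constant are hypothetical stand-ins; only the
 * 0x88a8 (802.1ad/QinQ) special case mirrors the code above.
 */
#include <stdint.h>

#define SKETCH_VLAN_INSERT 0x2 /* stands in for STMMAC_VLAN_INSERT */

struct sketch_vlan_sel {
	uint16_t tag;
	uint16_t inner_tag;
	uint32_t inner_type;
};

static struct sketch_vlan_sel sketch_pick_vlan(uint16_t vlan_proto, uint16_t tci)
{
	struct sketch_vlan_sel sel = { .tag = tci }; /* inner fields zeroed */

	/* An 802.1ad (QinQ) outer protocol routes the TCI to the inner slot */
	if (vlan_proto == 0x88a8) {
		sel.inner_tag = tci;
		sel.inner_type = SKETCH_VLAN_INSERT;
	}
	return sel;
}
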
40117ac6653aSJeff Kirsher /**
4012f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
4013f748be53SAlexandre TORGUE  *  @priv: driver private structure
4014f748be53SAlexandre TORGUE  *  @des: buffer start address
4015f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
4016d0ea5cbdSJesse Brandeburg  *  @last_segment: condition for the last descriptor
4017ce736788SJoao Pinto  *  @queue: TX queue index
4018f748be53SAlexandre TORGUE  *  Description:
4019f748be53SAlexandre TORGUE  *  This function fills the current descriptor and requests new descriptors,
4020f748be53SAlexandre TORGUE  *  one per TSO_MAX_BUFF_SIZE chunk, according to the buffer length to fill.
4021f748be53SAlexandre TORGUE  */
4022a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4023ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
4024f748be53SAlexandre TORGUE {
40258531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4026f748be53SAlexandre TORGUE 	struct dma_desc *desc;
40275bacd778SLABBE Corentin 	u32 buff_size;
4028ce736788SJoao Pinto 	int tmp_len;
4029f748be53SAlexandre TORGUE 
4030f748be53SAlexandre TORGUE 	tmp_len = total_len;
4031f748be53SAlexandre TORGUE 
4032f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
4033a993db88SJose Abreu 		dma_addr_t curr_addr;
4034a993db88SJose Abreu 
4035aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
40368531c808SChristian Marangi 						priv->dma_conf.dma_tx_size);
4037b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4038579a25a8SJose Abreu 
4039579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4040579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4041579a25a8SJose Abreu 		else
4042579a25a8SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4043f748be53SAlexandre TORGUE 
4044a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
4045a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
4046a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
4047a993db88SJose Abreu 		else
4048a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
4049a993db88SJose Abreu 
4050f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4051f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
4052f748be53SAlexandre TORGUE 
405342de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4054f748be53SAlexandre TORGUE 				0, 1,
4055426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4056f748be53SAlexandre TORGUE 				0, 0);
4057f748be53SAlexandre TORGUE 
4058f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
4059f748be53SAlexandre TORGUE 	}
4060f748be53SAlexandre TORGUE }
4061f748be53SAlexandre TORGUE 
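/*
 * Editor's sketch (not part of the driver): how stmmac_tso_allocator() walks
 * a payload in TSO_MAX_BUFF_SIZE chunks. Standalone and hypothetical; it
 * only prints the per-descriptor buffer address, length, and last-segment
 * flag that the loop above would program into the ring.
 */
#include <stdio.h>

#define SKETCH_TSO_MAX_BUFF_SIZE ((16 * 1024) - 1) /* mirrors TSO_MAX_BUFF_SIZE */

static void sketch_tso_split(unsigned long long des, int total_len)
{
	int tmp_len = total_len;

	while (tmp_len > 0) {
		unsigned long long curr = des +
			(unsigned long long)(total_len - tmp_len);
		int buff_size = tmp_len >= SKETCH_TSO_MAX_BUFF_SIZE ?
				SKETCH_TSO_MAX_BUFF_SIZE : tmp_len;

		printf("desc: addr=0x%llx len=%d last=%d\n",
		       curr, buff_size, tmp_len <= SKETCH_TSO_MAX_BUFF_SIZE);
		tmp_len -= SKETCH_TSO_MAX_BUFF_SIZE;
	}
}
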
4062d96febedSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4063d96febedSOng Boon Leong {
40648531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4065d96febedSOng Boon Leong 	int desc_size;
4066d96febedSOng Boon Leong 
4067d96febedSOng Boon Leong 	if (likely(priv->extend_desc))
4068d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_extended_desc);
4069d96febedSOng Boon Leong 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4070d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_edesc);
4071d96febedSOng Boon Leong 	else
4072d96febedSOng Boon Leong 		desc_size = sizeof(struct dma_desc);
4073d96febedSOng Boon Leong 
4074d96febedSOng Boon Leong 	/* The own bit must be the last setting done when preparing the
4075d96febedSOng Boon Leong 	 * descriptor; a barrier is then needed to make sure that
4076d96febedSOng Boon Leong 	 * everything is coherent before granting ownership to the DMA engine.
4077d96febedSOng Boon Leong 	 */
4078d96febedSOng Boon Leong 	wmb();
4079d96febedSOng Boon Leong 
4080d96febedSOng Boon Leong 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4081d96febedSOng Boon Leong 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4082d96febedSOng Boon Leong }
4083d96febedSOng Boon Leong 
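/*
 * Editor's sketch (not part of the driver): the tail-pointer arithmetic used
 * above. Given the ring's base DMA address and the descriptor size selected
 * for the ring flavour, the tail pointer is just base + index * size. In the
 * real code the wmb() must precede the MMIO doorbell write so the DMA engine
 * never observes a descriptor whose own bit is set before its contents.
 */
#include <stddef.h>

static unsigned long long sketch_tail_addr(unsigned long long dma_tx_phy,
					   unsigned int cur_tx,
					   size_t desc_size)
{
	return dma_tx_phy + (unsigned long long)cur_tx * desc_size;
}
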
4084f748be53SAlexandre TORGUE /**
4085f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4086f748be53SAlexandre TORGUE  *  @skb : the socket buffer
4087f748be53SAlexandre TORGUE  *  @dev : device pointer
4088f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
4089f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
4090f748be53SAlexandre TORGUE  *  The diagram below shows the ring programming in case of TSO frames:
4091f748be53SAlexandre TORGUE  *
4092f748be53SAlexandre TORGUE  *  First Descriptor
4093f748be53SAlexandre TORGUE  *   --------
4094f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
4095f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
4096f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
4097f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4098f748be53SAlexandre TORGUE  *   --------
4099f748be53SAlexandre TORGUE  *	|
4100f748be53SAlexandre TORGUE  *     ...
4101f748be53SAlexandre TORGUE  *	|
4102f748be53SAlexandre TORGUE  *   --------
4103f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4104f748be53SAlexandre TORGUE  *   | DES1 | --|
4105f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
4106f748be53SAlexandre TORGUE  *   | DES3 |
4107f748be53SAlexandre TORGUE  *   --------
4108f748be53SAlexandre TORGUE  *
4109f748be53SAlexandre TORGUE  * The MSS is fixed while TSO is enabled, so the TDES3 context field does not need reprogramming.
4110f748be53SAlexandre TORGUE  */
4111f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4112f748be53SAlexandre TORGUE {
4113ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
4114f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
4115f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
4116ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
4117c2837423SJose Abreu 	unsigned int first_entry, tx_packets;
41188070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats;
4119d96febedSOng Boon Leong 	int tmp_pay_len = 0, first_tx;
4120ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
4121c2837423SJose Abreu 	bool has_vlan, set_ic;
4122db3667c9SRussell King (Oracle) 	dma_addr_t tso_des, des;
4123579a25a8SJose Abreu 	u8 proto_hdr_len, hdr;
4124ce736788SJoao Pinto 	u32 pay_len, mss;
4125f748be53SAlexandre TORGUE 	int i;
4126f748be53SAlexandre TORGUE 
41278531c808SChristian Marangi 	tx_q = &priv->dma_conf.tx_queue[queue];
41288070274bSJisheng Zhang 	txq_stats = &priv->xstats.txq_stats[queue];
4129c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
4130ce736788SJoao Pinto 
4131f748be53SAlexandre TORGUE 	/* Compute header lengths */
4132b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4133b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4134b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
4135b7766206SJose Abreu 	} else {
4136504148feSEric Dumazet 		proto_hdr_len = skb_tcp_all_headers(skb);
4137b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
4138b7766206SJose Abreu 	}
4139f748be53SAlexandre TORGUE 
4140f748be53SAlexandre TORGUE 	/* Desc availability based on the threshold should be safe enough */
4141ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
4142f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4143c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4144c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4145c22a3f48SJoao Pinto 								queue));
4146f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
414738ddc59dSLABBE Corentin 			netdev_err(priv->dev,
414838ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
414938ddc59dSLABBE Corentin 				   __func__);
4150f748be53SAlexandre TORGUE 		}
4151f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
4152f748be53SAlexandre TORGUE 	}
4153f748be53SAlexandre TORGUE 
4154f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4155f748be53SAlexandre TORGUE 
4156f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
4157f748be53SAlexandre TORGUE 
4158f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
41598d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
4160579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4161579a25a8SJose Abreu 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4162579a25a8SJose Abreu 		else
4163579a25a8SJose Abreu 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4164579a25a8SJose Abreu 
416542de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
41668d212a9eSNiklas Cassel 		tx_q->mss = mss;
4167aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
41688531c808SChristian Marangi 						priv->dma_conf.dma_tx_size);
4169b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4170f748be53SAlexandre TORGUE 	}
4171f748be53SAlexandre TORGUE 
4172f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
4173b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4174b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
4175f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4176f748be53SAlexandre TORGUE 			skb->data_len);
4177f748be53SAlexandre TORGUE 	}
4178f748be53SAlexandre TORGUE 
417930d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
418030d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
418130d93227SJose Abreu 
4182ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
4183b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4184f748be53SAlexandre TORGUE 
4185579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4186579a25a8SJose Abreu 		desc = &tx_q->dma_entx[first_entry].basic;
4187579a25a8SJose Abreu 	else
4188579a25a8SJose Abreu 		desc = &tx_q->dma_tx[first_entry];
4189f748be53SAlexandre TORGUE 	first = desc;
4190f748be53SAlexandre TORGUE 
419130d93227SJose Abreu 	if (has_vlan)
419230d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
419330d93227SJose Abreu 
4194f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
4195f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4196f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
4197f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
4198f748be53SAlexandre TORGUE 		goto dma_map_err;
4199f748be53SAlexandre TORGUE 
4200a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
4201f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
4202f748be53SAlexandre TORGUE 
4203f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
4204f748be53SAlexandre TORGUE 		if (pay_len)
4205f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4206f748be53SAlexandre TORGUE 
4207f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
4208f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4209db3667c9SRussell King (Oracle) 		tso_des = des;
4210a993db88SJose Abreu 	} else {
4211a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
4212a993db88SJose Abreu 		tmp_pay_len = pay_len;
4213db3667c9SRussell King (Oracle) 		tso_des = des + proto_hdr_len;
4214b2f07199SJose Abreu 		pay_len = 0;
4215a993db88SJose Abreu 	}
4216f748be53SAlexandre TORGUE 
4217db3667c9SRussell King (Oracle) 	stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
4218f748be53SAlexandre TORGUE 
421907c9c26eSFurong Xu 	/* In case two or more DMA transmit descriptors are allocated for this
422007c9c26eSFurong Xu 	 * non-paged SKB data, the DMA buffer address should be saved to
422107c9c26eSFurong Xu 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
422207c9c26eSFurong Xu 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
422307c9c26eSFurong Xu 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
422407c9c26eSFurong Xu 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
422507c9c26eSFurong Xu 	 * sooner or later.
422607c9c26eSFurong Xu 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
422707c9c26eSFurong Xu 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
422807c9c26eSFurong Xu 	 * this DMA buffer right after the DMA engine completely finishes the
422907c9c26eSFurong Xu 	 * full buffer transmission.
423007c9c26eSFurong Xu 	 */
423107c9c26eSFurong Xu 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
423207c9c26eSFurong Xu 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
423307c9c26eSFurong Xu 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
423407c9c26eSFurong Xu 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
423507c9c26eSFurong Xu 
4236f748be53SAlexandre TORGUE 	/* Prepare fragments */
4237f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
4238f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4239f748be53SAlexandre TORGUE 
4240f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
4241f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
4242f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
4243937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
4244937071c1SThierry Reding 			goto dma_map_err;
4245f748be53SAlexandre TORGUE 
4246f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4247ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
4248f748be53SAlexandre TORGUE 
4249ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4250ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4251ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4252be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4253f748be53SAlexandre TORGUE 	}
4254f748be53SAlexandre TORGUE 
4255ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4256f748be53SAlexandre TORGUE 
425705cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
425805cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4259be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
426005cf0d1bSNiklas Cassel 
42617df4a3a7SJose Abreu 	/* Manage tx mitigation */
4262c2837423SJose Abreu 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4263c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
4264c2837423SJose Abreu 
4265c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4266c2837423SJose Abreu 		set_ic = true;
4267db2f2842SOng Boon Leong 	else if (!priv->tx_coal_frames[queue])
4268c2837423SJose Abreu 		set_ic = false;
4269db2f2842SOng Boon Leong 	else if (tx_packets > priv->tx_coal_frames[queue])
4270c2837423SJose Abreu 		set_ic = true;
4271db2f2842SOng Boon Leong 	else if ((tx_q->tx_count_frames %
4272db2f2842SOng Boon Leong 		  priv->tx_coal_frames[queue]) < tx_packets)
4273c2837423SJose Abreu 		set_ic = true;
4274c2837423SJose Abreu 	else
4275c2837423SJose Abreu 		set_ic = false;
4276c2837423SJose Abreu 
4277c2837423SJose Abreu 	if (set_ic) {
4278579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4279579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4280579a25a8SJose Abreu 		else
42817df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4282579a25a8SJose Abreu 
42837df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
42847df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
42857df4a3a7SJose Abreu 	}
42867df4a3a7SJose Abreu 
428705cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
428805cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
428905cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
429005cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
429105cf0d1bSNiklas Cassel 	 */
42928531c808SChristian Marangi 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4293f748be53SAlexandre TORGUE 
4294ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4295b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
429638ddc59dSLABBE Corentin 			  __func__);
4297c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4298f748be53SAlexandre TORGUE 	}
4299f748be53SAlexandre TORGUE 
43009680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->q_syncp);
43019680b2abSPetr Tesarik 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
43029680b2abSPetr Tesarik 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
43039680b2abSPetr Tesarik 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4304133466c3SJisheng Zhang 	if (set_ic)
43059680b2abSPetr Tesarik 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
43069680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->q_syncp);
4307f748be53SAlexandre TORGUE 
43088000ddc0SJose Abreu 	if (priv->sarc_type)
43098000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
43108000ddc0SJose Abreu 
4311f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
4312f748be53SAlexandre TORGUE 
4313f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4314f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
4315f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
4316f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
431742de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
4318f748be53SAlexandre TORGUE 	}
4319f748be53SAlexandre TORGUE 
4320f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
432142de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4322f748be53SAlexandre TORGUE 			proto_hdr_len,
4323f748be53SAlexandre TORGUE 			pay_len,
4324ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4325b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
4326f748be53SAlexandre TORGUE 
4327f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
432815d2ee42SNiklas Cassel 	if (mss_desc) {
432915d2ee42SNiklas Cassel 		/* Make sure that the first descriptor has been completely
433015d2ee42SNiklas Cassel 		 * written, including its own bit. This is because the MSS
433115d2ee42SNiklas Cassel 		 * descriptor actually precedes the first descriptor, so its
433215d2ee42SNiklas Cassel 		 * own bit must be the last thing written.
433315d2ee42SNiklas Cassel 		 */
433415d2ee42SNiklas Cassel 		dma_wmb();
433542de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
433615d2ee42SNiklas Cassel 	}
4337f748be53SAlexandre TORGUE 
4338f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
4339f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4340ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4341ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
4342f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
4343f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
4344f748be53SAlexandre TORGUE 	}
4345f748be53SAlexandre TORGUE 
4346c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4347f748be53SAlexandre TORGUE 
4348d96febedSOng Boon Leong 	stmmac_flush_tx_descriptors(priv, queue);
43494772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
4350f748be53SAlexandre TORGUE 
4351f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4352f748be53SAlexandre TORGUE 
4353f748be53SAlexandre TORGUE dma_map_err:
4354f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
4355f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
4356133466c3SJisheng Zhang 	priv->xstats.tx_dropped++;
4357f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
4358f748be53SAlexandre TORGUE }
4359f748be53SAlexandre TORGUE 
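/*
 * Editor's sketch (not part of the driver): the interrupt-coalescing ladder
 * that decides set_ic in both stmmac_tso_xmit() and stmmac_xmit(), as a pure
 * predicate. Parameter names are hypothetical stand-ins for the driver state
 * (priv->hwts_tx_en, priv->tx_coal_frames[queue], tx_q->tx_count_frames).
 */
#include <stdbool.h>

static bool sketch_set_ic(bool hw_tstamp, unsigned int tx_coal_frames,
			  unsigned int tx_packets, unsigned int count_frames)
{
	if (hw_tstamp)			 /* timestamped frames always interrupt */
		return true;
	if (!tx_coal_frames)		 /* frame-count coalescing disabled */
		return false;
	if (tx_packets > tx_coal_frames) /* burst larger than the threshold */
		return true;
	/* threshold boundary crossed somewhere inside this burst */
	return (count_frames % tx_coal_frames) < tx_packets;
}
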
4360f748be53SAlexandre TORGUE /**
436197d574fcSRomain Gantois  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
436297d574fcSRomain Gantois  * @skb: socket buffer to check
436397d574fcSRomain Gantois  *
436497d574fcSRomain Gantois  * Check if a packet has an ethertype that will trigger the IP header checks
436597d574fcSRomain Gantois  * and IP/TCP checksum engine of the stmmac core.
436697d574fcSRomain Gantois  *
436797d574fcSRomain Gantois  * Return: true if the ethertype can trigger the checksum engine, false
436897d574fcSRomain Gantois  * otherwise
436997d574fcSRomain Gantois  */
437097d574fcSRomain Gantois static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
437197d574fcSRomain Gantois {
437297d574fcSRomain Gantois 	int depth = 0;
437397d574fcSRomain Gantois 	__be16 proto;
437497d574fcSRomain Gantois 
437597d574fcSRomain Gantois 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
437697d574fcSRomain Gantois 				    &depth);
437797d574fcSRomain Gantois 
437897d574fcSRomain Gantois 	return (depth <= ETH_HLEN) &&
437997d574fcSRomain Gantois 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
438097d574fcSRomain Gantois }
438197d574fcSRomain Gantois 
438297d574fcSRomain Gantois /**
4383732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
43847ac6653aSJeff Kirsher  *  @skb : the socket buffer
43857ac6653aSJeff Kirsher  *  @dev : device pointer
438632ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
438732ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
438832ceabcaSGiuseppe CAVALLARO  *  and the SG feature.
43897ac6653aSJeff Kirsher  */
43907ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
43917ac6653aSJeff Kirsher {
4392c2837423SJose Abreu 	unsigned int first_entry, tx_packets, enh_desc;
43937ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
43940e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
43954a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
4396ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
43977ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
4398b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
43998070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats;
4400579a25a8SJose Abreu 	struct dma_edesc *tbs_desc = NULL;
44017ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
4402ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
4403c2837423SJose Abreu 	bool has_vlan, set_ic;
4404d96febedSOng Boon Leong 	int entry, first_tx;
4405a993db88SJose Abreu 	dma_addr_t des;
4406f748be53SAlexandre TORGUE 
44078531c808SChristian Marangi 	tx_q = &priv->dma_conf.tx_queue[queue];
44088070274bSJisheng Zhang 	txq_stats = &priv->xstats.txq_stats[queue];
4409c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
4410ce736788SJoao Pinto 
4411be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4412e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
4413e2cd682dSJose Abreu 
4414f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
4415f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
4416b7766206SJose Abreu 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4417b7766206SJose Abreu 			return stmmac_tso_xmit(skb, dev);
4418b7766206SJose Abreu 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4419f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
4420f748be53SAlexandre TORGUE 	}
44217ac6653aSJeff Kirsher 
4422ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4423c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4424c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4425c22a3f48SJoao Pinto 								queue));
44267ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
442738ddc59dSLABBE Corentin 			netdev_err(priv->dev,
442838ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
442938ddc59dSLABBE Corentin 				   __func__);
44307ac6653aSJeff Kirsher 		}
44317ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
44327ac6653aSJeff Kirsher 	}
44337ac6653aSJeff Kirsher 
443430d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
443530d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
443630d93227SJose Abreu 
4437ce736788SJoao Pinto 	entry = tx_q->cur_tx;
44380e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
4439b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
44407ac6653aSJeff Kirsher 
44417ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4442b643b836SRohan G Thomas 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4443b643b836SRohan G Thomas 	 * queues. In that case, checksum offloading for those queues that don't
4444b643b836SRohan G Thomas 	 * support tx coe needs to fall back to software checksum calculation.
444597d574fcSRomain Gantois 	 *
444697d574fcSRomain Gantois 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
444797d574fcSRomain Gantois 	 * also have to be checksummed in software.
4448b643b836SRohan G Thomas 	 */
4449b643b836SRohan G Thomas 	if (csum_insertion &&
445097d574fcSRomain Gantois 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
445197d574fcSRomain Gantois 	     !stmmac_has_ip_ethertype(skb))) {
4452b643b836SRohan G Thomas 		if (unlikely(skb_checksum_help(skb)))
4453b643b836SRohan G Thomas 			goto dma_map_err;
4454b643b836SRohan G Thomas 		csum_insertion = !csum_insertion;
4455b643b836SRohan G Thomas 	}
44567ac6653aSJeff Kirsher 
44570e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
4458ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4459579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4460579a25a8SJose Abreu 		desc = &tx_q->dma_entx[entry].basic;
4461c24602efSGiuseppe CAVALLARO 	else
4462ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
4463c24602efSGiuseppe CAVALLARO 
44647ac6653aSJeff Kirsher 	first = desc;
44657ac6653aSJeff Kirsher 
446630d93227SJose Abreu 	if (has_vlan)
446730d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
446830d93227SJose Abreu 
44690e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
44704a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
447129896a67SGiuseppe CAVALLARO 	if (enh_desc)
44722c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
447329896a67SGiuseppe CAVALLARO 
447463a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
44752c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
447663a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
4477362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
447829896a67SGiuseppe CAVALLARO 	}
44797ac6653aSJeff Kirsher 
44807ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
44819e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
44829e903e08SEric Dumazet 		int len = skb_frag_size(frag);
4483be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
44847ac6653aSJeff Kirsher 
44858531c808SChristian Marangi 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4486b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
4487e3ad57c9SGiuseppe Cavallaro 
44880e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
4489ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4490579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4491579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
4492c24602efSGiuseppe CAVALLARO 		else
4493ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
44947ac6653aSJeff Kirsher 
4495f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4496f722380dSIan Campbell 				       DMA_TO_DEVICE);
4497f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
4498362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
4499362b37beSGiuseppe CAVALLARO 
4500ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
45016844171dSJose Abreu 
45026844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
4503f748be53SAlexandre TORGUE 
4504ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4505ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
4506ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4507be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
45080e80bdc9SGiuseppe Cavallaro 
45090e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
451042de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
451142de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
45127ac6653aSJeff Kirsher 	}
45137ac6653aSJeff Kirsher 
451405cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
451505cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
4516be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4517e3ad57c9SGiuseppe Cavallaro 
45187df4a3a7SJose Abreu 	/* According to the coalesce parameter the IC bit for the latest
45197df4a3a7SJose Abreu 	 * segment is reset and the timer re-started to clean the tx status.
45207df4a3a7SJose Abreu 	 * This approach takes care of the fragments: desc is the first
45217df4a3a7SJose Abreu 	 * element in case of no SG.
45227df4a3a7SJose Abreu 	 */
4523c2837423SJose Abreu 	tx_packets = (entry + 1) - first_tx;
4524c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
4525c2837423SJose Abreu 
4526c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4527c2837423SJose Abreu 		set_ic = true;
4528db2f2842SOng Boon Leong 	else if (!priv->tx_coal_frames[queue])
4529c2837423SJose Abreu 		set_ic = false;
4530db2f2842SOng Boon Leong 	else if (tx_packets > priv->tx_coal_frames[queue])
4531c2837423SJose Abreu 		set_ic = true;
4532db2f2842SOng Boon Leong 	else if ((tx_q->tx_count_frames %
4533db2f2842SOng Boon Leong 		  priv->tx_coal_frames[queue]) < tx_packets)
4534c2837423SJose Abreu 		set_ic = true;
4535c2837423SJose Abreu 	else
4536c2837423SJose Abreu 		set_ic = false;
4537c2837423SJose Abreu 
4538c2837423SJose Abreu 	if (set_ic) {
45397df4a3a7SJose Abreu 		if (likely(priv->extend_desc))
45407df4a3a7SJose Abreu 			desc = &tx_q->dma_etx[entry].basic;
4541579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4542579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
45437df4a3a7SJose Abreu 		else
45447df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[entry];
45457df4a3a7SJose Abreu 
45467df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
45477df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
45487df4a3a7SJose Abreu 	}
45497df4a3a7SJose Abreu 
455005cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
455105cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
455205cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
455305cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
455405cf0d1bSNiklas Cassel 	 */
45558531c808SChristian Marangi 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4556ce736788SJoao Pinto 	tx_q->cur_tx = entry;
45577ac6653aSJeff Kirsher 
45587ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
455938ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
456038ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4561ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
45620e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
456383d7af64SGiuseppe CAVALLARO 
456438ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
45657ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
45667ac6653aSJeff Kirsher 	}
45670e80bdc9SGiuseppe Cavallaro 
4568ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4569b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4570b3e51069SLABBE Corentin 			  __func__);
4571c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
45727ac6653aSJeff Kirsher 	}
45737ac6653aSJeff Kirsher 
45749680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->q_syncp);
45759680b2abSPetr Tesarik 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4576133466c3SJisheng Zhang 	if (set_ic)
45779680b2abSPetr Tesarik 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
45789680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->q_syncp);
45797ac6653aSJeff Kirsher 
45808000ddc0SJose Abreu 	if (priv->sarc_type)
45818000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
45828000ddc0SJose Abreu 
45830e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
45840e80bdc9SGiuseppe Cavallaro 
45850e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
45860e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
45870e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
45880e80bdc9SGiuseppe Cavallaro 	 */
45890e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
45900e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
45910e80bdc9SGiuseppe Cavallaro 
4592f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
45930e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
4594f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
45950e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
45960e80bdc9SGiuseppe Cavallaro 
4597ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4598be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4599be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
46006844171dSJose Abreu 
46016844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
4602f748be53SAlexandre TORGUE 
4603ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4604ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
46050e80bdc9SGiuseppe Cavallaro 
4606891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4607891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
4608891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
4609891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
461042de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
4611891434b1SRayagond Kokatanur 		}
4612891434b1SRayagond Kokatanur 
46130e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
461442de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4615579a25a8SJose Abreu 				csum_insertion, priv->mode, 0, last_segment,
461642de047dSJose Abreu 				skb->len);
461780acbed9SAaro Koskinen 	}
46180e80bdc9SGiuseppe Cavallaro 
4619579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_EN) {
4620579a25a8SJose Abreu 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4621579a25a8SJose Abreu 
4622579a25a8SJose Abreu 		tbs_desc = &tx_q->dma_entx[first_entry];
4623579a25a8SJose Abreu 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4624579a25a8SJose Abreu 	}
4625579a25a8SJose Abreu 
4626579a25a8SJose Abreu 	stmmac_set_tx_owner(priv, first);
4627579a25a8SJose Abreu 
4628c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4629f748be53SAlexandre TORGUE 
4630a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
46318fce3331SJose Abreu 
4632d96febedSOng Boon Leong 	stmmac_flush_tx_descriptors(priv, queue);
46334772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
46347ac6653aSJeff Kirsher 
4635362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
4636a9097a96SGiuseppe CAVALLARO 
4637362b37beSGiuseppe CAVALLARO dma_map_err:
463838ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
4639362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
4640133466c3SJisheng Zhang 	priv->xstats.tx_dropped++;
46417ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
46427ac6653aSJeff Kirsher }
46437ac6653aSJeff Kirsher 
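/*
 * Editor's sketch (not part of the driver): the software-checksum fallback
 * decision from stmmac_xmit() above. If the packet wants CHECKSUM_PARTIAL
 * but the queue lacks TX COE, or the ethertype cannot trigger the engine,
 * the checksum is done in software and the insertion flag is cleared. All
 * names are hypothetical.
 */
#include <stdbool.h>

static bool sketch_want_hw_csum(bool csum_partial, bool queue_has_coe,
				bool ip_ethertype)
{
	if (csum_partial && (!queue_has_coe || !ip_ethertype)) {
		/* the real code calls skb_checksum_help(skb) here */
		return false; /* fall back to the software checksum */
	}
	return csum_partial;
}
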
4644b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4645b9381985SVince Bridgers {
46461f5020acSVladimir Oltean 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
46471f5020acSVladimir Oltean 	__be16 vlan_proto = veth->h_vlan_proto;
4648b9381985SVince Bridgers 	u16 vlanid;
4649b9381985SVince Bridgers 
4650ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4651ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4652ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
4653ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4654b9381985SVince Bridgers 		/* pop the vlan tag */
4655ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
4656ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4657b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
4658ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4659b9381985SVince Bridgers 	}
4660b9381985SVince Bridgers }
4661b9381985SVince Bridgers 
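/*
 * Editor's sketch (not part of the driver): popping an 802.1Q tag from a raw
 * Ethernet frame the same way the memmove()/skb_pull() pair above does it.
 * Standalone and hypothetical; it operates on a plain byte buffer instead of
 * an skb, so the caller reads the untagged frame from frame + 4 afterwards.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_VLAN_HLEN 4
#define SKETCH_ETH_ALEN  6

/* Returns the new frame length after stripping the 4-byte tag. */
static size_t sketch_pop_vlan(uint8_t *frame, size_t len, uint16_t *tci)
{
	/* The TCI sits after the two MACs (12 bytes) and the 2-byte TPID */
	*tci = (uint16_t)(frame[14] << 8 | frame[15]);
	/* Slide both MAC addresses forward over the tag */
	memmove(frame + SKETCH_VLAN_HLEN, frame, SKETCH_ETH_ALEN * 2);
	return len - SKETCH_VLAN_HLEN;
}
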
466232ceabcaSGiuseppe CAVALLARO /**
4663732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill the used preallocated RX buffers
466432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
466554139cf3SJoao Pinto  * @queue: RX queue index
466632ceabcaSGiuseppe CAVALLARO  * Description : this reallocates the RX buffers for the reception process
466732ceabcaSGiuseppe CAVALLARO  * that is based on zero-copy.
466832ceabcaSGiuseppe CAVALLARO  */
466954139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
46707ac6653aSJeff Kirsher {
46718531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
46725fabb012SOng Boon Leong 	int dirty = stmmac_rx_dirty(priv, queue);
467354139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
4674884d2b84SDavid Wu 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4675884d2b84SDavid Wu 
4676070246e4SJochen Henneberg 	if (priv->dma_cap.host_dma_width <= 32)
4677884d2b84SDavid Wu 		gfp |= GFP_DMA32;
467854139cf3SJoao Pinto 
4679e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
46802af6106aSJose Abreu 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4681c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
4682d429b66eSJose Abreu 		bool use_rx_wd;
4683c24602efSGiuseppe CAVALLARO 
4684c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
468554139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4686c24602efSGiuseppe CAVALLARO 		else
468754139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
4688c24602efSGiuseppe CAVALLARO 
46892af6106aSJose Abreu 		if (!buf->page) {
4690884d2b84SDavid Wu 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
46912af6106aSJose Abreu 			if (!buf->page)
46927ac6653aSJeff Kirsher 				break;
4693120e87f9SGiuseppe Cavallaro 		}
46947ac6653aSJeff Kirsher 
469567afd6d1SJose Abreu 		if (priv->sph && !buf->sec_page) {
4696884d2b84SDavid Wu 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
469767afd6d1SJose Abreu 			if (!buf->sec_page)
469867afd6d1SJose Abreu 				break;
469967afd6d1SJose Abreu 
470067afd6d1SJose Abreu 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
470167afd6d1SJose Abreu 		}
470267afd6d1SJose Abreu 
47035fabb012SOng Boon Leong 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
47043caa61c2SJose Abreu 
47052af6106aSJose Abreu 		stmmac_set_desc_addr(priv, p, buf->addr);
4706396e13e1SJoakim Zhang 		if (priv->sph)
4707396e13e1SJoakim Zhang 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4708396e13e1SJoakim Zhang 		else
4709396e13e1SJoakim Zhang 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
47102c520b1cSJose Abreu 		stmmac_refill_desc3(priv, rx_q, p);
4711286a8372SGiuseppe CAVALLARO 
4712d429b66eSJose Abreu 		rx_q->rx_count_frames++;
4713db2f2842SOng Boon Leong 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4714db2f2842SOng Boon Leong 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
47156fa9d691SJose Abreu 			rx_q->rx_count_frames = 0;
471609146abeSJose Abreu 
4717db2f2842SOng Boon Leong 		use_rx_wd = !priv->rx_coal_frames[queue];
471809146abeSJose Abreu 		use_rx_wd |= rx_q->rx_count_frames > 0;
471909146abeSJose Abreu 		if (!priv->use_riwt)
472009146abeSJose Abreu 			use_rx_wd = false;
4721d429b66eSJose Abreu 
4722ad688cdbSPavel Machek 		dma_wmb();
47232af6106aSJose Abreu 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4724e3ad57c9SGiuseppe Cavallaro 
47258531c808SChristian Marangi 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
47267ac6653aSJeff Kirsher 	}
472754139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
4728858a31ffSJose Abreu 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4729858a31ffSJose Abreu 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
47304523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
47317ac6653aSJeff Kirsher }
47327ac6653aSJeff Kirsher 
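/*
 * Editor's sketch (not part of the driver): the ring-index arithmetic behind
 * the refill loop above. The driver's STMMAC_GET_ENTRY() macro masks with
 * (size - 1), assuming power-of-two rings; plain modulo is shown here for
 * clarity. Names are hypothetical stand-ins for the rx_q/dma_conf fields.
 */
static unsigned int sketch_next_entry(unsigned int entry, unsigned int ring_size)
{
	return (entry + 1) % ring_size; /* wrap back to slot 0 at the end */
}

/* Number of consumed slots between the consumer and the refill cursor */
static unsigned int sketch_rx_dirty(unsigned int dirty_rx, unsigned int cur_rx,
				    unsigned int ring_size)
{
	return (dirty_rx <= cur_rx) ? cur_rx - dirty_rx
				    : ring_size - dirty_rx + cur_rx;
}
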
473388ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
473488ebe2cfSJose Abreu 				       struct dma_desc *p,
473588ebe2cfSJose Abreu 				       int status, unsigned int len)
473688ebe2cfSJose Abreu {
473788ebe2cfSJose Abreu 	unsigned int plen = 0, hlen = 0;
473831f2760eSLuo Jiaxing 	int coe = priv->hw->rx_csum;
473988ebe2cfSJose Abreu 
474088ebe2cfSJose Abreu 	/* Not first descriptor, buffer is always zero */
474188ebe2cfSJose Abreu 	if (priv->sph && len)
474288ebe2cfSJose Abreu 		return 0;
474388ebe2cfSJose Abreu 
474488ebe2cfSJose Abreu 	/* First descriptor, get split header length */
474531f2760eSLuo Jiaxing 	stmmac_get_rx_header_len(priv, p, &hlen);
474688ebe2cfSJose Abreu 	if (priv->sph && hlen) {
474788ebe2cfSJose Abreu 		priv->xstats.rx_split_hdr_pkt_n++;
474888ebe2cfSJose Abreu 		return hlen;
474988ebe2cfSJose Abreu 	}
475088ebe2cfSJose Abreu 
475188ebe2cfSJose Abreu 	/* First descriptor, not last descriptor and not split header */
475288ebe2cfSJose Abreu 	if (status & rx_not_ls)
47538531c808SChristian Marangi 		return priv->dma_conf.dma_buf_sz;
475488ebe2cfSJose Abreu 
475588ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
475688ebe2cfSJose Abreu 
475788ebe2cfSJose Abreu 	/* First descriptor and last descriptor and not split header */
47588531c808SChristian Marangi 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
475988ebe2cfSJose Abreu }
476088ebe2cfSJose Abreu 
476188ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
476288ebe2cfSJose Abreu 				       struct dma_desc *p,
476388ebe2cfSJose Abreu 				       int status, unsigned int len)
476488ebe2cfSJose Abreu {
476588ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
476688ebe2cfSJose Abreu 	unsigned int plen = 0;
476788ebe2cfSJose Abreu 
476888ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
476988ebe2cfSJose Abreu 	if (!priv->sph)
477088ebe2cfSJose Abreu 		return 0;
477188ebe2cfSJose Abreu 
477288ebe2cfSJose Abreu 	/* Not last descriptor */
477388ebe2cfSJose Abreu 	if (status & rx_not_ls)
47748531c808SChristian Marangi 		return priv->dma_conf.dma_buf_sz;
477588ebe2cfSJose Abreu 
477688ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
477788ebe2cfSJose Abreu 
477888ebe2cfSJose Abreu 	/* Last descriptor */
477988ebe2cfSJose Abreu 	return plen - len;
478088ebe2cfSJose Abreu }
478188ebe2cfSJose Abreu 
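/*
 * Editor's sketch (not part of the driver): how the buf1/buf2 lengths above
 * combine for a split-header (SPH) frame that fits in a single descriptor
 * (first == last, len accumulated so far == 0). hlen and plen are
 * hypothetical stand-ins for the header length and total frame length read
 * from the descriptor.
 */
static void sketch_sph_split(unsigned int hlen, unsigned int plen,
			     unsigned int *buf1_len, unsigned int *buf2_len)
{
	*buf1_len = hlen;        /* the header lands in buffer 1 */
	*buf2_len = plen - hlen; /* the payload remainder lands in buffer 2 */
}
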
4782be8b38a7SOng Boon Leong static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
47838b278a5bSOng Boon Leong 				struct xdp_frame *xdpf, bool dma_map)
4784be8b38a7SOng Boon Leong {
47858070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
47868531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4787be8b38a7SOng Boon Leong 	unsigned int entry = tx_q->cur_tx;
4788be8b38a7SOng Boon Leong 	struct dma_desc *tx_desc;
4789be8b38a7SOng Boon Leong 	dma_addr_t dma_addr;
4790be8b38a7SOng Boon Leong 	bool set_ic;
4791be8b38a7SOng Boon Leong 
4792be8b38a7SOng Boon Leong 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4793be8b38a7SOng Boon Leong 		return STMMAC_XDP_CONSUMED;
4794be8b38a7SOng Boon Leong 
4795be8b38a7SOng Boon Leong 	if (likely(priv->extend_desc))
4796be8b38a7SOng Boon Leong 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4797be8b38a7SOng Boon Leong 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4798be8b38a7SOng Boon Leong 		tx_desc = &tx_q->dma_entx[entry].basic;
4799be8b38a7SOng Boon Leong 	else
4800be8b38a7SOng Boon Leong 		tx_desc = tx_q->dma_tx + entry;
4801be8b38a7SOng Boon Leong 
48028b278a5bSOng Boon Leong 	if (dma_map) {
48038b278a5bSOng Boon Leong 		dma_addr = dma_map_single(priv->device, xdpf->data,
48048b278a5bSOng Boon Leong 					  xdpf->len, DMA_TO_DEVICE);
48058b278a5bSOng Boon Leong 		if (dma_mapping_error(priv->device, dma_addr))
48068b278a5bSOng Boon Leong 			return STMMAC_XDP_CONSUMED;
48078b278a5bSOng Boon Leong 
48088b278a5bSOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
48098b278a5bSOng Boon Leong 	} else {
48108b278a5bSOng Boon Leong 		struct page *page = virt_to_page(xdpf->data);
48118b278a5bSOng Boon Leong 
4812be8b38a7SOng Boon Leong 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4813be8b38a7SOng Boon Leong 			   xdpf->headroom;
4814be8b38a7SOng Boon Leong 		dma_sync_single_for_device(priv->device, dma_addr,
4815be8b38a7SOng Boon Leong 					   xdpf->len, DMA_BIDIRECTIONAL);
4816be8b38a7SOng Boon Leong 
4817be8b38a7SOng Boon Leong 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
48188b278a5bSOng Boon Leong 	}
4819be8b38a7SOng Boon Leong 
4820be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4821be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4822be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4823be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4824be8b38a7SOng Boon Leong 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4825be8b38a7SOng Boon Leong 
4826be8b38a7SOng Boon Leong 	tx_q->xdpf[entry] = xdpf;
4827be8b38a7SOng Boon Leong 
4828be8b38a7SOng Boon Leong 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4829be8b38a7SOng Boon Leong 
4830be8b38a7SOng Boon Leong 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4831be8b38a7SOng Boon Leong 			       true, priv->mode, true, true,
4832be8b38a7SOng Boon Leong 			       xdpf->len);
4833be8b38a7SOng Boon Leong 
4834be8b38a7SOng Boon Leong 	tx_q->tx_count_frames++;
4835be8b38a7SOng Boon Leong 
4836be8b38a7SOng Boon Leong 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4837be8b38a7SOng Boon Leong 		set_ic = true;
4838be8b38a7SOng Boon Leong 	else
4839be8b38a7SOng Boon Leong 		set_ic = false;
4840be8b38a7SOng Boon Leong 
4841be8b38a7SOng Boon Leong 	if (set_ic) {
4842be8b38a7SOng Boon Leong 		tx_q->tx_count_frames = 0;
4843be8b38a7SOng Boon Leong 		stmmac_set_tx_ic(priv, tx_desc);
48449680b2abSPetr Tesarik 		u64_stats_update_begin(&txq_stats->q_syncp);
48459680b2abSPetr Tesarik 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
48469680b2abSPetr Tesarik 		u64_stats_update_end(&txq_stats->q_syncp);
4847be8b38a7SOng Boon Leong 	}
4848be8b38a7SOng Boon Leong 
4849be8b38a7SOng Boon Leong 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4850be8b38a7SOng Boon Leong 
48518531c808SChristian Marangi 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4852be8b38a7SOng Boon Leong 	tx_q->cur_tx = entry;
4853be8b38a7SOng Boon Leong 
4854be8b38a7SOng Boon Leong 	return STMMAC_XDP_TX;
4855be8b38a7SOng Boon Leong }
4856be8b38a7SOng Boon Leong 
4857be8b38a7SOng Boon Leong static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4858be8b38a7SOng Boon Leong 				   int cpu)
4859be8b38a7SOng Boon Leong {
4860be8b38a7SOng Boon Leong 	int index = cpu;
4861be8b38a7SOng Boon Leong 
4862be8b38a7SOng Boon Leong 	if (unlikely(index < 0))
4863be8b38a7SOng Boon Leong 		index = 0;
4864be8b38a7SOng Boon Leong 
4865be8b38a7SOng Boon Leong 	while (index >= priv->plat->tx_queues_to_use)
4866be8b38a7SOng Boon Leong 		index -= priv->plat->tx_queues_to_use;
4867be8b38a7SOng Boon Leong 
4868be8b38a7SOng Boon Leong 	return index;
4869be8b38a7SOng Boon Leong }
4870be8b38a7SOng Boon Leong 
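/*
 * Editor's sketch (not part of the driver): the CPU-to-queue folding above.
 * For a non-negative cpu the subtraction loop is equivalent to cpu % nqueues;
 * negative ids clamp to queue 0. Names are hypothetical.
 */
static int sketch_xdp_tx_queue(int cpu, int nqueues)
{
	if (cpu < 0)
		return 0;
	return cpu % nqueues;
}
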
4871be8b38a7SOng Boon Leong static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4872be8b38a7SOng Boon Leong 				struct xdp_buff *xdp)
4873be8b38a7SOng Boon Leong {
4874be8b38a7SOng Boon Leong 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4875be8b38a7SOng Boon Leong 	int cpu = smp_processor_id();
4876be8b38a7SOng Boon Leong 	struct netdev_queue *nq;
4877be8b38a7SOng Boon Leong 	int queue;
4878be8b38a7SOng Boon Leong 	int res;
4879be8b38a7SOng Boon Leong 
4880be8b38a7SOng Boon Leong 	if (unlikely(!xdpf))
4881be8b38a7SOng Boon Leong 		return STMMAC_XDP_CONSUMED;
4882be8b38a7SOng Boon Leong 
4883be8b38a7SOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4884be8b38a7SOng Boon Leong 	nq = netdev_get_tx_queue(priv->dev, queue);
4885be8b38a7SOng Boon Leong 
4886be8b38a7SOng Boon Leong 	__netif_tx_lock(nq, cpu);
4887be8b38a7SOng Boon Leong 	/* Avoids TX time-out as we are sharing with slow path */
4888e92af33eSAlexander Lobakin 	txq_trans_cond_update(nq);
4889be8b38a7SOng Boon Leong 
48908b278a5bSOng Boon Leong 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4891be8b38a7SOng Boon Leong 	if (res == STMMAC_XDP_TX)
4892be8b38a7SOng Boon Leong 		stmmac_flush_tx_descriptors(priv, queue);
4893be8b38a7SOng Boon Leong 
4894be8b38a7SOng Boon Leong 	__netif_tx_unlock(nq);
4895be8b38a7SOng Boon Leong 
4896be8b38a7SOng Boon Leong 	return res;
4897be8b38a7SOng Boon Leong }
4898be8b38a7SOng Boon Leong 
4899bba71cacSOng Boon Leong static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4900bba71cacSOng Boon Leong 				 struct bpf_prog *prog,
49015fabb012SOng Boon Leong 				 struct xdp_buff *xdp)
49025fabb012SOng Boon Leong {
49035fabb012SOng Boon Leong 	u32 act;
4904bba71cacSOng Boon Leong 	int res;
49055fabb012SOng Boon Leong 
49065fabb012SOng Boon Leong 	act = bpf_prog_run_xdp(prog, xdp);
49075fabb012SOng Boon Leong 	switch (act) {
49085fabb012SOng Boon Leong 	case XDP_PASS:
49095fabb012SOng Boon Leong 		res = STMMAC_XDP_PASS;
49105fabb012SOng Boon Leong 		break;
4911be8b38a7SOng Boon Leong 	case XDP_TX:
4912be8b38a7SOng Boon Leong 		res = stmmac_xdp_xmit_back(priv, xdp);
4913be8b38a7SOng Boon Leong 		break;
49148b278a5bSOng Boon Leong 	case XDP_REDIRECT:
49158b278a5bSOng Boon Leong 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
49168b278a5bSOng Boon Leong 			res = STMMAC_XDP_CONSUMED;
49178b278a5bSOng Boon Leong 		else
49188b278a5bSOng Boon Leong 			res = STMMAC_XDP_REDIRECT;
49198b278a5bSOng Boon Leong 		break;
49205fabb012SOng Boon Leong 	default:
4921c8064e5bSPaolo Abeni 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
49225fabb012SOng Boon Leong 		fallthrough;
49235fabb012SOng Boon Leong 	case XDP_ABORTED:
49245fabb012SOng Boon Leong 		trace_xdp_exception(priv->dev, prog, act);
49255fabb012SOng Boon Leong 		fallthrough;
49265fabb012SOng Boon Leong 	case XDP_DROP:
49275fabb012SOng Boon Leong 		res = STMMAC_XDP_CONSUMED;
49285fabb012SOng Boon Leong 		break;
49295fabb012SOng Boon Leong 	}
49305fabb012SOng Boon Leong 
4931bba71cacSOng Boon Leong 	return res;
4932bba71cacSOng Boon Leong }
4933bba71cacSOng Boon Leong 
4934bba71cacSOng Boon Leong static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4935bba71cacSOng Boon Leong 					   struct xdp_buff *xdp)
4936bba71cacSOng Boon Leong {
4937bba71cacSOng Boon Leong 	struct bpf_prog *prog;
4938bba71cacSOng Boon Leong 	int res;
4939bba71cacSOng Boon Leong 
4940bba71cacSOng Boon Leong 	prog = READ_ONCE(priv->xdp_prog);
4941bba71cacSOng Boon Leong 	if (!prog) {
4942bba71cacSOng Boon Leong 		res = STMMAC_XDP_PASS;
49432f1e432dSToke Høiland-Jørgensen 		goto out;
4944bba71cacSOng Boon Leong 	}
4945bba71cacSOng Boon Leong 
4946bba71cacSOng Boon Leong 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
49472f1e432dSToke Høiland-Jørgensen out:
49485fabb012SOng Boon Leong 	return ERR_PTR(-res);
49495fabb012SOng Boon Leong }
49505fabb012SOng Boon Leong 
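/*
 * Editor's sketch (not part of the driver): the ERR_PTR(-res) trick above
 * smuggles a positive STMMAC_XDP_* status code through a pointer return,
 * which the RX path later decodes with -PTR_ERR(). A user-space analogue
 * with hypothetical encode/decode helpers:
 */
#include <stdint.h>

static void *sketch_encode_status(int res)
{
	return (void *)(intptr_t)(-res); /* mirrors ERR_PTR(-res) */
}

static int sketch_decode_status(void *p)
{
	return (int)-(intptr_t)p;        /* mirrors -PTR_ERR(ptr) */
}
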
4951be8b38a7SOng Boon Leong static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4952be8b38a7SOng Boon Leong 				   int xdp_status)
4953be8b38a7SOng Boon Leong {
4954be8b38a7SOng Boon Leong 	int cpu = smp_processor_id();
4955be8b38a7SOng Boon Leong 	int queue;
4956be8b38a7SOng Boon Leong 
4957be8b38a7SOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4958be8b38a7SOng Boon Leong 
4959be8b38a7SOng Boon Leong 	if (xdp_status & STMMAC_XDP_TX)
4960be8b38a7SOng Boon Leong 		stmmac_tx_timer_arm(priv, queue);
49618b278a5bSOng Boon Leong 
49628b278a5bSOng Boon Leong 	if (xdp_status & STMMAC_XDP_REDIRECT)
49638b278a5bSOng Boon Leong 		xdp_do_flush();
4964be8b38a7SOng Boon Leong }
4965be8b38a7SOng Boon Leong 
4966bba2556eSOng Boon Leong static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4967bba2556eSOng Boon Leong 					       struct xdp_buff *xdp)
4968bba2556eSOng Boon Leong {
4969bba2556eSOng Boon Leong 	unsigned int metasize = xdp->data - xdp->data_meta;
4970bba2556eSOng Boon Leong 	unsigned int datasize = xdp->data_end - xdp->data;
4971bba2556eSOng Boon Leong 	struct sk_buff *skb;
4972bba2556eSOng Boon Leong 
4973132c32eeSOng Boon Leong 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4974bba2556eSOng Boon Leong 			       xdp->data_end - xdp->data_hard_start,
4975bba2556eSOng Boon Leong 			       GFP_ATOMIC | __GFP_NOWARN);
4976bba2556eSOng Boon Leong 	if (unlikely(!skb))
4977bba2556eSOng Boon Leong 		return NULL;
4978bba2556eSOng Boon Leong 
4979bba2556eSOng Boon Leong 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4980bba2556eSOng Boon Leong 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4981bba2556eSOng Boon Leong 	if (metasize)
4982bba2556eSOng Boon Leong 		skb_metadata_set(skb, metasize);
4983bba2556eSOng Boon Leong 
4984bba2556eSOng Boon Leong 	return skb;
4985bba2556eSOng Boon Leong }
4986bba2556eSOng Boon Leong 
4987bba2556eSOng Boon Leong static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4988bba2556eSOng Boon Leong 				   struct dma_desc *p, struct dma_desc *np,
4989bba2556eSOng Boon Leong 				   struct xdp_buff *xdp)
4990bba2556eSOng Boon Leong {
49918070274bSJisheng Zhang 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4992bba2556eSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
4993bba2556eSOng Boon Leong 	unsigned int len = xdp->data_end - xdp->data;
4994bba2556eSOng Boon Leong 	enum pkt_hash_types hash_type;
4995bba2556eSOng Boon Leong 	int coe = priv->hw->rx_csum;
4996bba2556eSOng Boon Leong 	struct sk_buff *skb;
4997bba2556eSOng Boon Leong 	u32 hash;
4998bba2556eSOng Boon Leong 
4999bba2556eSOng Boon Leong 	skb = stmmac_construct_skb_zc(ch, xdp);
5000bba2556eSOng Boon Leong 	if (!skb) {
5001133466c3SJisheng Zhang 		priv->xstats.rx_dropped++;
5002bba2556eSOng Boon Leong 		return;
5003bba2556eSOng Boon Leong 	}
5004bba2556eSOng Boon Leong 
5005bba2556eSOng Boon Leong 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5006bba2556eSOng Boon Leong 	stmmac_rx_vlan(priv->dev, skb);
5007bba2556eSOng Boon Leong 	skb->protocol = eth_type_trans(skb, priv->dev);
5008bba2556eSOng Boon Leong 
500997d574fcSRomain Gantois 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5010bba2556eSOng Boon Leong 		skb_checksum_none_assert(skb);
5011bba2556eSOng Boon Leong 	else
5012bba2556eSOng Boon Leong 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5013bba2556eSOng Boon Leong 
5014bba2556eSOng Boon Leong 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5015bba2556eSOng Boon Leong 		skb_set_hash(skb, hash, hash_type);
5016bba2556eSOng Boon Leong 
5017bba2556eSOng Boon Leong 	skb_record_rx_queue(skb, queue);
5018132c32eeSOng Boon Leong 	napi_gro_receive(&ch->rxtx_napi, skb);
5019bba2556eSOng Boon Leong 
50209680b2abSPetr Tesarik 	u64_stats_update_begin(&rxq_stats->napi_syncp);
50219680b2abSPetr Tesarik 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
50229680b2abSPetr Tesarik 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
50239680b2abSPetr Tesarik 	u64_stats_update_end(&rxq_stats->napi_syncp);
5024bba2556eSOng Boon Leong }
5025bba2556eSOng Boon Leong 
5026bba2556eSOng Boon Leong static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5027bba2556eSOng Boon Leong {
50288531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5029bba2556eSOng Boon Leong 	unsigned int entry = rx_q->dirty_rx;
5030bba2556eSOng Boon Leong 	struct dma_desc *rx_desc = NULL;
5031bba2556eSOng Boon Leong 	bool ret = true;
5032bba2556eSOng Boon Leong 
5033bba2556eSOng Boon Leong 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5034bba2556eSOng Boon Leong 
5035bba2556eSOng Boon Leong 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5036bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5037bba2556eSOng Boon Leong 		dma_addr_t dma_addr;
5038bba2556eSOng Boon Leong 		bool use_rx_wd;
5039bba2556eSOng Boon Leong 
5040bba2556eSOng Boon Leong 		if (!buf->xdp) {
5041bba2556eSOng Boon Leong 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5042bba2556eSOng Boon Leong 			if (!buf->xdp) {
5043bba2556eSOng Boon Leong 				ret = false;
5044bba2556eSOng Boon Leong 				break;
5045bba2556eSOng Boon Leong 			}
5046bba2556eSOng Boon Leong 		}
5047bba2556eSOng Boon Leong 
5048bba2556eSOng Boon Leong 		if (priv->extend_desc)
5049bba2556eSOng Boon Leong 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5050bba2556eSOng Boon Leong 		else
5051bba2556eSOng Boon Leong 			rx_desc = rx_q->dma_rx + entry;
5052bba2556eSOng Boon Leong 
5053bba2556eSOng Boon Leong 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5054bba2556eSOng Boon Leong 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5055bba2556eSOng Boon Leong 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5056bba2556eSOng Boon Leong 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5057bba2556eSOng Boon Leong 
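		/* RX interrupt coalescing: request the RX watchdog on this
		 * descriptor only when frame-count coalescing is not due and
		 * watchdog-based coalescing (RIWT) is in use.
		 */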
5058bba2556eSOng Boon Leong 		rx_q->rx_count_frames++;
5059bba2556eSOng Boon Leong 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5060bba2556eSOng Boon Leong 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5061bba2556eSOng Boon Leong 			rx_q->rx_count_frames = 0;
5062bba2556eSOng Boon Leong 
5063bba2556eSOng Boon Leong 		use_rx_wd = !priv->rx_coal_frames[queue];
5064bba2556eSOng Boon Leong 		use_rx_wd |= rx_q->rx_count_frames > 0;
5065bba2556eSOng Boon Leong 		if (!priv->use_riwt)
5066bba2556eSOng Boon Leong 			use_rx_wd = false;
5067bba2556eSOng Boon Leong 
5068bba2556eSOng Boon Leong 		dma_wmb();
5069bba2556eSOng Boon Leong 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5070bba2556eSOng Boon Leong 
50718531c808SChristian Marangi 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5072bba2556eSOng Boon Leong 	}
5073bba2556eSOng Boon Leong 
5074bba2556eSOng Boon Leong 	if (rx_desc) {
5075bba2556eSOng Boon Leong 		rx_q->dirty_rx = entry;
5076bba2556eSOng Boon Leong 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5077bba2556eSOng Boon Leong 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5078bba2556eSOng Boon Leong 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5079bba2556eSOng Boon Leong 	}
5080bba2556eSOng Boon Leong 
5081bba2556eSOng Boon Leong 	return ret;
5082bba2556eSOng Boon Leong }
5083bba2556eSOng Boon Leong 
50849570df35SSong Yoong Siang static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
50859570df35SSong Yoong Siang {
50869570df35SSong Yoong Siang 	/* In the XDP zero copy data path, the xdp field in struct xdp_buff_xsk
50879570df35SSong Yoong Siang 	 * represents the incoming packet, whereas the cb field in the same
50889570df35SSong Yoong Siang 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
50899570df35SSong Yoong Siang 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
50909570df35SSong Yoong Siang 	 */
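	/* Illustrative sketch only (the real struct lives in the XSK headers
	 * and has more fields; names here are for explanation):
	 *
	 *	struct xdp_buff_xsk {
	 *		struct xdp_buff xdp;	// packet view, first member
	 *		u8 cb[];		// driver scratch area
	 *		...
	 *	};
	 */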
50919570df35SSong Yoong Siang 	return (struct stmmac_xdp_buff *)xdp;
50929570df35SSong Yoong Siang }
50939570df35SSong Yoong Siang 
5094bba2556eSOng Boon Leong static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5095bba2556eSOng Boon Leong {
50968070274bSJisheng Zhang 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
50978531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5098bba2556eSOng Boon Leong 	unsigned int count = 0, error = 0, len = 0;
5099bba2556eSOng Boon Leong 	int dirty = stmmac_rx_dirty(priv, queue);
5100bba2556eSOng Boon Leong 	unsigned int next_entry = rx_q->cur_rx;
5101133466c3SJisheng Zhang 	u32 rx_errors = 0, rx_dropped = 0;
5102bba2556eSOng Boon Leong 	unsigned int desc_size;
5103bba2556eSOng Boon Leong 	struct bpf_prog *prog;
5104bba2556eSOng Boon Leong 	bool failure = false;
5105bba2556eSOng Boon Leong 	int xdp_status = 0;
5106bba2556eSOng Boon Leong 	int status = 0;
5107bba2556eSOng Boon Leong 
5108bba2556eSOng Boon Leong 	if (netif_msg_rx_status(priv)) {
5109bba2556eSOng Boon Leong 		void *rx_head;
5110bba2556eSOng Boon Leong 
5111bba2556eSOng Boon Leong 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5112bba2556eSOng Boon Leong 		if (priv->extend_desc) {
5113bba2556eSOng Boon Leong 			rx_head = (void *)rx_q->dma_erx;
5114bba2556eSOng Boon Leong 			desc_size = sizeof(struct dma_extended_desc);
5115bba2556eSOng Boon Leong 		} else {
5116bba2556eSOng Boon Leong 			rx_head = (void *)rx_q->dma_rx;
5117bba2556eSOng Boon Leong 			desc_size = sizeof(struct dma_desc);
5118bba2556eSOng Boon Leong 		}
5119bba2556eSOng Boon Leong 
51208531c808SChristian Marangi 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5121bba2556eSOng Boon Leong 				    rx_q->dma_rx_phy, desc_size);
5122bba2556eSOng Boon Leong 	}
5123bba2556eSOng Boon Leong 	while (count < limit) {
5124bba2556eSOng Boon Leong 		struct stmmac_rx_buffer *buf;
51259570df35SSong Yoong Siang 		struct stmmac_xdp_buff *ctx;
5126bba2556eSOng Boon Leong 		unsigned int buf1_len = 0;
5127bba2556eSOng Boon Leong 		struct dma_desc *np, *p;
5128bba2556eSOng Boon Leong 		int entry;
5129bba2556eSOng Boon Leong 		int res;
5130bba2556eSOng Boon Leong 
5131bba2556eSOng Boon Leong 		if (!count && rx_q->state_saved) {
5132bba2556eSOng Boon Leong 			error = rx_q->state.error;
5133bba2556eSOng Boon Leong 			len = rx_q->state.len;
5134bba2556eSOng Boon Leong 		} else {
5135bba2556eSOng Boon Leong 			rx_q->state_saved = false;
5136bba2556eSOng Boon Leong 			error = 0;
5137bba2556eSOng Boon Leong 			len = 0;
5138bba2556eSOng Boon Leong 		}
5139bba2556eSOng Boon Leong 
5140bba2556eSOng Boon Leong 		if (count >= limit)
5141bba2556eSOng Boon Leong 			break;
5142bba2556eSOng Boon Leong 
5143bba2556eSOng Boon Leong read_again:
5144bba2556eSOng Boon Leong 		buf1_len = 0;
5145bba2556eSOng Boon Leong 		entry = next_entry;
5146bba2556eSOng Boon Leong 		buf = &rx_q->buf_pool[entry];
5147bba2556eSOng Boon Leong 
5148bba2556eSOng Boon Leong 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5149bba2556eSOng Boon Leong 			failure = failure ||
5150bba2556eSOng Boon Leong 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5151bba2556eSOng Boon Leong 			dirty = 0;
5152bba2556eSOng Boon Leong 		}
5153bba2556eSOng Boon Leong 
5154bba2556eSOng Boon Leong 		if (priv->extend_desc)
5155bba2556eSOng Boon Leong 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5156bba2556eSOng Boon Leong 		else
5157bba2556eSOng Boon Leong 			p = rx_q->dma_rx + entry;
5158bba2556eSOng Boon Leong 
5159bba2556eSOng Boon Leong 		/* read the status of the incoming frame */
5160133466c3SJisheng Zhang 		status = stmmac_rx_status(priv, &priv->xstats, p);
5161bba2556eSOng Boon Leong 		/* check if managed by the DMA, otherwise go ahead */
5162bba2556eSOng Boon Leong 		if (unlikely(status & dma_own))
5163bba2556eSOng Boon Leong 			break;
5164bba2556eSOng Boon Leong 
5165bba2556eSOng Boon Leong 		/* Prefetch the next RX descriptor */
5166bba2556eSOng Boon Leong 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
51678531c808SChristian Marangi 						priv->dma_conf.dma_rx_size);
5168bba2556eSOng Boon Leong 		next_entry = rx_q->cur_rx;
5169bba2556eSOng Boon Leong 
5170bba2556eSOng Boon Leong 		if (priv->extend_desc)
5171bba2556eSOng Boon Leong 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5172bba2556eSOng Boon Leong 		else
5173bba2556eSOng Boon Leong 			np = rx_q->dma_rx + next_entry;
5174bba2556eSOng Boon Leong 
5175bba2556eSOng Boon Leong 		prefetch(np);
5176bba2556eSOng Boon Leong 
51772b9fff64SSong Yoong Siang 		/* Ensure a valid XSK buffer before proceeding */
51782b9fff64SSong Yoong Siang 		if (!buf->xdp)
51792b9fff64SSong Yoong Siang 			break;
51802b9fff64SSong Yoong Siang 
5181bba2556eSOng Boon Leong 		if (priv->extend_desc)
5182133466c3SJisheng Zhang 			stmmac_rx_extended_status(priv, &priv->xstats,
5183bba2556eSOng Boon Leong 						  rx_q->dma_erx + entry);
5184bba2556eSOng Boon Leong 		if (unlikely(status == discard_frame)) {
5185bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
5186bba2556eSOng Boon Leong 			buf->xdp = NULL;
5187bba2556eSOng Boon Leong 			dirty++;
5188bba2556eSOng Boon Leong 			error = 1;
5189bba2556eSOng Boon Leong 			if (!priv->hwts_rx_en)
5190133466c3SJisheng Zhang 				rx_errors++;
5191bba2556eSOng Boon Leong 		}
5192bba2556eSOng Boon Leong 
5193bba2556eSOng Boon Leong 		if (unlikely(error && (status & rx_not_ls)))
5194bba2556eSOng Boon Leong 			goto read_again;
5195bba2556eSOng Boon Leong 		if (unlikely(error)) {
5196bba2556eSOng Boon Leong 			count++;
5197bba2556eSOng Boon Leong 			continue;
5198bba2556eSOng Boon Leong 		}
5199bba2556eSOng Boon Leong 
5200bba2556eSOng Boon Leong 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5201bba2556eSOng Boon Leong 		if (likely(status & rx_not_ls)) {
5202bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
5203bba2556eSOng Boon Leong 			buf->xdp = NULL;
5204bba2556eSOng Boon Leong 			dirty++;
5205bba2556eSOng Boon Leong 			count++;
5206bba2556eSOng Boon Leong 			goto read_again;
5207bba2556eSOng Boon Leong 		}
5208bba2556eSOng Boon Leong 
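		/* Stash the descriptor pointers in the buffer context so
		 * descriptor-derived data (e.g. RX hardware timestamps) can
		 * be retrieved for this frame later on.
		 */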
52099570df35SSong Yoong Siang 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
52109570df35SSong Yoong Siang 		ctx->priv = priv;
52119570df35SSong Yoong Siang 		ctx->desc = p;
52129570df35SSong Yoong Siang 		ctx->ndesc = np;
52139570df35SSong Yoong Siang 
5214bba2556eSOng Boon Leong 		/* XDP ZC frames only support primary buffers for now */
5215bba2556eSOng Boon Leong 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5216bba2556eSOng Boon Leong 		len += buf1_len;
5217bba2556eSOng Boon Leong 
5218929d4342SKurt Kanzenbach 		/* ACS is disabled; strip manually. */
5219929d4342SKurt Kanzenbach 		if (likely(!(status & rx_not_ls))) {
5220bba2556eSOng Boon Leong 			buf1_len -= ETH_FCS_LEN;
5221bba2556eSOng Boon Leong 			len -= ETH_FCS_LEN;
5222bba2556eSOng Boon Leong 		}
5223bba2556eSOng Boon Leong 
5224bba2556eSOng Boon Leong 		/* RX buffer is good and fits into an XSK pool buffer */
5225bba2556eSOng Boon Leong 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5226bba2556eSOng Boon Leong 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5227bba2556eSOng Boon Leong 
5228bba2556eSOng Boon Leong 		prog = READ_ONCE(priv->xdp_prog);
5229bba2556eSOng Boon Leong 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5230bba2556eSOng Boon Leong 
5231bba2556eSOng Boon Leong 		switch (res) {
5232bba2556eSOng Boon Leong 		case STMMAC_XDP_PASS:
5233bba2556eSOng Boon Leong 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5234bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
5235bba2556eSOng Boon Leong 			break;
5236bba2556eSOng Boon Leong 		case STMMAC_XDP_CONSUMED:
5237bba2556eSOng Boon Leong 			xsk_buff_free(buf->xdp);
5238133466c3SJisheng Zhang 			rx_dropped++;
5239bba2556eSOng Boon Leong 			break;
5240bba2556eSOng Boon Leong 		case STMMAC_XDP_TX:
5241bba2556eSOng Boon Leong 		case STMMAC_XDP_REDIRECT:
5242bba2556eSOng Boon Leong 			xdp_status |= res;
5243bba2556eSOng Boon Leong 			break;
5244bba2556eSOng Boon Leong 		}
5245bba2556eSOng Boon Leong 
5246bba2556eSOng Boon Leong 		buf->xdp = NULL;
5247bba2556eSOng Boon Leong 		dirty++;
5248bba2556eSOng Boon Leong 		count++;
5249bba2556eSOng Boon Leong 	}
5250bba2556eSOng Boon Leong 
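	/* A frame can span NAPI polls: if the last descriptor seen was not
	 * the last segment, save the partial state and resume from it on
	 * the next poll (see the state_saved check at the top of the loop).
	 */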
5251bba2556eSOng Boon Leong 	if (status & rx_not_ls) {
5252bba2556eSOng Boon Leong 		rx_q->state_saved = true;
5253bba2556eSOng Boon Leong 		rx_q->state.error = error;
5254bba2556eSOng Boon Leong 		rx_q->state.len = len;
5255bba2556eSOng Boon Leong 	}
5256bba2556eSOng Boon Leong 
5257bba2556eSOng Boon Leong 	stmmac_finalize_xdp_rx(priv, xdp_status);
5258bba2556eSOng Boon Leong 
52599680b2abSPetr Tesarik 	u64_stats_update_begin(&rxq_stats->napi_syncp);
52609680b2abSPetr Tesarik 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
52619680b2abSPetr Tesarik 	u64_stats_update_end(&rxq_stats->napi_syncp);
5262133466c3SJisheng Zhang 
5263133466c3SJisheng Zhang 	priv->xstats.rx_dropped += rx_dropped;
5264133466c3SJisheng Zhang 	priv->xstats.rx_errors += rx_errors;
526568e9c5deSVijayakannan Ayyathurai 
5266bba2556eSOng Boon Leong 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5267bba2556eSOng Boon Leong 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5268bba2556eSOng Boon Leong 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5269bba2556eSOng Boon Leong 		else
5270bba2556eSOng Boon Leong 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5271bba2556eSOng Boon Leong 
5272bba2556eSOng Boon Leong 		return (int)count;
5273bba2556eSOng Boon Leong 	}
5274bba2556eSOng Boon Leong 
5275bba2556eSOng Boon Leong 	return failure ? limit : (int)count;
5276bba2556eSOng Boon Leong }
5277bba2556eSOng Boon Leong 
527832ceabcaSGiuseppe CAVALLARO /**
5279732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
528032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
528154139cf3SJoao Pinto  * @limit: napi budget
528254139cf3SJoao Pinto  * @queue: RX queue index.
528332ceabcaSGiuseppe CAVALLARO  * Description: this is the function called by the napi poll method.
528432ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
528532ceabcaSGiuseppe CAVALLARO  */
528654139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
52877ac6653aSJeff Kirsher {
5288133466c3SJisheng Zhang 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
52898070274bSJisheng Zhang 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
52908531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
52918fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
5292ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
5293ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
529407b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
52955fabb012SOng Boon Leong 	enum dma_data_direction dma_dir;
5296bfaf91caSJoakim Zhang 	unsigned int desc_size;
5297ec222003SJose Abreu 	struct sk_buff *skb = NULL;
52985b24324aSSong Yoong Siang 	struct stmmac_xdp_buff ctx;
5299be8b38a7SOng Boon Leong 	int xdp_status = 0;
53005fabb012SOng Boon Leong 	int buf_sz;
53015fabb012SOng Boon Leong 
53025fabb012SOng Boon Leong 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
53038531c808SChristian Marangi 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5304779334e5SBaruch Siach 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
53057ac6653aSJeff Kirsher 
530683d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
5307d0225e7dSAlexandre TORGUE 		void *rx_head;
5308d0225e7dSAlexandre TORGUE 
530938ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5310bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
531154139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
5312bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
5313bfaf91caSJoakim Zhang 		} else {
531454139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
5315bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
5316bfaf91caSJoakim Zhang 		}
5317d0225e7dSAlexandre TORGUE 
53188531c808SChristian Marangi 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5319bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
53207ac6653aSJeff Kirsher 	}
5321c24602efSGiuseppe CAVALLARO 	while (count < limit) {
532288ebe2cfSJose Abreu 		unsigned int buf1_len = 0, buf2_len = 0;
5323ec222003SJose Abreu 		enum pkt_hash_types hash_type;
53242af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
53252af6106aSJose Abreu 		struct dma_desc *np, *p;
5326ec222003SJose Abreu 		int entry;
5327ec222003SJose Abreu 		u32 hash;
53287ac6653aSJeff Kirsher 
5329ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
5330ec222003SJose Abreu 			skb = rx_q->state.skb;
5331ec222003SJose Abreu 			error = rx_q->state.error;
5332ec222003SJose Abreu 			len = rx_q->state.len;
5333ec222003SJose Abreu 		} else {
5334ec222003SJose Abreu 			rx_q->state_saved = false;
5335ec222003SJose Abreu 			skb = NULL;
5336ec222003SJose Abreu 			error = 0;
5337ec222003SJose Abreu 			len = 0;
5338ec222003SJose Abreu 		}
5339ec222003SJose Abreu 
5340e5d20035SBaruch Siach read_again:
5341ec222003SJose Abreu 		if (count >= limit)
5342ec222003SJose Abreu 			break;
5343ec222003SJose Abreu 
534488ebe2cfSJose Abreu 		buf1_len = 0;
534588ebe2cfSJose Abreu 		buf2_len = 0;
534607b39753SAaro Koskinen 		entry = next_entry;
53472af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
534807b39753SAaro Koskinen 
5349c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
535054139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5351c24602efSGiuseppe CAVALLARO 		else
535254139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
5353c24602efSGiuseppe CAVALLARO 
5354c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
5355133466c3SJisheng Zhang 		status = stmmac_rx_status(priv, &priv->xstats, p);
5356c1fa3212SFabrice Gasnier 		/* check if managed by the DMA, otherwise go ahead */
5357c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
53587ac6653aSJeff Kirsher 			break;
53597ac6653aSJeff Kirsher 
5360aa042f60SSong, Yoong Siang 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
53618531c808SChristian Marangi 						priv->dma_conf.dma_rx_size);
536254139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
5363e3ad57c9SGiuseppe Cavallaro 
5364c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
536554139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5366c24602efSGiuseppe CAVALLARO 		else
536754139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
5368ba1ffd74SGiuseppe CAVALLARO 
5369ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
53707ac6653aSJeff Kirsher 
537142de047dSJose Abreu 		if (priv->extend_desc)
5372133466c3SJisheng Zhang 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5373891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
53742af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
53752af6106aSJose Abreu 			buf->page = NULL;
5376ec222003SJose Abreu 			error = 1;
53770b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
5378133466c3SJisheng Zhang 				rx_errors++;
5379ec222003SJose Abreu 		}
5380f748be53SAlexandre TORGUE 
5381ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
5382ec222003SJose Abreu 			goto read_again;
5383ec222003SJose Abreu 		if (unlikely(error)) {
5384ec222003SJose Abreu 			dev_kfree_skb(skb);
538588ebe2cfSJose Abreu 			skb = NULL;
5386cda4985aSJose Abreu 			count++;
538707b39753SAaro Koskinen 			continue;
5388e527c4a7SGiuseppe CAVALLARO 		}
5389e527c4a7SGiuseppe CAVALLARO 
5390ec222003SJose Abreu 		/* Buffer is good. Go on. */
5391ec222003SJose Abreu 
53924744bf07SMatteo Croce 		prefetch(page_address(buf->page) + buf->page_offset);
539388ebe2cfSJose Abreu 		if (buf->sec_page)
539488ebe2cfSJose Abreu 			prefetch(page_address(buf->sec_page));
539588ebe2cfSJose Abreu 
539688ebe2cfSJose Abreu 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
539788ebe2cfSJose Abreu 		len += buf1_len;
539888ebe2cfSJose Abreu 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
539988ebe2cfSJose Abreu 		len += buf2_len;
5400ec222003SJose Abreu 
5401929d4342SKurt Kanzenbach 		/* ACS is disabled; strip manually. */
5402929d4342SKurt Kanzenbach 		if (likely(!(status & rx_not_ls))) {
54030f296e78SZekun Shen 			if (buf2_len) {
540488ebe2cfSJose Abreu 				buf2_len -= ETH_FCS_LEN;
5405ec222003SJose Abreu 				len -= ETH_FCS_LEN;
54060f296e78SZekun Shen 			} else if (buf1_len) {
54070f296e78SZekun Shen 				buf1_len -= ETH_FCS_LEN;
54080f296e78SZekun Shen 				len -= ETH_FCS_LEN;
54090f296e78SZekun Shen 			}
541083d7af64SGiuseppe CAVALLARO 		}
541122ad3838SGiuseppe Cavallaro 
5412ec222003SJose Abreu 		if (!skb) {
5413be8b38a7SOng Boon Leong 			unsigned int pre_len, sync_len;
5414be8b38a7SOng Boon Leong 
54155fabb012SOng Boon Leong 			dma_sync_single_for_cpu(priv->device, buf->addr,
54165fabb012SOng Boon Leong 						buf1_len, dma_dir);
54175fabb012SOng Boon Leong 
54185b24324aSSong Yoong Siang 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
54195b24324aSSong Yoong Siang 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5420e3f9c3e3SSong Yoong Siang 					 buf->page_offset, buf1_len, true);
54215fabb012SOng Boon Leong 
54225b24324aSSong Yoong Siang 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5423be8b38a7SOng Boon Leong 				  buf->page_offset;
5424e3f9c3e3SSong Yoong Siang 
5425e3f9c3e3SSong Yoong Siang 			ctx.priv = priv;
5426e3f9c3e3SSong Yoong Siang 			ctx.desc = p;
5427e3f9c3e3SSong Yoong Siang 			ctx.ndesc = np;
5428e3f9c3e3SSong Yoong Siang 
54295b24324aSSong Yoong Siang 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5430be8b38a7SOng Boon Leong 			/* Due to xdp_adjust_tail: the DMA sync for_device
5431be8b38a7SOng Boon Leong 			 * must cover the max length the CPU touched
5432be8b38a7SOng Boon Leong 			 */
54335b24324aSSong Yoong Siang 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5434be8b38a7SOng Boon Leong 				   buf->page_offset;
5435be8b38a7SOng Boon Leong 			sync_len = max(sync_len, pre_len);
54365fabb012SOng Boon Leong 
54375fabb012SOng Boon Leong 			/* For non-XDP_PASS verdicts */
54385fabb012SOng Boon Leong 			if (IS_ERR(skb)) {
54395fabb012SOng Boon Leong 				unsigned int xdp_res = -PTR_ERR(skb);
54405fabb012SOng Boon Leong 
54415fabb012SOng Boon Leong 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5442be8b38a7SOng Boon Leong 					page_pool_put_page(rx_q->page_pool,
54435b24324aSSong Yoong Siang 							   virt_to_head_page(ctx.xdp.data),
5444be8b38a7SOng Boon Leong 							   sync_len, true);
54455fabb012SOng Boon Leong 					buf->page = NULL;
5446133466c3SJisheng Zhang 					rx_dropped++;
54475fabb012SOng Boon Leong 
54485fabb012SOng Boon Leong 					/* Clear skb as it carried the XDP
54495fabb012SOng Boon Leong 					 * verdict status, not a real buffer.
54505fabb012SOng Boon Leong 					 */
54515fabb012SOng Boon Leong 					skb = NULL;
54525fabb012SOng Boon Leong 
54535fabb012SOng Boon Leong 					if (unlikely((status & rx_not_ls)))
54545fabb012SOng Boon Leong 						goto read_again;
54555fabb012SOng Boon Leong 
54565fabb012SOng Boon Leong 					count++;
54575fabb012SOng Boon Leong 					continue;
54588b278a5bSOng Boon Leong 				} else if (xdp_res & (STMMAC_XDP_TX |
54598b278a5bSOng Boon Leong 						      STMMAC_XDP_REDIRECT)) {
5460be8b38a7SOng Boon Leong 					xdp_status |= xdp_res;
5461be8b38a7SOng Boon Leong 					buf->page = NULL;
5462be8b38a7SOng Boon Leong 					skb = NULL;
5463be8b38a7SOng Boon Leong 					count++;
5464be8b38a7SOng Boon Leong 					continue;
54655fabb012SOng Boon Leong 				}
54665fabb012SOng Boon Leong 			}
54675fabb012SOng Boon Leong 		}
54685fabb012SOng Boon Leong 
54695fabb012SOng Boon Leong 		if (!skb) {
54705fabb012SOng Boon Leong 			/* XDP program may expand or reduce tail */
54715b24324aSSong Yoong Siang 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
54725fabb012SOng Boon Leong 
547388ebe2cfSJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5474ec222003SJose Abreu 			if (!skb) {
5475133466c3SJisheng Zhang 				rx_dropped++;
5476cda4985aSJose Abreu 				count++;
547788ebe2cfSJose Abreu 				goto drain_data;
547822ad3838SGiuseppe Cavallaro 			}
547922ad3838SGiuseppe Cavallaro 
54805fabb012SOng Boon Leong 			/* XDP program may adjust header */
54815b24324aSSong Yoong Siang 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
548288ebe2cfSJose Abreu 			skb_put(skb, buf1_len);
548322ad3838SGiuseppe Cavallaro 
5484ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
5485ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5486ec222003SJose Abreu 			buf->page = NULL;
548788ebe2cfSJose Abreu 		} else if (buf1_len) {
5488ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
54895fabb012SOng Boon Leong 						buf1_len, dma_dir);
5490ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
54915fabb012SOng Boon Leong 					buf->page, buf->page_offset, buf1_len,
54928531c808SChristian Marangi 					priv->dma_conf.dma_buf_sz);
5493ec222003SJose Abreu 
5494ec222003SJose Abreu 			/* Data payload appended into SKB */
549598e2727cSJakub Kicinski 			skb_mark_for_recycle(skb);
5496ec222003SJose Abreu 			buf->page = NULL;
54977ac6653aSJeff Kirsher 		}
549883d7af64SGiuseppe CAVALLARO 
549988ebe2cfSJose Abreu 		if (buf2_len) {
550067afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
55015fabb012SOng Boon Leong 						buf2_len, dma_dir);
550267afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
550388ebe2cfSJose Abreu 					buf->sec_page, 0, buf2_len,
55048531c808SChristian Marangi 					priv->dma_conf.dma_buf_sz);
550567afd6d1SJose Abreu 
550667afd6d1SJose Abreu 			/* Data payload appended into SKB */
550798e2727cSJakub Kicinski 			skb_mark_for_recycle(skb);
550867afd6d1SJose Abreu 			buf->sec_page = NULL;
550967afd6d1SJose Abreu 		}
551067afd6d1SJose Abreu 
551188ebe2cfSJose Abreu drain_data:
5512ec222003SJose Abreu 		if (likely(status & rx_not_ls))
5513ec222003SJose Abreu 			goto read_again;
551488ebe2cfSJose Abreu 		if (!skb)
551588ebe2cfSJose Abreu 			continue;
5516ec222003SJose Abreu 
5517ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
5518ec222003SJose Abreu 
5519ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5520b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
55217ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
55227ac6653aSJeff Kirsher 
552397d574fcSRomain Gantois 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
55247ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
552562a2ab93SGiuseppe CAVALLARO 		else
55267ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
552762a2ab93SGiuseppe CAVALLARO 
552876067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
552976067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
553076067459SJose Abreu 
553176067459SJose Abreu 		skb_record_rx_queue(skb, queue);
55324ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
553388ebe2cfSJose Abreu 		skb = NULL;
55347ac6653aSJeff Kirsher 
5535133466c3SJisheng Zhang 		rx_packets++;
5536133466c3SJisheng Zhang 		rx_bytes += len;
5537cda4985aSJose Abreu 		count++;
55387ac6653aSJeff Kirsher 	}
5539ec222003SJose Abreu 
554088ebe2cfSJose Abreu 	if (status & rx_not_ls || skb) {
5541ec222003SJose Abreu 		rx_q->state_saved = true;
5542ec222003SJose Abreu 		rx_q->state.skb = skb;
5543ec222003SJose Abreu 		rx_q->state.error = error;
5544ec222003SJose Abreu 		rx_q->state.len = len;
55457ac6653aSJeff Kirsher 	}
55467ac6653aSJeff Kirsher 
5547be8b38a7SOng Boon Leong 	stmmac_finalize_xdp_rx(priv, xdp_status);
5548be8b38a7SOng Boon Leong 
554954139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
55507ac6653aSJeff Kirsher 
55519680b2abSPetr Tesarik 	u64_stats_update_begin(&rxq_stats->napi_syncp);
55529680b2abSPetr Tesarik 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
55539680b2abSPetr Tesarik 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
55549680b2abSPetr Tesarik 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
55559680b2abSPetr Tesarik 	u64_stats_update_end(&rxq_stats->napi_syncp);
5556133466c3SJisheng Zhang 
5557133466c3SJisheng Zhang 	priv->xstats.rx_dropped += rx_dropped;
5558133466c3SJisheng Zhang 	priv->xstats.rx_errors += rx_errors;
55597ac6653aSJeff Kirsher 
55607ac6653aSJeff Kirsher 	return count;
55617ac6653aSJeff Kirsher }
55627ac6653aSJeff Kirsher 
55634ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
55647ac6653aSJeff Kirsher {
55658fce3331SJose Abreu 	struct stmmac_channel *ch =
55664ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
55678fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
55688070274bSJisheng Zhang 	struct stmmac_rxq_stats *rxq_stats;
55698fce3331SJose Abreu 	u32 chan = ch->index;
55704ccb4585SJose Abreu 	int work_done;
55717ac6653aSJeff Kirsher 
55728070274bSJisheng Zhang 	rxq_stats = &priv->xstats.rxq_stats[chan];
55739680b2abSPetr Tesarik 	u64_stats_update_begin(&rxq_stats->napi_syncp);
55749680b2abSPetr Tesarik 	u64_stats_inc(&rxq_stats->napi.poll);
55759680b2abSPetr Tesarik 	u64_stats_update_end(&rxq_stats->napi_syncp);
5576ce736788SJoao Pinto 
5577132c32eeSOng Boon Leong 	work_done = stmmac_rx(priv, budget, chan);
5578021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5579021bd5e3SJose Abreu 		unsigned long flags;
5580021bd5e3SJose Abreu 
5581021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5582021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5583021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5584021bd5e3SJose Abreu 	}
5585021bd5e3SJose Abreu 
55864ccb4585SJose Abreu 	return work_done;
55874ccb4585SJose Abreu }
5588ce736788SJoao Pinto 
55894ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
55904ccb4585SJose Abreu {
55914ccb4585SJose Abreu 	struct stmmac_channel *ch =
55924ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
55934ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
55948070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats;
55954ccb4585SJose Abreu 	u32 chan = ch->index;
55964ccb4585SJose Abreu 	int work_done;
55974ccb4585SJose Abreu 
55988070274bSJisheng Zhang 	txq_stats = &priv->xstats.txq_stats[chan];
55999680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->napi_syncp);
56009680b2abSPetr Tesarik 	u64_stats_inc(&txq_stats->napi.poll);
56019680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->napi_syncp);
56024ccb4585SJose Abreu 
5603132c32eeSOng Boon Leong 	work_done = stmmac_tx_clean(priv, budget, chan);
5604fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
56058fce3331SJose Abreu 
5606021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5607021bd5e3SJose Abreu 		unsigned long flags;
56084ccb4585SJose Abreu 
5609021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
5610021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5611021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
5612fa0be0a4SJose Abreu 	}
56138fce3331SJose Abreu 
56147ac6653aSJeff Kirsher 	return work_done;
56157ac6653aSJeff Kirsher }
56167ac6653aSJeff Kirsher 
5617132c32eeSOng Boon Leong static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5618132c32eeSOng Boon Leong {
5619132c32eeSOng Boon Leong 	struct stmmac_channel *ch =
5620132c32eeSOng Boon Leong 		container_of(napi, struct stmmac_channel, rxtx_napi);
5621132c32eeSOng Boon Leong 	struct stmmac_priv *priv = ch->priv_data;
562281d0885dSSong Yoong Siang 	int rx_done, tx_done, rxtx_done;
56238070274bSJisheng Zhang 	struct stmmac_rxq_stats *rxq_stats;
56248070274bSJisheng Zhang 	struct stmmac_txq_stats *txq_stats;
5625132c32eeSOng Boon Leong 	u32 chan = ch->index;
5626132c32eeSOng Boon Leong 
56278070274bSJisheng Zhang 	rxq_stats = &priv->xstats.rxq_stats[chan];
56289680b2abSPetr Tesarik 	u64_stats_update_begin(&rxq_stats->napi_syncp);
56299680b2abSPetr Tesarik 	u64_stats_inc(&rxq_stats->napi.poll);
56309680b2abSPetr Tesarik 	u64_stats_update_end(&rxq_stats->napi_syncp);
5631133466c3SJisheng Zhang 
56328070274bSJisheng Zhang 	txq_stats = &priv->xstats.txq_stats[chan];
56339680b2abSPetr Tesarik 	u64_stats_update_begin(&txq_stats->napi_syncp);
56349680b2abSPetr Tesarik 	u64_stats_inc(&txq_stats->napi.poll);
56359680b2abSPetr Tesarik 	u64_stats_update_end(&txq_stats->napi_syncp);
5636132c32eeSOng Boon Leong 
5637132c32eeSOng Boon Leong 	tx_done = stmmac_tx_clean(priv, budget, chan);
5638132c32eeSOng Boon Leong 	tx_done = min(tx_done, budget);
5639132c32eeSOng Boon Leong 
5640132c32eeSOng Boon Leong 	rx_done = stmmac_rx_zc(priv, budget, chan);
5641132c32eeSOng Boon Leong 
564281d0885dSSong Yoong Siang 	rxtx_done = max(tx_done, rx_done);
564381d0885dSSong Yoong Siang 
5644132c32eeSOng Boon Leong 	/* If either TX or RX work is not complete, return budget
5645132c32eeSOng Boon Leong 	 * and keep polling
5646132c32eeSOng Boon Leong 	 */
564781d0885dSSong Yoong Siang 	if (rxtx_done >= budget)
5648132c32eeSOng Boon Leong 		return budget;
5649132c32eeSOng Boon Leong 
5650132c32eeSOng Boon Leong 	/* all work done, exit the polling mode */
565181d0885dSSong Yoong Siang 	if (napi_complete_done(napi, rxtx_done)) {
5652132c32eeSOng Boon Leong 		unsigned long flags;
5653132c32eeSOng Boon Leong 
5654132c32eeSOng Boon Leong 		spin_lock_irqsave(&ch->lock, flags);
5655132c32eeSOng Boon Leong 		/* Both RX and TX work are complete,
5656132c32eeSOng Boon Leong 		 * so enable both RX & TX IRQs.
5657132c32eeSOng Boon Leong 		 */
5658132c32eeSOng Boon Leong 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5659132c32eeSOng Boon Leong 		spin_unlock_irqrestore(&ch->lock, flags);
5660132c32eeSOng Boon Leong 	}
5661132c32eeSOng Boon Leong 
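	/* Return strictly less than budget: napi_complete_done() may have
	 * declined the completion, and returning the full budget here would
	 * tell the NAPI core there is still work pending.
	 */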
566281d0885dSSong Yoong Siang 	return min(rxtx_done, budget - 1);
5663132c32eeSOng Boon Leong }
5664132c32eeSOng Boon Leong 
56657ac6653aSJeff Kirsher /**
56667ac6653aSJeff Kirsher  *  stmmac_tx_timeout
56677ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
5668d0ea5cbdSJesse Brandeburg  *  @txqueue: the index of the hanging transmit queue
56697ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
56707284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
56717ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
56727ac6653aSJeff Kirsher  *   in order to transmit a new packet.
56737ac6653aSJeff Kirsher  */
56740290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
56757ac6653aSJeff Kirsher {
56767ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
56777ac6653aSJeff Kirsher 
567834877a15SJose Abreu 	stmmac_global_err(priv);
56797ac6653aSJeff Kirsher }
56807ac6653aSJeff Kirsher 
56817ac6653aSJeff Kirsher /**
568201789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
56837ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
56847ac6653aSJeff Kirsher  *  Description:
56857ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
56867ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
56877ac6653aSJeff Kirsher  *  Return value:
56887ac6653aSJeff Kirsher  *  void.
56897ac6653aSJeff Kirsher  */
569001789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
56917ac6653aSJeff Kirsher {
56927ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
56937ac6653aSJeff Kirsher 
5694c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
56957ac6653aSJeff Kirsher }
56967ac6653aSJeff Kirsher 
56977ac6653aSJeff Kirsher /**
56987ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
56997ac6653aSJeff Kirsher  *  @dev : device pointer.
57007ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
57017ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
57027ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
57037ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
57047ac6653aSJeff Kirsher  *  Return value:
57057ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
57067ac6653aSJeff Kirsher  *  file on failure.
57077ac6653aSJeff Kirsher  */
57087ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
57097ac6653aSJeff Kirsher {
571038ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
5711eaf4fac4SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
571234700796SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
57135b55299eSDavid Wu 	const int mtu = new_mtu;
571434700796SChristian Marangi 	int ret;
5715eaf4fac4SJose Abreu 
5716eaf4fac4SJose Abreu 	if (txfifosz == 0)
5717eaf4fac4SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
5718eaf4fac4SJose Abreu 
5719eaf4fac4SJose Abreu 	txfifosz /= priv->plat->tx_queues_to_use;
572038ddc59dSLABBE Corentin 
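	/* XDP here uses single-buffer frames, so MTUs above ETH_DATA_LEN
	 * cannot be guaranteed to fit in one RX buffer.
	 */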
57215fabb012SOng Boon Leong 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
57225fabb012SOng Boon Leong 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
57235fabb012SOng Boon Leong 		return -EINVAL;
57245fabb012SOng Boon Leong 	}
57255fabb012SOng Boon Leong 
5726eaf4fac4SJose Abreu 	new_mtu = STMMAC_ALIGN(new_mtu);
5727eaf4fac4SJose Abreu 
5728eaf4fac4SJose Abreu 	/* If condition true, FIFO is too small or MTU too large */
5729eaf4fac4SJose Abreu 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5730eaf4fac4SJose Abreu 		return -EINVAL;
5731eaf4fac4SJose Abreu 
573234700796SChristian Marangi 	if (netif_running(dev)) {
573334700796SChristian Marangi 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
573434700796SChristian Marangi 		/* Try to allocate the new DMA conf with the new mtu */
573534700796SChristian Marangi 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
573634700796SChristian Marangi 		if (IS_ERR(dma_conf)) {
573734700796SChristian Marangi 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
573834700796SChristian Marangi 				   mtu);
573934700796SChristian Marangi 			return PTR_ERR(dma_conf);
574034700796SChristian Marangi 		}
5741f748be53SAlexandre TORGUE 
574234700796SChristian Marangi 		stmmac_release(dev);
574334700796SChristian Marangi 
574434700796SChristian Marangi 		ret = __stmmac_open(dev, dma_conf);
574534700796SChristian Marangi 		if (ret) {
574630134b7cSChristian Marangi 			free_dma_desc_resources(priv, dma_conf);
574730134b7cSChristian Marangi 			kfree(dma_conf);
574834700796SChristian Marangi 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
574934700796SChristian Marangi 			return ret;
575034700796SChristian Marangi 		}
575134700796SChristian Marangi 
575230134b7cSChristian Marangi 		kfree(dma_conf);
575330134b7cSChristian Marangi 
575434700796SChristian Marangi 		stmmac_set_rx_mode(dev);
575534700796SChristian Marangi 	}
575634700796SChristian Marangi 
575734700796SChristian Marangi 	dev->mtu = mtu;
57587ac6653aSJeff Kirsher 	netdev_update_features(dev);
57597ac6653aSJeff Kirsher 
57607ac6653aSJeff Kirsher 	return 0;
57617ac6653aSJeff Kirsher }
57627ac6653aSJeff Kirsher 
5763c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
5764c8f44affSMichał Mirosław 					     netdev_features_t features)
57657ac6653aSJeff Kirsher {
57667ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
57677ac6653aSJeff Kirsher 
576838912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
57697ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
5770d2afb5bdSGiuseppe CAVALLARO 
57717ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
5772a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
57737ac6653aSJeff Kirsher 
57747ac6653aSJeff Kirsher 	/* Some GMAC devices have bugged Jumbo frame support that
57757ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
57767ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
5777ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and do not use SF.
5778ceb69499SGiuseppe CAVALLARO 	 */
57797ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5780a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
57817ac6653aSJeff Kirsher 
5782f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
578368861a3bSBartosz Golaszewski 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5784f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
5785f748be53SAlexandre TORGUE 			priv->tso = true;
5786f748be53SAlexandre TORGUE 		else
5787f748be53SAlexandre TORGUE 			priv->tso = false;
5788f748be53SAlexandre TORGUE 	}
5789f748be53SAlexandre TORGUE 
57907ac6653aSJeff Kirsher 	return features;
57917ac6653aSJeff Kirsher }
57927ac6653aSJeff Kirsher 
5793d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
5794d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
5795d2afb5bdSGiuseppe CAVALLARO {
5796d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
5797d2afb5bdSGiuseppe CAVALLARO 
5798d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case csum is supported */
5799d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
5800d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
5801d2afb5bdSGiuseppe CAVALLARO 	else
5802d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
5803d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
5804d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of an issue.
5805d2afb5bdSGiuseppe CAVALLARO 	 */
5806c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
5807d2afb5bdSGiuseppe CAVALLARO 
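	/* Split Header stays enabled only while RX checksum offload is
	 * active, so it is re-evaluated on every features change.
	 */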
5808f8e7dfd6SVincent Whitchurch 	if (priv->sph_cap) {
5809f8e7dfd6SVincent Whitchurch 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5810f8e7dfd6SVincent Whitchurch 		u32 chan;
58115fabb012SOng Boon Leong 
581267afd6d1SJose Abreu 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
581367afd6d1SJose Abreu 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5814f8e7dfd6SVincent Whitchurch 	}
581567afd6d1SJose Abreu 
5816d2afb5bdSGiuseppe CAVALLARO 	return 0;
5817d2afb5bdSGiuseppe CAVALLARO }
5818d2afb5bdSGiuseppe CAVALLARO 
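/* Frame Preemption (FPE) handshake: the two link ends exchange verify and
 * response mPackets. The checks below advance both the local and the link
 * partner state machines and kick the FPE workqueue to finish the switch.
 */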
58195a558611SOng Boon Leong static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
58205a558611SOng Boon Leong {
58215a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
58225a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
58235a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
58245a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
58255a558611SOng Boon Leong 
58265a558611SOng Boon Leong 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
58275a558611SOng Boon Leong 		return;
58285a558611SOng Boon Leong 
58295a558611SOng Boon Leong 	/* If LP has sent verify mPacket, LP is FPE capable */
58305a558611SOng Boon Leong 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
58315a558611SOng Boon Leong 		if (*lp_state < FPE_STATE_CAPABLE)
58325a558611SOng Boon Leong 			*lp_state = FPE_STATE_CAPABLE;
58335a558611SOng Boon Leong 
58345a558611SOng Boon Leong 		/* If the user has requested FPE enable, respond quickly */
58355a558611SOng Boon Leong 		if (*hs_enable)
58365a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5837e1fbdef9SJianheng Zhang 						fpe_cfg,
58385a558611SOng Boon Leong 						MPACKET_RESPONSE);
58395a558611SOng Boon Leong 	}
58405a558611SOng Boon Leong 
58415a558611SOng Boon Leong 	/* If Local has sent verify mPacket, Local is FPE capable */
58425a558611SOng Boon Leong 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
58435a558611SOng Boon Leong 		if (*lo_state < FPE_STATE_CAPABLE)
58445a558611SOng Boon Leong 			*lo_state = FPE_STATE_CAPABLE;
58455a558611SOng Boon Leong 	}
58465a558611SOng Boon Leong 
58475a558611SOng Boon Leong 	/* If LP has sent response mPacket, LP is entering FPE ON */
58485a558611SOng Boon Leong 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
58495a558611SOng Boon Leong 		*lp_state = FPE_STATE_ENTERING_ON;
58505a558611SOng Boon Leong 
58515a558611SOng Boon Leong 	/* If Local has sent response mPacket, Local is entering FPE ON */
58525a558611SOng Boon Leong 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
58535a558611SOng Boon Leong 		*lo_state = FPE_STATE_ENTERING_ON;
58545a558611SOng Boon Leong 
58555a558611SOng Boon Leong 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
58565a558611SOng Boon Leong 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
58575a558611SOng Boon Leong 	    priv->fpe_wq) {
58585a558611SOng Boon Leong 		queue_work(priv->fpe_wq, &priv->fpe_task);
58595a558611SOng Boon Leong 	}
58605a558611SOng Boon Leong }
58615a558611SOng Boon Leong 
586329e6573cSOng Boon Leong static void stmmac_common_interrupt(struct stmmac_priv *priv)
58637ac6653aSJeff Kirsher {
58647bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
58657bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
58667bac4e1eSJoao Pinto 	u32 queues_count;
58677bac4e1eSJoao Pinto 	u32 queue;
58687d9e6c5aSJose Abreu 	bool xmac;
58697bac4e1eSJoao Pinto 
58707d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
58717bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
58727ac6653aSJeff Kirsher 
587389f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
587489f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
587589f7f2cfSSrinivas Kandagatla 
5876e49aa315SVoon Weifeng 	if (priv->dma_cap.estsel)
58779f298959SOng Boon Leong 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
58789f298959SOng Boon Leong 				      &priv->xstats, tx_cnt);
5879e49aa315SVoon Weifeng 
58805a558611SOng Boon Leong 	if (priv->dma_cap.fpesel) {
58815a558611SOng Boon Leong 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
58825a558611SOng Boon Leong 						   priv->dev);
58835a558611SOng Boon Leong 
58845a558611SOng Boon Leong 		stmmac_fpe_event_status(priv, status);
58855a558611SOng Boon Leong 	}
58865a558611SOng Boon Leong 
58877ac6653aSJeff Kirsher 	/* To handle the GMAC's own interrupts */
58887d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
5889c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
58908f71a88dSJoao Pinto 
5891d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
5892d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
58930982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5894d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
58950982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5896d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
58977bac4e1eSJoao Pinto 		}
58987bac4e1eSJoao Pinto 
58997bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
59008a7cb245SYannick Vignon 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
59017bac4e1eSJoao Pinto 							    queue);
59027bac4e1eSJoao Pinto 		}
590370523e63SGiuseppe CAVALLARO 
590470523e63SGiuseppe CAVALLARO 		/* PCS link status */
5905d26979f1SBartosz Golaszewski 		if (priv->hw->pcs &&
5906d26979f1SBartosz Golaszewski 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
590770523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
590829e6573cSOng Boon Leong 				netif_carrier_on(priv->dev);
590970523e63SGiuseppe CAVALLARO 			else
591029e6573cSOng Boon Leong 				netif_carrier_off(priv->dev);
591170523e63SGiuseppe CAVALLARO 		}
5912f4da5652STan Tee Min 
5913f4da5652STan Tee Min 		stmmac_timestamp_interrupt(priv, priv);
5914d765955dSGiuseppe CAVALLARO 	}
591529e6573cSOng Boon Leong }
591629e6573cSOng Boon Leong 
591729e6573cSOng Boon Leong /**
591829e6573cSOng Boon Leong  *  stmmac_interrupt - main ISR
591929e6573cSOng Boon Leong  *  @irq: interrupt number.
592029e6573cSOng Boon Leong  *  @dev_id: to pass the net device pointer.
592129e6573cSOng Boon Leong  *  Description: this is the main driver interrupt service routine.
592229e6573cSOng Boon Leong  *  It can call:
592329e6573cSOng Boon Leong  *  o DMA service routine (to manage incoming frame reception and transmission
592429e6573cSOng Boon Leong  *    status)
592529e6573cSOng Boon Leong  *  o Core interrupts to manage: remote wake-up, management counter, LPI
592629e6573cSOng Boon Leong  *    interrupts.
592729e6573cSOng Boon Leong  */
592829e6573cSOng Boon Leong static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
592929e6573cSOng Boon Leong {
593029e6573cSOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
593129e6573cSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
593229e6573cSOng Boon Leong 
593329e6573cSOng Boon Leong 	/* Check if adapter is up */
593429e6573cSOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
593529e6573cSOng Boon Leong 		return IRQ_HANDLED;
593629e6573cSOng Boon Leong 
593729e6573cSOng Boon Leong 	/* Check if a fatal error happened */
593829e6573cSOng Boon Leong 	if (stmmac_safety_feat_interrupt(priv))
593929e6573cSOng Boon Leong 		return IRQ_HANDLED;
594029e6573cSOng Boon Leong 
594129e6573cSOng Boon Leong 	/* To handle Common interrupts */
594229e6573cSOng Boon Leong 	stmmac_common_interrupt(priv);
5943d765955dSGiuseppe CAVALLARO 
5944d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
59457ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
59467ac6653aSJeff Kirsher 
59477ac6653aSJeff Kirsher 	return IRQ_HANDLED;
59487ac6653aSJeff Kirsher }
59497ac6653aSJeff Kirsher 
59508532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
59518532f613SOng Boon Leong {
59528532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
59538532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
59548532f613SOng Boon Leong 
59558532f613SOng Boon Leong 	/* Check if adapter is up */
59568532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
59578532f613SOng Boon Leong 		return IRQ_HANDLED;
59588532f613SOng Boon Leong 
59598532f613SOng Boon Leong 	/* To handle Common interrupts */
59608532f613SOng Boon Leong 	stmmac_common_interrupt(priv);
59618532f613SOng Boon Leong 
59628532f613SOng Boon Leong 	return IRQ_HANDLED;
59638532f613SOng Boon Leong }
59648532f613SOng Boon Leong 
59658532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
59668532f613SOng Boon Leong {
59678532f613SOng Boon Leong 	struct net_device *dev = (struct net_device *)dev_id;
59688532f613SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
59698532f613SOng Boon Leong 
59708532f613SOng Boon Leong 	/* Check if adapter is up */
59718532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
59728532f613SOng Boon Leong 		return IRQ_HANDLED;
59738532f613SOng Boon Leong 
59748532f613SOng Boon Leong 	/* Check if a fatal error happened */
59758532f613SOng Boon Leong 	stmmac_safety_feat_interrupt(priv);
59768532f613SOng Boon Leong 
59778532f613SOng Boon Leong 	return IRQ_HANDLED;
59788532f613SOng Boon Leong }
59798532f613SOng Boon Leong 
59808532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
59818532f613SOng Boon Leong {
59828532f613SOng Boon Leong 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
59838531c808SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
59848532f613SOng Boon Leong 	int chan = tx_q->queue_index;
59858532f613SOng Boon Leong 	struct stmmac_priv *priv;
59868532f613SOng Boon Leong 	int status;
59878532f613SOng Boon Leong 
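	/* The per-queue MSI handler only gets the queue struct; recover the
	 * driver private data by walking up through the embedding dma_conf.
	 */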
59888531c808SChristian Marangi 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
59898531c808SChristian Marangi 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
59908532f613SOng Boon Leong 
59918532f613SOng Boon Leong 	/* Check if adapter is up */
59928532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
59938532f613SOng Boon Leong 		return IRQ_HANDLED;
59948532f613SOng Boon Leong 
59958532f613SOng Boon Leong 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
59968532f613SOng Boon Leong 
59978532f613SOng Boon Leong 	if (unlikely(status & tx_hard_error_bump_tc)) {
59988532f613SOng Boon Leong 		/* Try to bump up the dma threshold on this failure */
59993a6c12a0SXiaoliang Yang 		stmmac_bump_dma_threshold(priv, chan);
60008532f613SOng Boon Leong 	} else if (unlikely(status == tx_hard_error)) {
60018532f613SOng Boon Leong 		stmmac_tx_err(priv, chan);
60028532f613SOng Boon Leong 	}
60038532f613SOng Boon Leong 
60048532f613SOng Boon Leong 	return IRQ_HANDLED;
60058532f613SOng Boon Leong }
60068532f613SOng Boon Leong 
60078532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
60088532f613SOng Boon Leong {
60098532f613SOng Boon Leong 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
60108531c808SChristian Marangi 	struct stmmac_dma_conf *dma_conf;
60118532f613SOng Boon Leong 	int chan = rx_q->queue_index;
60128532f613SOng Boon Leong 	struct stmmac_priv *priv;
60138532f613SOng Boon Leong 
60148531c808SChristian Marangi 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
60158531c808SChristian Marangi 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
60168532f613SOng Boon Leong 
60178532f613SOng Boon Leong 	/* Check if adapter is up */
60188532f613SOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state))
60198532f613SOng Boon Leong 		return IRQ_HANDLED;
60208532f613SOng Boon Leong 
60218532f613SOng Boon Leong 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
60228532f613SOng Boon Leong 
60238532f613SOng Boon Leong 	return IRQ_HANDLED;
60248532f613SOng Boon Leong }
60258532f613SOng Boon Leong 
60267ac6653aSJeff Kirsher /**
60277ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
60287ac6653aSJeff Kirsher  *  @dev: Device pointer.
60297ac6653aSJeff Kirsher  *  @rq: An IOCTL-specific structure that can contain a pointer to
60307ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
60317ac6653aSJeff Kirsher  *  @cmd: IOCTL command
60327ac6653aSJeff Kirsher  *  Description:
603332ceabcaSGiuseppe CAVALLARO  *  Currently it supports the MII ioctls (via phylink_mii_ioctl()) and HW time stamping.
60347ac6653aSJeff Kirsher  */
60357ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
60367ac6653aSJeff Kirsher {
603774371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(dev);
6038891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
60397ac6653aSJeff Kirsher 
60407ac6653aSJeff Kirsher 	if (!netif_running(dev))
60417ac6653aSJeff Kirsher 		return -EINVAL;
60427ac6653aSJeff Kirsher 
6043891434b1SRayagond Kokatanur 	switch (cmd) {
6044891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
6045891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
6046891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
604774371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6048891434b1SRayagond Kokatanur 		break;
6049891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
6050d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
6051d6228b7cSArtem Panfilov 		break;
6052d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
6053d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
6054891434b1SRayagond Kokatanur 		break;
6055891434b1SRayagond Kokatanur 	default:
6056891434b1SRayagond Kokatanur 		break;
6057891434b1SRayagond Kokatanur 	}
60587ac6653aSJeff Kirsher 
60597ac6653aSJeff Kirsher 	return ret;
60607ac6653aSJeff Kirsher }
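
/* For reference, a minimal userspace sketch that exercises the
 * SIOCGHWTSTAMP branch above; the interface name is a placeholder and
 * error handling is omitted:
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCGHWTSTAMP, &ifr);
 *
 * On success, cfg.tx_type and cfg.rx_filter report the current
 * timestamping configuration.
 */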
60617ac6653aSJeff Kirsher 
60624dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
60634dbbe8ddSJose Abreu 				    void *cb_priv)
60644dbbe8ddSJose Abreu {
60654dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
60664dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
60674dbbe8ddSJose Abreu 
6068425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6069425eabddSJose Abreu 		return ret;
6070425eabddSJose Abreu 
6071bba2556eSOng Boon Leong 	__stmmac_disable_all_queues(priv);
60724dbbe8ddSJose Abreu 
60734dbbe8ddSJose Abreu 	switch (type) {
60744dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
60754dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
60764dbbe8ddSJose Abreu 		break;
6077425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
6078425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6079425eabddSJose Abreu 		break;
60804dbbe8ddSJose Abreu 	default:
60814dbbe8ddSJose Abreu 		break;
60824dbbe8ddSJose Abreu 	}
60834dbbe8ddSJose Abreu 
60844dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
60854dbbe8ddSJose Abreu 	return ret;
60864dbbe8ddSJose Abreu }
60874dbbe8ddSJose Abreu 
6088955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
6089955bcb6eSPablo Neira Ayuso 
60904dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
60914dbbe8ddSJose Abreu 			   void *type_data)
60924dbbe8ddSJose Abreu {
60934dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
60944dbbe8ddSJose Abreu 
60954dbbe8ddSJose Abreu 	switch (type) {
6096522d15eaSVladimir Oltean 	case TC_QUERY_CAPS:
6097522d15eaSVladimir Oltean 		return stmmac_tc_query_caps(priv, priv, type_data);
60984dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
6099955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
6100955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
61014e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
61024e95bc26SPablo Neira Ayuso 						  priv, priv, true);
61031f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
61041f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6105b60189e0SJose Abreu 	case TC_SETUP_QDISC_TAPRIO:
6106b60189e0SJose Abreu 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6107430b383cSJose Abreu 	case TC_SETUP_QDISC_ETF:
6108430b383cSJose Abreu 		return stmmac_tc_setup_etf(priv, priv, type_data);
61094dbbe8ddSJose Abreu 	default:
61104dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
61114dbbe8ddSJose Abreu 	}
61124dbbe8ddSJose Abreu }
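
/* The cases above are driven from the tc subsystem. Illustrative
 * commands that reach the taprio and etf hooks; the interface name and
 * timing values are placeholders (see tc-taprio(8) and tc-etf(8)):
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 2 map 0 1 queues 1@0 1@1 base-time 1000000000 \
 *		sched-entry S 01 300000 sched-entry S 02 300000 flags 0x2
 *
 *	tc qdisc add dev eth0 parent 100:2 etf clockid CLOCK_TAI \
 *		delta 300000 offload
 */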
61134dbbe8ddSJose Abreu 
61144993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
61154993e5b3SJose Abreu 			       struct net_device *sb_dev)
61164993e5b3SJose Abreu {
6117b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
6118b7766206SJose Abreu 
6119b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
61204993e5b3SJose Abreu 		/*
6121b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
61224993e5b3SJose Abreu 		 * capable queues. Let's always use queue 0
6123b7766206SJose Abreu 		 * because if TSO/USO is supported then at least this
61244993e5b3SJose Abreu 		 * one will be capable.
61254993e5b3SJose Abreu 		 */
61264993e5b3SJose Abreu 		return 0;
61274993e5b3SJose Abreu 	}
61284993e5b3SJose Abreu 
61294993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
61304993e5b3SJose Abreu }
61314993e5b3SJose Abreu 
6132a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6133a830405eSBhadram Varka {
6134a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
6135a830405eSBhadram Varka 	int ret = 0;
6136a830405eSBhadram Varka 
613785648865SMinghao Chi 	ret = pm_runtime_resume_and_get(priv->device);
613885648865SMinghao Chi 	if (ret < 0)
61394691ffb1SJoakim Zhang 		return ret;
61404691ffb1SJoakim Zhang 
6141a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
6142a830405eSBhadram Varka 	if (ret)
61434691ffb1SJoakim Zhang 		goto set_mac_error;
6144a830405eSBhadram Varka 
6145c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6146a830405eSBhadram Varka 
61474691ffb1SJoakim Zhang set_mac_error:
61484691ffb1SJoakim Zhang 	pm_runtime_put(priv->device);
61494691ffb1SJoakim Zhang 
6150a830405eSBhadram Varka 	return ret;
6151a830405eSBhadram Varka }
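
/* The pm_runtime_resume_and_get()/pm_runtime_put() pair above keeps the
 * controller clocked while the address registers are written; a write to
 * a runtime-suspended device could otherwise be lost.
 */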
6152a830405eSBhadram Varka 
615350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
61547ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
61557ac29055SGiuseppe CAVALLARO 
6156c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
6157bfaf91caSJoakim Zhang 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
61587ac29055SGiuseppe CAVALLARO {
61597ac29055SGiuseppe CAVALLARO 	int i;
6160c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6161c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
6162bfaf91caSJoakim Zhang 	dma_addr_t dma_addr;
61637ac29055SGiuseppe CAVALLARO 
6164c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
6165c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
6166bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6167bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6168bfaf91caSJoakim Zhang 				   i, &dma_addr,
6169f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
6170f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
6171f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
6172f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
6173c24602efSGiuseppe CAVALLARO 			ep++;
6174c24602efSGiuseppe CAVALLARO 		} else {
6175bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*p);
6176bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6177bfaf91caSJoakim Zhang 				   i, &dma_addr,
6178f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6179f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6180c24602efSGiuseppe CAVALLARO 			p++;
6181c24602efSGiuseppe CAVALLARO 		}
61827ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
61837ac29055SGiuseppe CAVALLARO 	}
6184c24602efSGiuseppe CAVALLARO }
61857ac29055SGiuseppe CAVALLARO 
6186fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6187c24602efSGiuseppe CAVALLARO {
6188c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
6189c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
619054139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
6191ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
619254139cf3SJoao Pinto 	u32 queue;
619354139cf3SJoao Pinto 
61945f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
61955f2b8b62SThierry Reding 		return 0;
61965f2b8b62SThierry Reding 
619754139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
61988531c808SChristian Marangi 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
619954139cf3SJoao Pinto 
620054139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
62017ac29055SGiuseppe CAVALLARO 
6202c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
620354139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
620454139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
62058531c808SChristian Marangi 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
620654139cf3SJoao Pinto 		} else {
620754139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
620854139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
62098531c808SChristian Marangi 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
621054139cf3SJoao Pinto 		}
621154139cf3SJoao Pinto 	}
621254139cf3SJoao Pinto 
6213ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
62148531c808SChristian Marangi 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6215ce736788SJoao Pinto 
6216ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
6217ce736788SJoao Pinto 
621854139cf3SJoao Pinto 		if (priv->extend_desc) {
6219ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
6220ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
62218531c808SChristian Marangi 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6222579a25a8SJose Abreu 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6223ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
6224ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
62258531c808SChristian Marangi 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6226ce736788SJoao Pinto 		}
62277ac29055SGiuseppe CAVALLARO 	}
62287ac29055SGiuseppe CAVALLARO 
62297ac29055SGiuseppe CAVALLARO 	return 0;
62307ac29055SGiuseppe CAVALLARO }
6231fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
62327ac29055SGiuseppe CAVALLARO 
6233fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6234e7434821SGiuseppe CAVALLARO {
623558c1e0baSFurong Xu 	static const char * const dwxgmac_timestamp_source[] = {
623658c1e0baSFurong Xu 		"None",
623758c1e0baSFurong Xu 		"Internal",
623858c1e0baSFurong Xu 		"External",
623958c1e0baSFurong Xu 		"Both",
624058c1e0baSFurong Xu 	};
6241669a5556SFurong Xu 	static const char * const dwxgmac_safety_feature_desc[] = {
6242669a5556SFurong Xu 		"No",
6243669a5556SFurong Xu 		"All Safety Features with ECC and Parity",
6244669a5556SFurong Xu 		"All Safety Features without ECC or Parity",
6245669a5556SFurong Xu 		"All Safety Features with Parity Only",
6246669a5556SFurong Xu 		"ECC Only",
6247669a5556SFurong Xu 		"UNDEFINED",
6248669a5556SFurong Xu 		"UNDEFINED",
6249669a5556SFurong Xu 		"UNDEFINED",
6250669a5556SFurong Xu 	};
6251e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
6252e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
6253e7434821SGiuseppe CAVALLARO 
625419e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
6255e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
6256e7434821SGiuseppe CAVALLARO 		return 0;
6257e7434821SGiuseppe CAVALLARO 	}
6258e7434821SGiuseppe CAVALLARO 
6259e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
6260e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
6261e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
6262e7434821SGiuseppe CAVALLARO 
626322d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6264e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
626522d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
6266e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
626722d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
6268e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6269669a5556SFurong Xu 	if (priv->plat->has_xgmac) {
627058c1e0baSFurong Xu 		seq_printf(seq,
627158c1e0baSFurong Xu 			   "\tNumber of Additional MAC address registers: %d\n",
627258c1e0baSFurong Xu 			   priv->dma_cap.multi_addr);
6273669a5556SFurong Xu 	} else {
6274669a5556SFurong Xu 		seq_printf(seq, "\tHash Filter: %s\n",
6275669a5556SFurong Xu 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6276e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6277e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6278669a5556SFurong Xu 	}
62798d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6280e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
6281e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6282e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6283e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6284e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6285e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6286e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6287e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
6288e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
6289e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6290e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6291e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6292e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
629358c1e0baSFurong Xu 	if (priv->plat->has_xgmac)
629458c1e0baSFurong Xu 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
629558c1e0baSFurong Xu 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
629622d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6297e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
6298e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6299e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6300e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
630158c1e0baSFurong Xu 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
630258c1e0baSFurong Xu 	    priv->plat->has_xgmac) {
6303f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6304f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6305f748be53SAlexandre TORGUE 	} else {
6306e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6307e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6308e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6309e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6310e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6311e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
631258c1e0baSFurong Xu 	}
6313e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6314e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
6315e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6316e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
63177d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
63187d0b447aSJose Abreu 		   priv->dma_cap.number_rx_queues);
63197d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
63207d0b447aSJose Abreu 		   priv->dma_cap.number_tx_queues);
6321e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6322e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
63237d0b447aSJose Abreu 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
63247d0b447aSJose Abreu 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6325669a5556SFurong Xu 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6326669a5556SFurong Xu 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
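	/* Several capability fields printed here are log2-encoded; for
	 * example, hash_tb_sz == 2 decodes to BIT(2) << 5 = 128 hash-table
	 * entries, and frpes == 1 further below decodes to BIT(1) << 6 =
	 * 128 parser instructions.
	 */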
63277d0b447aSJose Abreu 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
63287d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
63297d0b447aSJose Abreu 		   priv->dma_cap.pps_out_num);
63307d0b447aSJose Abreu 	seq_printf(seq, "\tSafety Features: %s\n",
6331669a5556SFurong Xu 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
63327d0b447aSJose Abreu 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
63337d0b447aSJose Abreu 		   priv->dma_cap.frpsel ? "Y" : "N");
63347d0b447aSJose Abreu 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6335070246e4SJochen Henneberg 		   priv->dma_cap.host_dma_width);
63367d0b447aSJose Abreu 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
63377d0b447aSJose Abreu 		   priv->dma_cap.rssen ? "Y" : "N");
63387d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
63397d0b447aSJose Abreu 		   priv->dma_cap.vlhash ? "Y" : "N");
63407d0b447aSJose Abreu 	seq_printf(seq, "\tSplit Header: %s\n",
63417d0b447aSJose Abreu 		   priv->dma_cap.sphen ? "Y" : "N");
63427d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
63437d0b447aSJose Abreu 		   priv->dma_cap.vlins ? "Y" : "N");
63447d0b447aSJose Abreu 	seq_printf(seq, "\tDouble VLAN: %s\n",
63457d0b447aSJose Abreu 		   priv->dma_cap.dvlan ? "Y" : "N");
63467d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
63477d0b447aSJose Abreu 		   priv->dma_cap.l3l4fnum);
63487d0b447aSJose Abreu 	seq_printf(seq, "\tARP Offloading: %s\n",
63497d0b447aSJose Abreu 		   priv->dma_cap.arpoffsel ? "Y" : "N");
635044e65475SJose Abreu 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
635144e65475SJose Abreu 		   priv->dma_cap.estsel ? "Y" : "N");
635244e65475SJose Abreu 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
635344e65475SJose Abreu 		   priv->dma_cap.fpesel ? "Y" : "N");
635444e65475SJose Abreu 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
635544e65475SJose Abreu 		   priv->dma_cap.tbssel ? "Y" : "N");
6356669a5556SFurong Xu 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6357669a5556SFurong Xu 		   priv->dma_cap.tbs_ch_num);
6358669a5556SFurong Xu 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6359669a5556SFurong Xu 		   priv->dma_cap.sgfsel ? "Y" : "N");
6360669a5556SFurong Xu 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6361669a5556SFurong Xu 		   BIT(priv->dma_cap.ttsfd) >> 1);
6362669a5556SFurong Xu 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6363669a5556SFurong Xu 		   priv->dma_cap.numtc);
6364669a5556SFurong Xu 	seq_printf(seq, "\tDCB Feature: %s\n",
6365669a5556SFurong Xu 		   priv->dma_cap.dcben ? "Y" : "N");
6366669a5556SFurong Xu 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6367669a5556SFurong Xu 		   priv->dma_cap.advthword ? "Y" : "N");
6368669a5556SFurong Xu 	seq_printf(seq, "\tPTP Offload: %s\n",
6369669a5556SFurong Xu 		   priv->dma_cap.ptoen ? "Y" : "N");
6370669a5556SFurong Xu 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6371669a5556SFurong Xu 		   priv->dma_cap.osten ? "Y" : "N");
6372669a5556SFurong Xu 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6373669a5556SFurong Xu 		   priv->dma_cap.pfcen ? "Y" : "N");
6374669a5556SFurong Xu 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6375669a5556SFurong Xu 		   BIT(priv->dma_cap.frpes) << 6);
6376669a5556SFurong Xu 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6377669a5556SFurong Xu 		   BIT(priv->dma_cap.frpbs) << 6);
6378669a5556SFurong Xu 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6379669a5556SFurong Xu 		   priv->dma_cap.frppipe_num);
6380669a5556SFurong Xu 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6381669a5556SFurong Xu 		   priv->dma_cap.nrvf_num ?
6382669a5556SFurong Xu 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6383669a5556SFurong Xu 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6384669a5556SFurong Xu 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6385669a5556SFurong Xu 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6386669a5556SFurong Xu 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6387669a5556SFurong Xu 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6388669a5556SFurong Xu 		   priv->dma_cap.cbtisel ? "Y" : "N");
6389669a5556SFurong Xu 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6390669a5556SFurong Xu 		   priv->dma_cap.aux_snapshot_n);
6391669a5556SFurong Xu 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6392669a5556SFurong Xu 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6393669a5556SFurong Xu 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6394669a5556SFurong Xu 		   priv->dma_cap.edma ? "Y" : "N");
6395669a5556SFurong Xu 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6396669a5556SFurong Xu 		   priv->dma_cap.ediffc ? "Y" : "N");
6397669a5556SFurong Xu 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6398669a5556SFurong Xu 		   priv->dma_cap.vxn ? "Y" : "N");
6399669a5556SFurong Xu 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6400669a5556SFurong Xu 		   priv->dma_cap.dbgmem ? "Y" : "N");
6401669a5556SFurong Xu 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6402669a5556SFurong Xu 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6403e7434821SGiuseppe CAVALLARO 	return 0;
6404e7434821SGiuseppe CAVALLARO }
6405fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6406e7434821SGiuseppe CAVALLARO 
6407481a7d15SJiping Ma /* Use network device events to rename debugfs file entries.
6408481a7d15SJiping Ma  */
6409481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused,
6410481a7d15SJiping Ma 			       unsigned long event, void *ptr)
6411481a7d15SJiping Ma {
6412481a7d15SJiping Ma 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6413481a7d15SJiping Ma 	struct stmmac_priv *priv = netdev_priv(dev);
6414481a7d15SJiping Ma 
6415481a7d15SJiping Ma 	if (dev->netdev_ops != &stmmac_netdev_ops)
6416481a7d15SJiping Ma 		goto done;
6417481a7d15SJiping Ma 
6418481a7d15SJiping Ma 	switch (event) {
6419481a7d15SJiping Ma 	case NETDEV_CHANGENAME:
6420481a7d15SJiping Ma 		if (priv->dbgfs_dir)
6421481a7d15SJiping Ma 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6422481a7d15SJiping Ma 							 priv->dbgfs_dir,
6423481a7d15SJiping Ma 							 stmmac_fs_dir,
6424481a7d15SJiping Ma 							 dev->name);
6425481a7d15SJiping Ma 		break;
6426481a7d15SJiping Ma 	}
6427481a7d15SJiping Ma done:
6428481a7d15SJiping Ma 	return NOTIFY_DONE;
6429481a7d15SJiping Ma }
6430481a7d15SJiping Ma 
6431481a7d15SJiping Ma static struct notifier_block stmmac_notifier = {
6432481a7d15SJiping Ma 	.notifier_call = stmmac_device_event,
6433481a7d15SJiping Ma };
6434481a7d15SJiping Ma 
64358d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev)
64367ac29055SGiuseppe CAVALLARO {
6437466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
64387ac29055SGiuseppe CAVALLARO 
6439474a31e1SAaro Koskinen 	rtnl_lock();
6440474a31e1SAaro Koskinen 
6441466c5ac8SMathieu Olivari 	/* Create per netdev entries */
6442466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6443466c5ac8SMathieu Olivari 
64447ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
64458d72ab11SGreg Kroah-Hartman 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
64467ac29055SGiuseppe CAVALLARO 			    &stmmac_rings_status_fops);
64477ac29055SGiuseppe CAVALLARO 
6448e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
64498d72ab11SGreg Kroah-Hartman 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
64508d72ab11SGreg Kroah-Hartman 			    &stmmac_dma_cap_fops);
6451481a7d15SJiping Ma 
6452474a31e1SAaro Koskinen 	rtnl_unlock();
64537ac29055SGiuseppe CAVALLARO }
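
/* With debugfs mounted at the usual location, the two entries created
 * above are typically read as follows (the directory name comes from the
 * driver, the interface name from the netdev):
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */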
64547ac29055SGiuseppe CAVALLARO 
6455466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
64567ac29055SGiuseppe CAVALLARO {
6457466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
6458466c5ac8SMathieu Olivari 
6459466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
64607ac29055SGiuseppe CAVALLARO }
646150fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
64627ac29055SGiuseppe CAVALLARO 
64633cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
64643cd1cfcbSJose Abreu {
64653cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
64663cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
64673cd1cfcbSJose Abreu 	u32 crc = ~0x0;
64683cd1cfcbSJose Abreu 	u32 temp = 0;
64693cd1cfcbSJose Abreu 	int i, bits;
64703cd1cfcbSJose Abreu 
64713cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
64723cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
64733cd1cfcbSJose Abreu 		if ((i % 8) == 0)
64743cd1cfcbSJose Abreu 			data_byte = data[i / 8];
64753cd1cfcbSJose Abreu 
64763cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
64773cd1cfcbSJose Abreu 		crc >>= 1;
64783cd1cfcbSJose Abreu 		data_byte >>= 1;
64793cd1cfcbSJose Abreu 
64803cd1cfcbSJose Abreu 		if (temp)
64813cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
64823cd1cfcbSJose Abreu 	}
64833cd1cfcbSJose Abreu 
64843cd1cfcbSJose Abreu 	return crc;
64853cd1cfcbSJose Abreu }
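
/* A sketch of the same bit-serial CRC-32 (LSB-first, polynomial
 * 0xedb88320) over the 12 VID bits, useful for cross-checking a hash bin
 * from userspace; illustration only:
 *
 *	#include <stdint.h>
 *
 *	uint32_t vid_hash_crc(uint16_t vid)
 *	{
 *		uint32_t crc = ~0u;
 *		int i;
 *
 *		for (i = 0; i < 12; i++) {
 *			uint32_t bit = (crc ^ (vid >> i)) & 1;
 *
 *			crc = (crc >> 1) ^ (bit ? 0xedb88320 : 0);
 *		}
 *		return crc;
 *	}
 *
 * stmmac_vlan_update() below then takes the top four bits of the
 * bit-reversed, complemented CRC as the bin index into a 16-bit hash
 * bitmap.
 */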
64863cd1cfcbSJose Abreu 
64873cd1cfcbSJose Abreu static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
64883cd1cfcbSJose Abreu {
64893cd1cfcbSJose Abreu 	u32 crc, hash = 0;
64904384135dSSimon Horman 	u16 pmatch = 0;
6491c7ab0b80SJose Abreu 	int count = 0;
6492c7ab0b80SJose Abreu 	u16 vid = 0;
64933cd1cfcbSJose Abreu 
64943cd1cfcbSJose Abreu 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
64953cd1cfcbSJose Abreu 		__le16 vid_le = cpu_to_le16(vid);
64963cd1cfcbSJose Abreu 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
64973cd1cfcbSJose Abreu 		hash |= (1 << crc);
6498c7ab0b80SJose Abreu 		count++;
64993cd1cfcbSJose Abreu 	}
65003cd1cfcbSJose Abreu 
6501c7ab0b80SJose Abreu 	if (!priv->dma_cap.vlhash) {
6502c7ab0b80SJose Abreu 		if (count > 2) /* VID = 0 always passes filter */
6503c7ab0b80SJose Abreu 			return -EOPNOTSUPP;
6504c7ab0b80SJose Abreu 
65054384135dSSimon Horman 		pmatch = vid;
6506c7ab0b80SJose Abreu 		hash = 0;
6507c7ab0b80SJose Abreu 	}
6508c7ab0b80SJose Abreu 
6509a24cae70SJose Abreu 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
65103cd1cfcbSJose Abreu }
65113cd1cfcbSJose Abreu 
65123cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
65133cd1cfcbSJose Abreu {
65143cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
65153cd1cfcbSJose Abreu 	bool is_double = false;
65163cd1cfcbSJose Abreu 	int ret;
65173cd1cfcbSJose Abreu 
651835226750SYan Wang 	ret = pm_runtime_resume_and_get(priv->device);
651935226750SYan Wang 	if (ret < 0)
652035226750SYan Wang 		return ret;
652135226750SYan Wang 
65223cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
65233cd1cfcbSJose Abreu 		is_double = true;
65243cd1cfcbSJose Abreu 
65253cd1cfcbSJose Abreu 	set_bit(vid, priv->active_vlans);
65263cd1cfcbSJose Abreu 	ret = stmmac_vlan_update(priv, is_double);
65273cd1cfcbSJose Abreu 	if (ret) {
65283cd1cfcbSJose Abreu 		clear_bit(vid, priv->active_vlans);
652935226750SYan Wang 		goto err_pm_put;
65303cd1cfcbSJose Abreu 	}
65313cd1cfcbSJose Abreu 
6532dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
6533ed64639bSWong Vee Khee 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6534dd6a4998SJose Abreu 		if (ret)
653535226750SYan Wang 			goto err_pm_put;
65363cd1cfcbSJose Abreu 	}
653735226750SYan Wang err_pm_put:
653835226750SYan Wang 	pm_runtime_put(priv->device);
65393cd1cfcbSJose Abreu 
654035226750SYan Wang 	return ret;
6541dd6a4998SJose Abreu }
6542dd6a4998SJose Abreu 
65433cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
65443cd1cfcbSJose Abreu {
65453cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
65463cd1cfcbSJose Abreu 	bool is_double = false;
6547ed64639bSWong Vee Khee 	int ret;
65483cd1cfcbSJose Abreu 
654985648865SMinghao Chi 	ret = pm_runtime_resume_and_get(priv->device);
655085648865SMinghao Chi 	if (ret < 0)
6551b3dcb312SJoakim Zhang 		return ret;
6552b3dcb312SJoakim Zhang 
65533cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
65543cd1cfcbSJose Abreu 		is_double = true;
65553cd1cfcbSJose Abreu 
65563cd1cfcbSJose Abreu 	clear_bit(vid, priv->active_vlans);
6557dd6a4998SJose Abreu 
6558dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
6559ed64639bSWong Vee Khee 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6560ed64639bSWong Vee Khee 		if (ret)
65615ec55823SJoakim Zhang 			goto del_vlan_error;
6562dd6a4998SJose Abreu 	}
6563ed64639bSWong Vee Khee 
65645ec55823SJoakim Zhang 	ret = stmmac_vlan_update(priv, is_double);
65655ec55823SJoakim Zhang 
65665ec55823SJoakim Zhang del_vlan_error:
65675ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
65685ec55823SJoakim Zhang 
65695ec55823SJoakim Zhang 	return ret;
65703cd1cfcbSJose Abreu }
65713cd1cfcbSJose Abreu 
65725fabb012SOng Boon Leong static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
65735fabb012SOng Boon Leong {
65745fabb012SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
65755fabb012SOng Boon Leong 
65765fabb012SOng Boon Leong 	switch (bpf->command) {
65775fabb012SOng Boon Leong 	case XDP_SETUP_PROG:
65785fabb012SOng Boon Leong 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6579bba2556eSOng Boon Leong 	case XDP_SETUP_XSK_POOL:
6580bba2556eSOng Boon Leong 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6581bba2556eSOng Boon Leong 					     bpf->xsk.queue_id);
65825fabb012SOng Boon Leong 	default:
65835fabb012SOng Boon Leong 		return -EOPNOTSUPP;
65845fabb012SOng Boon Leong 	}
65855fabb012SOng Boon Leong }
65865fabb012SOng Boon Leong 
65878b278a5bSOng Boon Leong static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
65888b278a5bSOng Boon Leong 			   struct xdp_frame **frames, u32 flags)
65898b278a5bSOng Boon Leong {
65908b278a5bSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
65918b278a5bSOng Boon Leong 	int cpu = smp_processor_id();
65928b278a5bSOng Boon Leong 	struct netdev_queue *nq;
65938b278a5bSOng Boon Leong 	int i, nxmit = 0;
65948b278a5bSOng Boon Leong 	int queue;
65958b278a5bSOng Boon Leong 
65968b278a5bSOng Boon Leong 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
65978b278a5bSOng Boon Leong 		return -ENETDOWN;
65988b278a5bSOng Boon Leong 
65998b278a5bSOng Boon Leong 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
66008b278a5bSOng Boon Leong 		return -EINVAL;
66018b278a5bSOng Boon Leong 
66028b278a5bSOng Boon Leong 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
66038b278a5bSOng Boon Leong 	nq = netdev_get_tx_queue(priv->dev, queue);
66048b278a5bSOng Boon Leong 
66058b278a5bSOng Boon Leong 	__netif_tx_lock(nq, cpu);
66068b278a5bSOng Boon Leong 	/* Avoids TX time-out as we are sharing with slow path */
66075337824fSEric Dumazet 	txq_trans_cond_update(nq);
66088b278a5bSOng Boon Leong 
66098b278a5bSOng Boon Leong 	for (i = 0; i < num_frames; i++) {
66108b278a5bSOng Boon Leong 		int res;
66118b278a5bSOng Boon Leong 
66128b278a5bSOng Boon Leong 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
66138b278a5bSOng Boon Leong 		if (res == STMMAC_XDP_CONSUMED)
66148b278a5bSOng Boon Leong 			break;
66158b278a5bSOng Boon Leong 
66168b278a5bSOng Boon Leong 		nxmit++;
66178b278a5bSOng Boon Leong 	}
66188b278a5bSOng Boon Leong 
66198b278a5bSOng Boon Leong 	if (flags & XDP_XMIT_FLUSH) {
66208b278a5bSOng Boon Leong 		stmmac_flush_tx_descriptors(priv, queue);
66218b278a5bSOng Boon Leong 		stmmac_tx_timer_arm(priv, queue);
66228b278a5bSOng Boon Leong 	}
66238b278a5bSOng Boon Leong 
66248b278a5bSOng Boon Leong 	__netif_tx_unlock(nq);
66258b278a5bSOng Boon Leong 
66268b278a5bSOng Boon Leong 	return nxmit;
66278b278a5bSOng Boon Leong }
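
/* Per the ndo_xdp_xmit() contract, the return value is the number of
 * frames actually queued; the XDP core releases any frames beyond that
 * count, which is why the loop above simply breaks on
 * STMMAC_XDP_CONSUMED instead of freeing the remainder itself.
 */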
66288b278a5bSOng Boon Leong 
6629bba2556eSOng Boon Leong void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6630bba2556eSOng Boon Leong {
6631bba2556eSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
6632bba2556eSOng Boon Leong 	unsigned long flags;
6633bba2556eSOng Boon Leong 
6634bba2556eSOng Boon Leong 	spin_lock_irqsave(&ch->lock, flags);
6635bba2556eSOng Boon Leong 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6636bba2556eSOng Boon Leong 	spin_unlock_irqrestore(&ch->lock, flags);
6637bba2556eSOng Boon Leong 
6638bba2556eSOng Boon Leong 	stmmac_stop_rx_dma(priv, queue);
6639ba39b344SChristian Marangi 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6640bba2556eSOng Boon Leong }
6641bba2556eSOng Boon Leong 
6642bba2556eSOng Boon Leong void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6643bba2556eSOng Boon Leong {
66448531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6645bba2556eSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
6646bba2556eSOng Boon Leong 	unsigned long flags;
6647bba2556eSOng Boon Leong 	u32 buf_size;
6648bba2556eSOng Boon Leong 	int ret;
6649bba2556eSOng Boon Leong 
6650ba39b344SChristian Marangi 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6651bba2556eSOng Boon Leong 	if (ret) {
6652bba2556eSOng Boon Leong 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6653bba2556eSOng Boon Leong 		return;
6654bba2556eSOng Boon Leong 	}
6655bba2556eSOng Boon Leong 
6656ba39b344SChristian Marangi 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6657bba2556eSOng Boon Leong 	if (ret) {
6658ba39b344SChristian Marangi 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6659bba2556eSOng Boon Leong 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6660bba2556eSOng Boon Leong 		return;
6661bba2556eSOng Boon Leong 	}
6662bba2556eSOng Boon Leong 
6663f9ec5723SChristian Marangi 	stmmac_reset_rx_queue(priv, queue);
6664ba39b344SChristian Marangi 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6665bba2556eSOng Boon Leong 
6666bba2556eSOng Boon Leong 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6667bba2556eSOng Boon Leong 			    rx_q->dma_rx_phy, rx_q->queue_index);
6668bba2556eSOng Boon Leong 
6669bba2556eSOng Boon Leong 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6670bba2556eSOng Boon Leong 			     sizeof(struct dma_desc));
6671bba2556eSOng Boon Leong 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6672bba2556eSOng Boon Leong 			       rx_q->rx_tail_addr, rx_q->queue_index);
6673bba2556eSOng Boon Leong 
6674bba2556eSOng Boon Leong 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6675bba2556eSOng Boon Leong 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6676bba2556eSOng Boon Leong 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6677bba2556eSOng Boon Leong 				      buf_size,
6678bba2556eSOng Boon Leong 				      rx_q->queue_index);
6679bba2556eSOng Boon Leong 	} else {
6680bba2556eSOng Boon Leong 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
66818531c808SChristian Marangi 				      priv->dma_conf.dma_buf_sz,
6682bba2556eSOng Boon Leong 				      rx_q->queue_index);
6683bba2556eSOng Boon Leong 	}
6684bba2556eSOng Boon Leong 
6685bba2556eSOng Boon Leong 	stmmac_start_rx_dma(priv, queue);
6686bba2556eSOng Boon Leong 
6687bba2556eSOng Boon Leong 	spin_lock_irqsave(&ch->lock, flags);
6688bba2556eSOng Boon Leong 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6689bba2556eSOng Boon Leong 	spin_unlock_irqrestore(&ch->lock, flags);
6690bba2556eSOng Boon Leong }
6691bba2556eSOng Boon Leong 
6692132c32eeSOng Boon Leong void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6693132c32eeSOng Boon Leong {
6694132c32eeSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
6695132c32eeSOng Boon Leong 	unsigned long flags;
6696132c32eeSOng Boon Leong 
6697132c32eeSOng Boon Leong 	spin_lock_irqsave(&ch->lock, flags);
6698132c32eeSOng Boon Leong 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6699132c32eeSOng Boon Leong 	spin_unlock_irqrestore(&ch->lock, flags);
6700132c32eeSOng Boon Leong 
6701132c32eeSOng Boon Leong 	stmmac_stop_tx_dma(priv, queue);
6702ba39b344SChristian Marangi 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6703132c32eeSOng Boon Leong }
6704132c32eeSOng Boon Leong 
6705132c32eeSOng Boon Leong void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6706132c32eeSOng Boon Leong {
67078531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6708132c32eeSOng Boon Leong 	struct stmmac_channel *ch = &priv->channel[queue];
6709132c32eeSOng Boon Leong 	unsigned long flags;
6710132c32eeSOng Boon Leong 	int ret;
6711132c32eeSOng Boon Leong 
6712ba39b344SChristian Marangi 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6713132c32eeSOng Boon Leong 	if (ret) {
6714132c32eeSOng Boon Leong 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6715132c32eeSOng Boon Leong 		return;
6716132c32eeSOng Boon Leong 	}
6717132c32eeSOng Boon Leong 
6718ba39b344SChristian Marangi 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6719132c32eeSOng Boon Leong 	if (ret) {
6720ba39b344SChristian Marangi 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6721132c32eeSOng Boon Leong 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6722132c32eeSOng Boon Leong 		return;
6723132c32eeSOng Boon Leong 	}
6724132c32eeSOng Boon Leong 
6725f9ec5723SChristian Marangi 	stmmac_reset_tx_queue(priv, queue);
6726ba39b344SChristian Marangi 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6727132c32eeSOng Boon Leong 
6728132c32eeSOng Boon Leong 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6729132c32eeSOng Boon Leong 			    tx_q->dma_tx_phy, tx_q->queue_index);
6730132c32eeSOng Boon Leong 
6731132c32eeSOng Boon Leong 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6732132c32eeSOng Boon Leong 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6733132c32eeSOng Boon Leong 
6734132c32eeSOng Boon Leong 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6735132c32eeSOng Boon Leong 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6736132c32eeSOng Boon Leong 			       tx_q->tx_tail_addr, tx_q->queue_index);
6737132c32eeSOng Boon Leong 
6738132c32eeSOng Boon Leong 	stmmac_start_tx_dma(priv, queue);
6739132c32eeSOng Boon Leong 
6740132c32eeSOng Boon Leong 	spin_lock_irqsave(&ch->lock, flags);
6741132c32eeSOng Boon Leong 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6742132c32eeSOng Boon Leong 	spin_unlock_irqrestore(&ch->lock, flags);
6743132c32eeSOng Boon Leong }
6744132c32eeSOng Boon Leong 
6745ac746c85SOng Boon Leong void stmmac_xdp_release(struct net_device *dev)
6746ac746c85SOng Boon Leong {
6747ac746c85SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6748ac746c85SOng Boon Leong 	u32 chan;
6749ac746c85SOng Boon Leong 
675077711683SMohd Faizal Abdul Rahim 	/* Ensure tx function is not running */
675177711683SMohd Faizal Abdul Rahim 	netif_tx_disable(dev);
675277711683SMohd Faizal Abdul Rahim 
6753ac746c85SOng Boon Leong 	/* Disable NAPI process */
6754ac746c85SOng Boon Leong 	stmmac_disable_all_queues(priv);
6755ac746c85SOng Boon Leong 
6756ac746c85SOng Boon Leong 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
67578531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6758ac746c85SOng Boon Leong 
6759ac746c85SOng Boon Leong 	/* Free the IRQ lines */
6760ac746c85SOng Boon Leong 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6761ac746c85SOng Boon Leong 
6762ac746c85SOng Boon Leong 	/* Stop TX/RX DMA channels */
6763ac746c85SOng Boon Leong 	stmmac_stop_all_dma(priv);
6764ac746c85SOng Boon Leong 
6765ac746c85SOng Boon Leong 	/* Release and free the Rx/Tx resources */
6766ba39b344SChristian Marangi 	free_dma_desc_resources(priv, &priv->dma_conf);
6767ac746c85SOng Boon Leong 
6768ac746c85SOng Boon Leong 	/* Disable the MAC Rx/Tx */
6769ac746c85SOng Boon Leong 	stmmac_mac_set(priv, priv->ioaddr, false);
6770ac746c85SOng Boon Leong 
6771ac746c85SOng Boon Leong 	/* set trans_start so we don't get spurious
6772ac746c85SOng Boon Leong 	 * watchdogs during reset
6773ac746c85SOng Boon Leong 	 */
6774ac746c85SOng Boon Leong 	netif_trans_update(dev);
6775ac746c85SOng Boon Leong 	netif_carrier_off(dev);
6776ac746c85SOng Boon Leong }
6777ac746c85SOng Boon Leong 
6778ac746c85SOng Boon Leong int stmmac_xdp_open(struct net_device *dev)
6779ac746c85SOng Boon Leong {
6780ac746c85SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6781ac746c85SOng Boon Leong 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6782ac746c85SOng Boon Leong 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6783ac746c85SOng Boon Leong 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6784ac746c85SOng Boon Leong 	struct stmmac_rx_queue *rx_q;
6785ac746c85SOng Boon Leong 	struct stmmac_tx_queue *tx_q;
6786ac746c85SOng Boon Leong 	u32 buf_size;
6787ac746c85SOng Boon Leong 	bool sph_en;
6788ac746c85SOng Boon Leong 	u32 chan;
6789ac746c85SOng Boon Leong 	int ret;
6790ac746c85SOng Boon Leong 
6791ba39b344SChristian Marangi 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6792ac746c85SOng Boon Leong 	if (ret < 0) {
6793ac746c85SOng Boon Leong 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6794ac746c85SOng Boon Leong 			   __func__);
6795ac746c85SOng Boon Leong 		goto dma_desc_error;
6796ac746c85SOng Boon Leong 	}
6797ac746c85SOng Boon Leong 
6798ba39b344SChristian Marangi 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6799ac746c85SOng Boon Leong 	if (ret < 0) {
6800ac746c85SOng Boon Leong 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6801ac746c85SOng Boon Leong 			   __func__);
6802ac746c85SOng Boon Leong 		goto init_error;
6803ac746c85SOng Boon Leong 	}
6804ac746c85SOng Boon Leong 
680524e3fce0SSong Yoong Siang 	stmmac_reset_queues_param(priv);
680624e3fce0SSong Yoong Siang 
6807ac746c85SOng Boon Leong 	/* DMA CSR Channel configuration */
6808087a7b94SVincent Whitchurch 	for (chan = 0; chan < dma_csr_ch; chan++) {
6809ac746c85SOng Boon Leong 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6810087a7b94SVincent Whitchurch 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6811087a7b94SVincent Whitchurch 	}
6812ac746c85SOng Boon Leong 
6813ac746c85SOng Boon Leong 	/* Adjust Split header */
6814ac746c85SOng Boon Leong 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6815ac746c85SOng Boon Leong 
6816ac746c85SOng Boon Leong 	/* DMA RX Channel Configuration */
6817ac746c85SOng Boon Leong 	for (chan = 0; chan < rx_cnt; chan++) {
68188531c808SChristian Marangi 		rx_q = &priv->dma_conf.rx_queue[chan];
6819ac746c85SOng Boon Leong 
6820ac746c85SOng Boon Leong 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6821ac746c85SOng Boon Leong 				    rx_q->dma_rx_phy, chan);
6822ac746c85SOng Boon Leong 
6823ac746c85SOng Boon Leong 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6824ac746c85SOng Boon Leong 				     (rx_q->buf_alloc_num *
6825ac746c85SOng Boon Leong 				      sizeof(struct dma_desc));
6826ac746c85SOng Boon Leong 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6827ac746c85SOng Boon Leong 				       rx_q->rx_tail_addr, chan);
6828ac746c85SOng Boon Leong 
6829ac746c85SOng Boon Leong 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6830ac746c85SOng Boon Leong 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6831ac746c85SOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6832ac746c85SOng Boon Leong 					      buf_size,
6833ac746c85SOng Boon Leong 					      rx_q->queue_index);
6834ac746c85SOng Boon Leong 		} else {
6835ac746c85SOng Boon Leong 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
68368531c808SChristian Marangi 					      priv->dma_conf.dma_buf_sz,
6837ac746c85SOng Boon Leong 					      rx_q->queue_index);
6838ac746c85SOng Boon Leong 		}
6839ac746c85SOng Boon Leong 
6840ac746c85SOng Boon Leong 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6841ac746c85SOng Boon Leong 	}
6842ac746c85SOng Boon Leong 
6843ac746c85SOng Boon Leong 	/* DMA TX Channel Configuration */
6844ac746c85SOng Boon Leong 	for (chan = 0; chan < tx_cnt; chan++) {
68458531c808SChristian Marangi 		tx_q = &priv->dma_conf.tx_queue[chan];
6846ac746c85SOng Boon Leong 
6847ac746c85SOng Boon Leong 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6848ac746c85SOng Boon Leong 				    tx_q->dma_tx_phy, chan);
6849ac746c85SOng Boon Leong 
6850ac746c85SOng Boon Leong 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6851ac746c85SOng Boon Leong 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6852ac746c85SOng Boon Leong 				       tx_q->tx_tail_addr, chan);
685361da6ac7SOng Boon Leong 
685461da6ac7SOng Boon Leong 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
685561da6ac7SOng Boon Leong 		tx_q->txtimer.function = stmmac_tx_timer;
6856ac746c85SOng Boon Leong 	}
6857ac746c85SOng Boon Leong 
6858ac746c85SOng Boon Leong 	/* Enable the MAC Rx/Tx */
6859ac746c85SOng Boon Leong 	stmmac_mac_set(priv, priv->ioaddr, true);
6860ac746c85SOng Boon Leong 
6861ac746c85SOng Boon Leong 	/* Start Rx & Tx DMA Channels */
6862ac746c85SOng Boon Leong 	stmmac_start_all_dma(priv);
6863ac746c85SOng Boon Leong 
6864ac746c85SOng Boon Leong 	ret = stmmac_request_irq(dev);
6865ac746c85SOng Boon Leong 	if (ret)
6866ac746c85SOng Boon Leong 		goto irq_error;
6867ac746c85SOng Boon Leong 
6868ac746c85SOng Boon Leong 	/* Enable NAPI process */
6869ac746c85SOng Boon Leong 	stmmac_enable_all_queues(priv);
6870ac746c85SOng Boon Leong 	netif_carrier_on(dev);
6871ac746c85SOng Boon Leong 	netif_tx_start_all_queues(dev);
6872087a7b94SVincent Whitchurch 	stmmac_enable_all_dma_irq(priv);
6873ac746c85SOng Boon Leong 
6874ac746c85SOng Boon Leong 	return 0;
6875ac746c85SOng Boon Leong 
6876ac746c85SOng Boon Leong irq_error:
6877ac746c85SOng Boon Leong 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
68788531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6879ac746c85SOng Boon Leong 
6880ac746c85SOng Boon Leong 	stmmac_hw_teardown(dev);
6881ac746c85SOng Boon Leong init_error:
6882ba39b344SChristian Marangi 	free_dma_desc_resources(priv, &priv->dma_conf);
6883ac746c85SOng Boon Leong dma_desc_error:
6884ac746c85SOng Boon Leong 	return ret;
6885ac746c85SOng Boon Leong }
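
/* stmmac_xdp_release() and stmmac_xdp_open() mirror the regular
 * ndo_stop/ndo_open teardown and bring-up but leave the phylink state
 * untouched, so an XDP program or XSK pool change can rebuild the DMA
 * rings without bouncing the link itself.
 */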
6886ac746c85SOng Boon Leong 
6887bba2556eSOng Boon Leong int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6888bba2556eSOng Boon Leong {
6889bba2556eSOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
6890bba2556eSOng Boon Leong 	struct stmmac_rx_queue *rx_q;
6891132c32eeSOng Boon Leong 	struct stmmac_tx_queue *tx_q;
6892bba2556eSOng Boon Leong 	struct stmmac_channel *ch;
6893bba2556eSOng Boon Leong 
6894bba2556eSOng Boon Leong 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6895bba2556eSOng Boon Leong 	    !netif_carrier_ok(priv->dev))
6896bba2556eSOng Boon Leong 		return -ENETDOWN;
6897bba2556eSOng Boon Leong 
6898bba2556eSOng Boon Leong 	if (!stmmac_xdp_is_enabled(priv))
6899a817ead4SMaciej Fijalkowski 		return -EINVAL;
6900bba2556eSOng Boon Leong 
6901132c32eeSOng Boon Leong 	if (queue >= priv->plat->rx_queues_to_use ||
6902132c32eeSOng Boon Leong 	    queue >= priv->plat->tx_queues_to_use)
6903bba2556eSOng Boon Leong 		return -EINVAL;
6904bba2556eSOng Boon Leong 
69058531c808SChristian Marangi 	rx_q = &priv->dma_conf.rx_queue[queue];
69068531c808SChristian Marangi 	tx_q = &priv->dma_conf.tx_queue[queue];
6907bba2556eSOng Boon Leong 	ch = &priv->channel[queue];
6908bba2556eSOng Boon Leong 
6909132c32eeSOng Boon Leong 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6910a817ead4SMaciej Fijalkowski 		return -EINVAL;
6911bba2556eSOng Boon Leong 
6912132c32eeSOng Boon Leong 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6913bba2556eSOng Boon Leong 		/* EQoS does not have a per-DMA channel SW interrupt,
6914bba2556eSOng Boon Leong 		 * so we schedule the RX NAPI straight away.
6915bba2556eSOng Boon Leong 		 */
6916132c32eeSOng Boon Leong 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6917132c32eeSOng Boon Leong 			__napi_schedule(&ch->rxtx_napi);
6918bba2556eSOng Boon Leong 	}
6919bba2556eSOng Boon Leong 
6920bba2556eSOng Boon Leong 	return 0;
6921bba2556eSOng Boon Leong }
6922bba2556eSOng Boon Leong 
6923133466c3SJisheng Zhang static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6924133466c3SJisheng Zhang {
6925133466c3SJisheng Zhang 	struct stmmac_priv *priv = netdev_priv(dev);
6926133466c3SJisheng Zhang 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6927133466c3SJisheng Zhang 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6928133466c3SJisheng Zhang 	unsigned int start;
6929133466c3SJisheng Zhang 	int q;
6930133466c3SJisheng Zhang 
6931133466c3SJisheng Zhang 	for (q = 0; q < tx_cnt; q++) {
69328070274bSJisheng Zhang 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6933133466c3SJisheng Zhang 		u64 tx_packets;
6934133466c3SJisheng Zhang 		u64 tx_bytes;
6935133466c3SJisheng Zhang 
6936133466c3SJisheng Zhang 		do {
69379680b2abSPetr Tesarik 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
69389680b2abSPetr Tesarik 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
69399680b2abSPetr Tesarik 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
69409680b2abSPetr Tesarik 		do {
69419680b2abSPetr Tesarik 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
69429680b2abSPetr Tesarik 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
69439680b2abSPetr Tesarik 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
6944133466c3SJisheng Zhang 
6945133466c3SJisheng Zhang 		stats->tx_packets += tx_packets;
6946133466c3SJisheng Zhang 		stats->tx_bytes += tx_bytes;
6947133466c3SJisheng Zhang 	}
6948133466c3SJisheng Zhang 
6949133466c3SJisheng Zhang 	for (q = 0; q < rx_cnt; q++) {
69508070274bSJisheng Zhang 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6951133466c3SJisheng Zhang 		u64 rx_packets;
6952133466c3SJisheng Zhang 		u64 rx_bytes;
6953133466c3SJisheng Zhang 
6954133466c3SJisheng Zhang 		do {
69559680b2abSPetr Tesarik 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
69569680b2abSPetr Tesarik 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
69579680b2abSPetr Tesarik 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
69589680b2abSPetr Tesarik 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
6959133466c3SJisheng Zhang 
6960133466c3SJisheng Zhang 		stats->rx_packets += rx_packets;
6961133466c3SJisheng Zhang 		stats->rx_bytes += rx_bytes;
6962133466c3SJisheng Zhang 	}
6963133466c3SJisheng Zhang 
6964133466c3SJisheng Zhang 	stats->rx_dropped = priv->xstats.rx_dropped;
6965133466c3SJisheng Zhang 	stats->rx_errors = priv->xstats.rx_errors;
6966133466c3SJisheng Zhang 	stats->tx_dropped = priv->xstats.tx_dropped;
6967133466c3SJisheng Zhang 	stats->tx_errors = priv->xstats.tx_errors;
6968133466c3SJisheng Zhang 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6969133466c3SJisheng Zhang 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6970133466c3SJisheng Zhang 	stats->rx_length_errors = priv->xstats.rx_length;
6971133466c3SJisheng Zhang 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6972133466c3SJisheng Zhang 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6973133466c3SJisheng Zhang 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6974133466c3SJisheng Zhang }
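
/* The u64_stats_fetch_begin()/u64_stats_fetch_retry() loops above form a
 * seqcount read side: on 32-bit machines a concurrent writer can race a
 * 64-bit counter read, so the reader retries until it observes a stable
 * snapshot. On 64-bit builds the helpers typically reduce to plain loads.
 */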
6975133466c3SJisheng Zhang 
69767ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
69777ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
69787ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
69797ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
69807ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
69817ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
6982d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
698301789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
69847ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
6985a7605370SArnd Bergmann 	.ndo_eth_ioctl = stmmac_ioctl,
6986133466c3SJisheng Zhang 	.ndo_get_stats64 = stmmac_get_stats64,
69874dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
69884993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
6989a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
69903cd1cfcbSJose Abreu 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
69913cd1cfcbSJose Abreu 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
69925fabb012SOng Boon Leong 	.ndo_bpf = stmmac_bpf,
69938b278a5bSOng Boon Leong 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6994bba2556eSOng Boon Leong 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
69957ac6653aSJeff Kirsher };
69967ac6653aSJeff Kirsher 
699734877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
699834877a15SJose Abreu {
699934877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
700034877a15SJose Abreu 		return;
700134877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
700234877a15SJose Abreu 		return;
700334877a15SJose Abreu 
700434877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
700534877a15SJose Abreu 
700634877a15SJose Abreu 	rtnl_lock();
700734877a15SJose Abreu 	netif_trans_update(priv->dev);
700834877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
700934877a15SJose Abreu 		usleep_range(1000, 2000);
701034877a15SJose Abreu 
701134877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
701234877a15SJose Abreu 	dev_close(priv->dev);
701300f54e68SPetr Machata 	dev_open(priv->dev, NULL);
701434877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
701534877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
701634877a15SJose Abreu 	rtnl_unlock();
701734877a15SJose Abreu }
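/* Sketch of the producer side (error paths elsewhere in this file follow
 * roughly this shape): mark the reset request, then schedule the service
 * task at most once.
 *
 *	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
 *	if (!test_bit(STMMAC_DOWN, &priv->state) &&
 *	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
 *		queue_work(priv->wq, &priv->service_task);
 */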
701834877a15SJose Abreu 
701934877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
702034877a15SJose Abreu {
702134877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
702234877a15SJose Abreu 			service_task);
702334877a15SJose Abreu 
702434877a15SJose Abreu 	stmmac_reset_subtask(priv);
702534877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
702634877a15SJose Abreu }
702734877a15SJose Abreu 
70287ac6653aSJeff Kirsher /**
7029cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
703032ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
7031732fdf0eSGiuseppe CAVALLARO  *  Description: this function configures the MAC device according to
7032732fdf0eSGiuseppe CAVALLARO  *  platform parameters and the HW capability register. It prepares the
7033732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to setup either enhanced or
7034732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
7035cf3f047bSGiuseppe CAVALLARO  */
7036cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
7037cf3f047bSGiuseppe CAVALLARO {
70385f0456b4SJose Abreu 	int ret;
7039cf3f047bSGiuseppe CAVALLARO 
70409f93ac8dSLABBE Corentin 	/* dwmac-sun8i only works in chain mode */
7041d8daff28SBartosz Golaszewski 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
70429f93ac8dSLABBE Corentin 		chain_mode = 1;
70435f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
70449f93ac8dSLABBE Corentin 
70455f0456b4SJose Abreu 	/* Initialize HW Interface */
70465f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
70475f0456b4SJose Abreu 	if (ret)
70485f0456b4SJose Abreu 		return ret;
70494a7d666aSGiuseppe CAVALLARO 
7050cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (on GMAC cores newer than 3.50a) */
7051cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7052cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
705338ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
7054cf3f047bSGiuseppe CAVALLARO 
7055cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields
7056cf3f047bSGiuseppe CAVALLARO 		 * (e.g. enh_desc, tx_coe) that are passed through the
7057cf3f047bSGiuseppe CAVALLARO 		 * platform with the values from the HW capability
7058cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
7059cf3f047bSGiuseppe CAVALLARO 		 */
7060cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
70615a9b876eSLing Pei Lee 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7062fd1d62d8SBartosz Golaszewski 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
70633fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
7064b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
7065b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
7066b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7067b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
7068b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
7069b8ef7020SBiao Huang 		}
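		/* Worked example: a HW snapshot reporting HASH_TB_SZ = 2
		 * yields (BIT(2) << 5) = 128 multicast filter bins and
		 * mcast_bits_log2 = ilog2(128) = 7.
		 */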
707038912bdbSDeepak SIKRI 
7071a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
7072a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
7073a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
7074a8df35d4SEzequiel Garcia 		else
707538912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7076a8df35d4SEzequiel Garcia 
7077f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
7078f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
707938912bdbSDeepak SIKRI 
708038912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
708138912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
708238912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
708338912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
708438912bdbSDeepak SIKRI 
708538ddc59dSLABBE Corentin 	} else {
708638ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
708738ddc59dSLABBE Corentin 	}
7088cf3f047bSGiuseppe CAVALLARO 
7089d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
7090d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
709138ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7092f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
709338ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7094d2afb5bdSGiuseppe CAVALLARO 	}
7095cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
709638ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
7097cf3f047bSGiuseppe CAVALLARO 
7098cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
709938ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7100cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
7101cf3f047bSGiuseppe CAVALLARO 	}
7102cf3f047bSGiuseppe CAVALLARO 
7103f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
710438ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
7105f748be53SAlexandre TORGUE 
710680083bd4SKunihiko Hayashi 	if (priv->dma_cap.number_rx_queues &&
710780083bd4SKunihiko Hayashi 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
710880083bd4SKunihiko Hayashi 		dev_warn(priv->device,
710980083bd4SKunihiko Hayashi 			 "Number of Rx queues (%u) exceeds dma capability\n",
711080083bd4SKunihiko Hayashi 			 priv->plat->rx_queues_to_use);
711180083bd4SKunihiko Hayashi 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
711280083bd4SKunihiko Hayashi 	}
711380083bd4SKunihiko Hayashi 	if (priv->dma_cap.number_tx_queues &&
711480083bd4SKunihiko Hayashi 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
711580083bd4SKunihiko Hayashi 		dev_warn(priv->device,
711680083bd4SKunihiko Hayashi 			 "Number of Tx queues (%u) exceeds dma capability\n",
711780083bd4SKunihiko Hayashi 			 priv->plat->tx_queues_to_use);
711880083bd4SKunihiko Hayashi 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
711980083bd4SKunihiko Hayashi 	}
712080083bd4SKunihiko Hayashi 
7121*6d3693d4SKunihiko Hayashi 	if (priv->dma_cap.rx_fifo_size &&
7122*6d3693d4SKunihiko Hayashi 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7123*6d3693d4SKunihiko Hayashi 		dev_warn(priv->device,
7124*6d3693d4SKunihiko Hayashi 			 "Rx FIFO size (%u) exceeds dma capability\n",
7125*6d3693d4SKunihiko Hayashi 			 priv->plat->rx_fifo_size);
7126*6d3693d4SKunihiko Hayashi 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7127*6d3693d4SKunihiko Hayashi 	}
7128*6d3693d4SKunihiko Hayashi 	if (priv->dma_cap.tx_fifo_size &&
7129*6d3693d4SKunihiko Hayashi 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7130*6d3693d4SKunihiko Hayashi 		dev_warn(priv->device,
7131*6d3693d4SKunihiko Hayashi 			 "Tx FIFO size (%u) exceeds dma capability\n",
7132*6d3693d4SKunihiko Hayashi 			 priv->plat->tx_fifo_size);
7133*6d3693d4SKunihiko Hayashi 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7134*6d3693d4SKunihiko Hayashi 	}
7135*6d3693d4SKunihiko Hayashi 
7136fc02152bSBartosz Golaszewski 	priv->hw->vlan_fail_q_en =
7137fc02152bSBartosz Golaszewski 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7138e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7139e0f9956aSChuah, Kim Tatt 
71407cfde0afSJose Abreu 	/* Run HW quirks, if any */
71417cfde0afSJose Abreu 	if (priv->hwif_quirks) {
71427cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
71437cfde0afSJose Abreu 		if (ret)
71447cfde0afSJose Abreu 			return ret;
71457cfde0afSJose Abreu 	}
71467cfde0afSJose Abreu 
71473b509466SJose Abreu 	/* Rx Watchdog is available in cores newer than 3.40.
71483b509466SJose Abreu 	 * In some cases, for example on buggy HW, this feature
71493b509466SJose Abreu 	 * has to be disabled; this can be done by passing the
71503b509466SJose Abreu 	 * riwt_off field from the platform.
71513b509466SJose Abreu 	 */
71523b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
71533b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
71543b509466SJose Abreu 		priv->use_riwt = 1;
71553b509466SJose Abreu 		dev_info(priv->device,
71563b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
71573b509466SJose Abreu 	}
71583b509466SJose Abreu 
7159c24602efSGiuseppe CAVALLARO 	return 0;
7160cf3f047bSGiuseppe CAVALLARO }
7161cf3f047bSGiuseppe CAVALLARO 
71620366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev)
71630366f7e0SOng Boon Leong {
71640366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
71650366f7e0SOng Boon Leong 	u32 queue, maxq;
71660366f7e0SOng Boon Leong 
71670366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
71680366f7e0SOng Boon Leong 
71690366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
71700366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
71710366f7e0SOng Boon Leong 
71720366f7e0SOng Boon Leong 		ch->priv_data = priv;
71730366f7e0SOng Boon Leong 		ch->index = queue;
71742b94f526SMarek Szyprowski 		spin_lock_init(&ch->lock);
71750366f7e0SOng Boon Leong 
71760366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use) {
7177b48b89f9SJakub Kicinski 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
71780366f7e0SOng Boon Leong 		}
71790366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use) {
718016d083e2SJakub Kicinski 			netif_napi_add_tx(dev, &ch->tx_napi,
718116d083e2SJakub Kicinski 					  stmmac_napi_poll_tx);
71820366f7e0SOng Boon Leong 		}
7183132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
7184132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
7185132c32eeSOng Boon Leong 			netif_napi_add(dev, &ch->rxtx_napi,
7186b48b89f9SJakub Kicinski 				       stmmac_napi_poll_rxtx);
7187132c32eeSOng Boon Leong 		}
71880366f7e0SOng Boon Leong 	}
71890366f7e0SOng Boon Leong }
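/* Resulting per-channel NAPI layout: queue indices below rx_queues_to_use
 * get an rx_napi, indices below tx_queues_to_use a tx_napi, and indices in
 * both ranges additionally get rxtx_napi, which polls RX and TX together
 * for the XDP zero-copy (XSK) path.
 */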
71900366f7e0SOng Boon Leong 
71910366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev)
71920366f7e0SOng Boon Leong {
71930366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
71940366f7e0SOng Boon Leong 	u32 queue, maxq;
71950366f7e0SOng Boon Leong 
71960366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
71970366f7e0SOng Boon Leong 
71980366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
71990366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
72000366f7e0SOng Boon Leong 
72010366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use)
72020366f7e0SOng Boon Leong 			netif_napi_del(&ch->rx_napi);
72030366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use)
72040366f7e0SOng Boon Leong 			netif_napi_del(&ch->tx_napi);
7205132c32eeSOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use &&
7206132c32eeSOng Boon Leong 		    queue < priv->plat->tx_queues_to_use) {
7207132c32eeSOng Boon Leong 			netif_napi_del(&ch->rxtx_napi);
7208132c32eeSOng Boon Leong 		}
72090366f7e0SOng Boon Leong 	}
72100366f7e0SOng Boon Leong }
72110366f7e0SOng Boon Leong 
72120366f7e0SOng Boon Leong int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
72130366f7e0SOng Boon Leong {
72140366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
7215218c5973SCorinna Vinschen 	int ret = 0, i;
7216178856bfSSerge Semin 	int max_speed;
72170366f7e0SOng Boon Leong 
72180366f7e0SOng Boon Leong 	if (netif_running(dev))
72190366f7e0SOng Boon Leong 		stmmac_release(dev);
72200366f7e0SOng Boon Leong 
72210366f7e0SOng Boon Leong 	stmmac_napi_del(dev);
72220366f7e0SOng Boon Leong 
72230366f7e0SOng Boon Leong 	priv->plat->rx_queues_to_use = rx_cnt;
72240366f7e0SOng Boon Leong 	priv->plat->tx_queues_to_use = tx_cnt;
7225218c5973SCorinna Vinschen 	if (!netif_is_rxfh_configured(dev))
7226218c5973SCorinna Vinschen 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7227218c5973SCorinna Vinschen 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7228218c5973SCorinna Vinschen 									rx_cnt);
72290366f7e0SOng Boon Leong 
72300580dcc5SSerge Semin 	stmmac_mac_phylink_get_caps(priv);
72310580dcc5SSerge Semin 
723293d565ebSSerge Semin 	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
723393d565ebSSerge Semin 
7234178856bfSSerge Semin 	max_speed = priv->plat->max_speed;
7235178856bfSSerge Semin 	if (max_speed)
7236178856bfSSerge Semin 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
7237178856bfSSerge Semin 
72380366f7e0SOng Boon Leong 	stmmac_napi_add(dev);
72390366f7e0SOng Boon Leong 
72400366f7e0SOng Boon Leong 	if (netif_running(dev))
72410366f7e0SOng Boon Leong 		ret = stmmac_open(dev);
72420366f7e0SOng Boon Leong 
72430366f7e0SOng Boon Leong 	return ret;
72440366f7e0SOng Boon Leong }
72450366f7e0SOng Boon Leong 
7246aa042f60SSong, Yoong Siang int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7247aa042f60SSong, Yoong Siang {
7248aa042f60SSong, Yoong Siang 	struct stmmac_priv *priv = netdev_priv(dev);
7249aa042f60SSong, Yoong Siang 	int ret = 0;
7250aa042f60SSong, Yoong Siang 
7251aa042f60SSong, Yoong Siang 	if (netif_running(dev))
7252aa042f60SSong, Yoong Siang 		stmmac_release(dev);
7253aa042f60SSong, Yoong Siang 
72548531c808SChristian Marangi 	priv->dma_conf.dma_rx_size = rx_size;
72558531c808SChristian Marangi 	priv->dma_conf.dma_tx_size = tx_size;
7256aa042f60SSong, Yoong Siang 
7257aa042f60SSong, Yoong Siang 	if (netif_running(dev))
7258aa042f60SSong, Yoong Siang 		ret = stmmac_open(dev);
7259aa042f60SSong, Yoong Siang 
7260aa042f60SSong, Yoong Siang 	return ret;
7261aa042f60SSong, Yoong Siang }
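/* Both reinit helpers back ethtool reconfiguration of a (possibly running)
 * interface; the corresponding userspace invocations would be, e.g.
 * ("eth0" and the counts are placeholders):
 *
 *	ethtool -L eth0 rx 4 tx 4	-> stmmac_reinit_queues()
 *	ethtool -G eth0 rx 1024 tx 1024	-> stmmac_reinit_ringparam()
 */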
7262aa042f60SSong, Yoong Siang 
72635a558611SOng Boon Leong #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
72645a558611SOng Boon Leong static void stmmac_fpe_lp_task(struct work_struct *work)
72655a558611SOng Boon Leong {
72665a558611SOng Boon Leong 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
72675a558611SOng Boon Leong 						fpe_task);
72685a558611SOng Boon Leong 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
72695a558611SOng Boon Leong 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
72705a558611SOng Boon Leong 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
72715a558611SOng Boon Leong 	bool *hs_enable = &fpe_cfg->hs_enable;
72725a558611SOng Boon Leong 	bool *enable = &fpe_cfg->enable;
72735a558611SOng Boon Leong 	int retries = 20;
72745a558611SOng Boon Leong 
72755a558611SOng Boon Leong 	while (retries-- > 0) {
72765a558611SOng Boon Leong 		/* Bail out immediately if FPE handshake is OFF */
72775a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
72785a558611SOng Boon Leong 			break;
72795a558611SOng Boon Leong 
72805a558611SOng Boon Leong 		if (*lo_state == FPE_STATE_ENTERING_ON &&
72815a558611SOng Boon Leong 		    *lp_state == FPE_STATE_ENTERING_ON) {
72825a558611SOng Boon Leong 			stmmac_fpe_configure(priv, priv->ioaddr,
7283e1fbdef9SJianheng Zhang 					     fpe_cfg,
72845a558611SOng Boon Leong 					     priv->plat->tx_queues_to_use,
72855a558611SOng Boon Leong 					     priv->plat->rx_queues_to_use,
72865a558611SOng Boon Leong 					     *enable);
72875a558611SOng Boon Leong 
72885a558611SOng Boon Leong 			netdev_info(priv->dev, "configured FPE\n");
72895a558611SOng Boon Leong 
72905a558611SOng Boon Leong 			*lo_state = FPE_STATE_ON;
72915a558611SOng Boon Leong 			*lp_state = FPE_STATE_ON;
72925a558611SOng Boon Leong 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
72935a558611SOng Boon Leong 			break;
72945a558611SOng Boon Leong 		}
72955a558611SOng Boon Leong 
72965a558611SOng Boon Leong 		if ((*lo_state == FPE_STATE_CAPABLE ||
72975a558611SOng Boon Leong 		     *lo_state == FPE_STATE_ENTERING_ON) &&
72985a558611SOng Boon Leong 		     *lp_state != FPE_STATE_ON) {
72995a558611SOng Boon Leong 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
73005a558611SOng Boon Leong 				    *lo_state, *lp_state);
73015a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7302e1fbdef9SJianheng Zhang 						fpe_cfg,
73035a558611SOng Boon Leong 						MPACKET_VERIFY);
73045a558611SOng Boon Leong 		}
73055a558611SOng Boon Leong 		/* Sleep then retry */
73065a558611SOng Boon Leong 		msleep(500);
73075a558611SOng Boon Leong 	}
73085a558611SOng Boon Leong 
73095a558611SOng Boon Leong 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
73105a558611SOng Boon Leong }
73115a558611SOng Boon Leong 
73125a558611SOng Boon Leong void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
73135a558611SOng Boon Leong {
73145a558611SOng Boon Leong 	if (priv->plat->fpe_cfg->hs_enable != enable) {
73155a558611SOng Boon Leong 		if (enable) {
73165a558611SOng Boon Leong 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7317e1fbdef9SJianheng Zhang 						priv->plat->fpe_cfg,
73185a558611SOng Boon Leong 						MPACKET_VERIFY);
73195a558611SOng Boon Leong 		} else {
73205a558611SOng Boon Leong 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
73215a558611SOng Boon Leong 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
73225a558611SOng Boon Leong 		}
73235a558611SOng Boon Leong 
73245a558611SOng Boon Leong 		priv->plat->fpe_cfg->hs_enable = enable;
73255a558611SOng Boon Leong 	}
73265a558611SOng Boon Leong }
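/* Handshake flow implemented above, roughly:
 *
 *	OFF --(hs_enable; verify mPackets exchanged)--> ENTERING_ON
 *	both sides ENTERING_ON --> stmmac_fpe_configure() --> ON
 *
 * The worker retries the verify mPacket every 500 ms, at most 20 times,
 * and bails out as soon as the handshake is switched off.
 */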
73275a558611SOng Boon Leong 
7328e3f9c3e3SSong Yoong Siang static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7329e3f9c3e3SSong Yoong Siang {
7330e3f9c3e3SSong Yoong Siang 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7331e3f9c3e3SSong Yoong Siang 	struct dma_desc *desc_contains_ts = ctx->desc;
7332e3f9c3e3SSong Yoong Siang 	struct stmmac_priv *priv = ctx->priv;
7333e3f9c3e3SSong Yoong Siang 	struct dma_desc *ndesc = ctx->ndesc;
7334e3f9c3e3SSong Yoong Siang 	struct dma_desc *desc = ctx->desc;
7335e3f9c3e3SSong Yoong Siang 	u64 ns = 0;
7336e3f9c3e3SSong Yoong Siang 
7337e3f9c3e3SSong Yoong Siang 	if (!priv->hwts_rx_en)
7338e3f9c3e3SSong Yoong Siang 		return -ENODATA;
7339e3f9c3e3SSong Yoong Siang 
7340e3f9c3e3SSong Yoong Siang 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7341e3f9c3e3SSong Yoong Siang 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7342e3f9c3e3SSong Yoong Siang 		desc_contains_ts = ndesc;
7343e3f9c3e3SSong Yoong Siang 
7344e3f9c3e3SSong Yoong Siang 	/* Check if timestamp is available */
7345e3f9c3e3SSong Yoong Siang 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7346e3f9c3e3SSong Yoong Siang 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7347e3f9c3e3SSong Yoong Siang 		ns -= priv->plat->cdc_error_adj;
7348e3f9c3e3SSong Yoong Siang 		*timestamp = ns_to_ktime(ns);
7349e3f9c3e3SSong Yoong Siang 		return 0;
7350e3f9c3e3SSong Yoong Siang 	}
7351e3f9c3e3SSong Yoong Siang 
7352e3f9c3e3SSong Yoong Siang 	return -ENODATA;
7353e3f9c3e3SSong Yoong Siang }
7354e3f9c3e3SSong Yoong Siang 
7355e3f9c3e3SSong Yoong Siang static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7356e3f9c3e3SSong Yoong Siang 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7357e3f9c3e3SSong Yoong Siang };
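/* Consumer-side sketch: an XDP program can read the timestamp exposed by
 * the xmo_rx_timestamp hook above through the RX metadata kfunc (see
 * Documentation/networking/xdp-rx-metadata.rst); "rx_ts_prog" is a
 * hypothetical program name:
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym;
 *
 *	SEC("xdp")
 *	int rx_ts_prog(struct xdp_md *ctx)
 *	{
 *		__u64 ts;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			bpf_printk("rx hw timestamp: %llu", ts);
 *		return XDP_PASS;
 *	}
 */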
7358e3f9c3e3SSong Yoong Siang 
7359cf3f047bSGiuseppe CAVALLARO /**
7360bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
7361bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
7362ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
7363e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
7364bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function: it allocates the net
7365bfab27a1SGiuseppe CAVALLARO  * device via alloc_etherdev and initializes the private structure.
73669afec6efSAndy Shevchenko  * Return:
736715ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
73687ac6653aSJeff Kirsher  */
736915ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
7370cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
7371e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
73727ac6653aSJeff Kirsher {
7373bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
7374bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
73750366f7e0SOng Boon Leong 	u32 rxq;
737676067459SJose Abreu 	int i, ret = 0;
73777ac6653aSJeff Kirsher 
73789737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
73799737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
738041de8d4cSJoe Perches 	if (!ndev)
738115ffac73SJoachim Eastwood 		return -ENOMEM;
73827ac6653aSJeff Kirsher 
7383bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
73847ac6653aSJeff Kirsher 
7385bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
7386bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
7387bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
7388bfab27a1SGiuseppe CAVALLARO 
7389133466c3SJisheng Zhang 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
73909680b2abSPetr Tesarik 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
73919680b2abSPetr Tesarik 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
73929680b2abSPetr Tesarik 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
73939680b2abSPetr Tesarik 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
73949680b2abSPetr Tesarik 	}
73959680b2abSPetr Tesarik 
73969680b2abSPetr Tesarik 	priv->xstats.pcpu_stats =
73979680b2abSPetr Tesarik 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
73989680b2abSPetr Tesarik 	if (!priv->xstats.pcpu_stats)
73999680b2abSPetr Tesarik 		return -ENOMEM;
7400133466c3SJisheng Zhang 
7401bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
7402cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
7403cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
7404e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
7405e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
7406956c3f09SBartosz Golaszewski 	priv->plat->dma_cfg->multi_msi_en =
7407956c3f09SBartosz Golaszewski 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7408e56788cfSJoachim Eastwood 
7409e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
7410e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
7411e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
74128532f613SOng Boon Leong 	priv->sfty_ce_irq = res->sfty_ce_irq;
74138532f613SOng Boon Leong 	priv->sfty_ue_irq = res->sfty_ue_irq;
74148532f613SOng Boon Leong 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
74158532f613SOng Boon Leong 		priv->rx_irq[i] = res->rx_irq[i];
74168532f613SOng Boon Leong 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
74178532f613SOng Boon Leong 		priv->tx_irq[i] = res->tx_irq[i];
7418e56788cfSJoachim Eastwood 
741983216e39SMichael Walle 	if (!is_zero_ether_addr(res->mac))
7420a96d317fSJakub Kicinski 		eth_hw_addr_set(priv->dev, res->mac);
7421bfab27a1SGiuseppe CAVALLARO 
7422a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
7423803f8fc4SJoachim Eastwood 
7424cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
7425cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
7426cf3f047bSGiuseppe CAVALLARO 
7427bba2556eSOng Boon Leong 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7428bba2556eSOng Boon Leong 	if (!priv->af_xdp_zc_qps)
7429bba2556eSOng Boon Leong 		return -ENOMEM;
7430bba2556eSOng Boon Leong 
743134877a15SJose Abreu 	/* Allocate workqueue */
743234877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
743334877a15SJose Abreu 	if (!priv->wq) {
743434877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
74352cb815cfSGaosheng Cui 		ret = -ENOMEM;
7436a137f3f2SGaosheng Cui 		goto error_wq_init;
743734877a15SJose Abreu 	}
743834877a15SJose Abreu 
743934877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
744034877a15SJose Abreu 
74415a558611SOng Boon Leong 	/* Initialize Link Partner FPE workqueue */
74425a558611SOng Boon Leong 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
74435a558611SOng Boon Leong 
7444cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
7445ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
7446ceb69499SGiuseppe CAVALLARO 	 */
7447cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
7448cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
7449cf3f047bSGiuseppe CAVALLARO 
745090f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
745190f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
7452f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
745390f522a2SEugeniy Paltsev 		/* Some reset controllers have only reset callback instead of
745490f522a2SEugeniy Paltsev 		 * assert + deassert callbacks pair.
745590f522a2SEugeniy Paltsev 		 */
745690f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
745790f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
745890f522a2SEugeniy Paltsev 	}
7459c5e4ddbdSChen-Yu Tsai 
7460e67f325eSMatthew Hagan 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7461e67f325eSMatthew Hagan 	if (ret == -ENOTSUPP)
7462e67f325eSMatthew Hagan 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7463e67f325eSMatthew Hagan 			ERR_PTR(ret));
7464e67f325eSMatthew Hagan 
74656264994bSBernd Edlinger 	/* Wait a bit for the reset to take effect */
74666264994bSBernd Edlinger 	udelay(10);
74676264994bSBernd Edlinger 
7468cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
7469c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
7470c24602efSGiuseppe CAVALLARO 	if (ret)
747162866e98SChen-Yu Tsai 		goto error_hw_init;
7472cf3f047bSGiuseppe CAVALLARO 
747396874c61SMohammad Athari Bin Ismail 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
747496874c61SMohammad Athari Bin Ismail 	 */
747596874c61SMohammad Athari Bin Ismail 	if (priv->synopsys_id < DWMAC_CORE_5_20)
747696874c61SMohammad Athari Bin Ismail 		priv->plat->dma_cfg->dche = false;
747796874c61SMohammad Athari Bin Ismail 
7478b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
7479b561af36SVinod Koul 
7480cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
7481cf3f047bSGiuseppe CAVALLARO 
7482e3f9c3e3SSong Yoong Siang 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7483e3f9c3e3SSong Yoong Siang 
7484cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7485cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
748666c0e13aSMarek Majtyka 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7487ffb33221SWei Fang 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7488f748be53SAlexandre TORGUE 
74894dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
74904dbbe8ddSJose Abreu 	if (!ret) {
74914dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
74924dbbe8ddSJose Abreu 	}
74934dbbe8ddSJose Abreu 
749468861a3bSBartosz Golaszewski 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
74959edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7496b7766206SJose Abreu 		if (priv->plat->has_gmac4)
7497b7766206SJose Abreu 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7498f748be53SAlexandre TORGUE 		priv->tso = true;
749938ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
7500f748be53SAlexandre TORGUE 	}
7501a993db88SJose Abreu 
7502309efe6eSBartosz Golaszewski 	if (priv->dma_cap.sphen &&
7503309efe6eSBartosz Golaszewski 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
750467afd6d1SJose Abreu 		ndev->hw_features |= NETIF_F_GRO;
7505d08d32d1SOng Boon Leong 		priv->sph_cap = true;
7506d08d32d1SOng Boon Leong 		priv->sph = priv->sph_cap;
750767afd6d1SJose Abreu 		dev_info(priv->device, "SPH feature enabled\n");
750867afd6d1SJose Abreu 	}
750967afd6d1SJose Abreu 
7510070246e4SJochen Henneberg 	/* Ideally our host DMA address width is the same as for the
7511070246e4SJochen Henneberg 	 * device. However, it may differ and then we have to use our
7512070246e4SJochen Henneberg 	 * host DMA width for allocation and the device DMA width for
7513070246e4SJochen Henneberg 	 * register handling.
7514f119cc98SFugang Duan 	 */
7515070246e4SJochen Henneberg 	if (priv->plat->host_dma_width)
7516070246e4SJochen Henneberg 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7517070246e4SJochen Henneberg 	else
7518070246e4SJochen Henneberg 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7519f119cc98SFugang Duan 
7520070246e4SJochen Henneberg 	if (priv->dma_cap.host_dma_width) {
7521a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
7522070246e4SJochen Henneberg 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7523a993db88SJose Abreu 		if (!ret) {
7524070246e4SJochen Henneberg 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7525070246e4SJochen Henneberg 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7526968a2978SThierry Reding 
7527968a2978SThierry Reding 			/*
7528968a2978SThierry Reding 			 * If more than 32 bits can be addressed, make sure to
7529968a2978SThierry Reding 			 * enable enhanced addressing mode.
7530968a2978SThierry Reding 			 */
7531968a2978SThierry Reding 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7532968a2978SThierry Reding 				priv->plat->dma_cfg->eame = true;
7533a993db88SJose Abreu 		} else {
7534a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7535a993db88SJose Abreu 			if (ret) {
7536a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
7537a993db88SJose Abreu 				goto error_hw_init;
7538a993db88SJose Abreu 			}
7539a993db88SJose Abreu 
7540070246e4SJochen Henneberg 			priv->dma_cap.host_dma_width = 32;
7541a993db88SJose Abreu 		}
7542a993db88SJose Abreu 	}
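	/* Worked example: a device reporting addr64 = 40 with no
	 * host_dma_width override requests DMA_BIT_MASK(40); on success
	 * (and with CONFIG_ARCH_DMA_ADDR_T_64BIT) enhanced addressing
	 * (eame) is enabled, otherwise the driver falls back to a 32-bit
	 * mask and records host_dma_width = 32.
	 */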
7543a993db88SJose Abreu 
7544bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7545bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
75467ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
75477ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
7548ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
75493cd1cfcbSJose Abreu 	if (priv->dma_cap.vlhash) {
75503cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
75513cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
75523cd1cfcbSJose Abreu 	}
755330d93227SJose Abreu 	if (priv->dma_cap.vlins) {
755430d93227SJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
755530d93227SJose Abreu 		if (priv->dma_cap.dvlan)
755630d93227SJose Abreu 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
755730d93227SJose Abreu 	}
75587ac6653aSJeff Kirsher #endif
75597ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
75607ac6653aSJeff Kirsher 
75612eb85b75SJisheng Zhang 	priv->xstats.threshold = tc;
75622eb85b75SJisheng Zhang 
756376067459SJose Abreu 	/* Initialize RSS */
756476067459SJose Abreu 	rxq = priv->plat->rx_queues_to_use;
756576067459SJose Abreu 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
756676067459SJose Abreu 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
756776067459SJose Abreu 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
756876067459SJose Abreu 
756976067459SJose Abreu 	if (priv->dma_cap.rssen && priv->plat->rss_en)
757076067459SJose Abreu 		ndev->features |= NETIF_F_RXHASH;
757176067459SJose Abreu 
75726b2c6e4aSCorinna Vinschen 	ndev->vlan_features |= ndev->features;
75736b2c6e4aSCorinna Vinschen 	/* TSO doesn't work on VLANs yet */
75746b2c6e4aSCorinna Vinschen 	ndev->vlan_features &= ~NETIF_F_TSO;
75756b2c6e4aSCorinna Vinschen 
757644770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
757744770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
757856bcd591SJose Abreu 	if (priv->plat->has_xgmac)
75797d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
758056bcd591SJose Abreu 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
758156bcd591SJose Abreu 		ndev->max_mtu = JUMBO_LEN;
758244770e11SJarod Wilson 	else
758344770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7584a2cd64f3SKweh, Hock Leong 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7585a2cd64f3SKweh, Hock Leong 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7586a2cd64f3SKweh, Hock Leong 	 */
7587a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7588a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
758944770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
7590a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
7591b618ab45SHeiner Kallweit 		dev_warn(priv->device,
7592a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
7593a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
759444770e11SJarod Wilson 
75957ac6653aSJeff Kirsher 	if (flow_ctrl)
75967ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
75977ac6653aSJeff Kirsher 
75984e195166SCorinna Vinschen 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
75994e195166SCorinna Vinschen 
76008fce3331SJose Abreu 	/* Setup channels NAPI */
76010366f7e0SOng Boon Leong 	stmmac_napi_add(ndev);
76027ac6653aSJeff Kirsher 
760329555fa3SThierry Reding 	mutex_init(&priv->lock);
76047ac6653aSJeff Kirsher 
7605cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
7606cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
7607cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Otherwise the driver will
7608cd7201f4SGiuseppe CAVALLARO 	 * try to set the MDC clock dynamically according to the actual
7609cd7201f4SGiuseppe CAVALLARO 	 * csr clock input.
7610cd7201f4SGiuseppe CAVALLARO 	 */
76115e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
7612cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
76135e7f7fc5SBiao Huang 	else
76145e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
7615cd7201f4SGiuseppe CAVALLARO 
7616e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
7617e58bb43fSGiuseppe CAVALLARO 
76185ec55823SJoakim Zhang 	pm_runtime_get_noresume(device);
76195ec55823SJoakim Zhang 	pm_runtime_set_active(device);
7620d90d0c17SKai-Heng Feng 	if (!pm_runtime_enabled(device))
76215ec55823SJoakim Zhang 		pm_runtime_enable(device);
76225ec55823SJoakim Zhang 
7623a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
76243fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
76254bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
76264bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
76274bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
7628839612d2SRasmus Villemoes 			dev_err_probe(priv->device, ret,
7629839612d2SRasmus Villemoes 				      "%s: MDIO bus (id: %d) registration failed\n",
76304bfcbd7aSFrancesco Virlinzi 				      __func__, priv->plat->bus_id);
76316a81c26fSViresh Kumar 			goto error_mdio_register;
76324bfcbd7aSFrancesco Virlinzi 		}
7633e58bb43fSGiuseppe CAVALLARO 	}
76344bfcbd7aSFrancesco Virlinzi 
763546682cb8SVoon Weifeng 	if (priv->plat->speed_mode_2500)
763646682cb8SVoon Weifeng 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
763746682cb8SVoon Weifeng 
76387413f9a6SVladimir Oltean 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7639597a68ceSVoon Weifeng 		ret = stmmac_xpcs_setup(priv->mii);
7640597a68ceSVoon Weifeng 		if (ret)
7641597a68ceSVoon Weifeng 			goto error_xpcs_setup;
7642597a68ceSVoon Weifeng 	}
7643597a68ceSVoon Weifeng 
764474371272SJose Abreu 	ret = stmmac_phy_setup(priv);
764574371272SJose Abreu 	if (ret) {
764674371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
764774371272SJose Abreu 		goto error_phy_setup;
764874371272SJose Abreu 	}
764974371272SJose Abreu 
765057016590SFlorian Fainelli 	ret = register_netdev(ndev);
7651b2eb09afSFlorian Fainelli 	if (ret) {
7652b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
765357016590SFlorian Fainelli 			__func__, ret);
7654b2eb09afSFlorian Fainelli 		goto error_netdev_register;
7655b2eb09afSFlorian Fainelli 	}
76567ac6653aSJeff Kirsher 
76575f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
76588d72ab11SGreg Kroah-Hartman 	stmmac_init_fs(ndev);
76595f2b8b62SThierry Reding #endif
76605f2b8b62SThierry Reding 
76614047b9dbSBhupesh Sharma 	if (priv->plat->dump_debug_regs)
76624047b9dbSBhupesh Sharma 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
76634047b9dbSBhupesh Sharma 
76645ec55823SJoakim Zhang 	/* Let pm_runtime_put() disable the clocks.
76655ec55823SJoakim Zhang 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
76665ec55823SJoakim Zhang 	 */
76675ec55823SJoakim Zhang 	pm_runtime_put(device);
76685ec55823SJoakim Zhang 
766957016590SFlorian Fainelli 	return ret;
76707ac6653aSJeff Kirsher 
76716a81c26fSViresh Kumar error_netdev_register:
767274371272SJose Abreu 	phylink_destroy(priv->phylink);
7673597a68ceSVoon Weifeng error_xpcs_setup:
767474371272SJose Abreu error_phy_setup:
7675a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7676b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7677b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
76787ac6653aSJeff Kirsher error_mdio_register:
76790366f7e0SOng Boon Leong 	stmmac_napi_del(ndev);
768062866e98SChen-Yu Tsai error_hw_init:
768134877a15SJose Abreu 	destroy_workqueue(priv->wq);
7682a137f3f2SGaosheng Cui error_wq_init:
7683d7f576dcSWong Vee Khee 	bitmap_free(priv->af_xdp_zc_qps);
76847ac6653aSJeff Kirsher 
768515ffac73SJoachim Eastwood 	return ret;
76867ac6653aSJeff Kirsher }
7687b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
76887ac6653aSJeff Kirsher 
76897ac6653aSJeff Kirsher /**
76907ac6653aSJeff Kirsher  * stmmac_dvr_remove
7691f4e7bd81SJoachim Eastwood  * @dev: device pointer
76927ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7693bfab27a1SGiuseppe CAVALLARO  * changes the link status and releases the DMA descriptor rings.
76947ac6653aSJeff Kirsher  */
7695ff0011cfSUwe Kleine-König void stmmac_dvr_remove(struct device *dev)
76967ac6653aSJeff Kirsher {
7697f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
76987ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
76997ac6653aSJeff Kirsher 
770038ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
77017ac6653aSJeff Kirsher 
770264495203SJisheng Zhang 	pm_runtime_get_sync(dev);
770364495203SJisheng Zhang 
7704ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
7705c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
77067ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
77077ac6653aSJeff Kirsher 	unregister_netdev(ndev);
77089a7b3950SOng Boon Leong 
7709474a31e1SAaro Koskinen #ifdef CONFIG_DEBUG_FS
7710474a31e1SAaro Koskinen 	stmmac_exit_fs(ndev);
7711474a31e1SAaro Koskinen #endif
771274371272SJose Abreu 	phylink_destroy(priv->phylink);
7713f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
7714f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
7715e67f325eSMatthew Hagan 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7716a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
77173fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7718e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
771934877a15SJose Abreu 	destroy_workqueue(priv->wq);
772029555fa3SThierry Reding 	mutex_destroy(&priv->lock);
7721d7f576dcSWong Vee Khee 	bitmap_free(priv->af_xdp_zc_qps);
77227ac6653aSJeff Kirsher 
77230d9a1591SBiao Huang 	pm_runtime_disable(dev);
77240d9a1591SBiao Huang 	pm_runtime_put_noidle(dev);
77257ac6653aSJeff Kirsher }
7726b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
77277ac6653aSJeff Kirsher 
7728732fdf0eSGiuseppe CAVALLARO /**
7729732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
7730f4e7bd81SJoachim Eastwood  * @dev: device pointer
7731732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device and it is called
7732732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queue, release the resources,
7733732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL), clean and release driver resources.
7734732fdf0eSGiuseppe CAVALLARO  */
7735f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
77367ac6653aSJeff Kirsher {
7737f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
77387ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
773914b41a29SNicolin Chen 	u32 chan;
77407ac6653aSJeff Kirsher 
77417ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
77427ac6653aSJeff Kirsher 		return 0;
77437ac6653aSJeff Kirsher 
7744134cc4ceSThierry Reding 	mutex_lock(&priv->lock);
774519e13cb2SJose Abreu 
77467ac6653aSJeff Kirsher 	netif_device_detach(ndev);
77477ac6653aSJeff Kirsher 
7748c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
77497ac6653aSJeff Kirsher 
775014b41a29SNicolin Chen 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
77518531c808SChristian Marangi 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
775214b41a29SNicolin Chen 
77535f585913SFugang Duan 	if (priv->eee_enabled) {
77545f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
77555f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
77565f585913SFugang Duan 	}
77575f585913SFugang Duan 
77587ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
7759ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
7760c24602efSGiuseppe CAVALLARO 
7761b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerdown)
7762b9663b7cSVoon Weifeng 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7763b9663b7cSVoon Weifeng 
77647ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
7765e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7766c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
776789f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
776889f7f2cfSSrinivas Kandagatla 	} else {
7769c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
7770db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
777130f347aeSYang Yingliang 	}
77725a558611SOng Boon Leong 
777329555fa3SThierry Reding 	mutex_unlock(&priv->lock);
77742d871aa0SVince Bridgers 
777590702dcdSJoakim Zhang 	rtnl_lock();
777690702dcdSJoakim Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
777790702dcdSJoakim Zhang 		phylink_suspend(priv->phylink, true);
777890702dcdSJoakim Zhang 	} else {
777990702dcdSJoakim Zhang 		if (device_may_wakeup(priv->device))
778090702dcdSJoakim Zhang 			phylink_speed_down(priv->phylink, false);
778190702dcdSJoakim Zhang 		phylink_suspend(priv->phylink, false);
778290702dcdSJoakim Zhang 	}
778390702dcdSJoakim Zhang 	rtnl_unlock();
778490702dcdSJoakim Zhang 
77855a558611SOng Boon Leong 	if (priv->dma_cap.fpesel) {
77865a558611SOng Boon Leong 		/* Disable FPE */
77875a558611SOng Boon Leong 		stmmac_fpe_configure(priv, priv->ioaddr,
7788e1fbdef9SJianheng Zhang 				     priv->plat->fpe_cfg,
77895a558611SOng Boon Leong 				     priv->plat->tx_queues_to_use,
77905a558611SOng Boon Leong 				     priv->plat->rx_queues_to_use, false);
77915a558611SOng Boon Leong 
77925a558611SOng Boon Leong 		stmmac_fpe_handshake(priv, false);
77936b28a86dSMohammad Athari Bin Ismail 		stmmac_fpe_stop_wq(priv);
77945a558611SOng Boon Leong 	}
77955a558611SOng Boon Leong 
7796bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
77977ac6653aSJeff Kirsher 	return 0;
77987ac6653aSJeff Kirsher }
7799b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
78007ac6653aSJeff Kirsher 
7801f9ec5723SChristian Marangi static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7802f9ec5723SChristian Marangi {
78038531c808SChristian Marangi 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7804f9ec5723SChristian Marangi 
7805f9ec5723SChristian Marangi 	rx_q->cur_rx = 0;
7806f9ec5723SChristian Marangi 	rx_q->dirty_rx = 0;
7807f9ec5723SChristian Marangi }
7808f9ec5723SChristian Marangi 
7809f9ec5723SChristian Marangi static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7810f9ec5723SChristian Marangi {
78118531c808SChristian Marangi 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7812f9ec5723SChristian Marangi 
7813f9ec5723SChristian Marangi 	tx_q->cur_tx = 0;
7814f9ec5723SChristian Marangi 	tx_q->dirty_tx = 0;
7815f9ec5723SChristian Marangi 	tx_q->mss = 0;
7816f9ec5723SChristian Marangi 
7817f9ec5723SChristian Marangi 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7818f9ec5723SChristian Marangi }
7819f9ec5723SChristian Marangi 
7820732fdf0eSGiuseppe CAVALLARO /**
782154139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
7822d0ea5cbdSJesse Brandeburg  * @priv: device pointer
782354139cf3SJoao Pinto  */
782454139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
782554139cf3SJoao Pinto {
782654139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7827ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
782854139cf3SJoao Pinto 	u32 queue;
782954139cf3SJoao Pinto 
7830f9ec5723SChristian Marangi 	for (queue = 0; queue < rx_cnt; queue++)
7831f9ec5723SChristian Marangi 		stmmac_reset_rx_queue(priv, queue);
783254139cf3SJoao Pinto 
7833f9ec5723SChristian Marangi 	for (queue = 0; queue < tx_cnt; queue++)
7834f9ec5723SChristian Marangi 		stmmac_reset_tx_queue(priv, queue);
783554139cf3SJoao Pinto }
783654139cf3SJoao Pinto 
783754139cf3SJoao Pinto /**
7838732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
7839f4e7bd81SJoachim Eastwood  * @dev: device pointer
7840732fdf0eSGiuseppe CAVALLARO  * Description: when resume this function is invoked to setup the DMA and CORE
7841732fdf0eSGiuseppe CAVALLARO  * in a usable state.
7842732fdf0eSGiuseppe CAVALLARO  */
7843f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
78447ac6653aSJeff Kirsher {
7845f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
78467ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
7847b9663b7cSVoon Weifeng 	int ret;
78487ac6653aSJeff Kirsher 
78497ac6653aSJeff Kirsher 	if (!netif_running(ndev))
78507ac6653aSJeff Kirsher 		return 0;
78517ac6653aSJeff Kirsher 
78527ac6653aSJeff Kirsher 	/* Power Down bit, into the PM register, is cleared
78537ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
78547ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
78557ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
7856ceb69499SGiuseppe CAVALLARO 	 * from another device (e.g. a serial console).
7857ceb69499SGiuseppe CAVALLARO 	 */
7858e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
785929555fa3SThierry Reding 		mutex_lock(&priv->lock);
7860c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
786129555fa3SThierry Reding 		mutex_unlock(&priv->lock);
786289f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
7863623997fbSSrinivas Kandagatla 	} else {
7864db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
7865623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
7866623997fbSSrinivas Kandagatla 		if (priv->mii)
7867623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
7868623997fbSSrinivas Kandagatla 	}
78697ac6653aSJeff Kirsher 
7870efe92571SBartosz Golaszewski 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7871efe92571SBartosz Golaszewski 	    priv->plat->serdes_powerup) {
7872b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
7873b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
7874b9663b7cSVoon Weifeng 
7875b9663b7cSVoon Weifeng 		if (ret < 0)
7876b9663b7cSVoon Weifeng 			return ret;
7877b9663b7cSVoon Weifeng 	}
7878b9663b7cSVoon Weifeng 
787936d18b56SFugang Duan 	rtnl_lock();
788090702dcdSJoakim Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
788190702dcdSJoakim Zhang 		phylink_resume(priv->phylink);
788290702dcdSJoakim Zhang 	} else {
788390702dcdSJoakim Zhang 		phylink_resume(priv->phylink);
788490702dcdSJoakim Zhang 		if (device_may_wakeup(priv->device))
788536d18b56SFugang Duan 			phylink_speed_up(priv->phylink);
788636d18b56SFugang Duan 	}
788790702dcdSJoakim Zhang 	rtnl_unlock();
788836d18b56SFugang Duan 
78898e5debedSWong Vee Khee 	rtnl_lock();
789029555fa3SThierry Reding 	mutex_lock(&priv->lock);
7891f55d84b0SVincent Palatin 
789254139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
789300423969SThierry Reding 
78944ec236c7SFugang Duan 	stmmac_free_tx_skbufs(priv);
7895ba39b344SChristian Marangi 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7896ae79a639SGiuseppe CAVALLARO 
7897fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
7898d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
7899ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
79007ac6653aSJeff Kirsher 
7901ed64639bSWong Vee Khee 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7902ed64639bSWong Vee Khee 
7903c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
7904087a7b94SVincent Whitchurch 	stmmac_enable_all_dma_irq(priv);
79057ac6653aSJeff Kirsher 
7906134cc4ceSThierry Reding 	mutex_unlock(&priv->lock);
79078e5debedSWong Vee Khee 	rtnl_unlock();
7908134cc4ceSThierry Reding 
790931096c3eSLeon Yu 	netif_device_attach(ndev);
791031096c3eSLeon Yu 
79117ac6653aSJeff Kirsher 	return 0;
79127ac6653aSJeff Kirsher }
7913b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
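/* Sketch: platform glue typically wires the exported suspend/resume pair
 * into its dev_pm_ops; a minimal version (assuming the glue needs no extra
 * clock or bsp handling) could look like:
 *
 *	static SIMPLE_DEV_PM_OPS(demo_stmmac_pm_ops,
 *				 stmmac_suspend, stmmac_resume);
 *
 * with .driver.pm = &demo_stmmac_pm_ops in the platform_driver.
 * "demo_stmmac_pm_ops" is a placeholder name.
 */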
7914ba27ec66SGiuseppe CAVALLARO 
79157ac6653aSJeff Kirsher #ifndef MODULE
79167ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
79177ac6653aSJeff Kirsher {
79187ac6653aSJeff Kirsher 	char *opt;
79197ac6653aSJeff Kirsher 
79207ac6653aSJeff Kirsher 	if (!str || !*str)
7921e01b042eSRandy Dunlap 		return 1;
79227ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
7923469d258dSVladimir Oltean 		if (!strncmp(opt, "debug:", 6)) {
7924ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
79257ac6653aSJeff Kirsher 				goto err;
7926469d258dSVladimir Oltean 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7927ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
79287ac6653aSJeff Kirsher 				goto err;
7929469d258dSVladimir Oltean 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7930ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
79317ac6653aSJeff Kirsher 				goto err;
7932469d258dSVladimir Oltean 		} else if (!strncmp(opt, "tc:", 3)) {
7933ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
79347ac6653aSJeff Kirsher 				goto err;
7935469d258dSVladimir Oltean 		} else if (!strncmp(opt, "watchdog:", 9)) {
7936ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
79377ac6653aSJeff Kirsher 				goto err;
7938469d258dSVladimir Oltean 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7939ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
79407ac6653aSJeff Kirsher 				goto err;
7941469d258dSVladimir Oltean 		} else if (!strncmp(opt, "pause:", 6)) {
7942ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
79437ac6653aSJeff Kirsher 				goto err;
7944469d258dSVladimir Oltean 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7945d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
7946d765955dSGiuseppe CAVALLARO 				goto err;
7947469d258dSVladimir Oltean 		} else if (!strncmp(opt, "chain_mode:", 11)) {
79484a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
79494a7d666aSGiuseppe CAVALLARO 				goto err;
79507ac6653aSJeff Kirsher 		}
79517ac6653aSJeff Kirsher 	}
7952e01b042eSRandy Dunlap 	return 1;
79537ac6653aSJeff Kirsher 
79547ac6653aSJeff Kirsher err:
79557ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7956e01b042eSRandy Dunlap 	return 1;
79577ac6653aSJeff Kirsher }
79587ac6653aSJeff Kirsher 
79597ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
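/* Example boot-time usage of the parser above (all values are arbitrary
 * placeholders):
 *
 *	stmmaceth=debug:16,phyaddr:1,buf_sz:4096,tc:64,watchdog:5000,flow_ctrl:3,pause:65535,eee_timer:1000,chain_mode:1
 */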
7960ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
79616fc0d0f2SGiuseppe Cavallaro 
7962466c5ac8SMathieu Olivari static int __init stmmac_init(void)
7963466c5ac8SMathieu Olivari {
7964466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
7965466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
79668d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
7967466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7968474a31e1SAaro Koskinen 	register_netdevice_notifier(&stmmac_notifier);
7969466c5ac8SMathieu Olivari #endif
7970466c5ac8SMathieu Olivari 
7971466c5ac8SMathieu Olivari 	return 0;
7972466c5ac8SMathieu Olivari }
7973466c5ac8SMathieu Olivari 
7974466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
7975466c5ac8SMathieu Olivari {
7976466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
7977474a31e1SAaro Koskinen 	unregister_netdevice_notifier(&stmmac_notifier);
7978466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
7979466c5ac8SMathieu Olivari #endif
7980466c5ac8SMathieu Olivari }
7981466c5ac8SMathieu Olivari 
7982466c5ac8SMathieu Olivari module_init(stmmac_init)
7983466c5ac8SMathieu Olivari module_exit(stmmac_exit)
7984466c5ac8SMathieu Olivari 
79856fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
79866fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
79876fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
7988