1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
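/* Illustrative note (values assumed, not platform-specific): with
 * SMP_CACHE_BYTES == 64, STMMAC_ALIGN(1500) rounds up to 1536 and the extra
 * 16-byte ALIGN leaves it unchanged. TSO_MAX_BUFF_SIZE caps each TSO buffer
 * descriptor at 16 KiB - 1 bytes.
 */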
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
/* By default the driver uses ring mode to manage TX and RX descriptors,
 * but the user can force chain mode instead of ring mode.
 */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case
 * of errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
 * Description: this dynamically sets the MDC clock according to the CSR
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform, the CSR
 *	Clock Range selection cannot be changed at run-time and is fixed
 *	(as reported in the driver documentation). Otherwise the driver
 *	tries to set the MDC clock dynamically according to the actual
 *	clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
	/* The platform-provided default clk_csr is assumed valid in all
	 * cases except the ones handled below. For clock rates above the
	 * IEEE 802.3 specified frequency we cannot estimate the proper
	 * divider, since the frequency of clk_csr_i is unknown, so the
	 * default divider is left unchanged.
	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
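/* For example (illustrative rate): a 75 MHz CSR clock falls in the
 * 60-100 MHz range above, so priv->clk_csr is set to STMMAC_CSR_60_100M
 * unless one of the SoC-specific blocks (sun8i, xgmac) overrides it.
 */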
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
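/* Worked example (illustrative ring state): with dma_tx_size == 512,
 * cur_tx == 10 and dirty_tx == 5, the free region wraps around the ring and
 * avail == 512 - 10 + 5 - 1 == 506. The -1 keeps one descriptor in reserve
 * so a completely full ring is not confused with an empty one.
 */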
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
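/* Worked example (illustrative ring state): with dma_rx_size == 512,
 * cur_rx == 4 and dirty_rx == 500, the dirty region wraps around the ring
 * and dirty == 512 - 500 + 4 == 16 descriptors are waiting to be refilled.
 */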
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function checks that all TX queues are idle and, if so,
 * enters LPI mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
 * Description: this function exits LPI state and disables EEE. It is
 * called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
 *  if there is no data transfer and we are not already in LPI state,
 *  then the MAC transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
	/* When using the PCS we cannot access the PHY registers at this
	 * stage, so we do not support extra features like EEE.
	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing(TX)
614  *  and incoming(RX) packets time stamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping.
			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1 any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
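/* Illustration of how the flags above combine: for the common case of
 * HWTSTAMP_FILTER_PTP_V2_EVENT with TX timestamping on (and adv_ts set),
 * systime_flags becomes STMMAC_HWTS_ACTIVE | PTP_TCR_TSVER2ENA |
 * PTP_TCR_SNAPTYPSEL_1 | PTP_TCR_TSIPV4ENA | PTP_TCR_TSIPV6ENA |
 * PTP_TCR_TSIPENA, plus PTP_TCR_TSEVNTENA on cores older than DWMAC_CORE_4_10.
 */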
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
 *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * It is rerun after resuming from suspend, in which case the timestamping
 * flags set by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
	/* calculate the default addend value:
	 * the formula is:
	 * addend = (2^32)/freq_div_ratio;
	 * where freq_div_ratio = 1e9ns/sec_inc
	 */
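	/* Worked example (illustrative values, not from a specific platform):
	 * with sec_inc == 20 ns and clk_ptp_rate == 100 MHz, temp is
	 * 1e9 / 20 == 50,000,000 and the addend becomes
	 * (50,000,000 << 32) / 100,000,000 == 2^31 == 0x80000000.
	 */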
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
968 	} else {
969 		*lo_state = FPE_STATE_OFF;
970 		*lp_state = FPE_STATE_OFF;
971 	}
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	priv->eee_active = false;
981 	priv->tx_lpi_enabled = false;
982 	priv->eee_enabled = stmmac_eee_init(priv);
983 	stmmac_set_eee_pls(priv, priv->hw, false);
984 
985 	if (priv->dma_cap.fpesel)
986 		stmmac_fpe_link_state_handle(priv, false);
987 }
988 
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 			       struct phy_device *phy,
991 			       unsigned int mode, phy_interface_t interface,
992 			       int speed, int duplex,
993 			       bool tx_pause, bool rx_pause)
994 {
995 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 	u32 old_ctrl, ctrl;
997 
998 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 	    priv->plat->serdes_powerup)
1000 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001 
1002 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004 
1005 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 		switch (speed) {
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_5000:
1011 			ctrl |= priv->hw->link.xgmii.speed5000;
1012 			break;
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.xgmii.speed2500;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 		switch (speed) {
1021 		case SPEED_100000:
1022 			ctrl |= priv->hw->link.xlgmii.speed100000;
1023 			break;
1024 		case SPEED_50000:
1025 			ctrl |= priv->hw->link.xlgmii.speed50000;
1026 			break;
1027 		case SPEED_40000:
1028 			ctrl |= priv->hw->link.xlgmii.speed40000;
1029 			break;
1030 		case SPEED_25000:
1031 			ctrl |= priv->hw->link.xlgmii.speed25000;
1032 			break;
1033 		case SPEED_10000:
1034 			ctrl |= priv->hw->link.xgmii.speed10000;
1035 			break;
1036 		case SPEED_2500:
1037 			ctrl |= priv->hw->link.speed2500;
1038 			break;
1039 		case SPEED_1000:
1040 			ctrl |= priv->hw->link.speed1000;
1041 			break;
1042 		default:
1043 			return;
1044 		}
1045 	} else {
1046 		switch (speed) {
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		case SPEED_100:
1054 			ctrl |= priv->hw->link.speed100;
1055 			break;
1056 		case SPEED_10:
1057 			ctrl |= priv->hw->link.speed10;
1058 			break;
1059 		default:
1060 			return;
1061 		}
1062 	}
1063 
1064 	priv->speed = speed;
1065 
1066 	if (priv->plat->fix_mac_speed)
1067 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068 
1069 	if (!duplex)
1070 		ctrl &= ~priv->hw->link.duplex;
1071 	else
1072 		ctrl |= priv->hw->link.duplex;
1073 
1074 	/* Flow Control operation */
1075 	if (rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_AUTO;
1077 	else if (rx_pause && !tx_pause)
1078 		priv->flow_ctrl = FLOW_RX;
1079 	else if (!rx_pause && tx_pause)
1080 		priv->flow_ctrl = FLOW_TX;
1081 	else
1082 		priv->flow_ctrl = FLOW_OFF;
1083 
1084 	stmmac_mac_flow_ctrl(priv, duplex);
1085 
1086 	if (ctrl != old_ctrl)
1087 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088 
1089 	stmmac_mac_set(priv, priv->ioaddr, true);
1090 	if (phy && priv->dma_cap.eee) {
1091 		priv->eee_active =
1092 			phy_init_eee(phy, !(priv->plat->flags &
1093 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 		priv->eee_enabled = stmmac_eee_init(priv);
1095 		priv->tx_lpi_enabled = priv->eee_enabled;
1096 		stmmac_set_eee_pls(priv, priv->hw, true);
1097 	}
1098 
1099 	if (priv->dma_cap.fpesel)
1100 		stmmac_fpe_link_state_handle(priv, true);
1101 
1102 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 		stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105 
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 	.mac_select_pcs = stmmac_mac_select_pcs,
1108 	.mac_config = stmmac_mac_config,
1109 	.mac_link_down = stmmac_mac_link_down,
1110 	.mac_link_up = stmmac_mac_link_up,
1111 };
1112 
1113 /**
1114  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1115  * @priv: driver private structure
 * Description: this verifies whether the HW supports the PCS, the
 * Physical Coding Sublayer interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
1119  */
1120 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1121 {
1122 	int interface = priv->plat->mac_interface;
1123 
1124 	if (priv->dma_cap.pcs) {
1125 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1129 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_RGMII;
1131 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1132 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1133 			priv->hw->pcs = STMMAC_PCS_SGMII;
1134 		}
1135 	}
1136 }
1137 
1138 /**
1139  * stmmac_init_phy - PHY initialization
1140  * @dev: net device structure
1141  * Description: it initializes the driver's PHY state, and attaches the PHY
1142  * to the mac driver.
1143  *  Return value:
1144  *  0 on success
1145  */
1146 static int stmmac_init_phy(struct net_device *dev)
1147 {
1148 	struct stmmac_priv *priv = netdev_priv(dev);
1149 	struct fwnode_handle *phy_fwnode;
1150 	struct fwnode_handle *fwnode;
1151 	int ret;
1152 
1153 	if (!phylink_expects_phy(priv->phylink))
1154 		return 0;
1155 
1156 	fwnode = priv->plat->port_node;
1157 	if (!fwnode)
1158 		fwnode = dev_fwnode(priv->device);
1159 
1160 	if (fwnode)
1161 		phy_fwnode = fwnode_get_phy_node(fwnode);
1162 	else
1163 		phy_fwnode = NULL;
1164 
	/* Some DT bindings do not set up the PHY handle. Let's try to
	 * parse it manually.
	 */
1168 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1169 		int addr = priv->plat->phy_addr;
1170 		struct phy_device *phydev;
1171 
1172 		if (addr < 0) {
1173 			netdev_err(priv->dev, "no phy found\n");
1174 			return -ENODEV;
1175 		}
1176 
1177 		phydev = mdiobus_get_phy(priv->mii, addr);
1178 		if (!phydev) {
1179 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1180 			return -ENODEV;
1181 		}
1182 
1183 		ret = phylink_connect_phy(priv->phylink, phydev);
1184 	} else {
1185 		fwnode_handle_put(phy_fwnode);
1186 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 	}
1188 
1189 	if (!priv->plat->pmt) {
1190 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191 
1192 		phylink_ethtool_get_wol(priv->phylink, &wol);
1193 		device_set_wakeup_capable(priv->device, !!wol.supported);
1194 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 static int stmmac_phy_setup(struct stmmac_priv *priv)
1201 {
1202 	struct stmmac_mdio_bus_data *mdio_bus_data;
1203 	int mode = priv->plat->phy_interface;
1204 	struct fwnode_handle *fwnode;
1205 	struct phylink *phylink;
1206 	int max_speed;
1207 
1208 	priv->phylink_config.dev = &priv->dev->dev;
1209 	priv->phylink_config.type = PHYLINK_NETDEV;
1210 	priv->phylink_config.mac_managed_pm = true;
1211 
1212 	mdio_bus_data = priv->plat->mdio_bus_data;
1213 	if (mdio_bus_data)
1214 		priv->phylink_config.ovr_an_inband =
1215 			mdio_bus_data->xpcs_an_inband;
1216 
1217 	/* Set the platform/firmware specified interface mode. Note, phylink
1218 	 * deals with the PHY interface mode, not the MAC interface mode.
1219 	 */
1220 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1221 
1222 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1223 	if (priv->hw->xpcs)
1224 		xpcs_get_interfaces(priv->hw->xpcs,
1225 				    priv->phylink_config.supported_interfaces);
1226 
1227 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1228 						MAC_10FD | MAC_100FD |
1229 						MAC_1000FD;
1230 
1231 	/* Half-Duplex can only work with single queue */
1232 	if (priv->plat->tx_queues_to_use <= 1)
1233 		priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
1234 							 MAC_1000HD;
1235 
1236 	/* Get the MAC specific capabilities */
1237 	stmmac_mac_phylink_get_caps(priv);
1238 
1239 	max_speed = priv->plat->max_speed;
1240 	if (max_speed)
1241 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1242 
1243 	fwnode = priv->plat->port_node;
1244 	if (!fwnode)
1245 		fwnode = dev_fwnode(priv->device);
1246 
1247 	phylink = phylink_create(&priv->phylink_config, fwnode,
1248 				 mode, &stmmac_phylink_mac_ops);
1249 	if (IS_ERR(phylink))
1250 		return PTR_ERR(phylink);
1251 
1252 	priv->phylink = phylink;
1253 	return 0;
1254 }
1255 
1256 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1257 				    struct stmmac_dma_conf *dma_conf)
1258 {
1259 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1260 	unsigned int desc_size;
1261 	void *head_rx;
1262 	u32 queue;
1263 
1264 	/* Display RX rings */
1265 	for (queue = 0; queue < rx_cnt; queue++) {
1266 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1267 
1268 		pr_info("\tRX Queue %u rings\n", queue);
1269 
1270 		if (priv->extend_desc) {
1271 			head_rx = (void *)rx_q->dma_erx;
1272 			desc_size = sizeof(struct dma_extended_desc);
1273 		} else {
1274 			head_rx = (void *)rx_q->dma_rx;
1275 			desc_size = sizeof(struct dma_desc);
1276 		}
1277 
1278 		/* Display RX ring */
1279 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1280 				    rx_q->dma_rx_phy, desc_size);
1281 	}
1282 }
1283 
1284 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1285 				    struct stmmac_dma_conf *dma_conf)
1286 {
1287 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1288 	unsigned int desc_size;
1289 	void *head_tx;
1290 	u32 queue;
1291 
1292 	/* Display TX rings */
1293 	for (queue = 0; queue < tx_cnt; queue++) {
1294 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1295 
1296 		pr_info("\tTX Queue %d rings\n", queue);
1297 
1298 		if (priv->extend_desc) {
1299 			head_tx = (void *)tx_q->dma_etx;
1300 			desc_size = sizeof(struct dma_extended_desc);
1301 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1302 			head_tx = (void *)tx_q->dma_entx;
1303 			desc_size = sizeof(struct dma_edesc);
1304 		} else {
1305 			head_tx = (void *)tx_q->dma_tx;
1306 			desc_size = sizeof(struct dma_desc);
1307 		}
1308 
1309 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1310 				    tx_q->dma_tx_phy, desc_size);
1311 	}
1312 }
1313 
1314 static void stmmac_display_rings(struct stmmac_priv *priv,
1315 				 struct stmmac_dma_conf *dma_conf)
1316 {
1317 	/* Display RX ring */
1318 	stmmac_display_rx_rings(priv, dma_conf);
1319 
1320 	/* Display TX ring */
1321 	stmmac_display_tx_rings(priv, dma_conf);
1322 }
1323 
1324 static int stmmac_set_bfsize(int mtu, int bufsize)
1325 {
1326 	int ret = bufsize;
1327 
1328 	if (mtu >= BUF_SIZE_8KiB)
1329 		ret = BUF_SIZE_16KiB;
1330 	else if (mtu >= BUF_SIZE_4KiB)
1331 		ret = BUF_SIZE_8KiB;
1332 	else if (mtu >= BUF_SIZE_2KiB)
1333 		ret = BUF_SIZE_4KiB;
1334 	else if (mtu > DEFAULT_BUFSIZE)
1335 		ret = BUF_SIZE_2KiB;
1336 	else
1337 		ret = DEFAULT_BUFSIZE;
1338 
1339 	return ret;
1340 }
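/* For example: an MTU of 1500 keeps the 1536-byte default buffers, an MTU of
 * 3000 selects BUF_SIZE_4KiB, and a 9000-byte jumbo MTU selects
 * BUF_SIZE_16KiB.
 */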
1341 
1342 /**
1343  * stmmac_clear_rx_descriptors - clear RX descriptors
1344  * @priv: driver private structure
1345  * @dma_conf: structure to take the dma data
1346  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
1349  */
1350 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1351 					struct stmmac_dma_conf *dma_conf,
1352 					u32 queue)
1353 {
1354 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1355 	int i;
1356 
1357 	/* Clear the RX descriptors */
1358 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1359 		if (priv->extend_desc)
1360 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1361 					priv->use_riwt, priv->mode,
1362 					(i == dma_conf->dma_rx_size - 1),
1363 					dma_conf->dma_buf_sz);
1364 		else
1365 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1366 					priv->use_riwt, priv->mode,
1367 					(i == dma_conf->dma_rx_size - 1),
1368 					dma_conf->dma_buf_sz);
1369 }
1370 
1371 /**
1372  * stmmac_clear_tx_descriptors - clear tx descriptors
1373  * @priv: driver private structure
1374  * @dma_conf: structure to take the dma data
1375  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
1378  */
1379 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1380 					struct stmmac_dma_conf *dma_conf,
1381 					u32 queue)
1382 {
1383 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1384 	int i;
1385 
1386 	/* Clear the TX descriptors */
1387 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1388 		int last = (i == (dma_conf->dma_tx_size - 1));
1389 		struct dma_desc *p;
1390 
1391 		if (priv->extend_desc)
1392 			p = &tx_q->dma_etx[i].basic;
1393 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1394 			p = &tx_q->dma_entx[i].basic;
1395 		else
1396 			p = &tx_q->dma_tx[i];
1397 
1398 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1399 	}
1400 }
1401 
1402 /**
1403  * stmmac_clear_descriptors - clear descriptors
1404  * @priv: driver private structure
1405  * @dma_conf: structure to take the dma data
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
1408  */
1409 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1410 				     struct stmmac_dma_conf *dma_conf)
1411 {
1412 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1413 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1414 	u32 queue;
1415 
1416 	/* Clear the RX descriptors */
1417 	for (queue = 0; queue < rx_queue_cnt; queue++)
1418 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1419 
1420 	/* Clear the TX descriptors */
1421 	for (queue = 0; queue < tx_queue_cnt; queue++)
1422 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1423 }
1424 
1425 /**
1426  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1427  * @priv: driver private structure
1428  * @dma_conf: structure to take the dma data
1429  * @p: descriptor pointer
1430  * @i: descriptor index
1431  * @flags: gfp flag
1432  * @queue: RX queue index
1433  * Description: this function is called to allocate a receive buffer, perform
1434  * the DMA mapping and init the descriptor.
1435  */
1436 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1437 				  struct stmmac_dma_conf *dma_conf,
1438 				  struct dma_desc *p,
1439 				  int i, gfp_t flags, u32 queue)
1440 {
1441 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1442 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1443 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1444 
1445 	if (priv->dma_cap.host_dma_width <= 32)
1446 		gfp |= GFP_DMA32;
1447 
1448 	if (!buf->page) {
1449 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1450 		if (!buf->page)
1451 			return -ENOMEM;
1452 		buf->page_offset = stmmac_rx_offset(priv);
1453 	}
1454 
1455 	if (priv->sph && !buf->sec_page) {
1456 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1457 		if (!buf->sec_page)
1458 			return -ENOMEM;
1459 
1460 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1461 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1462 	} else {
1463 		buf->sec_page = NULL;
1464 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1465 	}
1466 
1467 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1468 
1469 	stmmac_set_desc_addr(priv, p, buf->addr);
1470 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1471 		stmmac_init_desc3(priv, p);
1472 
1473 	return 0;
1474 }
1475 
1476 /**
1477  * stmmac_free_rx_buffer - free RX dma buffers
1478  * @priv: private structure
1479  * @rx_q: RX queue
1480  * @i: buffer index.
1481  */
1482 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1483 				  struct stmmac_rx_queue *rx_q,
1484 				  int i)
1485 {
1486 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1487 
1488 	if (buf->page)
1489 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1490 	buf->page = NULL;
1491 
1492 	if (buf->sec_page)
1493 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1494 	buf->sec_page = NULL;
1495 }
1496 
1497 /**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
1502  * @i: buffer index.
1503  */
1504 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1505 				  struct stmmac_dma_conf *dma_conf,
1506 				  u32 queue, int i)
1507 {
1508 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1509 
1510 	if (tx_q->tx_skbuff_dma[i].buf &&
1511 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1512 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1513 			dma_unmap_page(priv->device,
1514 				       tx_q->tx_skbuff_dma[i].buf,
1515 				       tx_q->tx_skbuff_dma[i].len,
1516 				       DMA_TO_DEVICE);
1517 		else
1518 			dma_unmap_single(priv->device,
1519 					 tx_q->tx_skbuff_dma[i].buf,
1520 					 tx_q->tx_skbuff_dma[i].len,
1521 					 DMA_TO_DEVICE);
1522 	}
1523 
1524 	if (tx_q->xdpf[i] &&
1525 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1526 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1527 		xdp_return_frame(tx_q->xdpf[i]);
1528 		tx_q->xdpf[i] = NULL;
1529 	}
1530 
1531 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1532 		tx_q->xsk_frames_done++;
1533 
1534 	if (tx_q->tx_skbuff[i] &&
1535 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1536 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1537 		tx_q->tx_skbuff[i] = NULL;
1538 	}
1539 
1540 	tx_q->tx_skbuff_dma[i].buf = 0;
1541 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1542 }
1543 
1544 /**
1545  * dma_free_rx_skbufs - free RX dma buffers
1546  * @priv: private structure
1547  * @dma_conf: structure to take the dma data
1548  * @queue: RX queue index
1549  */
1550 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1551 			       struct stmmac_dma_conf *dma_conf,
1552 			       u32 queue)
1553 {
1554 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1555 	int i;
1556 
1557 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1558 		stmmac_free_rx_buffer(priv, rx_q, i);
1559 }
1560 
1561 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1562 				   struct stmmac_dma_conf *dma_conf,
1563 				   u32 queue, gfp_t flags)
1564 {
1565 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1566 	int i;
1567 
1568 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1569 		struct dma_desc *p;
1570 		int ret;
1571 
1572 		if (priv->extend_desc)
1573 			p = &((rx_q->dma_erx + i)->basic);
1574 		else
1575 			p = rx_q->dma_rx + i;
1576 
1577 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1578 					     queue);
1579 		if (ret)
1580 			return ret;
1581 
1582 		rx_q->buf_alloc_num++;
1583 	}
1584 
1585 	return 0;
1586 }
1587 
1588 /**
1589  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1590  * @priv: private structure
1591  * @dma_conf: structure to take the dma data
1592  * @queue: RX queue index
1593  */
1594 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1595 				struct stmmac_dma_conf *dma_conf,
1596 				u32 queue)
1597 {
1598 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1599 	int i;
1600 
1601 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1602 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1603 
1604 		if (!buf->xdp)
1605 			continue;
1606 
1607 		xsk_buff_free(buf->xdp);
1608 		buf->xdp = NULL;
1609 	}
1610 }
1611 
1612 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1613 				      struct stmmac_dma_conf *dma_conf,
1614 				      u32 queue)
1615 {
1616 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1617 	int i;
1618 
	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
	 * use this macro to make sure there are no size violations.
	 */
1623 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1624 
1625 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1626 		struct stmmac_rx_buffer *buf;
1627 		dma_addr_t dma_addr;
1628 		struct dma_desc *p;
1629 
1630 		if (priv->extend_desc)
1631 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1632 		else
1633 			p = rx_q->dma_rx + i;
1634 
1635 		buf = &rx_q->buf_pool[i];
1636 
1637 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1638 		if (!buf->xdp)
1639 			return -ENOMEM;
1640 
1641 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1642 		stmmac_set_desc_addr(priv, p, dma_addr);
1643 		rx_q->buf_alloc_num++;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1650 {
1651 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1652 		return NULL;
1653 
1654 	return xsk_get_pool_from_qid(priv->dev, queue);
1655 }
1656 
1657 /**
1658  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1659  * @priv: driver private structure
1660  * @dma_conf: structure to take the dma data
1661  * @queue: RX queue index
1662  * @flags: gfp flag.
1663  * Description: this function initializes the DMA RX descriptors
1664  * and allocates the socket buffers. It supports the chained and ring
1665  * modes.
1666  */
1667 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1668 				    struct stmmac_dma_conf *dma_conf,
1669 				    u32 queue, gfp_t flags)
1670 {
1671 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1672 	int ret;
1673 
1674 	netif_dbg(priv, probe, priv->dev,
1675 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1676 		  (u32)rx_q->dma_rx_phy);
1677 
1678 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1679 
1680 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1681 
1682 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1683 
1684 	if (rx_q->xsk_pool) {
1685 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1686 						   MEM_TYPE_XSK_BUFF_POOL,
1687 						   NULL));
1688 		netdev_info(priv->dev,
1689 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1690 			    rx_q->queue_index);
1691 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1692 	} else {
1693 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1694 						   MEM_TYPE_PAGE_POOL,
1695 						   rx_q->page_pool));
1696 		netdev_info(priv->dev,
1697 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1698 			    rx_q->queue_index);
1699 	}
1700 
1701 	if (rx_q->xsk_pool) {
1702 		/* RX XDP ZC buffer pool may not be populated, e.g.
1703 		 * xdpsock TX-only.
1704 		 */
1705 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1706 	} else {
1707 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1708 		if (ret < 0)
1709 			return -ENOMEM;
1710 	}
1711 
1712 	/* Setup the chained descriptor addresses */
1713 	if (priv->mode == STMMAC_CHAIN_MODE) {
1714 		if (priv->extend_desc)
1715 			stmmac_mode_init(priv, rx_q->dma_erx,
1716 					 rx_q->dma_rx_phy,
1717 					 dma_conf->dma_rx_size, 1);
1718 		else
1719 			stmmac_mode_init(priv, rx_q->dma_rx,
1720 					 rx_q->dma_rx_phy,
1721 					 dma_conf->dma_rx_size, 0);
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 static int init_dma_rx_desc_rings(struct net_device *dev,
1728 				  struct stmmac_dma_conf *dma_conf,
1729 				  gfp_t flags)
1730 {
1731 	struct stmmac_priv *priv = netdev_priv(dev);
1732 	u32 rx_count = priv->plat->rx_queues_to_use;
1733 	int queue;
1734 	int ret;
1735 
1736 	/* RX INITIALIZATION */
1737 	netif_dbg(priv, probe, priv->dev,
1738 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1739 
1740 	for (queue = 0; queue < rx_count; queue++) {
1741 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1742 		if (ret)
1743 			goto err_init_rx_buffers;
1744 	}
1745 
1746 	return 0;
1747 
1748 err_init_rx_buffers:
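	/* Unwind: free the buffers of the queue that failed and of every
	 * queue initialized before it.
	 */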
1749 	while (queue >= 0) {
1750 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1751 
1752 		if (rx_q->xsk_pool)
1753 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1754 		else
1755 			dma_free_rx_skbufs(priv, dma_conf, queue);
1756 
1757 		rx_q->buf_alloc_num = 0;
1758 		rx_q->xsk_pool = NULL;
1759 
1760 		queue--;
1761 	}
1762 
1763 	return ret;
1764 }
1765 
1766 /**
1767  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1768  * @priv: driver private structure
1769  * @dma_conf: structure to take the dma data
1770  * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors and
 * clears the per-descriptor TX bookkeeping. It supports the chained and
 * ring modes.
1774  */
1775 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1776 				    struct stmmac_dma_conf *dma_conf,
1777 				    u32 queue)
1778 {
1779 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1780 	int i;
1781 
1782 	netif_dbg(priv, probe, priv->dev,
1783 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1784 		  (u32)tx_q->dma_tx_phy);
1785 
1786 	/* Setup the chained descriptor addresses */
1787 	if (priv->mode == STMMAC_CHAIN_MODE) {
1788 		if (priv->extend_desc)
1789 			stmmac_mode_init(priv, tx_q->dma_etx,
1790 					 tx_q->dma_tx_phy,
1791 					 dma_conf->dma_tx_size, 1);
1792 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1793 			stmmac_mode_init(priv, tx_q->dma_tx,
1794 					 tx_q->dma_tx_phy,
1795 					 dma_conf->dma_tx_size, 0);
1796 	}
1797 
1798 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1799 
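	/* Clear every TX descriptor and reset its buffer bookkeeping. */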
1800 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1801 		struct dma_desc *p;
1802 
1803 		if (priv->extend_desc)
1804 			p = &((tx_q->dma_etx + i)->basic);
1805 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1806 			p = &((tx_q->dma_entx + i)->basic);
1807 		else
1808 			p = tx_q->dma_tx + i;
1809 
1810 		stmmac_clear_desc(priv, p);
1811 
1812 		tx_q->tx_skbuff_dma[i].buf = 0;
1813 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1814 		tx_q->tx_skbuff_dma[i].len = 0;
1815 		tx_q->tx_skbuff_dma[i].last_segment = false;
1816 		tx_q->tx_skbuff[i] = NULL;
1817 	}
1818 
1819 	return 0;
1820 }
1821 
1822 static int init_dma_tx_desc_rings(struct net_device *dev,
1823 				  struct stmmac_dma_conf *dma_conf)
1824 {
1825 	struct stmmac_priv *priv = netdev_priv(dev);
1826 	u32 tx_queue_cnt;
1827 	u32 queue;
1828 
1829 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1830 
1831 	for (queue = 0; queue < tx_queue_cnt; queue++)
1832 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1833 
1834 	return 0;
1835 }
1836 
1837 /**
1838  * init_dma_desc_rings - init the RX/TX descriptor rings
1839  * @dev: net device structure
1840  * @dma_conf: structure to take the dma data
1841  * @flags: gfp flag.
1842  * Description: this function initializes the DMA RX/TX descriptors
1843  * and allocates the socket buffers. It supports the chained and ring
1844  * modes.
1845  */
1846 static int init_dma_desc_rings(struct net_device *dev,
1847 			       struct stmmac_dma_conf *dma_conf,
1848 			       gfp_t flags)
1849 {
1850 	struct stmmac_priv *priv = netdev_priv(dev);
1851 	int ret;
1852 
1853 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1854 	if (ret)
1855 		return ret;
1856 
1857 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1858 
1859 	stmmac_clear_descriptors(priv, dma_conf);
1860 
1861 	if (netif_msg_hw(priv))
1862 		stmmac_display_rings(priv, dma_conf);
1863 
1864 	return ret;
1865 }
1866 
1867 /**
1868  * dma_free_tx_skbufs - free TX dma buffers
1869  * @priv: private structure
1870  * @dma_conf: structure to take the dma data
1871  * @queue: TX queue index
1872  */
1873 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1874 			       struct stmmac_dma_conf *dma_conf,
1875 			       u32 queue)
1876 {
1877 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1878 	int i;
1879 
1880 	tx_q->xsk_frames_done = 0;
1881 
1882 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1883 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1884 
1885 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1886 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1887 		tx_q->xsk_frames_done = 0;
1888 		tx_q->xsk_pool = NULL;
1889 	}
1890 }
1891 
1892 /**
1893  * stmmac_free_tx_skbufs - free TX skb buffers
1894  * @priv: private structure
1895  */
1896 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1897 {
1898 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1899 	u32 queue;
1900 
1901 	for (queue = 0; queue < tx_queue_cnt; queue++)
1902 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1903 }
1904 
1905 /**
1906  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1907  * @priv: private structure
1908  * @dma_conf: structure to take the dma data
1909  * @queue: RX queue index
1910  */
1911 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1912 					 struct stmmac_dma_conf *dma_conf,
1913 					 u32 queue)
1914 {
1915 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1916 
1917 	/* Release the DMA RX socket buffers */
1918 	if (rx_q->xsk_pool)
1919 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1920 	else
1921 		dma_free_rx_skbufs(priv, dma_conf, queue);
1922 
1923 	rx_q->buf_alloc_num = 0;
1924 	rx_q->xsk_pool = NULL;
1925 
1926 	/* Free DMA regions of consistent memory previously allocated */
1927 	if (!priv->extend_desc)
1928 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1929 				  sizeof(struct dma_desc),
1930 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1931 	else
1932 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1933 				  sizeof(struct dma_extended_desc),
1934 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1935 
1936 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1937 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1938 
1939 	kfree(rx_q->buf_pool);
1940 	if (rx_q->page_pool)
1941 		page_pool_destroy(rx_q->page_pool);
1942 }
1943 
1944 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1945 				       struct stmmac_dma_conf *dma_conf)
1946 {
1947 	u32 rx_count = priv->plat->rx_queues_to_use;
1948 	u32 queue;
1949 
1950 	/* Free RX queue resources */
1951 	for (queue = 0; queue < rx_count; queue++)
1952 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1953 }
1954 
1955 /**
1956  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1957  * @priv: private structure
1958  * @dma_conf: structure to take the dma data
1959  * @queue: TX queue index
1960  */
1961 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1962 					 struct stmmac_dma_conf *dma_conf,
1963 					 u32 queue)
1964 {
1965 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1966 	size_t size;
1967 	void *addr;
1968 
1969 	/* Release the DMA TX socket buffers */
1970 	dma_free_tx_skbufs(priv, dma_conf, queue);
1971 
1972 	if (priv->extend_desc) {
1973 		size = sizeof(struct dma_extended_desc);
1974 		addr = tx_q->dma_etx;
1975 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1976 		size = sizeof(struct dma_edesc);
1977 		addr = tx_q->dma_entx;
1978 	} else {
1979 		size = sizeof(struct dma_desc);
1980 		addr = tx_q->dma_tx;
1981 	}
1982 
1983 	size *= dma_conf->dma_tx_size;
1984 
1985 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1986 
1987 	kfree(tx_q->tx_skbuff_dma);
1988 	kfree(tx_q->tx_skbuff);
1989 }
1990 
1991 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1992 				       struct stmmac_dma_conf *dma_conf)
1993 {
1994 	u32 tx_count = priv->plat->tx_queues_to_use;
1995 	u32 queue;
1996 
1997 	/* Free TX queue resources */
1998 	for (queue = 0; queue < tx_count; queue++)
1999 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2000 }
2001 
2002 /**
2003  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2004  * @priv: private structure
2005  * @dma_conf: structure to take the dma data
2006  * @queue: RX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the RX resources for the given queue. It
 * pre-allocates the RX buffers (page pool) in order to allow the
 * zero-copy mechanism.
2011  */
2012 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2013 					 struct stmmac_dma_conf *dma_conf,
2014 					 u32 queue)
2015 {
2016 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2017 	struct stmmac_channel *ch = &priv->channel[queue];
2018 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2019 	struct page_pool_params pp_params = { 0 };
2020 	unsigned int num_pages;
2021 	unsigned int napi_id;
2022 	int ret;
2023 
2024 	rx_q->queue_index = queue;
2025 	rx_q->priv_data = priv;
2026 
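	/* One page pool per RX queue. The pool DMA-maps its pages and, when
	 * an XDP program is attached, maps them bidirectionally so XDP_TX
	 * can reuse them.
	 */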
2027 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2028 	pp_params.pool_size = dma_conf->dma_rx_size;
2029 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2030 	pp_params.order = ilog2(num_pages);
2031 	pp_params.nid = dev_to_node(priv->device);
2032 	pp_params.dev = priv->device;
2033 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2034 	pp_params.offset = stmmac_rx_offset(priv);
2035 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2036 
2037 	rx_q->page_pool = page_pool_create(&pp_params);
2038 	if (IS_ERR(rx_q->page_pool)) {
2039 		ret = PTR_ERR(rx_q->page_pool);
2040 		rx_q->page_pool = NULL;
2041 		return ret;
2042 	}
2043 
2044 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2045 				 sizeof(*rx_q->buf_pool),
2046 				 GFP_KERNEL);
2047 	if (!rx_q->buf_pool)
2048 		return -ENOMEM;
2049 
2050 	if (priv->extend_desc) {
2051 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2052 						   dma_conf->dma_rx_size *
2053 						   sizeof(struct dma_extended_desc),
2054 						   &rx_q->dma_rx_phy,
2055 						   GFP_KERNEL);
2056 		if (!rx_q->dma_erx)
2057 			return -ENOMEM;
2058 
2059 	} else {
2060 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2061 						  dma_conf->dma_rx_size *
2062 						  sizeof(struct dma_desc),
2063 						  &rx_q->dma_rx_phy,
2064 						  GFP_KERNEL);
2065 		if (!rx_q->dma_rx)
2066 			return -ENOMEM;
2067 	}
2068 
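	/* AF_XDP zero-copy queues are serviced by the combined rx/tx NAPI,
	 * all other queues by the dedicated RX NAPI.
	 */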
2069 	if (stmmac_xdp_is_enabled(priv) &&
2070 	    test_bit(queue, priv->af_xdp_zc_qps))
2071 		napi_id = ch->rxtx_napi.napi_id;
2072 	else
2073 		napi_id = ch->rx_napi.napi_id;
2074 
2075 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2076 			       rx_q->queue_index,
2077 			       napi_id);
2078 	if (ret) {
2079 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2080 		return -EINVAL;
2081 	}
2082 
2083 	return 0;
2084 }
2085 
2086 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2087 				       struct stmmac_dma_conf *dma_conf)
2088 {
2089 	u32 rx_count = priv->plat->rx_queues_to_use;
2090 	u32 queue;
2091 	int ret;
2092 
2093 	/* RX queues buffers and DMA */
2094 	for (queue = 0; queue < rx_count; queue++) {
2095 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2096 		if (ret)
2097 			goto err_dma;
2098 	}
2099 
2100 	return 0;
2101 
2102 err_dma:
2103 	free_dma_rx_desc_resources(priv, dma_conf);
2104 
2105 	return ret;
2106 }
2107 
2108 /**
2109  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2110  * @priv: private structure
2111  * @dma_conf: structure to take the dma data
2112  * @queue: TX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the TX descriptor ring and the related
 * bookkeeping resources for the given queue.
2117  */
2118 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2119 					 struct stmmac_dma_conf *dma_conf,
2120 					 u32 queue)
2121 {
2122 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2123 	size_t size;
2124 	void *addr;
2125 
2126 	tx_q->queue_index = queue;
2127 	tx_q->priv_data = priv;
2128 
2129 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2130 				      sizeof(*tx_q->tx_skbuff_dma),
2131 				      GFP_KERNEL);
2132 	if (!tx_q->tx_skbuff_dma)
2133 		return -ENOMEM;
2134 
2135 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2136 				  sizeof(struct sk_buff *),
2137 				  GFP_KERNEL);
2138 	if (!tx_q->tx_skbuff)
2139 		return -ENOMEM;
2140 
2141 	if (priv->extend_desc)
2142 		size = sizeof(struct dma_extended_desc);
2143 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2144 		size = sizeof(struct dma_edesc);
2145 	else
2146 		size = sizeof(struct dma_desc);
2147 
2148 	size *= dma_conf->dma_tx_size;
2149 
2150 	addr = dma_alloc_coherent(priv->device, size,
2151 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2152 	if (!addr)
2153 		return -ENOMEM;
2154 
2155 	if (priv->extend_desc)
2156 		tx_q->dma_etx = addr;
2157 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2158 		tx_q->dma_entx = addr;
2159 	else
2160 		tx_q->dma_tx = addr;
2161 
2162 	return 0;
2163 }
2164 
2165 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2166 				       struct stmmac_dma_conf *dma_conf)
2167 {
2168 	u32 tx_count = priv->plat->tx_queues_to_use;
2169 	u32 queue;
2170 	int ret;
2171 
2172 	/* TX queues buffers and DMA */
2173 	for (queue = 0; queue < tx_count; queue++) {
2174 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2175 		if (ret)
2176 			goto err_dma;
2177 	}
2178 
2179 	return 0;
2180 
2181 err_dma:
2182 	free_dma_tx_desc_resources(priv, dma_conf);
2183 	return ret;
2184 }
2185 
2186 /**
2187  * alloc_dma_desc_resources - alloc TX/RX resources.
2188  * @priv: private structure
2189  * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX buffers in order to
 * allow the zero-copy mechanism.
2194  */
2195 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2196 				    struct stmmac_dma_conf *dma_conf)
2197 {
2198 	/* RX Allocation */
2199 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2200 
2201 	if (ret)
2202 		return ret;
2203 
2204 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2205 
2206 	return ret;
2207 }
2208 
2209 /**
2210  * free_dma_desc_resources - free dma desc resources
2211  * @priv: private structure
2212  * @dma_conf: structure to take the dma data
2213  */
2214 static void free_dma_desc_resources(struct stmmac_priv *priv,
2215 				    struct stmmac_dma_conf *dma_conf)
2216 {
2217 	/* Release the DMA TX socket buffers */
2218 	free_dma_tx_desc_resources(priv, dma_conf);
2219 
2220 	/* Release the DMA RX socket buffers later
2221 	 * to ensure all pending XDP_TX buffers are returned.
2222 	 */
2223 	free_dma_rx_desc_resources(priv, dma_conf);
2224 }
2225 
2226 /**
2227  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2228  *  @priv: driver private structure
2229  *  Description: It is used for enabling the rx queues in the MAC
2230  */
2231 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2232 {
2233 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2234 	int queue;
2235 	u8 mode;
2236 
2237 	for (queue = 0; queue < rx_queues_count; queue++) {
2238 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2239 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2240 	}
2241 }
2242 
2243 /**
2244  * stmmac_start_rx_dma - start RX DMA channel
2245  * @priv: driver private structure
2246  * @chan: RX channel index
2247  * Description:
2248  * This starts a RX DMA channel
2249  */
2250 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2251 {
2252 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2253 	stmmac_start_rx(priv, priv->ioaddr, chan);
2254 }
2255 
2256 /**
2257  * stmmac_start_tx_dma - start TX DMA channel
2258  * @priv: driver private structure
2259  * @chan: TX channel index
2260  * Description:
2261  * This starts a TX DMA channel
2262  */
2263 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2264 {
2265 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2266 	stmmac_start_tx(priv, priv->ioaddr, chan);
2267 }
2268 
2269 /**
2270  * stmmac_stop_rx_dma - stop RX DMA channel
2271  * @priv: driver private structure
2272  * @chan: RX channel index
2273  * Description:
2274  * This stops a RX DMA channel
2275  */
2276 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2277 {
2278 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2279 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2280 }
2281 
2282 /**
2283  * stmmac_stop_tx_dma - stop TX DMA channel
2284  * @priv: driver private structure
2285  * @chan: TX channel index
2286  * Description:
2287  * This stops a TX DMA channel
2288  */
2289 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2290 {
2291 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2292 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2293 }
2294 
2295 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2296 {
2297 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2298 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2299 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2300 	u32 chan;
2301 
2302 	for (chan = 0; chan < dma_csr_ch; chan++) {
2303 		struct stmmac_channel *ch = &priv->channel[chan];
2304 		unsigned long flags;
2305 
2306 		spin_lock_irqsave(&ch->lock, flags);
2307 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2308 		spin_unlock_irqrestore(&ch->lock, flags);
2309 	}
2310 }
2311 
2312 /**
2313  * stmmac_start_all_dma - start all RX and TX DMA channels
2314  * @priv: driver private structure
2315  * Description:
2316  * This starts all the RX and TX DMA channels
2317  */
2318 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2319 {
2320 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2321 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2322 	u32 chan = 0;
2323 
2324 	for (chan = 0; chan < rx_channels_count; chan++)
2325 		stmmac_start_rx_dma(priv, chan);
2326 
2327 	for (chan = 0; chan < tx_channels_count; chan++)
2328 		stmmac_start_tx_dma(priv, chan);
2329 }
2330 
2331 /**
2332  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2333  * @priv: driver private structure
2334  * Description:
2335  * This stops the RX and TX DMA channels
2336  */
2337 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2338 {
2339 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2340 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2341 	u32 chan = 0;
2342 
2343 	for (chan = 0; chan < rx_channels_count; chan++)
2344 		stmmac_stop_rx_dma(priv, chan);
2345 
2346 	for (chan = 0; chan < tx_channels_count; chan++)
2347 		stmmac_stop_tx_dma(priv, chan);
2348 }
2349 
2350 /**
2351  *  stmmac_dma_operation_mode - HW DMA operation mode
2352  *  @priv: driver private structure
2353  *  Description: it is used for configuring the DMA operation mode register in
2354  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2355  */
2356 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2357 {
2358 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2359 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2360 	int rxfifosz = priv->plat->rx_fifo_size;
2361 	int txfifosz = priv->plat->tx_fifo_size;
2362 	u32 txmode = 0;
2363 	u32 rxmode = 0;
2364 	u32 chan = 0;
2365 	u8 qmode = 0;
2366 
2367 	if (rxfifosz == 0)
2368 		rxfifosz = priv->dma_cap.rx_fifo_size;
2369 	if (txfifosz == 0)
2370 		txfifosz = priv->dma_cap.tx_fifo_size;
2371 
2372 	/* Adjust for real per queue fifo size */
2373 	rxfifosz /= rx_channels_count;
2374 	txfifosz /= tx_channels_count;
2375 
2376 	if (priv->plat->force_thresh_dma_mode) {
2377 		txmode = tc;
2378 		rxmode = tc;
2379 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * On GMAC, Store-and-Forward (SF) mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
		 */
2387 		txmode = SF_DMA_MODE;
2388 		rxmode = SF_DMA_MODE;
2389 		priv->xstats.threshold = SF_DMA_MODE;
2390 	} else {
2391 		txmode = tc;
2392 		rxmode = SF_DMA_MODE;
2393 	}
2394 
2395 	/* configure all channels */
2396 	for (chan = 0; chan < rx_channels_count; chan++) {
2397 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2398 		u32 buf_size;
2399 
2400 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2401 
2402 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2403 				rxfifosz, qmode);
2404 
2405 		if (rx_q->xsk_pool) {
2406 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2407 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2408 					      buf_size,
2409 					      chan);
2410 		} else {
2411 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2412 					      priv->dma_conf.dma_buf_sz,
2413 					      chan);
2414 		}
2415 	}
2416 
2417 	for (chan = 0; chan < tx_channels_count; chan++) {
2418 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2419 
2420 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2421 				txfifosz, qmode);
2422 	}
2423 }
2424 
2425 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2426 {
2427 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2428 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2429 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2430 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2431 	unsigned int entry = tx_q->cur_tx;
2432 	struct dma_desc *tx_desc = NULL;
2433 	struct xdp_desc xdp_desc;
2434 	bool work_done = true;
2435 	u32 tx_set_ic_bit = 0;
2436 	unsigned long flags;
2437 
2438 	/* Avoids TX time-out as we are sharing with slow path */
2439 	txq_trans_cond_update(nq);
2440 
2441 	budget = min(budget, stmmac_tx_avail(priv, queue));
2442 
2443 	while (budget-- > 0) {
2444 		dma_addr_t dma_addr;
2445 		bool set_ic;
2446 
		/* We share the ring with the slow path, so stop XSK TX desc
		 * submission when the available TX ring space drops below
		 * the threshold.
		 */
2450 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2451 		    !netif_carrier_ok(priv->dev)) {
2452 			work_done = false;
2453 			break;
2454 		}
2455 
2456 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2457 			break;
2458 
2459 		if (likely(priv->extend_desc))
2460 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2461 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2462 			tx_desc = &tx_q->dma_entx[entry].basic;
2463 		else
2464 			tx_desc = tx_q->dma_tx + entry;
2465 
2466 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2467 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2468 
2469 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2470 
		/* To return the XDP buffer to the XSK pool, we simply call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
2475 		tx_q->tx_skbuff_dma[entry].buf = 0;
2476 		tx_q->xdpf[entry] = NULL;
2477 
2478 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2479 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2480 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2481 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2482 
2483 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2484 
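		/* IRQ coalescing: request a TX completion interrupt only
		 * once every tx_coal_frames descriptors.
		 */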
2485 		tx_q->tx_count_frames++;
2486 
2487 		if (!priv->tx_coal_frames[queue])
2488 			set_ic = false;
2489 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2490 			set_ic = true;
2491 		else
2492 			set_ic = false;
2493 
2494 		if (set_ic) {
2495 			tx_q->tx_count_frames = 0;
2496 			stmmac_set_tx_ic(priv, tx_desc);
2497 			tx_set_ic_bit++;
2498 		}
2499 
2500 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2501 				       true, priv->mode, true, true,
2502 				       xdp_desc.len);
2503 
2504 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2505 
2506 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2507 		entry = tx_q->cur_tx;
2508 	}
2509 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2510 	txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2511 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2512 
2513 	if (tx_desc) {
2514 		stmmac_flush_tx_descriptors(priv, queue);
2515 		xsk_tx_release(pool);
2516 	}
2517 
	/* Return true if both of the following conditions are met:
	 *  a) TX budget is still available
	 *  b) work_done = true when the XSK TX desc peek is empty (no more
	 *     pending XSK TX for transmission)
	 */
2523 	return !!budget && work_done;
2524 }
2525 
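/* On a threshold-related TX error, raise the DMA TX threshold by 64
 * (up to 256) unless Store-and-Forward mode is already in use.
 */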
2526 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2527 {
2528 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2529 		tc += 64;
2530 
2531 		if (priv->plat->force_thresh_dma_mode)
2532 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2533 		else
2534 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2535 						      chan);
2536 
2537 		priv->xstats.threshold = tc;
2538 	}
2539 }
2540 
2541 /**
2542  * stmmac_tx_clean - to manage the transmission completion
2543  * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
2545  * @queue: TX queue index
2546  * Description: it reclaims the transmit resources after transmission completes.
2547  */
2548 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2549 {
2550 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2551 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2552 	unsigned int bytes_compl = 0, pkts_compl = 0;
2553 	unsigned int entry, xmits = 0, count = 0;
2554 	u32 tx_packets = 0, tx_errors = 0;
2555 	unsigned long flags;
2556 
2557 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2558 
2559 	tx_q->xsk_frames_done = 0;
2560 
2561 	entry = tx_q->dirty_tx;
2562 
	/* Try to clean all completed TX frames in one shot */
2564 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2565 		struct xdp_frame *xdpf;
2566 		struct sk_buff *skb;
2567 		struct dma_desc *p;
2568 		int status;
2569 
2570 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2571 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2572 			xdpf = tx_q->xdpf[entry];
2573 			skb = NULL;
2574 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2575 			xdpf = NULL;
2576 			skb = tx_q->tx_skbuff[entry];
2577 		} else {
2578 			xdpf = NULL;
2579 			skb = NULL;
2580 		}
2581 
2582 		if (priv->extend_desc)
2583 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2584 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2585 			p = &tx_q->dma_entx[entry].basic;
2586 		else
2587 			p = tx_q->dma_tx + entry;
2588 
2589 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2590 		/* Check if the descriptor is owned by the DMA */
2591 		if (unlikely(status & tx_dma_own))
2592 			break;
2593 
2594 		count++;
2595 
2596 		/* Make sure descriptor fields are read after reading
2597 		 * the own bit.
2598 		 */
2599 		dma_rmb();
2600 
2601 		/* Just consider the last segment and ...*/
2602 		if (likely(!(status & tx_not_ls))) {
2603 			/* ... verify the status error condition */
2604 			if (unlikely(status & tx_err)) {
2605 				tx_errors++;
2606 				if (unlikely(status & tx_err_bump_tc))
2607 					stmmac_bump_dma_threshold(priv, queue);
2608 			} else {
2609 				tx_packets++;
2610 			}
2611 			if (skb)
2612 				stmmac_get_tx_hwtstamp(priv, p, skb);
2613 		}
2614 
2615 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2616 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2617 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2618 				dma_unmap_page(priv->device,
2619 					       tx_q->tx_skbuff_dma[entry].buf,
2620 					       tx_q->tx_skbuff_dma[entry].len,
2621 					       DMA_TO_DEVICE);
2622 			else
2623 				dma_unmap_single(priv->device,
2624 						 tx_q->tx_skbuff_dma[entry].buf,
2625 						 tx_q->tx_skbuff_dma[entry].len,
2626 						 DMA_TO_DEVICE);
2627 			tx_q->tx_skbuff_dma[entry].buf = 0;
2628 			tx_q->tx_skbuff_dma[entry].len = 0;
2629 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2630 		}
2631 
2632 		stmmac_clean_desc3(priv, tx_q, p);
2633 
2634 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2635 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2636 
2637 		if (xdpf &&
2638 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2639 			xdp_return_frame_rx_napi(xdpf);
2640 			tx_q->xdpf[entry] = NULL;
2641 		}
2642 
2643 		if (xdpf &&
2644 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2645 			xdp_return_frame(xdpf);
2646 			tx_q->xdpf[entry] = NULL;
2647 		}
2648 
2649 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2650 			tx_q->xsk_frames_done++;
2651 
2652 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2653 			if (likely(skb)) {
2654 				pkts_compl++;
2655 				bytes_compl += skb->len;
2656 				dev_consume_skb_any(skb);
2657 				tx_q->tx_skbuff[entry] = NULL;
2658 			}
2659 		}
2660 
2661 		stmmac_release_tx_desc(priv, p, priv->mode);
2662 
2663 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2664 	}
2665 	tx_q->dirty_tx = entry;
2666 
2667 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2668 				  pkts_compl, bytes_compl);
2669 
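	/* Wake the TX queue if it was stopped and enough descriptors have
	 * been reclaimed.
	 */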
2670 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2671 								queue))) &&
2672 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2673 
2674 		netif_dbg(priv, tx_done, priv->dev,
2675 			  "%s: restart transmit\n", __func__);
2676 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2677 	}
2678 
2679 	if (tx_q->xsk_pool) {
2680 		bool work_done;
2681 
2682 		if (tx_q->xsk_frames_done)
2683 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2684 
2685 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2686 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2687 
		/* For XSK TX, we try to send as many frames as possible.
		 * If the XSK work is done (XSK TX desc ring empty and budget
		 * still available), return "budget - 1" to re-enable the TX
		 * IRQ. Else, return "budget" to make NAPI continue polling.
		 */
2693 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2694 					       STMMAC_XSK_TX_BUDGET_MAX);
2695 		if (work_done)
2696 			xmits = budget - 1;
2697 		else
2698 			xmits = budget;
2699 	}
2700 
2701 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2702 	    priv->eee_sw_timer_en) {
2703 		if (stmmac_enable_eee_mode(priv))
2704 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2705 	}
2706 
2707 	/* We still have pending packets, let's call for a new scheduling */
2708 	if (tx_q->dirty_tx != tx_q->cur_tx)
2709 		stmmac_tx_timer_arm(priv, queue);
2710 
2711 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2712 	txq_stats->tx_packets += tx_packets;
2713 	txq_stats->tx_pkt_n += tx_packets;
2714 	txq_stats->tx_clean++;
2715 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2716 
2717 	priv->xstats.tx_errors += tx_errors;
2718 
2719 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2720 
2721 	/* Combine decisions from TX clean and XSK TX */
2722 	return max(count, xmits);
2723 }
2724 
2725 /**
2726  * stmmac_tx_err - to manage the tx error
2727  * @priv: driver private structure
2728  * @chan: channel index
2729  * Description: it cleans the descriptors and restarts the transmission
2730  * in case of transmission errors.
2731  */
2732 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2733 {
2734 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2735 
2736 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2737 
2738 	stmmac_stop_tx_dma(priv, chan);
2739 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2740 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2741 	stmmac_reset_tx_queue(priv, chan);
2742 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2743 			    tx_q->dma_tx_phy, chan);
2744 	stmmac_start_tx_dma(priv, chan);
2745 
2746 	priv->xstats.tx_errors++;
2747 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2748 }
2749 
2750 /**
2751  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2752  *  @priv: driver private structure
2753  *  @txmode: TX operating mode
2754  *  @rxmode: RX operating mode
2755  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
2759  */
2760 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2761 					  u32 rxmode, u32 chan)
2762 {
2763 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2764 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2765 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2766 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2767 	int rxfifosz = priv->plat->rx_fifo_size;
2768 	int txfifosz = priv->plat->tx_fifo_size;
2769 
2770 	if (rxfifosz == 0)
2771 		rxfifosz = priv->dma_cap.rx_fifo_size;
2772 	if (txfifosz == 0)
2773 		txfifosz = priv->dma_cap.tx_fifo_size;
2774 
2775 	/* Adjust for real per queue fifo size */
2776 	rxfifosz /= rx_channels_count;
2777 	txfifosz /= tx_channels_count;
2778 
2779 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2780 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2781 }
2782 
2783 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2784 {
2785 	int ret;
2786 
2787 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2788 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2789 	if (ret && (ret != -EINVAL)) {
2790 		stmmac_global_err(priv);
2791 		return true;
2792 	}
2793 
2794 	return false;
2795 }
2796 
2797 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2798 {
2799 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2800 						 &priv->xstats, chan, dir);
2801 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2802 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2803 	struct stmmac_channel *ch = &priv->channel[chan];
2804 	struct napi_struct *rx_napi;
2805 	struct napi_struct *tx_napi;
2806 	unsigned long flags;
2807 
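	/* Queues backed by an XSK pool are polled by the combined rx/tx NAPI. */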
2808 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2809 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2810 
2811 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2812 		if (napi_schedule_prep(rx_napi)) {
2813 			spin_lock_irqsave(&ch->lock, flags);
2814 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2815 			spin_unlock_irqrestore(&ch->lock, flags);
2816 			__napi_schedule(rx_napi);
2817 		}
2818 	}
2819 
2820 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2821 		if (napi_schedule_prep(tx_napi)) {
2822 			spin_lock_irqsave(&ch->lock, flags);
2823 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2824 			spin_unlock_irqrestore(&ch->lock, flags);
2825 			__napi_schedule(tx_napi);
2826 		}
2827 	}
2828 
2829 	return status;
2830 }
2831 
2832 /**
2833  * stmmac_dma_interrupt - DMA ISR
2834  * @priv: driver private structure
2835  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
2838  */
2839 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2840 {
2841 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2842 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2843 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2844 				tx_channel_count : rx_channel_count;
2845 	u32 chan;
2846 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2847 
2848 	/* Make sure we never check beyond our status buffer. */
2849 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2850 		channels_to_check = ARRAY_SIZE(status);
2851 
2852 	for (chan = 0; chan < channels_to_check; chan++)
2853 		status[chan] = stmmac_napi_check(priv, chan,
2854 						 DMA_DIR_RXTX);
2855 
2856 	for (chan = 0; chan < tx_channel_count; chan++) {
2857 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2858 			/* Try to bump up the dma threshold on this failure */
2859 			stmmac_bump_dma_threshold(priv, chan);
2860 		} else if (unlikely(status[chan] == tx_hard_error)) {
2861 			stmmac_tx_err(priv, chan);
2862 		}
2863 	}
2864 }
2865 
2866 /**
 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq; the counters are managed in SW.
2870  */
2871 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2872 {
2873 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2874 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2875 
2876 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2877 
2878 	if (priv->dma_cap.rmon) {
2879 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2880 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else {
		netdev_info(priv->dev, "No MAC Management Counters available\n");
	}
2883 }
2884 
2885 /**
2886  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2887  * @priv: driver private structure
2888  * Description:
2889  *  new GMAC chip generations have a new register to indicate the
2890  *  presence of the optional feature/functions.
 *  This can also be used to override the value passed through the
 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2893  */
2894 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2895 {
2896 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2897 }
2898 
2899 /**
2900  * stmmac_check_ether_addr - check if the MAC addr is valid
2901  * @priv: driver private structure
2902  * Description:
 * it is to verify if the MAC address is valid; in case of failure it
 * generates a random MAC address
2905  */
2906 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2907 {
2908 	u8 addr[ETH_ALEN];
2909 
2910 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2911 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2912 		if (is_valid_ether_addr(addr))
2913 			eth_hw_addr_set(priv->dev, addr);
2914 		else
2915 			eth_hw_addr_random(priv->dev);
2916 		dev_info(priv->device, "device MAC address %pM\n",
2917 			 priv->dev->dev_addr);
2918 	}
2919 }
2920 
2921 /**
2922  * stmmac_init_dma_engine - DMA init.
2923  * @priv: driver private structure
2924  * Description:
2925  * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if these are not passed, a default is kept for the MAC or GMAC.
2928  */
2929 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2930 {
2931 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2932 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2933 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2934 	struct stmmac_rx_queue *rx_q;
2935 	struct stmmac_tx_queue *tx_q;
2936 	u32 chan = 0;
2937 	int atds = 0;
2938 	int ret = 0;
2939 
2940 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2941 		dev_err(priv->device, "Invalid DMA configuration\n");
2942 		return -EINVAL;
2943 	}
2944 
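	/* The alternate descriptor size (ATDS) is set when extended
	 * descriptors are used in ring mode.
	 */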
2945 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2946 		atds = 1;
2947 
2948 	ret = stmmac_reset(priv, priv->ioaddr);
2949 	if (ret) {
2950 		dev_err(priv->device, "Failed to reset the dma\n");
2951 		return ret;
2952 	}
2953 
2954 	/* DMA Configuration */
2955 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2956 
2957 	if (priv->plat->axi)
2958 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2959 
2960 	/* DMA CSR Channel configuration */
2961 	for (chan = 0; chan < dma_csr_ch; chan++) {
2962 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2963 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2964 	}
2965 
2966 	/* DMA RX Channel Configuration */
2967 	for (chan = 0; chan < rx_channels_count; chan++) {
2968 		rx_q = &priv->dma_conf.rx_queue[chan];
2969 
2970 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2971 				    rx_q->dma_rx_phy, chan);
2972 
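		/* Point the RX tail pointer just past the descriptors that
		 * already own a buffer.
		 */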
2973 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2974 				     (rx_q->buf_alloc_num *
2975 				      sizeof(struct dma_desc));
2976 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2977 				       rx_q->rx_tail_addr, chan);
2978 	}
2979 
2980 	/* DMA TX Channel Configuration */
2981 	for (chan = 0; chan < tx_channels_count; chan++) {
2982 		tx_q = &priv->dma_conf.tx_queue[chan];
2983 
2984 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2985 				    tx_q->dma_tx_phy, chan);
2986 
2987 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2988 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2989 				       tx_q->tx_tail_addr, chan);
2990 	}
2991 
2992 	return ret;
2993 }
2994 
2995 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2996 {
2997 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2998 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
2999 
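	/* A zero coalesce timer means the SW TX mitigation timer is disabled. */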
3000 	if (!tx_coal_timer)
3001 		return;
3002 
3003 	hrtimer_start(&tx_q->txtimer,
3004 		      STMMAC_COAL_TIMER(tx_coal_timer),
3005 		      HRTIMER_MODE_REL);
3006 }
3007 
3008 /**
3009  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the hrtimer embedded in the TX queue
 * Description:
 * This is the timer handler that schedules the TX NAPI, which in turn
 * runs stmmac_tx_clean.
3013  */
3014 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3015 {
3016 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3017 	struct stmmac_priv *priv = tx_q->priv_data;
3018 	struct stmmac_channel *ch;
3019 	struct napi_struct *napi;
3020 
3021 	ch = &priv->channel[tx_q->queue_index];
3022 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3023 
3024 	if (likely(napi_schedule_prep(napi))) {
3025 		unsigned long flags;
3026 
3027 		spin_lock_irqsave(&ch->lock, flags);
3028 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3029 		spin_unlock_irqrestore(&ch->lock, flags);
3030 		__napi_schedule(napi);
3031 	}
3032 
3033 	return HRTIMER_NORESTART;
3034 }
3035 
3036 /**
3037  * stmmac_init_coalesce - init mitigation options.
3038  * @priv: driver private structure
3039  * Description:
3040  * This inits the coalesce parameters: i.e. timer rate,
3041  * timer handler and default threshold used for enabling the
3042  * interrupt on completion bit.
3043  */
3044 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3045 {
3046 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3047 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3048 	u32 chan;
3049 
3050 	for (chan = 0; chan < tx_channel_count; chan++) {
3051 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3052 
3053 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3054 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3055 
3056 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3057 		tx_q->txtimer.function = stmmac_tx_timer;
3058 	}
3059 
3060 	for (chan = 0; chan < rx_channel_count; chan++)
3061 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3062 }
3063 
3064 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3065 {
3066 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3067 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3068 	u32 chan;
3069 
3070 	/* set TX ring length */
3071 	for (chan = 0; chan < tx_channels_count; chan++)
3072 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3073 				       (priv->dma_conf.dma_tx_size - 1), chan);
3074 
3075 	/* set RX ring length */
3076 	for (chan = 0; chan < rx_channels_count; chan++)
3077 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3078 				       (priv->dma_conf.dma_rx_size - 1), chan);
3079 }
3080 
3081 /**
3082  *  stmmac_set_tx_queue_weight - Set TX queue weight
3083  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
3085  */
3086 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3087 {
3088 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3089 	u32 weight;
3090 	u32 queue;
3091 
3092 	for (queue = 0; queue < tx_queues_count; queue++) {
3093 		weight = priv->plat->tx_queues_cfg[queue].weight;
3094 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3095 	}
3096 }
3097 
3098 /**
3099  *  stmmac_configure_cbs - Configure CBS in TX queue
3100  *  @priv: driver private structure
3101  *  Description: It is used for configuring CBS in AVB TX queues
3102  */
3103 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3104 {
3105 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3106 	u32 mode_to_use;
3107 	u32 queue;
3108 
3109 	/* queue 0 is reserved for legacy traffic */
3110 	for (queue = 1; queue < tx_queues_count; queue++) {
3111 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3112 		if (mode_to_use == MTL_QUEUE_DCB)
3113 			continue;
3114 
3115 		stmmac_config_cbs(priv, priv->hw,
3116 				priv->plat->tx_queues_cfg[queue].send_slope,
3117 				priv->plat->tx_queues_cfg[queue].idle_slope,
3118 				priv->plat->tx_queues_cfg[queue].high_credit,
3119 				priv->plat->tx_queues_cfg[queue].low_credit,
3120 				queue);
3121 	}
3122 }
3123 
3124 /**
3125  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3126  *  @priv: driver private structure
3127  *  Description: It is used for mapping RX queues to RX dma channels
3128  */
3129 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3130 {
3131 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3132 	u32 queue;
3133 	u32 chan;
3134 
3135 	for (queue = 0; queue < rx_queues_count; queue++) {
3136 		chan = priv->plat->rx_queues_cfg[queue].chan;
3137 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3138 	}
3139 }
3140 
3141 /**
3142  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3143  *  @priv: driver private structure
3144  *  Description: It is used for configuring the RX Queue Priority
3145  */
3146 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3147 {
3148 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3149 	u32 queue;
3150 	u32 prio;
3151 
3152 	for (queue = 0; queue < rx_queues_count; queue++) {
3153 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3154 			continue;
3155 
3156 		prio = priv->plat->rx_queues_cfg[queue].prio;
3157 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3158 	}
3159 }
3160 
3161 /**
3162  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3163  *  @priv: driver private structure
3164  *  Description: It is used for configuring the TX Queue Priority
3165  */
3166 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3167 {
3168 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3169 	u32 queue;
3170 	u32 prio;
3171 
3172 	for (queue = 0; queue < tx_queues_count; queue++) {
3173 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3174 			continue;
3175 
3176 		prio = priv->plat->tx_queues_cfg[queue].prio;
3177 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3178 	}
3179 }
3180 
3181 /**
3182  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3183  *  @priv: driver private structure
3184  *  Description: It is used for configuring the RX queue routing
3185  */
3186 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3187 {
3188 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3189 	u32 queue;
3190 	u8 packet;
3191 
3192 	for (queue = 0; queue < rx_queues_count; queue++) {
3193 		/* no specific packet type routing specified for the queue */
3194 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3195 			continue;
3196 
3197 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3198 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3199 	}
3200 }
3201 
3202 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3203 {
3204 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3205 		priv->rss.enable = false;
3206 		return;
3207 	}
3208 
3209 	if (priv->dev->features & NETIF_F_RXHASH)
3210 		priv->rss.enable = true;
3211 	else
3212 		priv->rss.enable = false;
3213 
3214 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3215 			     priv->plat->rx_queues_to_use);
3216 }
3217 
3218 /**
3219  *  stmmac_mtl_configuration - Configure MTL
3220  *  @priv: driver private structure
 *  Description: It is used for configuring MTL
3222  */
3223 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3224 {
3225 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3226 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3227 
3228 	if (tx_queues_count > 1)
3229 		stmmac_set_tx_queue_weight(priv);
3230 
3231 	/* Configure MTL RX algorithms */
3232 	if (rx_queues_count > 1)
3233 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3234 				priv->plat->rx_sched_algorithm);
3235 
3236 	/* Configure MTL TX algorithms */
3237 	if (tx_queues_count > 1)
3238 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3239 				priv->plat->tx_sched_algorithm);
3240 
3241 	/* Configure CBS in AVB TX queues */
3242 	if (tx_queues_count > 1)
3243 		stmmac_configure_cbs(priv);
3244 
3245 	/* Map RX MTL to DMA channels */
3246 	stmmac_rx_queue_dma_chan_map(priv);
3247 
3248 	/* Enable MAC RX Queues */
3249 	stmmac_mac_enable_rx_queues(priv);
3250 
3251 	/* Set RX priorities */
3252 	if (rx_queues_count > 1)
3253 		stmmac_mac_config_rx_queues_prio(priv);
3254 
3255 	/* Set TX priorities */
3256 	if (tx_queues_count > 1)
3257 		stmmac_mac_config_tx_queues_prio(priv);
3258 
3259 	/* Set RX routing */
3260 	if (rx_queues_count > 1)
3261 		stmmac_mac_config_rx_queues_routing(priv);
3262 
3263 	/* Receive Side Scaling */
3264 	if (rx_queues_count > 1)
3265 		stmmac_mac_config_rss(priv);
3266 }
3267 
3268 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3269 {
3270 	if (priv->dma_cap.asp) {
3271 		netdev_info(priv->dev, "Enabling Safety Features\n");
3272 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3273 					  priv->plat->safety_feat_cfg);
3274 	} else {
3275 		netdev_info(priv->dev, "No Safety Features support found\n");
3276 	}
3277 }
3278 
3279 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3280 {
3281 	char *name;
3282 
3283 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3284 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3285 
3286 	name = priv->wq_name;
3287 	sprintf(name, "%s-fpe", priv->dev->name);
3288 
3289 	priv->fpe_wq = create_singlethread_workqueue(name);
3290 	if (!priv->fpe_wq) {
3291 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3292 
3293 		return -ENOMEM;
3294 	}
	netdev_info(priv->dev, "FPE workqueue started\n");
3296 
3297 	return 0;
3298 }
3299 
3300 /**
3301  * stmmac_hw_setup - setup mac in a usable state.
 *  @dev: pointer to the device structure.
 *  @ptp_register: register PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state: the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
 *  Return value:
 *  0 on success and an appropriate negative error code as defined in
 *  errno.h on failure.
3312  */
3313 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3314 {
3315 	struct stmmac_priv *priv = netdev_priv(dev);
3316 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3317 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3318 	bool sph_en;
3319 	u32 chan;
3320 	int ret;
3321 
3322 	/* DMA initialization and SW reset */
3323 	ret = stmmac_init_dma_engine(priv);
3324 	if (ret < 0) {
3325 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3326 			   __func__);
3327 		return ret;
3328 	}
3329 
3330 	/* Copy the MAC addr into the HW  */
3331 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3332 
3333 	/* PS and related bits will be programmed according to the speed */
3334 	if (priv->hw->pcs) {
3335 		int speed = priv->plat->mac_port_sel_speed;
3336 
3337 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3338 		    (speed == SPEED_1000)) {
3339 			priv->hw->ps = speed;
3340 		} else {
3341 			dev_warn(priv->device, "invalid port speed\n");
3342 			priv->hw->ps = 0;
3343 		}
3344 	}
3345 
3346 	/* Initialize the MAC Core */
3347 	stmmac_core_init(priv, priv->hw, dev);
3348 
	/* Initialize MTL */
3350 	stmmac_mtl_configuration(priv);
3351 
3352 	/* Initialize Safety Features */
3353 	stmmac_safety_feat_configuration(priv);
3354 
3355 	ret = stmmac_rx_ipc(priv, priv->hw);
3356 	if (!ret) {
3357 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3358 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3359 		priv->hw->rx_csum = 0;
3360 	}
3361 
3362 	/* Enable the MAC Rx/Tx */
3363 	stmmac_mac_set(priv, priv->ioaddr, true);
3364 
3365 	/* Set the HW DMA mode and the COE */
3366 	stmmac_dma_operation_mode(priv);
3367 
3368 	stmmac_mmc_setup(priv);
3369 
3370 	if (ptp_register) {
3371 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3372 		if (ret < 0)
3373 			netdev_warn(priv->dev,
3374 				    "failed to enable PTP reference clock: %pe\n",
3375 				    ERR_PTR(ret));
3376 	}
3377 
3378 	ret = stmmac_init_ptp(priv);
3379 	if (ret == -EOPNOTSUPP)
3380 		netdev_info(priv->dev, "PTP not supported by HW\n");
3381 	else if (ret)
3382 		netdev_warn(priv->dev, "PTP init failed\n");
3383 	else if (ptp_register)
3384 		stmmac_ptp_register(priv);
3385 
3386 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3387 
3388 	/* Convert the timer from msec to usec */
3389 	if (!priv->tx_lpi_timer)
3390 		priv->tx_lpi_timer = eee_timer * 1000;
3391 
3392 	if (priv->use_riwt) {
3393 		u32 queue;
3394 
3395 		for (queue = 0; queue < rx_cnt; queue++) {
3396 			if (!priv->rx_riwt[queue])
3397 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3398 
3399 			stmmac_rx_watchdog(priv, priv->ioaddr,
3400 					   priv->rx_riwt[queue], queue);
3401 		}
3402 	}
3403 
3404 	if (priv->hw->pcs)
3405 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3406 
3407 	/* set TX and RX rings length */
3408 	stmmac_set_rings_length(priv);
3409 
3410 	/* Enable TSO */
3411 	if (priv->tso) {
3412 		for (chan = 0; chan < tx_cnt; chan++) {
3413 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3414 
3415 			/* TSO and TBS cannot co-exist */
3416 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3417 				continue;
3418 
3419 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3420 		}
3421 	}
3422 
3423 	/* Enable Split Header */
3424 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3425 	for (chan = 0; chan < rx_cnt; chan++)
3426 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3427 
3429 	/* VLAN Tag Insertion */
3430 	if (priv->dma_cap.vlins)
3431 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3432 
	/* TBS (Time Based Scheduling) */
3434 	for (chan = 0; chan < tx_cnt; chan++) {
3435 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3436 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3437 
3438 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3439 	}
3440 
3441 	/* Configure real RX and TX queues */
3442 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3443 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3444 
3445 	/* Start the ball rolling... */
3446 	stmmac_start_all_dma(priv);
3447 
3448 	if (priv->dma_cap.fpesel) {
3449 		stmmac_fpe_start_wq(priv);
3450 
3451 		if (priv->plat->fpe_cfg->enable)
3452 			stmmac_fpe_handshake(priv, true);
3453 	}
3454 
3455 	return 0;
3456 }
3457 
3458 static void stmmac_hw_teardown(struct net_device *dev)
3459 {
3460 	struct stmmac_priv *priv = netdev_priv(dev);
3461 
3462 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3463 }
3464 
3465 static void stmmac_free_irq(struct net_device *dev,
3466 			    enum request_irq_err irq_err, int irq_idx)
3467 {
3468 	struct stmmac_priv *priv = netdev_priv(dev);
3469 	int j;
3470 
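	/* Unwind in the reverse order of the requests: the fallthrough cases
	 * ensure that only the IRQs requested so far are freed.
	 */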
3471 	switch (irq_err) {
3472 	case REQ_IRQ_ERR_ALL:
3473 		irq_idx = priv->plat->tx_queues_to_use;
3474 		fallthrough;
3475 	case REQ_IRQ_ERR_TX:
3476 		for (j = irq_idx - 1; j >= 0; j--) {
3477 			if (priv->tx_irq[j] > 0) {
3478 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3479 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3480 			}
3481 		}
3482 		irq_idx = priv->plat->rx_queues_to_use;
3483 		fallthrough;
3484 	case REQ_IRQ_ERR_RX:
3485 		for (j = irq_idx - 1; j >= 0; j--) {
3486 			if (priv->rx_irq[j] > 0) {
3487 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3488 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3489 			}
3490 		}
3491 
3492 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3493 			free_irq(priv->sfty_ue_irq, dev);
3494 		fallthrough;
3495 	case REQ_IRQ_ERR_SFTY_UE:
3496 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3497 			free_irq(priv->sfty_ce_irq, dev);
3498 		fallthrough;
3499 	case REQ_IRQ_ERR_SFTY_CE:
3500 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3501 			free_irq(priv->lpi_irq, dev);
3502 		fallthrough;
3503 	case REQ_IRQ_ERR_LPI:
3504 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3505 			free_irq(priv->wol_irq, dev);
3506 		fallthrough;
3507 	case REQ_IRQ_ERR_WOL:
3508 		free_irq(dev->irq, dev);
3509 		fallthrough;
3510 	case REQ_IRQ_ERR_MAC:
3511 	case REQ_IRQ_ERR_NO:
		/* If the MAC IRQ request failed, there are no more IRQs to free */
3513 		break;
3514 	}
3515 }
3516 
3517 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3518 {
3519 	struct stmmac_priv *priv = netdev_priv(dev);
3520 	enum request_irq_err irq_err;
3521 	cpumask_t cpu_mask;
3522 	int irq_idx = 0;
3523 	char *int_name;
3524 	int ret;
3525 	int i;
3526 
3527 	/* For common interrupt */
3528 	int_name = priv->int_name_mac;
3529 	sprintf(int_name, "%s:%s", dev->name, "mac");
3530 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3531 			  0, int_name, dev);
3532 	if (unlikely(ret < 0)) {
3533 		netdev_err(priv->dev,
3534 			   "%s: alloc mac MSI %d (error: %d)\n",
3535 			   __func__, dev->irq, ret);
3536 		irq_err = REQ_IRQ_ERR_MAC;
3537 		goto irq_error;
3538 	}
3539 
	/* Request the Wake IRQ in case another line
	 * is used for WoL
	 */
3543 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3544 		int_name = priv->int_name_wol;
3545 		sprintf(int_name, "%s:%s", dev->name, "wol");
3546 		ret = request_irq(priv->wol_irq,
3547 				  stmmac_mac_interrupt,
3548 				  0, int_name, dev);
3549 		if (unlikely(ret < 0)) {
3550 			netdev_err(priv->dev,
3551 				   "%s: alloc wol MSI %d (error: %d)\n",
3552 				   __func__, priv->wol_irq, ret);
3553 			irq_err = REQ_IRQ_ERR_WOL;
3554 			goto irq_error;
3555 		}
3556 	}
3557 
	/* Request the LPI IRQ in case another line
	 * is used for LPI
	 */
3561 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3562 		int_name = priv->int_name_lpi;
3563 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3564 		ret = request_irq(priv->lpi_irq,
3565 				  stmmac_mac_interrupt,
3566 				  0, int_name, dev);
3567 		if (unlikely(ret < 0)) {
3568 			netdev_err(priv->dev,
3569 				   "%s: alloc lpi MSI %d (error: %d)\n",
3570 				   __func__, priv->lpi_irq, ret);
3571 			irq_err = REQ_IRQ_ERR_LPI;
3572 			goto irq_error;
3573 		}
3574 	}
3575 
	/* Request the Safety Feature Correctable Error line in
	 * case another line is used
	 */
3579 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3580 		int_name = priv->int_name_sfty_ce;
3581 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3582 		ret = request_irq(priv->sfty_ce_irq,
3583 				  stmmac_safety_interrupt,
3584 				  0, int_name, dev);
3585 		if (unlikely(ret < 0)) {
3586 			netdev_err(priv->dev,
3587 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3588 				   __func__, priv->sfty_ce_irq, ret);
3589 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3590 			goto irq_error;
3591 		}
3592 	}
3593 
	/* Request the Safety Feature Uncorrectable Error line in
	 * case another line is used
	 */
3597 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3598 		int_name = priv->int_name_sfty_ue;
3599 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3600 		ret = request_irq(priv->sfty_ue_irq,
3601 				  stmmac_safety_interrupt,
3602 				  0, int_name, dev);
3603 		if (unlikely(ret < 0)) {
3604 			netdev_err(priv->dev,
3605 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3606 				   __func__, priv->sfty_ue_irq, ret);
3607 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3608 			goto irq_error;
3609 		}
3610 	}
3611 
3612 	/* Request Rx MSI irq */
3613 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3614 		if (i >= MTL_MAX_RX_QUEUES)
3615 			break;
3616 		if (priv->rx_irq[i] == 0)
3617 			continue;
3618 
3619 		int_name = priv->int_name_rx_irq[i];
3620 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3621 		ret = request_irq(priv->rx_irq[i],
3622 				  stmmac_msi_intr_rx,
3623 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3624 		if (unlikely(ret < 0)) {
3625 			netdev_err(priv->dev,
3626 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3627 				   __func__, i, priv->rx_irq[i], ret);
3628 			irq_err = REQ_IRQ_ERR_RX;
3629 			irq_idx = i;
3630 			goto irq_error;
3631 		}
3632 		cpumask_clear(&cpu_mask);
3633 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3634 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3635 	}
3636 
3637 	/* Request Tx MSI irq */
3638 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3639 		if (i >= MTL_MAX_TX_QUEUES)
3640 			break;
3641 		if (priv->tx_irq[i] == 0)
3642 			continue;
3643 
3644 		int_name = priv->int_name_tx_irq[i];
3645 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3646 		ret = request_irq(priv->tx_irq[i],
3647 				  stmmac_msi_intr_tx,
3648 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3649 		if (unlikely(ret < 0)) {
3650 			netdev_err(priv->dev,
3651 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3652 				   __func__, i, priv->tx_irq[i], ret);
3653 			irq_err = REQ_IRQ_ERR_TX;
3654 			irq_idx = i;
3655 			goto irq_error;
3656 		}
3657 		cpumask_clear(&cpu_mask);
3658 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3659 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3660 	}
3661 
3662 	return 0;
3663 
3664 irq_error:
3665 	stmmac_free_irq(dev, irq_err, irq_idx);
3666 	return ret;
3667 }
3668 
3669 static int stmmac_request_irq_single(struct net_device *dev)
3670 {
3671 	struct stmmac_priv *priv = netdev_priv(dev);
3672 	enum request_irq_err irq_err;
3673 	int ret;
3674 
3675 	ret = request_irq(dev->irq, stmmac_interrupt,
3676 			  IRQF_SHARED, dev->name, dev);
3677 	if (unlikely(ret < 0)) {
3678 		netdev_err(priv->dev,
3679 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3680 			   __func__, dev->irq, ret);
3681 		irq_err = REQ_IRQ_ERR_MAC;
3682 		goto irq_error;
3683 	}
3684 
	/* Request the Wake IRQ in case another line
	 * is used for WoL
	 */
3688 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3689 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3690 				  IRQF_SHARED, dev->name, dev);
3691 		if (unlikely(ret < 0)) {
3692 			netdev_err(priv->dev,
3693 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3694 				   __func__, priv->wol_irq, ret);
3695 			irq_err = REQ_IRQ_ERR_WOL;
3696 			goto irq_error;
3697 		}
3698 	}
3699 
	/* Request the LPI IRQ in case another line is used for LPI */
3701 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3702 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3703 				  IRQF_SHARED, dev->name, dev);
3704 		if (unlikely(ret < 0)) {
3705 			netdev_err(priv->dev,
3706 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3707 				   __func__, priv->lpi_irq, ret);
3708 			irq_err = REQ_IRQ_ERR_LPI;
3709 			goto irq_error;
3710 		}
3711 	}
3712 
3713 	return 0;
3714 
3715 irq_error:
3716 	stmmac_free_irq(dev, irq_err, 0);
3717 	return ret;
3718 }
3719 
3720 static int stmmac_request_irq(struct net_device *dev)
3721 {
3722 	struct stmmac_priv *priv = netdev_priv(dev);
3723 	int ret;
3724 
3725 	/* Request the IRQ lines */
3726 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3727 		ret = stmmac_request_irq_multi_msi(dev);
3728 	else
3729 		ret = stmmac_request_irq_single(dev);
3730 
3731 	return ret;
3732 }
3733 
3734 /**
3735  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3736  *  @priv: driver private structure
3737  *  @mtu: MTU to setup the dma queue and buf with
3738  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3739  *  Allocate the Tx/Rx DMA queue and init them.
3740  *  Return value:
 *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3742  */
3743 static struct stmmac_dma_conf *
3744 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3745 {
3746 	struct stmmac_dma_conf *dma_conf;
3747 	int chan, bfsize, ret;
3748 
3749 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3750 	if (!dma_conf) {
3751 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3752 			   __func__);
3753 		return ERR_PTR(-ENOMEM);
3754 	}
3755 
3756 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3757 	if (bfsize < 0)
3758 		bfsize = 0;
3759 
3760 	if (bfsize < BUF_SIZE_16KiB)
3761 		bfsize = stmmac_set_bfsize(mtu, 0);
3762 
3763 	dma_conf->dma_buf_sz = bfsize;
	/* Choose the Tx/Rx ring sizes from the ones already defined in the
	 * priv struct, if any.
	 */
3767 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3768 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3769 
3770 	if (!dma_conf->dma_tx_size)
3771 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3772 	if (!dma_conf->dma_rx_size)
3773 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3774 
3775 	/* Earlier check for TBS */
3776 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3777 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3778 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3779 
3780 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3781 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3782 	}
3783 
3784 	ret = alloc_dma_desc_resources(priv, dma_conf);
3785 	if (ret < 0) {
3786 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3787 			   __func__);
3788 		goto alloc_error;
3789 	}
3790 
3791 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3792 	if (ret < 0) {
3793 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3794 			   __func__);
3795 		goto init_error;
3796 	}
3797 
3798 	return dma_conf;
3799 
3800 init_error:
3801 	free_dma_desc_resources(priv, dma_conf);
3802 alloc_error:
3803 	kfree(dma_conf);
3804 	return ERR_PTR(ret);
3805 }
3806 
3807 /**
3808  *  __stmmac_open - open entry point of the driver
3809  *  @dev : pointer to the device structure.
 *  @dma_conf: structure carrying the DMA configuration
3811  *  Description:
3812  *  This function is the open entry point of the driver.
3813  *  Return value:
3814  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3815  *  file on failure.
3816  */
3817 static int __stmmac_open(struct net_device *dev,
3818 			 struct stmmac_dma_conf *dma_conf)
3819 {
3820 	struct stmmac_priv *priv = netdev_priv(dev);
3821 	int mode = priv->plat->phy_interface;
3822 	u32 chan;
3823 	int ret;
3824 
3825 	ret = pm_runtime_resume_and_get(priv->device);
3826 	if (ret < 0)
3827 		return ret;
3828 
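	/* Skip attaching a PHY when the link is handled by a TBI/RTBI PCS,
	 * an XPCS running clause 73 auto-negotiation, or a Lynx PCS.
	 */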
3829 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3830 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3831 	    (!priv->hw->xpcs ||
3832 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3833 	    !priv->hw->lynx_pcs) {
3834 		ret = stmmac_init_phy(dev);
3835 		if (ret) {
3836 			netdev_err(priv->dev,
3837 				   "%s: Cannot attach to PHY (error: %d)\n",
3838 				   __func__, ret);
3839 			goto init_phy_error;
3840 		}
3841 	}
3842 
3843 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3844 
3845 	buf_sz = dma_conf->dma_buf_sz;
3846 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3847 
3848 	stmmac_reset_queues_param(priv);
3849 
3850 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3851 	    priv->plat->serdes_powerup) {
3852 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3853 		if (ret < 0) {
3854 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3855 				   __func__);
3856 			goto init_error;
3857 		}
3858 	}
3859 
3860 	ret = stmmac_hw_setup(dev, true);
3861 	if (ret < 0) {
3862 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3863 		goto init_error;
3864 	}
3865 
3866 	stmmac_init_coalesce(priv);
3867 
3868 	phylink_start(priv->phylink);
3869 	/* We may have called phylink_speed_down before */
3870 	phylink_speed_up(priv->phylink);
3871 
3872 	ret = stmmac_request_irq(dev);
3873 	if (ret)
3874 		goto irq_error;
3875 
3876 	stmmac_enable_all_queues(priv);
3877 	netif_tx_start_all_queues(priv->dev);
3878 	stmmac_enable_all_dma_irq(priv);
3879 
3880 	return 0;
3881 
3882 irq_error:
3883 	phylink_stop(priv->phylink);
3884 
3885 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3886 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3887 
3888 	stmmac_hw_teardown(dev);
3889 init_error:
3890 	phylink_disconnect_phy(priv->phylink);
3891 init_phy_error:
3892 	pm_runtime_put(priv->device);
3893 	return ret;
3894 }
3895 
3896 static int stmmac_open(struct net_device *dev)
3897 {
3898 	struct stmmac_priv *priv = netdev_priv(dev);
3899 	struct stmmac_dma_conf *dma_conf;
3900 	int ret;
3901 
3902 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3903 	if (IS_ERR(dma_conf))
3904 		return PTR_ERR(dma_conf);
3905 
3906 	ret = __stmmac_open(dev, dma_conf);
3907 	if (ret)
3908 		free_dma_desc_resources(priv, dma_conf);
3909 
3910 	kfree(dma_conf);
3911 	return ret;
3912 }
3913 
3914 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3915 {
3916 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3917 
3918 	if (priv->fpe_wq)
3919 		destroy_workqueue(priv->fpe_wq);
3920 
3921 	netdev_info(priv->dev, "FPE workqueue stop");
3922 }
3923 
3924 /**
3925  *  stmmac_release - close entry point of the driver
3926  *  @dev : device pointer.
3927  *  Description:
3928  *  This is the stop entry point of the driver.
3929  */
3930 static int stmmac_release(struct net_device *dev)
3931 {
3932 	struct stmmac_priv *priv = netdev_priv(dev);
3933 	u32 chan;
3934 
3935 	if (device_may_wakeup(priv->device))
3936 		phylink_speed_down(priv->phylink, false);
3937 	/* Stop and disconnect the PHY */
3938 	phylink_stop(priv->phylink);
3939 	phylink_disconnect_phy(priv->phylink);
3940 
3941 	stmmac_disable_all_queues(priv);
3942 
3943 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3944 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3945 
3946 	netif_tx_disable(dev);
3947 
3948 	/* Free the IRQ lines */
3949 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3950 
3951 	if (priv->eee_enabled) {
3952 		priv->tx_path_in_lpi_mode = false;
3953 		del_timer_sync(&priv->eee_ctrl_timer);
3954 	}
3955 
3956 	/* Stop TX/RX DMA and clear the descriptors */
3957 	stmmac_stop_all_dma(priv);
3958 
3959 	/* Release and free the Rx/Tx resources */
3960 	free_dma_desc_resources(priv, &priv->dma_conf);
3961 
3962 	/* Disable the MAC Rx/Tx */
3963 	stmmac_mac_set(priv, priv->ioaddr, false);
3964 
	/* Power down the SerDes, if present */
3966 	if (priv->plat->serdes_powerdown)
3967 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3968 
3969 	netif_carrier_off(dev);
3970 
3971 	stmmac_release_ptp(priv);
3972 
3973 	pm_runtime_put(priv->device);
3974 
3975 	if (priv->dma_cap.fpesel)
3976 		stmmac_fpe_stop_wq(priv);
3977 
3978 	return 0;
3979 }
3980 
3981 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3982 			       struct stmmac_tx_queue *tx_q)
3983 {
3984 	u16 tag = 0x0, inner_tag = 0x0;
3985 	u32 inner_type = 0x0;
3986 	struct dma_desc *p;
3987 
3988 	if (!priv->dma_cap.vlins)
3989 		return false;
3990 	if (!skb_vlan_tag_present(skb))
3991 		return false;
3992 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3993 		inner_tag = skb_vlan_tag_get(skb);
3994 		inner_type = STMMAC_VLAN_INSERT;
3995 	}
3996 
3997 	tag = skb_vlan_tag_get(skb);
3998 
3999 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4000 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4001 	else
4002 		p = &tx_q->dma_tx[tx_q->cur_tx];
4003 
4004 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4005 		return false;
4006 
4007 	stmmac_set_tx_owner(priv, p);
4008 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4009 	return true;
4010 }
4011 
4012 /**
 *  stmmac_tso_allocator - Allocate TSO TX descriptors for a buffer
4014  *  @priv: driver private structure
4015  *  @des: buffer start address
4016  *  @total_len: total length to fill in descriptors
4017  *  @last_segment: condition for the last descriptor
4018  *  @queue: TX queue index
4019  *  Description:
 *  This function fills descriptors and requests new descriptors according to
 *  the buffer length to fill.
4022  */
4023 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4024 				 int total_len, bool last_segment, u32 queue)
4025 {
4026 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4027 	struct dma_desc *desc;
4028 	u32 buff_size;
4029 	int tmp_len;
4030 
4031 	tmp_len = total_len;
4032 
4033 	while (tmp_len > 0) {
4034 		dma_addr_t curr_addr;
4035 
4036 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4037 						priv->dma_conf.dma_tx_size);
4038 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4039 
4040 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4041 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4042 		else
4043 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4044 
4045 		curr_addr = des + (total_len - tmp_len);
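		/* With a DMA address space of 32 bits or less, write the
		 * buffer address directly into DES0; otherwise let the
		 * descriptor helper program the full address.
		 */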
4046 		if (priv->dma_cap.addr64 <= 32)
4047 			desc->des0 = cpu_to_le32(curr_addr);
4048 		else
4049 			stmmac_set_desc_addr(priv, desc, curr_addr);
4050 
4051 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4052 			    TSO_MAX_BUFF_SIZE : tmp_len;
4053 
4054 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4055 				0, 1,
4056 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4057 				0, 0);
4058 
4059 		tmp_len -= TSO_MAX_BUFF_SIZE;
4060 	}
4061 }
4062 
4063 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4064 {
4065 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4066 	int desc_size;
4067 
4068 	if (likely(priv->extend_desc))
4069 		desc_size = sizeof(struct dma_extended_desc);
4070 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4071 		desc_size = sizeof(struct dma_edesc);
4072 	else
4073 		desc_size = sizeof(struct dma_desc);
4074 
	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and then a barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
4079 	wmb();
4080 
4081 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4082 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4083 }
4084 
4085 /**
4086  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4087  *  @skb : the socket buffer
4088  *  @dev : device pointer
4089  *  Description: this is the transmit function that is called on TSO frames
4090  *  (support available on GMAC4 and newer chips).
 *  Diagram below shows the ring programming in case of TSO frames:
4092  *
4093  *  First Descriptor
4094  *   --------
4095  *   | DES0 |---> buffer1 = L2/L3/L4 header
4096  *   | DES1 |---> TCP Payload (can continue on next descr...)
4097  *   | DES2 |---> buffer 1 and 2 len
4098  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4099  *   --------
4100  *	|
4101  *     ...
4102  *	|
4103  *   --------
4104  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4105  *   | DES1 | --|
4106  *   | DES2 | --> buffer 1 and 2 len
4107  *   | DES3 |
4108  *   --------
4109  *
 * The MSS is usually fixed while TSO is enabled, so the TDES3 context field
 * is only programmed when the MSS value changes.
4111  */
4112 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4113 {
4114 	struct dma_desc *desc, *first, *mss_desc = NULL;
4115 	struct stmmac_priv *priv = netdev_priv(dev);
4116 	int nfrags = skb_shinfo(skb)->nr_frags;
4117 	u32 queue = skb_get_queue_mapping(skb);
4118 	unsigned int first_entry, tx_packets;
4119 	struct stmmac_txq_stats *txq_stats;
4120 	int tmp_pay_len = 0, first_tx;
4121 	struct stmmac_tx_queue *tx_q;
4122 	bool has_vlan, set_ic;
4123 	u8 proto_hdr_len, hdr;
4124 	unsigned long flags;
4125 	u32 pay_len, mss;
4126 	dma_addr_t des;
4127 	int i;
4128 
4129 	tx_q = &priv->dma_conf.tx_queue[queue];
4130 	txq_stats = &priv->xstats.txq_stats[queue];
4131 	first_tx = tx_q->cur_tx;
4132 
4133 	/* Compute header lengths */
4134 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4135 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4136 		hdr = sizeof(struct udphdr);
4137 	} else {
4138 		proto_hdr_len = skb_tcp_all_headers(skb);
4139 		hdr = tcp_hdrlen(skb);
4140 	}
4141 
	/* Descriptor availability based on the threshold should be safe enough */
4143 	if (unlikely(stmmac_tx_avail(priv, queue) <
4144 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4145 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4146 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4147 								queue));
4148 			/* This is a hard error, log it. */
4149 			netdev_err(priv->dev,
4150 				   "%s: Tx Ring full when queue awake\n",
4151 				   __func__);
4152 		}
4153 		return NETDEV_TX_BUSY;
4154 	}
4155 
4156 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4157 
4158 	mss = skb_shinfo(skb)->gso_size;
4159 
4160 	/* set new MSS value if needed */
4161 	if (mss != tx_q->mss) {
4162 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4163 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4164 		else
4165 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4166 
4167 		stmmac_set_mss(priv, mss_desc, mss);
4168 		tx_q->mss = mss;
4169 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4170 						priv->dma_conf.dma_tx_size);
4171 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4172 	}
4173 
4174 	if (netif_msg_tx_queued(priv)) {
4175 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4176 			__func__, hdr, proto_hdr_len, pay_len, mss);
4177 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4178 			skb->data_len);
4179 	}
4180 
4181 	/* Check if VLAN can be inserted by HW */
4182 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4183 
4184 	first_entry = tx_q->cur_tx;
4185 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4186 
4187 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4188 		desc = &tx_q->dma_entx[first_entry].basic;
4189 	else
4190 		desc = &tx_q->dma_tx[first_entry];
4191 	first = desc;
4192 
4193 	if (has_vlan)
4194 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4195 
4196 	/* first descriptor: fill Headers on Buf1 */
4197 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4198 			     DMA_TO_DEVICE);
4199 	if (dma_mapping_error(priv->device, des))
4200 		goto dma_map_err;
4201 
4202 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4203 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4204 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4205 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4206 
4207 	if (priv->dma_cap.addr64 <= 32) {
4208 		first->des0 = cpu_to_le32(des);
4209 
4210 		/* Fill start of payload in buff2 of first descriptor */
4211 		if (pay_len)
4212 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4213 
4214 		/* If needed take extra descriptors to fill the remaining payload */
4215 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4216 	} else {
4217 		stmmac_set_desc_addr(priv, first, des);
4218 		tmp_pay_len = pay_len;
4219 		des += proto_hdr_len;
4220 		pay_len = 0;
4221 	}
4222 
4223 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4224 
4225 	/* Prepare fragments */
4226 	for (i = 0; i < nfrags; i++) {
4227 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4228 
4229 		des = skb_frag_dma_map(priv->device, frag, 0,
4230 				       skb_frag_size(frag),
4231 				       DMA_TO_DEVICE);
4232 		if (dma_mapping_error(priv->device, des))
4233 			goto dma_map_err;
4234 
4235 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4236 				     (i == nfrags - 1), queue);
4237 
4238 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4239 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4240 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4241 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4242 	}
4243 
4244 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4245 
4246 	/* Only the last descriptor gets to point to the skb. */
4247 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4248 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4249 
4250 	/* Manage tx mitigation */
4251 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4252 	tx_q->tx_count_frames += tx_packets;
4253 
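	/* Request a Tx completion interrupt when HW timestamping is needed or
	 * when the frame coalescing threshold is crossed; otherwise rely on
	 * the Tx coalescing timer.
	 */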
4254 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4255 		set_ic = true;
4256 	else if (!priv->tx_coal_frames[queue])
4257 		set_ic = false;
4258 	else if (tx_packets > priv->tx_coal_frames[queue])
4259 		set_ic = true;
4260 	else if ((tx_q->tx_count_frames %
4261 		  priv->tx_coal_frames[queue]) < tx_packets)
4262 		set_ic = true;
4263 	else
4264 		set_ic = false;
4265 
4266 	if (set_ic) {
4267 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4268 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4269 		else
4270 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4271 
4272 		tx_q->tx_count_frames = 0;
4273 		stmmac_set_tx_ic(priv, desc);
4274 	}
4275 
4276 	/* We've used all descriptors we need for this skb, however,
4277 	 * advance cur_tx so that it references a fresh descriptor.
4278 	 * ndo_start_xmit will fill this descriptor the next time it's
4279 	 * called and stmmac_tx_clean may clean up to this descriptor.
4280 	 */
4281 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4282 
4283 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4284 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4285 			  __func__);
4286 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4287 	}
4288 
4289 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4290 	txq_stats->tx_bytes += skb->len;
4291 	txq_stats->tx_tso_frames++;
4292 	txq_stats->tx_tso_nfrags += nfrags;
4293 	if (set_ic)
4294 		txq_stats->tx_set_ic_bit++;
4295 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4296 
4297 	if (priv->sarc_type)
4298 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4299 
4300 	skb_tx_timestamp(skb);
4301 
4302 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4303 		     priv->hwts_tx_en)) {
4304 		/* declare that device is doing timestamping */
4305 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4306 		stmmac_enable_tx_timestamp(priv, first);
4307 	}
4308 
4309 	/* Complete the first descriptor before granting the DMA */
4310 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4311 			proto_hdr_len,
4312 			pay_len,
4313 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4314 			hdr / 4, (skb->len - proto_hdr_len));
4315 
4316 	/* If context desc is used to change MSS */
4317 	if (mss_desc) {
		/* Make sure that the first descriptor has been completely
		 * written, including its own bit. This is because the MSS
		 * descriptor actually comes before the first descriptor, so
		 * we need to make sure that the MSS's own bit is the last
		 * thing written.
		 */
4323 		dma_wmb();
4324 		stmmac_set_tx_owner(priv, mss_desc);
4325 	}
4326 
4327 	if (netif_msg_pktdata(priv)) {
4328 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4329 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4330 			tx_q->cur_tx, first, nfrags);
4331 		pr_info(">>> frame to be transmitted: ");
4332 		print_pkt(skb->data, skb_headlen(skb));
4333 	}
4334 
4335 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4336 
4337 	stmmac_flush_tx_descriptors(priv, queue);
4338 	stmmac_tx_timer_arm(priv, queue);
4339 
4340 	return NETDEV_TX_OK;
4341 
4342 dma_map_err:
4343 	dev_err(priv->device, "Tx dma map failed\n");
4344 	dev_kfree_skb(skb);
4345 	priv->xstats.tx_dropped++;
4346 	return NETDEV_TX_OK;
4347 }
4348 
4349 /**
4350  *  stmmac_xmit - Tx entry point of the driver
4351  *  @skb : the socket buffer
4352  *  @dev : device pointer
4353  *  Description : this is the tx entry point of the driver.
4354  *  It programs the chain or the ring and supports oversized frames
4355  *  and SG feature.
4356  */
4357 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4358 {
4359 	unsigned int first_entry, tx_packets, enh_desc;
4360 	struct stmmac_priv *priv = netdev_priv(dev);
4361 	unsigned int nopaged_len = skb_headlen(skb);
4362 	int i, csum_insertion = 0, is_jumbo = 0;
4363 	u32 queue = skb_get_queue_mapping(skb);
4364 	int nfrags = skb_shinfo(skb)->nr_frags;
4365 	int gso = skb_shinfo(skb)->gso_type;
4366 	struct stmmac_txq_stats *txq_stats;
4367 	struct dma_edesc *tbs_desc = NULL;
4368 	struct dma_desc *desc, *first;
4369 	struct stmmac_tx_queue *tx_q;
4370 	bool has_vlan, set_ic;
4371 	int entry, first_tx;
4372 	unsigned long flags;
4373 	dma_addr_t des;
4374 
4375 	tx_q = &priv->dma_conf.tx_queue[queue];
4376 	txq_stats = &priv->xstats.txq_stats[queue];
4377 	first_tx = tx_q->cur_tx;
4378 
4379 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4380 		stmmac_disable_eee_mode(priv);
4381 
4382 	/* Manage oversized TCP frames for GMAC4 device */
4383 	if (skb_is_gso(skb) && priv->tso) {
4384 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4385 			return stmmac_tso_xmit(skb, dev);
4386 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4387 			return stmmac_tso_xmit(skb, dev);
4388 	}
4389 
4390 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4391 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4392 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4393 								queue));
4394 			/* This is a hard error, log it. */
4395 			netdev_err(priv->dev,
4396 				   "%s: Tx Ring full when queue awake\n",
4397 				   __func__);
4398 		}
4399 		return NETDEV_TX_BUSY;
4400 	}
4401 
4402 	/* Check if VLAN can be inserted by HW */
4403 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4404 
4405 	entry = tx_q->cur_tx;
4406 	first_entry = entry;
4407 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4408 
4409 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4410 
4411 	if (likely(priv->extend_desc))
4412 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4413 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4414 		desc = &tx_q->dma_entx[entry].basic;
4415 	else
4416 		desc = tx_q->dma_tx + entry;
4417 
4418 	first = desc;
4419 
4420 	if (has_vlan)
4421 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4422 
4423 	enh_desc = priv->plat->enh_desc;
4424 	/* To program the descriptors according to the size of the frame */
4425 	if (enh_desc)
4426 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4427 
4428 	if (unlikely(is_jumbo)) {
4429 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4430 		if (unlikely(entry < 0) && (entry != -EINVAL))
4431 			goto dma_map_err;
4432 	}
4433 
4434 	for (i = 0; i < nfrags; i++) {
4435 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4436 		int len = skb_frag_size(frag);
4437 		bool last_segment = (i == (nfrags - 1));
4438 
4439 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4440 		WARN_ON(tx_q->tx_skbuff[entry]);
4441 
4442 		if (likely(priv->extend_desc))
4443 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4444 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4445 			desc = &tx_q->dma_entx[entry].basic;
4446 		else
4447 			desc = tx_q->dma_tx + entry;
4448 
4449 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4450 				       DMA_TO_DEVICE);
4451 		if (dma_mapping_error(priv->device, des))
4452 			goto dma_map_err; /* should reuse desc w/o issues */
4453 
4454 		tx_q->tx_skbuff_dma[entry].buf = des;
4455 
4456 		stmmac_set_desc_addr(priv, desc, des);
4457 
4458 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4459 		tx_q->tx_skbuff_dma[entry].len = len;
4460 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4461 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4462 
4463 		/* Prepare the descriptor and set the own bit too */
4464 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4465 				priv->mode, 1, last_segment, skb->len);
4466 	}
4467 
4468 	/* Only the last descriptor gets to point to the skb. */
4469 	tx_q->tx_skbuff[entry] = skb;
4470 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4471 
	/* According to the coalesce parameter, the IC bit for the latest
	 * segment is reset and the timer is restarted to clean the Tx status.
	 * This approach takes care of the fragments: desc is the first
	 * element in case of no SG.
	 */
4477 	tx_packets = (entry + 1) - first_tx;
4478 	tx_q->tx_count_frames += tx_packets;
4479 
4480 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4481 		set_ic = true;
4482 	else if (!priv->tx_coal_frames[queue])
4483 		set_ic = false;
4484 	else if (tx_packets > priv->tx_coal_frames[queue])
4485 		set_ic = true;
4486 	else if ((tx_q->tx_count_frames %
4487 		  priv->tx_coal_frames[queue]) < tx_packets)
4488 		set_ic = true;
4489 	else
4490 		set_ic = false;
4491 
4492 	if (set_ic) {
4493 		if (likely(priv->extend_desc))
4494 			desc = &tx_q->dma_etx[entry].basic;
4495 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4496 			desc = &tx_q->dma_entx[entry].basic;
4497 		else
4498 			desc = &tx_q->dma_tx[entry];
4499 
4500 		tx_q->tx_count_frames = 0;
4501 		stmmac_set_tx_ic(priv, desc);
4502 	}
4503 
4504 	/* We've used all descriptors we need for this skb, however,
4505 	 * advance cur_tx so that it references a fresh descriptor.
4506 	 * ndo_start_xmit will fill this descriptor the next time it's
4507 	 * called and stmmac_tx_clean may clean up to this descriptor.
4508 	 */
4509 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4510 	tx_q->cur_tx = entry;
4511 
4512 	if (netif_msg_pktdata(priv)) {
4513 		netdev_dbg(priv->dev,
4514 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4515 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4516 			   entry, first, nfrags);
4517 
4518 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4519 		print_pkt(skb->data, skb->len);
4520 	}
4521 
4522 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4523 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4524 			  __func__);
4525 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4526 	}
4527 
4528 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4529 	txq_stats->tx_bytes += skb->len;
4530 	if (set_ic)
4531 		txq_stats->tx_set_ic_bit++;
4532 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4533 
4534 	if (priv->sarc_type)
4535 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4536 
4537 	skb_tx_timestamp(skb);
4538 
4539 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4540 	 * problems because all the descriptors are actually ready to be
4541 	 * passed to the DMA engine.
4542 	 */
4543 	if (likely(!is_jumbo)) {
4544 		bool last_segment = (nfrags == 0);
4545 
4546 		des = dma_map_single(priv->device, skb->data,
4547 				     nopaged_len, DMA_TO_DEVICE);
4548 		if (dma_mapping_error(priv->device, des))
4549 			goto dma_map_err;
4550 
4551 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4552 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4553 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4554 
4555 		stmmac_set_desc_addr(priv, first, des);
4556 
4557 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4558 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4559 
4560 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4561 			     priv->hwts_tx_en)) {
4562 			/* declare that device is doing timestamping */
4563 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4564 			stmmac_enable_tx_timestamp(priv, first);
4565 		}
4566 
4567 		/* Prepare the first descriptor setting the OWN bit too */
4568 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4569 				csum_insertion, priv->mode, 0, last_segment,
4570 				skb->len);
4571 	}
4572 
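	/* With TBS enabled, program the frame's launch time (skb->tstamp)
	 * into the enhanced transmit descriptor.
	 */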
4573 	if (tx_q->tbs & STMMAC_TBS_EN) {
4574 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4575 
4576 		tbs_desc = &tx_q->dma_entx[first_entry];
4577 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4578 	}
4579 
4580 	stmmac_set_tx_owner(priv, first);
4581 
4582 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4583 
4584 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4585 
4586 	stmmac_flush_tx_descriptors(priv, queue);
4587 	stmmac_tx_timer_arm(priv, queue);
4588 
4589 	return NETDEV_TX_OK;
4590 
4591 dma_map_err:
4592 	netdev_err(priv->dev, "Tx DMA map failed\n");
4593 	dev_kfree_skb(skb);
4594 	priv->xstats.tx_dropped++;
4595 	return NETDEV_TX_OK;
4596 }
4597 
4598 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4599 {
4600 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4601 	__be16 vlan_proto = veth->h_vlan_proto;
4602 	u16 vlanid;
4603 
4604 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4605 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4606 	    (vlan_proto == htons(ETH_P_8021AD) &&
4607 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4608 		/* pop the vlan tag */
4609 		vlanid = ntohs(veth->h_vlan_TCI);
4610 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4611 		skb_pull(skb, VLAN_HLEN);
4612 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4613 	}
4614 }
4615 
4616 /**
4617  * stmmac_rx_refill - refill used skb preallocated buffers
4618  * @priv: driver private structure
4619  * @queue: RX queue index
 * Description: this reallocates the RX buffers used by the zero-copy
 * reception process.
4622  */
4623 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4624 {
4625 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4626 	int dirty = stmmac_rx_dirty(priv, queue);
4627 	unsigned int entry = rx_q->dirty_rx;
4628 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4629 
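	/* Hosts limited to 32-bit DMA addressing must allocate RX pages from
	 * the DMA32 zone.
	 */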
4630 	if (priv->dma_cap.host_dma_width <= 32)
4631 		gfp |= GFP_DMA32;
4632 
4633 	while (dirty-- > 0) {
4634 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4635 		struct dma_desc *p;
4636 		bool use_rx_wd;
4637 
4638 		if (priv->extend_desc)
4639 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4640 		else
4641 			p = rx_q->dma_rx + entry;
4642 
4643 		if (!buf->page) {
4644 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4645 			if (!buf->page)
4646 				break;
4647 		}
4648 
4649 		if (priv->sph && !buf->sec_page) {
4650 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4651 			if (!buf->sec_page)
4652 				break;
4653 
4654 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4655 		}
4656 
4657 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4658 
4659 		stmmac_set_desc_addr(priv, p, buf->addr);
4660 		if (priv->sph)
4661 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4662 		else
4663 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4664 		stmmac_refill_desc3(priv, rx_q, p);
4665 
4666 		rx_q->rx_count_frames++;
4667 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4668 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4669 			rx_q->rx_count_frames = 0;
4670 
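		/* The RX watchdog can only be used when RIWT is enabled;
		 * otherwise the descriptor interrupts on completion.
		 */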
4671 		use_rx_wd = !priv->rx_coal_frames[queue];
4672 		use_rx_wd |= rx_q->rx_count_frames > 0;
4673 		if (!priv->use_riwt)
4674 			use_rx_wd = false;
4675 
4676 		dma_wmb();
4677 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4678 
4679 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4680 	}
4681 	rx_q->dirty_rx = entry;
4682 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4683 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4684 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4685 }
4686 
4687 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4688 				       struct dma_desc *p,
4689 				       int status, unsigned int len)
4690 {
4691 	unsigned int plen = 0, hlen = 0;
4692 	int coe = priv->hw->rx_csum;
4693 
	/* Not the first descriptor: with split header, buffer 1 is empty */
4695 	if (priv->sph && len)
4696 		return 0;
4697 
4698 	/* First descriptor, get split header length */
4699 	stmmac_get_rx_header_len(priv, p, &hlen);
4700 	if (priv->sph && hlen) {
4701 		priv->xstats.rx_split_hdr_pkt_n++;
4702 		return hlen;
4703 	}
4704 
4705 	/* First descriptor, not last descriptor and not split header */
4706 	if (status & rx_not_ls)
4707 		return priv->dma_conf.dma_buf_sz;
4708 
4709 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4710 
4711 	/* First descriptor and last descriptor and not split header */
4712 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4713 }
4714 
4715 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4716 				       struct dma_desc *p,
4717 				       int status, unsigned int len)
4718 {
4719 	int coe = priv->hw->rx_csum;
4720 	unsigned int plen = 0;
4721 
	/* Without split header, buffer 2 is not used */
4723 	if (!priv->sph)
4724 		return 0;
4725 
4726 	/* Not last descriptor */
4727 	if (status & rx_not_ls)
4728 		return priv->dma_conf.dma_buf_sz;
4729 
4730 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4731 
4732 	/* Last descriptor */
4733 	return plen - len;
4734 }
4735 
4736 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4737 				struct xdp_frame *xdpf, bool dma_map)
4738 {
4739 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4740 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4741 	unsigned int entry = tx_q->cur_tx;
4742 	struct dma_desc *tx_desc;
4743 	dma_addr_t dma_addr;
4744 	bool set_ic;
4745 
4746 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4747 		return STMMAC_XDP_CONSUMED;
4748 
4749 	if (likely(priv->extend_desc))
4750 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4751 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4752 		tx_desc = &tx_q->dma_entx[entry].basic;
4753 	else
4754 		tx_desc = tx_q->dma_tx + entry;
4755 
4756 	if (dma_map) {
4757 		dma_addr = dma_map_single(priv->device, xdpf->data,
4758 					  xdpf->len, DMA_TO_DEVICE);
4759 		if (dma_mapping_error(priv->device, dma_addr))
4760 			return STMMAC_XDP_CONSUMED;
4761 
4762 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4763 	} else {
4764 		struct page *page = virt_to_page(xdpf->data);
4765 
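		/* XDP_TX: the frame still lives in a page_pool page, so
		 * derive its DMA address from the page, skipping the
		 * xdp_frame struct and the XDP headroom.
		 */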
4766 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4767 			   xdpf->headroom;
4768 		dma_sync_single_for_device(priv->device, dma_addr,
4769 					   xdpf->len, DMA_BIDIRECTIONAL);
4770 
4771 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4772 	}
4773 
4774 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4775 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4776 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4777 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4778 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4779 
4780 	tx_q->xdpf[entry] = xdpf;
4781 
4782 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4783 
4784 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4785 			       true, priv->mode, true, true,
4786 			       xdpf->len);
4787 
4788 	tx_q->tx_count_frames++;
4789 
4790 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4791 		set_ic = true;
4792 	else
4793 		set_ic = false;
4794 
4795 	if (set_ic) {
		unsigned long flags;

		tx_q->tx_count_frames = 0;
4798 		stmmac_set_tx_ic(priv, tx_desc);
4799 		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4800 		txq_stats->tx_set_ic_bit++;
4801 		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4802 	}
4803 
4804 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4805 
4806 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4807 	tx_q->cur_tx = entry;
4808 
4809 	return STMMAC_XDP_TX;
4810 }
4811 
4812 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4813 				   int cpu)
4814 {
4815 	int index = cpu;
4816 
4817 	if (unlikely(index < 0))
4818 		index = 0;
4819 
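	/* Fold the CPU number into the range of available Tx queues */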
4820 	while (index >= priv->plat->tx_queues_to_use)
4821 		index -= priv->plat->tx_queues_to_use;
4822 
4823 	return index;
4824 }
4825 
4826 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4827 				struct xdp_buff *xdp)
4828 {
4829 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4830 	int cpu = smp_processor_id();
4831 	struct netdev_queue *nq;
4832 	int queue;
4833 	int res;
4834 
4835 	if (unlikely(!xdpf))
4836 		return STMMAC_XDP_CONSUMED;
4837 
4838 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4839 	nq = netdev_get_tx_queue(priv->dev, queue);
4840 
4841 	__netif_tx_lock(nq, cpu);
4842 	/* Avoids TX time-out as we are sharing with slow path */
4843 	txq_trans_cond_update(nq);
4844 
4845 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4846 	if (res == STMMAC_XDP_TX)
4847 		stmmac_flush_tx_descriptors(priv, queue);
4848 
4849 	__netif_tx_unlock(nq);
4850 
4851 	return res;
4852 }
4853 
4854 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4855 				 struct bpf_prog *prog,
4856 				 struct xdp_buff *xdp)
4857 {
4858 	u32 act;
4859 	int res;
4860 
4861 	act = bpf_prog_run_xdp(prog, xdp);
4862 	switch (act) {
4863 	case XDP_PASS:
4864 		res = STMMAC_XDP_PASS;
4865 		break;
4866 	case XDP_TX:
4867 		res = stmmac_xdp_xmit_back(priv, xdp);
4868 		break;
4869 	case XDP_REDIRECT:
4870 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4871 			res = STMMAC_XDP_CONSUMED;
4872 		else
4873 			res = STMMAC_XDP_REDIRECT;
4874 		break;
4875 	default:
4876 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4877 		fallthrough;
4878 	case XDP_ABORTED:
4879 		trace_xdp_exception(priv->dev, prog, act);
4880 		fallthrough;
4881 	case XDP_DROP:
4882 		res = STMMAC_XDP_CONSUMED;
4883 		break;
4884 	}
4885 
4886 	return res;
4887 }
4888 
4889 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4890 					   struct xdp_buff *xdp)
4891 {
4892 	struct bpf_prog *prog;
4893 	int res;
4894 
4895 	prog = READ_ONCE(priv->xdp_prog);
4896 	if (!prog) {
4897 		res = STMMAC_XDP_PASS;
4898 		goto out;
4899 	}
4900 
4901 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4902 out:
4903 	return ERR_PTR(-res);
4904 }
4905 
4906 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4907 				   int xdp_status)
4908 {
4909 	int cpu = smp_processor_id();
4910 	int queue;
4911 
4912 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4913 
4914 	if (xdp_status & STMMAC_XDP_TX)
4915 		stmmac_tx_timer_arm(priv, queue);
4916 
4917 	if (xdp_status & STMMAC_XDP_REDIRECT)
4918 		xdp_do_flush();
4919 }
4920 
4921 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4922 					       struct xdp_buff *xdp)
4923 {
4924 	unsigned int metasize = xdp->data - xdp->data_meta;
4925 	unsigned int datasize = xdp->data_end - xdp->data;
4926 	struct sk_buff *skb;
4927 
4928 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4929 			       xdp->data_end - xdp->data_hard_start,
4930 			       GFP_ATOMIC | __GFP_NOWARN);
4931 	if (unlikely(!skb))
4932 		return NULL;
4933 
4934 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4935 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4936 	if (metasize)
4937 		skb_metadata_set(skb, metasize);
4938 
4939 	return skb;
4940 }
4941 
4942 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4943 				   struct dma_desc *p, struct dma_desc *np,
4944 				   struct xdp_buff *xdp)
4945 {
4946 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4947 	struct stmmac_channel *ch = &priv->channel[queue];
4948 	unsigned int len = xdp->data_end - xdp->data;
4949 	enum pkt_hash_types hash_type;
4950 	int coe = priv->hw->rx_csum;
4951 	unsigned long flags;
4952 	struct sk_buff *skb;
4953 	u32 hash;
4954 
4955 	skb = stmmac_construct_skb_zc(ch, xdp);
4956 	if (!skb) {
4957 		priv->xstats.rx_dropped++;
4958 		return;
4959 	}
4960 
4961 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4962 	stmmac_rx_vlan(priv->dev, skb);
4963 	skb->protocol = eth_type_trans(skb, priv->dev);
4964 
4965 	if (unlikely(!coe))
4966 		skb_checksum_none_assert(skb);
4967 	else
4968 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4969 
4970 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4971 		skb_set_hash(skb, hash, hash_type);
4972 
4973 	skb_record_rx_queue(skb, queue);
4974 	napi_gro_receive(&ch->rxtx_napi, skb);
4975 
4976 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
4977 	rxq_stats->rx_pkt_n++;
4978 	rxq_stats->rx_bytes += len;
4979 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
4980 }
4981 
4982 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4983 {
4984 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4985 	unsigned int entry = rx_q->dirty_rx;
4986 	struct dma_desc *rx_desc = NULL;
4987 	bool ret = true;
4988 
4989 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4990 
4991 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4992 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4993 		dma_addr_t dma_addr;
4994 		bool use_rx_wd;
4995 
4996 		if (!buf->xdp) {
4997 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4998 			if (!buf->xdp) {
4999 				ret = false;
5000 				break;
5001 			}
5002 		}
5003 
5004 		if (priv->extend_desc)
5005 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5006 		else
5007 			rx_desc = rx_q->dma_rx + entry;
5008 
5009 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5010 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5011 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5012 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5013 
5014 		rx_q->rx_count_frames++;
5015 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5016 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5017 			rx_q->rx_count_frames = 0;
5018 
5019 		use_rx_wd = !priv->rx_coal_frames[queue];
5020 		use_rx_wd |= rx_q->rx_count_frames > 0;
5021 		if (!priv->use_riwt)
5022 			use_rx_wd = false;
5023 
5024 		dma_wmb();
5025 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5026 
5027 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5028 	}
5029 
5030 	if (rx_desc) {
5031 		rx_q->dirty_rx = entry;
5032 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5033 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5034 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5035 	}
5036 
5037 	return ret;
5038 }
5039 
5040 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5041 {
5042 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5043 	 * to represent incoming packet, whereas cb field in the same structure
5044 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5045 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5046 	 */
5047 	return (struct stmmac_xdp_buff *)xdp;
5048 }
5049 
5050 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5051 {
5052 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5053 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5054 	unsigned int count = 0, error = 0, len = 0;
5055 	int dirty = stmmac_rx_dirty(priv, queue);
5056 	unsigned int next_entry = rx_q->cur_rx;
5057 	u32 rx_errors = 0, rx_dropped = 0;
5058 	unsigned int desc_size;
5059 	struct bpf_prog *prog;
5060 	bool failure = false;
5061 	unsigned long flags;
5062 	int xdp_status = 0;
5063 	int status = 0;
5064 
5065 	if (netif_msg_rx_status(priv)) {
5066 		void *rx_head;
5067 
5068 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5069 		if (priv->extend_desc) {
5070 			rx_head = (void *)rx_q->dma_erx;
5071 			desc_size = sizeof(struct dma_extended_desc);
5072 		} else {
5073 			rx_head = (void *)rx_q->dma_rx;
5074 			desc_size = sizeof(struct dma_desc);
5075 		}
5076 
5077 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5078 				    rx_q->dma_rx_phy, desc_size);
5079 	}
5080 	while (count < limit) {
5081 		struct stmmac_rx_buffer *buf;
5082 		struct stmmac_xdp_buff *ctx;
5083 		unsigned int buf1_len = 0;
5084 		struct dma_desc *np, *p;
5085 		int entry;
5086 		int res;
5087 
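		/* If the previous poll stopped in the middle of a frame,
		 * resume from the saved error and length state.
		 */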
5088 		if (!count && rx_q->state_saved) {
5089 			error = rx_q->state.error;
5090 			len = rx_q->state.len;
5091 		} else {
5092 			rx_q->state_saved = false;
5093 			error = 0;
5094 			len = 0;
5095 		}
5096 
5097 		if (count >= limit)
5098 			break;
5099 
5100 read_again:
5101 		buf1_len = 0;
5102 		entry = next_entry;
5103 		buf = &rx_q->buf_pool[entry];
5104 
5105 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5106 			failure = failure ||
5107 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5108 			dirty = 0;
5109 		}
5110 
5111 		if (priv->extend_desc)
5112 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5113 		else
5114 			p = rx_q->dma_rx + entry;
5115 
5116 		/* read the status of the incoming frame */
5117 		status = stmmac_rx_status(priv, &priv->xstats, p);
5118 		/* check if managed by the DMA otherwise go ahead */
5119 		if (unlikely(status & dma_own))
5120 			break;
5121 
5122 		/* Prefetch the next RX descriptor */
5123 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5124 						priv->dma_conf.dma_rx_size);
5125 		next_entry = rx_q->cur_rx;
5126 
5127 		if (priv->extend_desc)
5128 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5129 		else
5130 			np = rx_q->dma_rx + next_entry;
5131 
5132 		prefetch(np);
5133 
		/* Ensure a valid XSK buffer before proceeding */
5135 		if (!buf->xdp)
5136 			break;
5137 
5138 		if (priv->extend_desc)
5139 			stmmac_rx_extended_status(priv, &priv->xstats,
5140 						  rx_q->dma_erx + entry);
5141 		if (unlikely(status == discard_frame)) {
5142 			xsk_buff_free(buf->xdp);
5143 			buf->xdp = NULL;
5144 			dirty++;
5145 			error = 1;
5146 			if (!priv->hwts_rx_en)
5147 				rx_errors++;
5148 		}
5149 
5150 		if (unlikely(error && (status & rx_not_ls)))
5151 			goto read_again;
5152 		if (unlikely(error)) {
5153 			count++;
5154 			continue;
5155 		}
5156 
5157 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5158 		if (likely(status & rx_not_ls)) {
5159 			xsk_buff_free(buf->xdp);
5160 			buf->xdp = NULL;
5161 			dirty++;
5162 			count++;
5163 			goto read_again;
5164 		}
5165 
5166 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5167 		ctx->priv = priv;
5168 		ctx->desc = p;
5169 		ctx->ndesc = np;
5170 
		/* XDP ZC frames only support the primary buffer for now */
5172 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5173 		len += buf1_len;
5174 
5175 		/* ACS is disabled; strip manually. */
5176 		if (likely(!(status & rx_not_ls))) {
5177 			buf1_len -= ETH_FCS_LEN;
5178 			len -= ETH_FCS_LEN;
5179 		}
5180 
		/* RX buffer is good and fits into an XSK pool buffer */
5182 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5183 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5184 
5185 		prog = READ_ONCE(priv->xdp_prog);
5186 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5187 
5188 		switch (res) {
5189 		case STMMAC_XDP_PASS:
5190 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5191 			xsk_buff_free(buf->xdp);
5192 			break;
5193 		case STMMAC_XDP_CONSUMED:
5194 			xsk_buff_free(buf->xdp);
5195 			rx_dropped++;
5196 			break;
5197 		case STMMAC_XDP_TX:
5198 		case STMMAC_XDP_REDIRECT:
5199 			xdp_status |= res;
5200 			break;
5201 		}
5202 
5203 		buf->xdp = NULL;
5204 		dirty++;
5205 		count++;
5206 	}
5207 
5208 	if (status & rx_not_ls) {
5209 		rx_q->state_saved = true;
5210 		rx_q->state.error = error;
5211 		rx_q->state.len = len;
5212 	}
5213 
5214 	stmmac_finalize_xdp_rx(priv, xdp_status);
5215 
5216 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5217 	rxq_stats->rx_pkt_n += count;
5218 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5219 
5220 	priv->xstats.rx_dropped += rx_dropped;
5221 	priv->xstats.rx_errors += rx_errors;
5222 
5223 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5224 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5225 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5226 		else
5227 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5228 
5229 		return (int)count;
5230 	}
5231 
5232 	return failure ? limit : (int)count;
5233 }
5234 
5235 /**
5236  * stmmac_rx - manage the receive process
5237  * @priv: driver private structure
5238  * @limit: napi budget
5239  * @queue: RX queue index.
5240  * Description: this is the function called by the napi poll method.
5241  * It gets all the frames inside the ring.
5242  */
5243 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5244 {
5245 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5246 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5247 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5248 	struct stmmac_channel *ch = &priv->channel[queue];
5249 	unsigned int count = 0, error = 0, len = 0;
5250 	int status = 0, coe = priv->hw->rx_csum;
5251 	unsigned int next_entry = rx_q->cur_rx;
5252 	enum dma_data_direction dma_dir;
5253 	unsigned int desc_size;
5254 	struct sk_buff *skb = NULL;
5255 	struct stmmac_xdp_buff ctx;
5256 	unsigned long flags;
5257 	int xdp_status = 0;
5258 	int buf_sz;
5259 
5260 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5261 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5262 
5263 	if (netif_msg_rx_status(priv)) {
5264 		void *rx_head;
5265 
5266 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5267 		if (priv->extend_desc) {
5268 			rx_head = (void *)rx_q->dma_erx;
5269 			desc_size = sizeof(struct dma_extended_desc);
5270 		} else {
5271 			rx_head = (void *)rx_q->dma_rx;
5272 			desc_size = sizeof(struct dma_desc);
5273 		}
5274 
5275 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5276 				    rx_q->dma_rx_phy, desc_size);
5277 	}
5278 	while (count < limit) {
5279 		unsigned int buf1_len = 0, buf2_len = 0;
5280 		enum pkt_hash_types hash_type;
5281 		struct stmmac_rx_buffer *buf;
5282 		struct dma_desc *np, *p;
5283 		int entry;
5284 		u32 hash;
5285 
5286 		if (!count && rx_q->state_saved) {
5287 			skb = rx_q->state.skb;
5288 			error = rx_q->state.error;
5289 			len = rx_q->state.len;
5290 		} else {
5291 			rx_q->state_saved = false;
5292 			skb = NULL;
5293 			error = 0;
5294 			len = 0;
5295 		}
5296 
5297 		if (count >= limit)
5298 			break;
5299 
5300 read_again:
5301 		buf1_len = 0;
5302 		buf2_len = 0;
5303 		entry = next_entry;
5304 		buf = &rx_q->buf_pool[entry];
5305 
5306 		if (priv->extend_desc)
5307 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5308 		else
5309 			p = rx_q->dma_rx + entry;
5310 
5311 		/* read the status of the incoming frame */
5312 		status = stmmac_rx_status(priv, &priv->xstats, p);
5313 		/* check if managed by the DMA otherwise go ahead */
5314 		if (unlikely(status & dma_own))
5315 			break;
5316 
5317 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5318 						priv->dma_conf.dma_rx_size);
5319 		next_entry = rx_q->cur_rx;
5320 
5321 		if (priv->extend_desc)
5322 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5323 		else
5324 			np = rx_q->dma_rx + next_entry;
5325 
5326 		prefetch(np);
5327 
5328 		if (priv->extend_desc)
5329 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5330 		if (unlikely(status == discard_frame)) {
5331 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5332 			buf->page = NULL;
5333 			error = 1;
5334 			if (!priv->hwts_rx_en)
5335 				rx_errors++;
5336 		}
5337 
5338 		if (unlikely(error && (status & rx_not_ls)))
5339 			goto read_again;
5340 		if (unlikely(error)) {
5341 			dev_kfree_skb(skb);
5342 			skb = NULL;
5343 			count++;
5344 			continue;
5345 		}
5346 
5347 		/* Buffer is good. Go on. */
5348 
5349 		prefetch(page_address(buf->page) + buf->page_offset);
5350 		if (buf->sec_page)
5351 			prefetch(page_address(buf->sec_page));
5352 
5353 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5354 		len += buf1_len;
5355 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5356 		len += buf2_len;
5357 
5358 		/* ACS is disabled; strip manually. */
5359 		if (likely(!(status & rx_not_ls))) {
5360 			if (buf2_len) {
5361 				buf2_len -= ETH_FCS_LEN;
5362 				len -= ETH_FCS_LEN;
5363 			} else if (buf1_len) {
5364 				buf1_len -= ETH_FCS_LEN;
5365 				len -= ETH_FCS_LEN;
5366 			}
5367 		}
5368 
5369 		if (!skb) {
5370 			unsigned int pre_len, sync_len;
5371 
5372 			dma_sync_single_for_cpu(priv->device, buf->addr,
5373 						buf1_len, dma_dir);
5374 
5375 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5376 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5377 					 buf->page_offset, buf1_len, true);
5378 
5379 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5380 				  buf->page_offset;
5381 
5382 			ctx.priv = priv;
5383 			ctx.desc = p;
5384 			ctx.ndesc = np;
5385 
5386 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5387 			/* Due to xdp_adjust_tail: the DMA sync for_device
5388 			 * must cover the max length the CPU touched
5389 			 */
5390 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5391 				   buf->page_offset;
5392 			sync_len = max(sync_len, pre_len);
5393 
5394 			/* For non-XDP_PASS verdicts */
5395 			if (IS_ERR(skb)) {
5396 				unsigned int xdp_res = -PTR_ERR(skb);
5397 
5398 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5399 					page_pool_put_page(rx_q->page_pool,
5400 							   virt_to_head_page(ctx.xdp.data),
5401 							   sync_len, true);
5402 					buf->page = NULL;
5403 					rx_dropped++;
5404 
5405 					/* Clear skb, as it carries an error
5406 					 * status set by the XDP program.
5407 					 */
5408 					skb = NULL;
5409 
5410 					if (unlikely((status & rx_not_ls)))
5411 						goto read_again;
5412 
5413 					count++;
5414 					continue;
5415 				} else if (xdp_res & (STMMAC_XDP_TX |
5416 						      STMMAC_XDP_REDIRECT)) {
5417 					xdp_status |= xdp_res;
5418 					buf->page = NULL;
5419 					skb = NULL;
5420 					count++;
5421 					continue;
5422 				}
5423 			}
5424 		}
5425 
5426 		if (!skb) {
5427 			/* XDP program may expand or reduce tail */
5428 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5429 
5430 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5431 			if (!skb) {
5432 				rx_dropped++;
5433 				count++;
5434 				goto drain_data;
5435 			}
5436 
5437 			/* XDP program may adjust header */
5438 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5439 			skb_put(skb, buf1_len);
5440 
5441 			/* Data payload copied into SKB, page ready for recycle */
5442 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5443 			buf->page = NULL;
5444 		} else if (buf1_len) {
5445 			dma_sync_single_for_cpu(priv->device, buf->addr,
5446 						buf1_len, dma_dir);
5447 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5448 					buf->page, buf->page_offset, buf1_len,
5449 					priv->dma_conf.dma_buf_sz);
5450 
5451 			/* Data payload appended into SKB */
5452 			skb_mark_for_recycle(skb);
5453 			buf->page = NULL;
5454 		}
5455 
5456 		if (buf2_len) {
5457 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5458 						buf2_len, dma_dir);
5459 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5460 					buf->sec_page, 0, buf2_len,
5461 					priv->dma_conf.dma_buf_sz);
5462 
5463 			/* Data payload appended into SKB */
5464 			skb_mark_for_recycle(skb);
5465 			buf->sec_page = NULL;
5466 		}
5467 
5468 drain_data:
5469 		if (likely(status & rx_not_ls))
5470 			goto read_again;
5471 		if (!skb)
5472 			continue;
5473 
5474 		/* Got entire packet into SKB. Finish it. */
5475 
5476 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5477 		stmmac_rx_vlan(priv->dev, skb);
5478 		skb->protocol = eth_type_trans(skb, priv->dev);
5479 
5480 		if (unlikely(!coe))
5481 			skb_checksum_none_assert(skb);
5482 		else
5483 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5484 
5485 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5486 			skb_set_hash(skb, hash, hash_type);
5487 
5488 		skb_record_rx_queue(skb, queue);
5489 		napi_gro_receive(&ch->rx_napi, skb);
5490 		skb = NULL;
5491 
5492 		rx_packets++;
5493 		rx_bytes += len;
5494 		count++;
5495 	}
5496 
5497 	if (status & rx_not_ls || skb) {
5498 		rx_q->state_saved = true;
5499 		rx_q->state.skb = skb;
5500 		rx_q->state.error = error;
5501 		rx_q->state.len = len;
5502 	}
5503 
5504 	stmmac_finalize_xdp_rx(priv, xdp_status);
5505 
5506 	stmmac_rx_refill(priv, queue);
5507 
5508 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5509 	rxq_stats->rx_packets += rx_packets;
5510 	rxq_stats->rx_bytes += rx_bytes;
5511 	rxq_stats->rx_pkt_n += count;
5512 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5513 
5514 	priv->xstats.rx_dropped += rx_dropped;
5515 	priv->xstats.rx_errors += rx_errors;
5516 
5517 	return count;
5518 }
5519 
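/**
 * stmmac_napi_poll_rx - RX NAPI poll callback
 * @napi: NAPI instance embedded in the channel structure
 * @budget: maximum number of packets this poll may process
 * Description: accounts the poll in the per-queue stats, processes the RX
 * ring via stmmac_rx() and, once all work is done, re-enables the RX DMA
 * interrupt for this channel.
 */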
5520 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5521 {
5522 	struct stmmac_channel *ch =
5523 		container_of(napi, struct stmmac_channel, rx_napi);
5524 	struct stmmac_priv *priv = ch->priv_data;
5525 	struct stmmac_rxq_stats *rxq_stats;
5526 	u32 chan = ch->index;
5527 	unsigned long flags;
5528 	int work_done;
5529 
5530 	rxq_stats = &priv->xstats.rxq_stats[chan];
5531 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5532 	rxq_stats->napi_poll++;
5533 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5534 
5535 	work_done = stmmac_rx(priv, budget, chan);
5536 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5537 		unsigned long flags;
5538 
5539 		spin_lock_irqsave(&ch->lock, flags);
5540 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5541 		spin_unlock_irqrestore(&ch->lock, flags);
5542 	}
5543 
5544 	return work_done;
5545 }
5546 
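/**
 * stmmac_napi_poll_tx - TX NAPI poll callback
 * @napi: NAPI instance embedded in the channel structure
 * @budget: maximum amount of work this poll may do
 * Description: accounts the poll in the per-queue stats, cleans the TX ring
 * via stmmac_tx_clean() and, once all work is done, re-enables the TX DMA
 * interrupt for this channel.
 */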
5547 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5548 {
5549 	struct stmmac_channel *ch =
5550 		container_of(napi, struct stmmac_channel, tx_napi);
5551 	struct stmmac_priv *priv = ch->priv_data;
5552 	struct stmmac_txq_stats *txq_stats;
5553 	u32 chan = ch->index;
5554 	unsigned long flags;
5555 	int work_done;
5556 
5557 	txq_stats = &priv->xstats.txq_stats[chan];
5558 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5559 	txq_stats->napi_poll++;
5560 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5561 
5562 	work_done = stmmac_tx_clean(priv, budget, chan);
5563 	work_done = min(work_done, budget);
5564 
5565 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5566 		unsigned long flags;
5567 
5568 		spin_lock_irqsave(&ch->lock, flags);
5569 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5570 		spin_unlock_irqrestore(&ch->lock, flags);
5571 	}
5572 
5573 	return work_done;
5574 }
5575 
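/**
 * stmmac_napi_poll_rxtx - combined RX/TX NAPI poll callback (XDP zero-copy)
 * @napi: NAPI instance embedded in the channel structure
 * @budget: maximum amount of work this poll may do
 * Description: used for channels backed by an XSK pool. It cleans the TX
 * ring, processes the RX ring in zero-copy mode and re-enables both RX and
 * TX DMA interrupts only once all pending work has completed.
 */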
5576 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5577 {
5578 	struct stmmac_channel *ch =
5579 		container_of(napi, struct stmmac_channel, rxtx_napi);
5580 	struct stmmac_priv *priv = ch->priv_data;
5581 	int rx_done, tx_done, rxtx_done;
5582 	struct stmmac_rxq_stats *rxq_stats;
5583 	struct stmmac_txq_stats *txq_stats;
5584 	u32 chan = ch->index;
5585 	unsigned long flags;
5586 
5587 	rxq_stats = &priv->xstats.rxq_stats[chan];
5588 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5589 	rxq_stats->napi_poll++;
5590 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5591 
5592 	txq_stats = &priv->xstats.txq_stats[chan];
5593 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5594 	txq_stats->napi_poll++;
5595 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5596 
5597 	tx_done = stmmac_tx_clean(priv, budget, chan);
5598 	tx_done = min(tx_done, budget);
5599 
5600 	rx_done = stmmac_rx_zc(priv, budget, chan);
5601 
5602 	rxtx_done = max(tx_done, rx_done);
5603 
5604 	/* If either TX or RX work is not complete, return budget
5605 	 * and keep polling
5606 	 */
5607 	if (rxtx_done >= budget)
5608 		return budget;
5609 
5610 	/* all work done, exit the polling mode */
5611 	if (napi_complete_done(napi, rxtx_done)) {
5612 		unsigned long flags;
5613 
5614 		spin_lock_irqsave(&ch->lock, flags);
5615 		/* Both RX and TX work are complete,
5616 		 * so enable both RX & TX IRQs.
5617 		 */
5618 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5619 		spin_unlock_irqrestore(&ch->lock, flags);
5620 	}
5621 
5622 	return min(rxtx_done, budget - 1);
5623 }
5624 
5625 /**
5626  *  stmmac_tx_timeout
5627  *  @dev : Pointer to net device structure
5628  *  @txqueue: the index of the hanging transmit queue
5629  *  Description: this function is called when a packet transmission fails to
5630  *   complete within a reasonable time. The driver will mark the error in the
5631  *   netdev structure and arrange for the device to be reset to a sane state
5632  *   in order to transmit a new packet.
5633  */
5634 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5635 {
5636 	struct stmmac_priv *priv = netdev_priv(dev);
5637 
5638 	stmmac_global_err(priv);
5639 }
5640 
5641 /**
5642  *  stmmac_set_rx_mode - entry point for multicast addressing
5643  *  @dev : pointer to the device structure
5644  *  Description:
5645  *  This function is a driver entry point which gets called by the kernel
5646  *  whenever multicast addresses must be enabled/disabled.
5647  *  Return value:
5648  *  void.
5649  */
5650 static void stmmac_set_rx_mode(struct net_device *dev)
5651 {
5652 	struct stmmac_priv *priv = netdev_priv(dev);
5653 
5654 	stmmac_set_filter(priv, priv->hw, dev);
5655 }
5656 
5657 /**
5658  *  stmmac_change_mtu - entry point to change MTU size for the device.
5659  *  @dev : device pointer.
5660  *  @new_mtu : the new MTU size for the device.
5661  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5662  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5663  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5664  *  Return value:
5665  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5666  *  file on failure.
5667  */
5668 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5669 {
5670 	struct stmmac_priv *priv = netdev_priv(dev);
5671 	int txfifosz = priv->plat->tx_fifo_size;
5672 	struct stmmac_dma_conf *dma_conf;
5673 	const int mtu = new_mtu;
5674 	int ret;
5675 
5676 	if (txfifosz == 0)
5677 		txfifosz = priv->dma_cap.tx_fifo_size;
5678 
5679 	txfifosz /= priv->plat->tx_queues_to_use;
5680 
5681 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5682 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5683 		return -EINVAL;
5684 	}
5685 
5686 	new_mtu = STMMAC_ALIGN(new_mtu);
5687 
5688 	/* Fail if the FIFO is too small for the new MTU or the MTU is too large */
5689 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5690 		return -EINVAL;
5691 
5692 	if (netif_running(dev)) {
5693 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5694 		/* Try to allocate the new DMA conf with the new mtu */
5695 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5696 		if (IS_ERR(dma_conf)) {
5697 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5698 				   mtu);
5699 			return PTR_ERR(dma_conf);
5700 		}
5701 
5702 		stmmac_release(dev);
5703 
5704 		ret = __stmmac_open(dev, dma_conf);
5705 		if (ret) {
5706 			free_dma_desc_resources(priv, dma_conf);
5707 			kfree(dma_conf);
5708 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5709 			return ret;
5710 		}
5711 
5712 		kfree(dma_conf);
5713 
5714 		stmmac_set_rx_mode(dev);
5715 	}
5716 
5717 	dev->mtu = mtu;
5718 	netdev_update_features(dev);
5719 
5720 	return 0;
5721 }
5722 
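/* Adjust the requested netdev features to what the hardware can actually
 * do: drop RX/TX checksum offload when the core has no COE, work around
 * buggy Jumbo frame support and track the TSO state requested via ethtool.
 */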
5723 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5724 					     netdev_features_t features)
5725 {
5726 	struct stmmac_priv *priv = netdev_priv(dev);
5727 
5728 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5729 		features &= ~NETIF_F_RXCSUM;
5730 
5731 	if (!priv->plat->tx_coe)
5732 		features &= ~NETIF_F_CSUM_MASK;
5733 
5734 	/* Some GMAC devices have buggy Jumbo frame support that
5735 	 * requires the Tx COE to be disabled for oversized frames
5736 	 * (due to limited buffer sizes). In this case we disable
5737 	 * TX csum insertion in the TDES and do not use SF.
5738 	 */
5739 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5740 		features &= ~NETIF_F_CSUM_MASK;
5741 
5742 	/* Disable TSO if requested via ethtool */
5743 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5744 		if (features & NETIF_F_TSO)
5745 			priv->tso = true;
5746 		else
5747 			priv->tso = false;
5748 	}
5749 
5750 	return features;
5751 }
5752 
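/* Apply a new feature set: refresh the RX checksum offload state in the
 * MAC and, when the Split Header capability is present, re-program SPH on
 * every RX channel.
 */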
5753 static int stmmac_set_features(struct net_device *netdev,
5754 			       netdev_features_t features)
5755 {
5756 	struct stmmac_priv *priv = netdev_priv(netdev);
5757 
5758 	/* Keep the COE type if checksum offload is supported */
5759 	if (features & NETIF_F_RXCSUM)
5760 		priv->hw->rx_csum = priv->plat->rx_coe;
5761 	else
5762 		priv->hw->rx_csum = 0;
5763 	/* No check needed because rx_coe has been set before and it will be
5764 	 * fixed in case of issue.
5765 	 */
5766 	stmmac_rx_ipc(priv, priv->hw);
5767 
5768 	if (priv->sph_cap) {
5769 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5770 		u32 chan;
5771 
5772 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5773 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5774 	}
5775 
5776 	return 0;
5777 }
5778 
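/* Track Frame Preemption (FPE) verify/response mPacket events from the
 * MAC, update the local and link-partner handshake states and schedule the
 * FPE workqueue to carry on the handshake when needed.
 */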
5779 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5780 {
5781 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5782 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5783 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5784 	bool *hs_enable = &fpe_cfg->hs_enable;
5785 
5786 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5787 		return;
5788 
5789 	/* If LP has sent verify mPacket, LP is FPE capable */
5790 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5791 		if (*lp_state < FPE_STATE_CAPABLE)
5792 			*lp_state = FPE_STATE_CAPABLE;
5793 
5794 		/* If the user has requested FPE enable, respond quickly */
5795 		if (*hs_enable)
5796 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5797 						MPACKET_RESPONSE);
5798 	}
5799 
5800 	/* If Local has sent verify mPacket, Local is FPE capable */
5801 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5802 		if (*lo_state < FPE_STATE_CAPABLE)
5803 			*lo_state = FPE_STATE_CAPABLE;
5804 	}
5805 
5806 	/* If LP has sent response mPacket, LP is entering FPE ON */
5807 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5808 		*lp_state = FPE_STATE_ENTERING_ON;
5809 
5810 	/* If Local has sent response mPacket, Local is entering FPE ON */
5811 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5812 		*lo_state = FPE_STATE_ENTERING_ON;
5813 
5814 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5815 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5816 	    priv->fpe_wq) {
5817 		queue_work(priv->fpe_wq, &priv->fpe_task);
5818 	}
5819 }
5820 
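/* Handle the interrupt sources shared by all DMA channels: wake-up events,
 * EST and FPE status, GMAC core and per-queue MTL status, PCS link changes
 * and timestamp interrupts.
 */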
5821 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5822 {
5823 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5824 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5825 	u32 queues_count;
5826 	u32 queue;
5827 	bool xmac;
5828 
5829 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5830 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5831 
5832 	if (priv->irq_wake)
5833 		pm_wakeup_event(priv->device, 0);
5834 
5835 	if (priv->dma_cap.estsel)
5836 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5837 				      &priv->xstats, tx_cnt);
5838 
5839 	if (priv->dma_cap.fpesel) {
5840 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5841 						   priv->dev);
5842 
5843 		stmmac_fpe_event_status(priv, status);
5844 	}
5845 
5846 	/* To handle the GMAC's own interrupts */
5847 	if ((priv->plat->has_gmac) || xmac) {
5848 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5849 
5850 		if (unlikely(status)) {
5851 			/* For LPI we need to save the tx status */
5852 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5853 				priv->tx_path_in_lpi_mode = true;
5854 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5855 				priv->tx_path_in_lpi_mode = false;
5856 		}
5857 
5858 		for (queue = 0; queue < queues_count; queue++) {
5859 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5860 							    queue);
5861 		}
5862 
5863 		/* PCS link status */
5864 		if (priv->hw->pcs &&
5865 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5866 			if (priv->xstats.pcs_link)
5867 				netif_carrier_on(priv->dev);
5868 			else
5869 				netif_carrier_off(priv->dev);
5870 		}
5871 
5872 		stmmac_timestamp_interrupt(priv, priv);
5873 	}
5874 }
5875 
5876 /**
5877  *  stmmac_interrupt - main ISR
5878  *  @irq: interrupt number.
5879  *  @dev_id: to pass the net device pointer.
5880  *  Description: this is the main driver interrupt service routine.
5881  *  It can call:
5882  *  o DMA service routine (to manage incoming frame reception and transmission
5883  *    status)
5884  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5885  *    interrupts.
5886  */
5887 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5888 {
5889 	struct net_device *dev = (struct net_device *)dev_id;
5890 	struct stmmac_priv *priv = netdev_priv(dev);
5891 
5892 	/* Check if adapter is up */
5893 	if (test_bit(STMMAC_DOWN, &priv->state))
5894 		return IRQ_HANDLED;
5895 
5896 	/* Check if a fatal error happened */
5897 	if (stmmac_safety_feat_interrupt(priv))
5898 		return IRQ_HANDLED;
5899 
5900 	/* To handle Common interrupts */
5901 	stmmac_common_interrupt(priv);
5902 
5903 	/* To handle DMA interrupts */
5904 	stmmac_dma_interrupt(priv);
5905 
5906 	return IRQ_HANDLED;
5907 }
5908 
5909 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5910 {
5911 	struct net_device *dev = (struct net_device *)dev_id;
5912 	struct stmmac_priv *priv = netdev_priv(dev);
5913 
5914 	if (unlikely(!dev)) {
5915 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5916 		return IRQ_NONE;
5917 	}
5918 
5919 	/* Check if adapter is up */
5920 	if (test_bit(STMMAC_DOWN, &priv->state))
5921 		return IRQ_HANDLED;
5922 
5923 	/* To handle Common interrupts */
5924 	stmmac_common_interrupt(priv);
5925 
5926 	return IRQ_HANDLED;
5927 }
5928 
5929 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5930 {
5931 	struct net_device *dev = (struct net_device *)dev_id;
5932 	struct stmmac_priv *priv = netdev_priv(dev);
5933 
5934 	if (unlikely(!dev)) {
5935 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5936 		return IRQ_NONE;
5937 	}
5938 
5939 	/* Check if adapter is up */
5940 	if (test_bit(STMMAC_DOWN, &priv->state))
5941 		return IRQ_HANDLED;
5942 
5943 	/* Check if a fatal error happened */
5944 	stmmac_safety_feat_interrupt(priv);
5945 
5946 	return IRQ_HANDLED;
5947 }
5948 
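/* Per-channel TX MSI handler: kicks the TX NAPI for this channel and, on
 * TX hard errors, either bumps the DMA threshold or resets the TX path.
 */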
5949 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5950 {
5951 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5952 	struct stmmac_dma_conf *dma_conf;
5953 	int chan = tx_q->queue_index;
5954 	struct stmmac_priv *priv;
5955 	int status;
5956 
5957 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5958 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5959 
5960 	if (unlikely(!data)) {
5961 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5962 		return IRQ_NONE;
5963 	}
5964 
5965 	/* Check if adapter is up */
5966 	if (test_bit(STMMAC_DOWN, &priv->state))
5967 		return IRQ_HANDLED;
5968 
5969 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5970 
5971 	if (unlikely(status & tx_hard_error_bump_tc)) {
5972 		/* Try to bump up the dma threshold on this failure */
5973 		stmmac_bump_dma_threshold(priv, chan);
5974 	} else if (unlikely(status == tx_hard_error)) {
5975 		stmmac_tx_err(priv, chan);
5976 	}
5977 
5978 	return IRQ_HANDLED;
5979 }
5980 
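/* Per-channel RX MSI handler: simply kicks the RX NAPI for this channel. */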
5981 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5982 {
5983 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5984 	struct stmmac_dma_conf *dma_conf;
5985 	int chan = rx_q->queue_index;
5986 	struct stmmac_priv *priv;
5987 
5988 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5989 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5990 
5991 	if (unlikely(!data)) {
5992 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5993 		return IRQ_NONE;
5994 	}
5995 
5996 	/* Check if adapter is up */
5997 	if (test_bit(STMMAC_DOWN, &priv->state))
5998 		return IRQ_HANDLED;
5999 
6000 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6001 
6002 	return IRQ_HANDLED;
6003 }
6004 
6005 #ifdef CONFIG_NET_POLL_CONTROLLER
6006 /* Polling receive - used by NETCONSOLE and other diagnostic tools
6007  * to allow network I/O with interrupts disabled.
6008  */
6009 static void stmmac_poll_controller(struct net_device *dev)
6010 {
6011 	struct stmmac_priv *priv = netdev_priv(dev);
6012 	int i;
6013 
6014 	/* If adapter is down, do nothing */
6015 	if (test_bit(STMMAC_DOWN, &priv->state))
6016 		return;
6017 
6018 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
6019 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
6020 			stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
6021 
6022 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
6023 			stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
6024 	} else {
6025 		disable_irq(dev->irq);
6026 		stmmac_interrupt(dev->irq, dev);
6027 		enable_irq(dev->irq);
6028 	}
6029 }
6030 #endif
6031 
6032 /**
6033  *  stmmac_ioctl - Entry point for the Ioctl
6034  *  @dev: Device pointer.
6035  * @rq: An IOCTL-specific structure that can contain a pointer to
6036  *  a proprietary structure used to pass information to the driver.
6037  *  @cmd: IOCTL command
6038  *  Description:
6039  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6040  */
6041 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6042 {
6043 	struct stmmac_priv *priv = netdev_priv(dev);
6044 	int ret = -EOPNOTSUPP;
6045 
6046 	if (!netif_running(dev))
6047 		return -EINVAL;
6048 
6049 	switch (cmd) {
6050 	case SIOCGMIIPHY:
6051 	case SIOCGMIIREG:
6052 	case SIOCSMIIREG:
6053 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6054 		break;
6055 	case SIOCSHWTSTAMP:
6056 		ret = stmmac_hwtstamp_set(dev, rq);
6057 		break;
6058 	case SIOCGHWTSTAMP:
6059 		ret = stmmac_hwtstamp_get(dev, rq);
6060 		break;
6061 	default:
6062 		break;
6063 	}
6064 
6065 	return ret;
6066 }
6067 
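/* tc block callback used to offload CLSU32 and FLOWER classifiers. The
 * queues are briefly disabled while the hardware filter tables are
 * updated.
 */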
6068 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6069 				    void *cb_priv)
6070 {
6071 	struct stmmac_priv *priv = cb_priv;
6072 	int ret = -EOPNOTSUPP;
6073 
6074 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6075 		return ret;
6076 
6077 	__stmmac_disable_all_queues(priv);
6078 
6079 	switch (type) {
6080 	case TC_SETUP_CLSU32:
6081 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6082 		break;
6083 	case TC_SETUP_CLSFLOWER:
6084 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6085 		break;
6086 	default:
6087 		break;
6088 	}
6089 
6090 	stmmac_enable_all_queues(priv);
6091 	return ret;
6092 }
6093 
6094 static LIST_HEAD(stmmac_block_cb_list);
6095 
6096 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6097 			   void *type_data)
6098 {
6099 	struct stmmac_priv *priv = netdev_priv(ndev);
6100 
6101 	switch (type) {
6102 	case TC_QUERY_CAPS:
6103 		return stmmac_tc_query_caps(priv, priv, type_data);
6104 	case TC_SETUP_BLOCK:
6105 		return flow_block_cb_setup_simple(type_data,
6106 						  &stmmac_block_cb_list,
6107 						  stmmac_setup_tc_block_cb,
6108 						  priv, priv, true);
6109 	case TC_SETUP_QDISC_CBS:
6110 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6111 	case TC_SETUP_QDISC_TAPRIO:
6112 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6113 	case TC_SETUP_QDISC_ETF:
6114 		return stmmac_tc_setup_etf(priv, priv, type_data);
6115 	default:
6116 		return -EOPNOTSUPP;
6117 	}
6118 }
6119 
6120 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6121 			       struct net_device *sb_dev)
6122 {
6123 	int gso = skb_shinfo(skb)->gso_type;
6124 
6125 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6126 		/*
6127 		 * There is no way to determine the number of TSO/USO
6128 		 * capable queues. Always use queue 0, because if
6129 		 * TSO/USO is supported then at least this one will be
6130 		 * capable.
6131 		 */
6132 		return 0;
6133 	}
6134 
6135 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6136 }
6137 
6138 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6139 {
6140 	struct stmmac_priv *priv = netdev_priv(ndev);
6141 	int ret = 0;
6142 
6143 	ret = pm_runtime_resume_and_get(priv->device);
6144 	if (ret < 0)
6145 		return ret;
6146 
6147 	ret = eth_mac_addr(ndev, addr);
6148 	if (ret)
6149 		goto set_mac_error;
6150 
6151 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6152 
6153 set_mac_error:
6154 	pm_runtime_put(priv->device);
6155 
6156 	return ret;
6157 }
6158 
6159 #ifdef CONFIG_DEBUG_FS
6160 static struct dentry *stmmac_fs_dir;
6161 
6162 static void sysfs_display_ring(void *head, int size, int extend_desc,
6163 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6164 {
6165 	int i;
6166 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6167 	struct dma_desc *p = (struct dma_desc *)head;
6168 	dma_addr_t dma_addr;
6169 
6170 	for (i = 0; i < size; i++) {
6171 		if (extend_desc) {
6172 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6173 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6174 				   i, &dma_addr,
6175 				   le32_to_cpu(ep->basic.des0),
6176 				   le32_to_cpu(ep->basic.des1),
6177 				   le32_to_cpu(ep->basic.des2),
6178 				   le32_to_cpu(ep->basic.des3));
6179 			ep++;
6180 		} else {
6181 			dma_addr = dma_phy_addr + i * sizeof(*p);
6182 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6183 				   i, &dma_addr,
6184 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6185 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6186 			p++;
6187 		}
6188 		seq_printf(seq, "\n");
6189 	}
6190 }
6191 
6192 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6193 {
6194 	struct net_device *dev = seq->private;
6195 	struct stmmac_priv *priv = netdev_priv(dev);
6196 	u32 rx_count = priv->plat->rx_queues_to_use;
6197 	u32 tx_count = priv->plat->tx_queues_to_use;
6198 	u32 queue;
6199 
6200 	if ((dev->flags & IFF_UP) == 0)
6201 		return 0;
6202 
6203 	for (queue = 0; queue < rx_count; queue++) {
6204 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6205 
6206 		seq_printf(seq, "RX Queue %d:\n", queue);
6207 
6208 		if (priv->extend_desc) {
6209 			seq_printf(seq, "Extended descriptor ring:\n");
6210 			sysfs_display_ring((void *)rx_q->dma_erx,
6211 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6212 		} else {
6213 			seq_printf(seq, "Descriptor ring:\n");
6214 			sysfs_display_ring((void *)rx_q->dma_rx,
6215 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6216 		}
6217 	}
6218 
6219 	for (queue = 0; queue < tx_count; queue++) {
6220 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6221 
6222 		seq_printf(seq, "TX Queue %d:\n", queue);
6223 
6224 		if (priv->extend_desc) {
6225 			seq_printf(seq, "Extended descriptor ring:\n");
6226 			sysfs_display_ring((void *)tx_q->dma_etx,
6227 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6228 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6229 			seq_printf(seq, "Descriptor ring:\n");
6230 			sysfs_display_ring((void *)tx_q->dma_tx,
6231 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6232 		}
6233 	}
6234 
6235 	return 0;
6236 }
6237 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6238 
6239 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6240 {
6241 	static const char * const dwxgmac_timestamp_source[] = {
6242 		"None",
6243 		"Internal",
6244 		"External",
6245 		"Both",
6246 	};
6247 	static const char * const dwxgmac_safety_feature_desc[] = {
6248 		"No",
6249 		"All Safety Features with ECC and Parity",
6250 		"All Safety Features without ECC or Parity",
6251 		"All Safety Features with Parity Only",
6252 		"ECC Only",
6253 		"UNDEFINED",
6254 		"UNDEFINED",
6255 		"UNDEFINED",
6256 	};
6257 	struct net_device *dev = seq->private;
6258 	struct stmmac_priv *priv = netdev_priv(dev);
6259 
6260 	if (!priv->hw_cap_support) {
6261 		seq_printf(seq, "DMA HW features not supported\n");
6262 		return 0;
6263 	}
6264 
6265 	seq_printf(seq, "==============================\n");
6266 	seq_printf(seq, "\tDMA HW features\n");
6267 	seq_printf(seq, "==============================\n");
6268 
6269 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6270 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6271 	seq_printf(seq, "\t1000 Mbps: %s\n",
6272 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6273 	seq_printf(seq, "\tHalf duplex: %s\n",
6274 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6275 	if (priv->plat->has_xgmac) {
6276 		seq_printf(seq,
6277 			   "\tNumber of Additional MAC address registers: %d\n",
6278 			   priv->dma_cap.multi_addr);
6279 	} else {
6280 		seq_printf(seq, "\tHash Filter: %s\n",
6281 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6282 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6283 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6284 	}
6285 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6286 		   (priv->dma_cap.pcs) ? "Y" : "N");
6287 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6288 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6289 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6290 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6291 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6292 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6293 	seq_printf(seq, "\tRMON module: %s\n",
6294 		   (priv->dma_cap.rmon) ? "Y" : "N");
6295 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6296 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6297 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6298 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6299 	if (priv->plat->has_xgmac)
6300 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6301 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6302 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6303 		   (priv->dma_cap.eee) ? "Y" : "N");
6304 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6305 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6306 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6307 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6308 	    priv->plat->has_xgmac) {
6309 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6310 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6311 	} else {
6312 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6313 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6314 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6315 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6316 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6317 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6318 	}
6319 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6320 		   priv->dma_cap.number_rx_channel);
6321 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6322 		   priv->dma_cap.number_tx_channel);
6323 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6324 		   priv->dma_cap.number_rx_queues);
6325 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6326 		   priv->dma_cap.number_tx_queues);
6327 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6328 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6329 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6330 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6331 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6332 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6333 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6334 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6335 		   priv->dma_cap.pps_out_num);
6336 	seq_printf(seq, "\tSafety Features: %s\n",
6337 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6338 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6339 		   priv->dma_cap.frpsel ? "Y" : "N");
6340 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6341 		   priv->dma_cap.host_dma_width);
6342 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6343 		   priv->dma_cap.rssen ? "Y" : "N");
6344 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6345 		   priv->dma_cap.vlhash ? "Y" : "N");
6346 	seq_printf(seq, "\tSplit Header: %s\n",
6347 		   priv->dma_cap.sphen ? "Y" : "N");
6348 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6349 		   priv->dma_cap.vlins ? "Y" : "N");
6350 	seq_printf(seq, "\tDouble VLAN: %s\n",
6351 		   priv->dma_cap.dvlan ? "Y" : "N");
6352 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6353 		   priv->dma_cap.l3l4fnum);
6354 	seq_printf(seq, "\tARP Offloading: %s\n",
6355 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6356 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6357 		   priv->dma_cap.estsel ? "Y" : "N");
6358 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6359 		   priv->dma_cap.fpesel ? "Y" : "N");
6360 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6361 		   priv->dma_cap.tbssel ? "Y" : "N");
6362 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6363 		   priv->dma_cap.tbs_ch_num);
6364 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6365 		   priv->dma_cap.sgfsel ? "Y" : "N");
6366 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6367 		   BIT(priv->dma_cap.ttsfd) >> 1);
6368 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6369 		   priv->dma_cap.numtc);
6370 	seq_printf(seq, "\tDCB Feature: %s\n",
6371 		   priv->dma_cap.dcben ? "Y" : "N");
6372 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6373 		   priv->dma_cap.advthword ? "Y" : "N");
6374 	seq_printf(seq, "\tPTP Offload: %s\n",
6375 		   priv->dma_cap.ptoen ? "Y" : "N");
6376 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6377 		   priv->dma_cap.osten ? "Y" : "N");
6378 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6379 		   priv->dma_cap.pfcen ? "Y" : "N");
6380 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6381 		   BIT(priv->dma_cap.frpes) << 6);
6382 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6383 		   BIT(priv->dma_cap.frpbs) << 6);
6384 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6385 		   priv->dma_cap.frppipe_num);
6386 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6387 		   priv->dma_cap.nrvf_num ?
6388 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6389 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6390 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6391 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6392 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6393 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6394 		   priv->dma_cap.cbtisel ? "Y" : "N");
6395 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6396 		   priv->dma_cap.aux_snapshot_n);
6397 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6398 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6399 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6400 		   priv->dma_cap.edma ? "Y" : "N");
6401 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6402 		   priv->dma_cap.ediffc ? "Y" : "N");
6403 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6404 		   priv->dma_cap.vxn ? "Y" : "N");
6405 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6406 		   priv->dma_cap.dbgmem ? "Y" : "N");
6407 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6408 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6409 	return 0;
6410 }
6411 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6412 
6413 /* Use network device events to rename debugfs file entries.
6414  */
6415 static int stmmac_device_event(struct notifier_block *unused,
6416 			       unsigned long event, void *ptr)
6417 {
6418 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6419 	struct stmmac_priv *priv = netdev_priv(dev);
6420 
6421 	if (dev->netdev_ops != &stmmac_netdev_ops)
6422 		goto done;
6423 
6424 	switch (event) {
6425 	case NETDEV_CHANGENAME:
6426 		if (priv->dbgfs_dir)
6427 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6428 							 priv->dbgfs_dir,
6429 							 stmmac_fs_dir,
6430 							 dev->name);
6431 		break;
6432 	}
6433 done:
6434 	return NOTIFY_DONE;
6435 }
6436 
6437 static struct notifier_block stmmac_notifier = {
6438 	.notifier_call = stmmac_device_event,
6439 };
6440 
6441 static void stmmac_init_fs(struct net_device *dev)
6442 {
6443 	struct stmmac_priv *priv = netdev_priv(dev);
6444 
6445 	rtnl_lock();
6446 
6447 	/* Create per netdev entries */
6448 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6449 
6450 	/* Entry to report DMA RX/TX rings */
6451 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6452 			    &stmmac_rings_status_fops);
6453 
6454 	/* Entry to report the DMA HW features */
6455 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6456 			    &stmmac_dma_cap_fops);
6457 
6458 	rtnl_unlock();
6459 }
6460 
6461 static void stmmac_exit_fs(struct net_device *dev)
6462 {
6463 	struct stmmac_priv *priv = netdev_priv(dev);
6464 
6465 	debugfs_remove_recursive(priv->dbgfs_dir);
6466 }
6467 #endif /* CONFIG_DEBUG_FS */
6468 
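/* Compute the CRC-32 (little-endian, polynomial 0xEDB88320) over the 12
 * VID bits of a VLAN tag. stmmac_vlan_update() uses the top nibble of the
 * bit-reversed, inverted result as the bit index into the VLAN hash
 * filter.
 */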
6469 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6470 {
6471 	unsigned char *data = (unsigned char *)&vid_le;
6472 	unsigned char data_byte = 0;
6473 	u32 crc = ~0x0;
6474 	u32 temp = 0;
6475 	int i, bits;
6476 
6477 	bits = get_bitmask_order(VLAN_VID_MASK);
6478 	for (i = 0; i < bits; i++) {
6479 		if ((i % 8) == 0)
6480 			data_byte = data[i / 8];
6481 
6482 		temp = ((crc & 1) ^ data_byte) & 1;
6483 		crc >>= 1;
6484 		data_byte >>= 1;
6485 
6486 		if (temp)
6487 			crc ^= 0xedb88320;
6488 	}
6489 
6490 	return crc;
6491 }
6492 
6493 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6494 {
6495 	u32 crc, hash = 0;
6496 	__le16 pmatch = 0;
6497 	int count = 0;
6498 	u16 vid = 0;
6499 
6500 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6501 		__le16 vid_le = cpu_to_le16(vid);
6502 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6503 		hash |= (1 << crc);
6504 		count++;
6505 	}
6506 
6507 	if (!priv->dma_cap.vlhash) {
6508 		if (count > 2) /* VID = 0 always passes filter */
6509 			return -EOPNOTSUPP;
6510 
6511 		pmatch = cpu_to_le16(vid);
6512 		hash = 0;
6513 	}
6514 
6515 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6516 }
6517 
6518 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6519 {
6520 	struct stmmac_priv *priv = netdev_priv(ndev);
6521 	bool is_double = false;
6522 	int ret;
6523 
6524 	ret = pm_runtime_resume_and_get(priv->device);
6525 	if (ret < 0)
6526 		return ret;
6527 
6528 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6529 		is_double = true;
6530 
6531 	set_bit(vid, priv->active_vlans);
6532 	ret = stmmac_vlan_update(priv, is_double);
6533 	if (ret) {
6534 		clear_bit(vid, priv->active_vlans);
6535 		goto err_pm_put;
6536 	}
6537 
6538 	if (priv->hw->num_vlan) {
6539 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6540 		if (ret)
6541 			goto err_pm_put;
6542 	}
6543 err_pm_put:
6544 	pm_runtime_put(priv->device);
6545 
6546 	return ret;
6547 }
6548 
6549 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6550 {
6551 	struct stmmac_priv *priv = netdev_priv(ndev);
6552 	bool is_double = false;
6553 	int ret;
6554 
6555 	ret = pm_runtime_resume_and_get(priv->device);
6556 	if (ret < 0)
6557 		return ret;
6558 
6559 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6560 		is_double = true;
6561 
6562 	clear_bit(vid, priv->active_vlans);
6563 
6564 	if (priv->hw->num_vlan) {
6565 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6566 		if (ret)
6567 			goto del_vlan_error;
6568 	}
6569 
6570 	ret = stmmac_vlan_update(priv, is_double);
6571 
6572 del_vlan_error:
6573 	pm_runtime_put(priv->device);
6574 
6575 	return ret;
6576 }
6577 
6578 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6579 {
6580 	struct stmmac_priv *priv = netdev_priv(dev);
6581 
6582 	switch (bpf->command) {
6583 	case XDP_SETUP_PROG:
6584 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6585 	case XDP_SETUP_XSK_POOL:
6586 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6587 					     bpf->xsk.queue_id);
6588 	default:
6589 		return -EOPNOTSUPP;
6590 	}
6591 }
6592 
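/* .ndo_xdp_xmit callback: transmit a batch of XDP frames on the TX queue
 * mapped to the current CPU, sharing the queue lock with the slow path.
 * Descriptors are flushed and the TX timer re-armed only when
 * XDP_XMIT_FLUSH is set.
 */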
6593 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6594 			   struct xdp_frame **frames, u32 flags)
6595 {
6596 	struct stmmac_priv *priv = netdev_priv(dev);
6597 	int cpu = smp_processor_id();
6598 	struct netdev_queue *nq;
6599 	int i, nxmit = 0;
6600 	int queue;
6601 
6602 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6603 		return -ENETDOWN;
6604 
6605 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6606 		return -EINVAL;
6607 
6608 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6609 	nq = netdev_get_tx_queue(priv->dev, queue);
6610 
6611 	__netif_tx_lock(nq, cpu);
6612 	/* Avoids TX time-out as we are sharing with slow path */
6613 	txq_trans_cond_update(nq);
6614 
6615 	for (i = 0; i < num_frames; i++) {
6616 		int res;
6617 
6618 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6619 		if (res == STMMAC_XDP_CONSUMED)
6620 			break;
6621 
6622 		nxmit++;
6623 	}
6624 
6625 	if (flags & XDP_XMIT_FLUSH) {
6626 		stmmac_flush_tx_descriptors(priv, queue);
6627 		stmmac_tx_timer_arm(priv, queue);
6628 	}
6629 
6630 	__netif_tx_unlock(nq);
6631 
6632 	return nxmit;
6633 }
6634 
6635 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6636 {
6637 	struct stmmac_channel *ch = &priv->channel[queue];
6638 	unsigned long flags;
6639 
6640 	spin_lock_irqsave(&ch->lock, flags);
6641 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6642 	spin_unlock_irqrestore(&ch->lock, flags);
6643 
6644 	stmmac_stop_rx_dma(priv, queue);
6645 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6646 }
6647 
6648 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6649 {
6650 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6651 	struct stmmac_channel *ch = &priv->channel[queue];
6652 	unsigned long flags;
6653 	u32 buf_size;
6654 	int ret;
6655 
6656 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6657 	if (ret) {
6658 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6659 		return;
6660 	}
6661 
6662 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6663 	if (ret) {
6664 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6665 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6666 		return;
6667 	}
6668 
6669 	stmmac_reset_rx_queue(priv, queue);
6670 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6671 
6672 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6673 			    rx_q->dma_rx_phy, rx_q->queue_index);
6674 
6675 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6676 			     sizeof(struct dma_desc));
6677 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6678 			       rx_q->rx_tail_addr, rx_q->queue_index);
6679 
6680 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6681 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6682 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6683 				      buf_size,
6684 				      rx_q->queue_index);
6685 	} else {
6686 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6687 				      priv->dma_conf.dma_buf_sz,
6688 				      rx_q->queue_index);
6689 	}
6690 
6691 	stmmac_start_rx_dma(priv, queue);
6692 
6693 	spin_lock_irqsave(&ch->lock, flags);
6694 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6695 	spin_unlock_irqrestore(&ch->lock, flags);
6696 }
6697 
6698 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6699 {
6700 	struct stmmac_channel *ch = &priv->channel[queue];
6701 	unsigned long flags;
6702 
6703 	spin_lock_irqsave(&ch->lock, flags);
6704 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6705 	spin_unlock_irqrestore(&ch->lock, flags);
6706 
6707 	stmmac_stop_tx_dma(priv, queue);
6708 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6709 }
6710 
6711 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6712 {
6713 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6714 	struct stmmac_channel *ch = &priv->channel[queue];
6715 	unsigned long flags;
6716 	int ret;
6717 
6718 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6719 	if (ret) {
6720 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6721 		return;
6722 	}
6723 
6724 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6725 	if (ret) {
6726 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6727 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6728 		return;
6729 	}
6730 
6731 	stmmac_reset_tx_queue(priv, queue);
6732 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6733 
6734 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6735 			    tx_q->dma_tx_phy, tx_q->queue_index);
6736 
6737 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6738 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6739 
6740 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6741 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6742 			       tx_q->tx_tail_addr, tx_q->queue_index);
6743 
6744 	stmmac_start_tx_dma(priv, queue);
6745 
6746 	spin_lock_irqsave(&ch->lock, flags);
6747 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6748 	spin_unlock_irqrestore(&ch->lock, flags);
6749 }
6750 
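/* Tear down the data path before an XDP program or XSK pool change: stop
 * the TX queues and NAPI, cancel the TX timers, free the IRQs, stop all
 * DMA channels, release the descriptor resources and disable the MAC.
 */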
6751 void stmmac_xdp_release(struct net_device *dev)
6752 {
6753 	struct stmmac_priv *priv = netdev_priv(dev);
6754 	u32 chan;
6755 
6756 	/* Ensure tx function is not running */
6757 	netif_tx_disable(dev);
6758 
6759 	/* Disable NAPI process */
6760 	stmmac_disable_all_queues(priv);
6761 
6762 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6763 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6764 
6765 	/* Free the IRQ lines */
6766 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6767 
6768 	/* Stop TX/RX DMA channels */
6769 	stmmac_stop_all_dma(priv);
6770 
6771 	/* Release and free the Rx/Tx resources */
6772 	free_dma_desc_resources(priv, &priv->dma_conf);
6773 
6774 	/* Disable the MAC Rx/Tx */
6775 	stmmac_mac_set(priv, priv->ioaddr, false);
6776 
6777 	/* set trans_start so we don't get spurious
6778 	 * watchdogs during reset
6779 	 */
6780 	netif_trans_update(dev);
6781 	netif_carrier_off(dev);
6782 }
6783 
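/* Bring the data path back up after an XDP program or XSK pool change:
 * reallocate and re-initialize the descriptor rings, reprogram the RX/TX
 * DMA channels (using the XSK frame size where a pool is attached),
 * re-enable the MAC, request the IRQs and restart NAPI and the TX queues.
 */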
6784 int stmmac_xdp_open(struct net_device *dev)
6785 {
6786 	struct stmmac_priv *priv = netdev_priv(dev);
6787 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6788 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6789 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6790 	struct stmmac_rx_queue *rx_q;
6791 	struct stmmac_tx_queue *tx_q;
6792 	u32 buf_size;
6793 	bool sph_en;
6794 	u32 chan;
6795 	int ret;
6796 
6797 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6798 	if (ret < 0) {
6799 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6800 			   __func__);
6801 		goto dma_desc_error;
6802 	}
6803 
6804 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6805 	if (ret < 0) {
6806 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6807 			   __func__);
6808 		goto init_error;
6809 	}
6810 
6811 	stmmac_reset_queues_param(priv);
6812 
6813 	/* DMA CSR Channel configuration */
6814 	for (chan = 0; chan < dma_csr_ch; chan++) {
6815 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6816 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6817 	}
6818 
6819 	/* Adjust Split header */
6820 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6821 
6822 	/* DMA RX Channel Configuration */
6823 	for (chan = 0; chan < rx_cnt; chan++) {
6824 		rx_q = &priv->dma_conf.rx_queue[chan];
6825 
6826 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6827 				    rx_q->dma_rx_phy, chan);
6828 
6829 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6830 				     (rx_q->buf_alloc_num *
6831 				      sizeof(struct dma_desc));
6832 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6833 				       rx_q->rx_tail_addr, chan);
6834 
6835 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6836 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6837 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6838 					      buf_size,
6839 					      rx_q->queue_index);
6840 		} else {
6841 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6842 					      priv->dma_conf.dma_buf_sz,
6843 					      rx_q->queue_index);
6844 		}
6845 
6846 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6847 	}
6848 
6849 	/* DMA TX Channel Configuration */
6850 	for (chan = 0; chan < tx_cnt; chan++) {
6851 		tx_q = &priv->dma_conf.tx_queue[chan];
6852 
6853 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6854 				    tx_q->dma_tx_phy, chan);
6855 
6856 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6857 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6858 				       tx_q->tx_tail_addr, chan);
6859 
6860 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6861 		tx_q->txtimer.function = stmmac_tx_timer;
6862 	}
6863 
6864 	/* Enable the MAC Rx/Tx */
6865 	stmmac_mac_set(priv, priv->ioaddr, true);
6866 
6867 	/* Start Rx & Tx DMA Channels */
6868 	stmmac_start_all_dma(priv);
6869 
6870 	ret = stmmac_request_irq(dev);
6871 	if (ret)
6872 		goto irq_error;
6873 
6874 	/* Enable NAPI process */
6875 	stmmac_enable_all_queues(priv);
6876 	netif_carrier_on(dev);
6877 	netif_tx_start_all_queues(dev);
6878 	stmmac_enable_all_dma_irq(priv);
6879 
6880 	return 0;
6881 
6882 irq_error:
6883 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6884 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6885 
6886 	stmmac_hw_teardown(dev);
6887 init_error:
6888 	free_dma_desc_resources(priv, &priv->dma_conf);
6889 dma_desc_error:
6890 	return ret;
6891 }
6892 
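/* .ndo_xsk_wakeup callback: validate that the interface, XDP program and
 * XSK pool are usable for the given queue, then schedule the combined
 * RX/TX NAPI so pending zero-copy work gets processed.
 */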
6893 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6894 {
6895 	struct stmmac_priv *priv = netdev_priv(dev);
6896 	struct stmmac_rx_queue *rx_q;
6897 	struct stmmac_tx_queue *tx_q;
6898 	struct stmmac_channel *ch;
6899 
6900 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6901 	    !netif_carrier_ok(priv->dev))
6902 		return -ENETDOWN;
6903 
6904 	if (!stmmac_xdp_is_enabled(priv))
6905 		return -EINVAL;
6906 
6907 	if (queue >= priv->plat->rx_queues_to_use ||
6908 	    queue >= priv->plat->tx_queues_to_use)
6909 		return -EINVAL;
6910 
6911 	rx_q = &priv->dma_conf.rx_queue[queue];
6912 	tx_q = &priv->dma_conf.tx_queue[queue];
6913 	ch = &priv->channel[queue];
6914 
6915 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6916 		return -EINVAL;
6917 
6918 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6919 		/* EQoS does not have a per-DMA channel SW interrupt,
6920 		 * so we schedule the RX/TX NAPI straight away.
6921 		 */
6922 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6923 			__napi_schedule(&ch->rxtx_napi);
6924 	}
6925 
6926 	return 0;
6927 }
6928 
6929 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6930 {
6931 	struct stmmac_priv *priv = netdev_priv(dev);
6932 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6933 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6934 	unsigned int start;
6935 	int q;
6936 
6937 	for (q = 0; q < tx_cnt; q++) {
6938 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6939 		u64 tx_packets;
6940 		u64 tx_bytes;
6941 
6942 		do {
6943 			start = u64_stats_fetch_begin(&txq_stats->syncp);
6944 			tx_packets = txq_stats->tx_packets;
6945 			tx_bytes   = txq_stats->tx_bytes;
6946 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6947 
6948 		stats->tx_packets += tx_packets;
6949 		stats->tx_bytes += tx_bytes;
6950 	}
6951 
6952 	for (q = 0; q < rx_cnt; q++) {
6953 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6954 		u64 rx_packets;
6955 		u64 rx_bytes;
6956 
6957 		do {
6958 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
6959 			rx_packets = rxq_stats->rx_packets;
6960 			rx_bytes   = rxq_stats->rx_bytes;
6961 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6962 
6963 		stats->rx_packets += rx_packets;
6964 		stats->rx_bytes += rx_bytes;
6965 	}
6966 
6967 	stats->rx_dropped = priv->xstats.rx_dropped;
6968 	stats->rx_errors = priv->xstats.rx_errors;
6969 	stats->tx_dropped = priv->xstats.tx_dropped;
6970 	stats->tx_errors = priv->xstats.tx_errors;
6971 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6972 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6973 	stats->rx_length_errors = priv->xstats.rx_length;
6974 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6975 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6976 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6977 }
6978 
6979 static const struct net_device_ops stmmac_netdev_ops = {
6980 	.ndo_open = stmmac_open,
6981 	.ndo_start_xmit = stmmac_xmit,
6982 	.ndo_stop = stmmac_release,
6983 	.ndo_change_mtu = stmmac_change_mtu,
6984 	.ndo_fix_features = stmmac_fix_features,
6985 	.ndo_set_features = stmmac_set_features,
6986 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6987 	.ndo_tx_timeout = stmmac_tx_timeout,
6988 	.ndo_eth_ioctl = stmmac_ioctl,
6989 	.ndo_get_stats64 = stmmac_get_stats64,
6990 	.ndo_setup_tc = stmmac_setup_tc,
6991 	.ndo_select_queue = stmmac_select_queue,
6992 #ifdef CONFIG_NET_POLL_CONTROLLER
6993 	.ndo_poll_controller = stmmac_poll_controller,
6994 #endif
6995 	.ndo_set_mac_address = stmmac_set_mac_address,
6996 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6997 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6998 	.ndo_bpf = stmmac_bpf,
6999 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7000 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7001 };
7002 
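/* Restart the interface when a reset has been requested (e.g. after a TX
 * timeout). Runs from the service workqueue: the device is closed and
 * re-opened under the rtnl lock while STMMAC_RESETING is set.
 */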
7003 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7004 {
7005 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7006 		return;
7007 	if (test_bit(STMMAC_DOWN, &priv->state))
7008 		return;
7009 
7010 	netdev_err(priv->dev, "Reset adapter.\n");
7011 
7012 	rtnl_lock();
7013 	netif_trans_update(priv->dev);
7014 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7015 		usleep_range(1000, 2000);
7016 
7017 	set_bit(STMMAC_DOWN, &priv->state);
7018 	dev_close(priv->dev);
7019 	dev_open(priv->dev, NULL);
7020 	clear_bit(STMMAC_DOWN, &priv->state);
7021 	clear_bit(STMMAC_RESETING, &priv->state);
7022 	rtnl_unlock();
7023 }
7024 
7025 static void stmmac_service_task(struct work_struct *work)
7026 {
7027 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7028 			service_task);
7029 
7030 	stmmac_reset_subtask(priv);
7031 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7032 }
7033 
7034 /**
7035  *  stmmac_hw_init - Init the MAC device
7036  *  @priv: driver private structure
 *  Description: configure the MAC device according to the platform
 *  parameters and the HW capability register. It prepares the driver to use
 *  either ring or chain mode and to set up either enhanced or normal
 *  descriptors.
7041  */
7042 static int stmmac_hw_init(struct stmmac_priv *priv)
7043 {
7044 	int ret;
7045 
	/* dwmac-sun8i only works in chain mode */
7047 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7048 		chain_mode = 1;
7049 	priv->chain_mode = chain_mode;
7050 
7051 	/* Initialize HW Interface */
7052 	ret = stmmac_hwif_init(priv);
7053 	if (ret)
7054 		return ret;
7055 
	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7057 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7058 	if (priv->hw_cap_support) {
7059 		dev_info(priv->device, "DMA HW capability register supported\n");
7060 
		/* We can override some GMAC/DMA configuration fields passed
		 * by the platform (e.g. enh_desc, tx_coe) with the values
		 * read from the HW capability register, if supported.
		 */
7066 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7067 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7068 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7069 		priv->hw->pmt = priv->plat->pmt;
7070 		if (priv->dma_cap.hash_tb_sz) {
7071 			priv->hw->multicast_filter_bins =
7072 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7073 			priv->hw->mcast_bits_log2 =
7074 					ilog2(priv->hw->multicast_filter_bins);
7075 		}
7076 
7077 		/* TXCOE doesn't work in thresh DMA mode */
7078 		if (priv->plat->force_thresh_dma_mode)
7079 			priv->plat->tx_coe = 0;
7080 		else
7081 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7082 
		/* For GMAC4, rx_coe comes from the HW capability register. */
7084 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7085 
7086 		if (priv->dma_cap.rx_coe_type2)
7087 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7088 		else if (priv->dma_cap.rx_coe_type1)
7089 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7090 
7091 	} else {
7092 		dev_info(priv->device, "No HW DMA feature register supported\n");
7093 	}
7094 
7095 	if (priv->plat->rx_coe) {
7096 		priv->hw->rx_csum = priv->plat->rx_coe;
7097 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7098 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7099 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7100 	}
7101 	if (priv->plat->tx_coe)
7102 		dev_info(priv->device, "TX Checksum insertion supported\n");
7103 
7104 	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On LAN supported\n");
7106 		device_set_wakeup_capable(priv->device, 1);
7107 	}
7108 
7109 	if (priv->dma_cap.tsoen)
7110 		dev_info(priv->device, "TSO supported\n");
7111 
7112 	priv->hw->vlan_fail_q_en =
7113 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7114 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7115 
7116 	/* Run HW quirks, if any */
7117 	if (priv->hwif_quirks) {
7118 		ret = priv->hwif_quirks(priv);
7119 		if (ret)
7120 			return ret;
7121 	}
7122 
	/* Rx Watchdog is available in cores newer than 3.40.
	 * In some cases, for example on buggy HW, this feature has to be
	 * disabled; this can be done by setting the riwt_off field in the
	 * platform data.
	 */
7128 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7129 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7130 		priv->use_riwt = 1;
7131 		dev_info(priv->device,
7132 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7133 	}
7134 
7135 	return 0;
7136 }
7137 
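/* Register the per-channel NAPI instances: an RX NAPI and a TX NAPI for
 * each queue, plus a combined rxtx NAPI (used by the XDP/XSK zero-copy
 * path) for queues that have both an RX and a TX ring.
 */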
7138 static void stmmac_napi_add(struct net_device *dev)
7139 {
7140 	struct stmmac_priv *priv = netdev_priv(dev);
7141 	u32 queue, maxq;
7142 
7143 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7144 
7145 	for (queue = 0; queue < maxq; queue++) {
7146 		struct stmmac_channel *ch = &priv->channel[queue];
7147 
7148 		ch->priv_data = priv;
7149 		ch->index = queue;
7150 		spin_lock_init(&ch->lock);
7151 
		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_add_tx(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx);
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use)
			netif_napi_add(dev, &ch->rxtx_napi,
				       stmmac_napi_poll_rxtx);
7164 	}
7165 }
7166 
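/* Counterpart of stmmac_napi_add(): remove the per-channel NAPI instances. */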
7167 static void stmmac_napi_del(struct net_device *dev)
7168 {
7169 	struct stmmac_priv *priv = netdev_priv(dev);
7170 	u32 queue, maxq;
7171 
7172 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7173 
7174 	for (queue = 0; queue < maxq; queue++) {
7175 		struct stmmac_channel *ch = &priv->channel[queue];
7176 
7177 		if (queue < priv->plat->rx_queues_to_use)
7178 			netif_napi_del(&ch->rx_napi);
7179 		if (queue < priv->plat->tx_queues_to_use)
7180 			netif_napi_del(&ch->tx_napi);
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->rxtx_napi);
7185 	}
7186 }
7187 
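/* Change the number of RX/TX queues in use at runtime (ethtool channels):
 * stop the interface if it is running, rebuild the NAPI instances and the
 * default RSS table for the new counts, then re-open the interface.
 */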
7188 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7189 {
7190 	struct stmmac_priv *priv = netdev_priv(dev);
7191 	int ret = 0, i;
7192 
7193 	if (netif_running(dev))
7194 		stmmac_release(dev);
7195 
7196 	stmmac_napi_del(dev);
7197 
7198 	priv->plat->rx_queues_to_use = rx_cnt;
7199 	priv->plat->tx_queues_to_use = tx_cnt;
7200 	if (!netif_is_rxfh_configured(dev))
7201 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7202 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7203 									rx_cnt);
7204 
7205 	stmmac_napi_add(dev);
7206 
7207 	if (netif_running(dev))
7208 		ret = stmmac_open(dev);
7209 
7210 	return ret;
7211 }
7212 
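/* Change the RX/TX descriptor ring sizes at runtime (ethtool ring
 * parameters): stop the interface if it is running, update the sizes and
 * re-open it so the rings are re-allocated with the new lengths.
 */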
7213 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7214 {
7215 	struct stmmac_priv *priv = netdev_priv(dev);
7216 	int ret = 0;
7217 
7218 	if (netif_running(dev))
7219 		stmmac_release(dev);
7220 
7221 	priv->dma_conf.dma_rx_size = rx_size;
7222 	priv->dma_conf.dma_tx_size = tx_size;
7223 
7224 	if (netif_running(dev))
7225 		ret = stmmac_open(dev);
7226 
7227 	return ret;
7228 }
7229 
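/* FPE link-partner handshake task: poll up to 20 times at 500ms intervals,
 * re-sending verify mPackets until both the local and the link-partner
 * state machines are entering the ON state, then program FPE in the MAC
 * and mark both sides as FPE_STATE_ON.
 */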
#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7231 static void stmmac_fpe_lp_task(struct work_struct *work)
7232 {
7233 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7234 						fpe_task);
7235 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7236 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7237 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7238 	bool *hs_enable = &fpe_cfg->hs_enable;
7239 	bool *enable = &fpe_cfg->enable;
7240 	int retries = 20;
7241 
7242 	while (retries-- > 0) {
7243 		/* Bail out immediately if FPE handshake is OFF */
7244 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7245 			break;
7246 
7247 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7248 		    *lp_state == FPE_STATE_ENTERING_ON) {
7249 			stmmac_fpe_configure(priv, priv->ioaddr,
7250 					     priv->plat->tx_queues_to_use,
7251 					     priv->plat->rx_queues_to_use,
7252 					     *enable);
7253 
7254 			netdev_info(priv->dev, "configured FPE\n");
7255 
7256 			*lo_state = FPE_STATE_ON;
7257 			*lp_state = FPE_STATE_ON;
7258 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7259 			break;
7260 		}
7261 
7262 		if ((*lo_state == FPE_STATE_CAPABLE ||
7263 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7264 		     *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7266 				    *lo_state, *lp_state);
7267 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7268 						MPACKET_VERIFY);
7269 		}
7270 		/* Sleep then retry */
7271 		msleep(500);
7272 	}
7273 
7274 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7275 }
7276 
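/* Start or stop the FPE verification handshake: enabling sends an initial
 * verify mPacket, disabling resets both the local and the link-partner
 * state machines to FPE_STATE_OFF.
 */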
7277 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7278 {
7279 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7280 		if (enable) {
7281 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7282 						MPACKET_VERIFY);
7283 		} else {
7284 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7285 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7286 		}
7287 
7288 		priv->plat->fpe_cfg->hs_enable = enable;
7289 	}
7290 }
7291 
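/* XDP RX metadata hook: return the hardware RX timestamp of the current
 * frame, corrected by the CDC error adjustment, if RX timestamping is
 * enabled and the descriptor carries a valid timestamp.
 */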
7292 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7293 {
7294 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7295 	struct dma_desc *desc_contains_ts = ctx->desc;
7296 	struct stmmac_priv *priv = ctx->priv;
7297 	struct dma_desc *ndesc = ctx->ndesc;
7298 	struct dma_desc *desc = ctx->desc;
7299 	u64 ns = 0;
7300 
7301 	if (!priv->hwts_rx_en)
7302 		return -ENODATA;
7303 
	/* For GMAC4, the valid timestamp is held in the context (next) descriptor. */
7305 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7306 		desc_contains_ts = ndesc;
7307 
7308 	/* Check if timestamp is available */
7309 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7310 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7311 		ns -= priv->plat->cdc_error_adj;
7312 		*timestamp = ns_to_ktime(ns);
7313 		return 0;
7314 	}
7315 
7316 	return -ENODATA;
7317 }
7318 
7319 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7320 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7321 };
7322 
7323 /**
7324  * stmmac_dvr_probe
7325  * @device: device pointer
7326  * @plat_dat: platform data pointer
7327  * @res: stmmac resource pointer
 * Description: this is the main probe function: it allocates the net_device
 * and the driver private structure, then initializes and registers the
 * device.
 * Return:
 * 0 on success, otherwise a negative errno.
7332  */
7333 int stmmac_dvr_probe(struct device *device,
7334 		     struct plat_stmmacenet_data *plat_dat,
7335 		     struct stmmac_resources *res)
7336 {
7337 	struct net_device *ndev = NULL;
7338 	struct stmmac_priv *priv;
7339 	u32 rxq;
7340 	int i, ret = 0;
7341 
7342 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7343 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7344 	if (!ndev)
7345 		return -ENOMEM;
7346 
7347 	SET_NETDEV_DEV(ndev, device);
7348 
7349 	priv = netdev_priv(ndev);
7350 	priv->device = device;
7351 	priv->dev = ndev;
7352 
7353 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7354 		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
7355 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7356 		u64_stats_init(&priv->xstats.txq_stats[i].syncp);
7357 
7358 	stmmac_set_ethtool_ops(ndev);
7359 	priv->pause = pause;
7360 	priv->plat = plat_dat;
7361 	priv->ioaddr = res->addr;
7362 	priv->dev->base_addr = (unsigned long)res->addr;
7363 	priv->plat->dma_cfg->multi_msi_en =
7364 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7365 
7366 	priv->dev->irq = res->irq;
7367 	priv->wol_irq = res->wol_irq;
7368 	priv->lpi_irq = res->lpi_irq;
7369 	priv->sfty_ce_irq = res->sfty_ce_irq;
7370 	priv->sfty_ue_irq = res->sfty_ue_irq;
7371 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7372 		priv->rx_irq[i] = res->rx_irq[i];
7373 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7374 		priv->tx_irq[i] = res->tx_irq[i];
7375 
7376 	if (!is_zero_ether_addr(res->mac))
7377 		eth_hw_addr_set(priv->dev, res->mac);
7378 
7379 	dev_set_drvdata(device, priv->dev);
7380 
7381 	/* Verify driver arguments */
7382 	stmmac_verify_args();
7383 
7384 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7385 	if (!priv->af_xdp_zc_qps)
7386 		return -ENOMEM;
7387 
7388 	/* Allocate workqueue */
7389 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7390 	if (!priv->wq) {
7391 		dev_err(priv->device, "failed to create workqueue\n");
7392 		ret = -ENOMEM;
7393 		goto error_wq_init;
7394 	}
7395 
7396 	INIT_WORK(&priv->service_task, stmmac_service_task);
7397 
7398 	/* Initialize Link Partner FPE workqueue */
7399 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7400 
	/* Override with kernel parameters if supplied (XXX: this needs to
	 * handle multiple instances).
	 */
7404 	if ((phyaddr >= 0) && (phyaddr <= 31))
7405 		priv->plat->phy_addr = phyaddr;
7406 
7407 	if (priv->plat->stmmac_rst) {
7408 		ret = reset_control_assert(priv->plat->stmmac_rst);
7409 		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers only provide a reset callback
		 * instead of an assert + deassert callback pair.
		 */
7413 		if (ret == -ENOTSUPP)
7414 			reset_control_reset(priv->plat->stmmac_rst);
7415 	}
7416 
7417 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7418 	if (ret == -ENOTSUPP)
7419 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7420 			ERR_PTR(ret));
7421 
7422 	/* Init MAC and get the capabilities */
7423 	ret = stmmac_hw_init(priv);
7424 	if (ret)
7425 		goto error_hw_init;
7426 
7427 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7428 	 */
7429 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7430 		priv->plat->dma_cfg->dche = false;
7431 
7432 	stmmac_check_ether_addr(priv);
7433 
7434 	ndev->netdev_ops = &stmmac_netdev_ops;
7435 
7436 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7437 
7438 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7439 			    NETIF_F_RXCSUM;
7440 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7441 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7442 
	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;
7447 
7448 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7449 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7450 		if (priv->plat->has_gmac4)
7451 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7452 		priv->tso = true;
7453 		dev_info(priv->device, "TSO feature enabled\n");
7454 	}
7455 
7456 	if (priv->dma_cap.sphen &&
7457 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7458 		ndev->hw_features |= NETIF_F_GRO;
7459 		priv->sph_cap = true;
7460 		priv->sph = priv->sph_cap;
7461 		dev_info(priv->device, "SPH feature enabled\n");
7462 	}
7463 
7464 	/* Ideally our host DMA address width is the same as for the
7465 	 * device. However, it may differ and then we have to use our
7466 	 * host DMA width for allocation and the device DMA width for
7467 	 * register handling.
7468 	 */
7469 	if (priv->plat->host_dma_width)
7470 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7471 	else
7472 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7473 
7474 	if (priv->dma_cap.host_dma_width) {
7475 		ret = dma_set_mask_and_coherent(device,
7476 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7477 		if (!ret) {
7478 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7479 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7480 
7481 			/*
7482 			 * If more than 32 bits can be addressed, make sure to
7483 			 * enable enhanced addressing mode.
7484 			 */
7485 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7486 				priv->plat->dma_cfg->eame = true;
7487 		} else {
7488 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7489 			if (ret) {
7490 				dev_err(priv->device, "Failed to set DMA Mask\n");
7491 				goto error_hw_init;
7492 			}
7493 
7494 			priv->dma_cap.host_dma_width = 32;
7495 		}
7496 	}
7497 
7498 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7499 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7500 #ifdef STMMAC_VLAN_TAG_USED
7501 	/* Both mac100 and gmac support receive VLAN tag detection */
7502 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7503 	if (priv->dma_cap.vlhash) {
7504 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7505 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7506 	}
7507 	if (priv->dma_cap.vlins) {
7508 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7509 		if (priv->dma_cap.dvlan)
7510 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7511 	}
7512 #endif
7513 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7514 
7515 	priv->xstats.threshold = tc;
7516 
7517 	/* Initialize RSS */
7518 	rxq = priv->plat->rx_queues_to_use;
7519 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7520 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7521 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7522 
7523 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7524 		ndev->features |= NETIF_F_RXHASH;
7525 
7526 	ndev->vlan_features |= ndev->features;
7527 	/* TSO doesn't work on VLANs yet */
7528 	ndev->vlan_features &= ~NETIF_F_TSO;
7529 
7530 	/* MTU range: 46 - hw-specific max */
7531 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7532 	if (priv->plat->has_xgmac)
7533 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7534 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7535 		ndev->max_mtu = JUMBO_LEN;
7536 	else
7537 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu is larger than
	 * ndev->max_mtu, or if plat->maxmtu is smaller than ndev->min_mtu,
	 * which is an invalid range.
	 */
7541 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7542 	    (priv->plat->maxmtu >= ndev->min_mtu))
7543 		ndev->max_mtu = priv->plat->maxmtu;
7544 	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu has an invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);
7548 
7549 	if (flow_ctrl)
7550 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7551 
7552 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7553 
7554 	/* Setup channels NAPI */
7555 	stmmac_napi_add(ndev);
7556 
7557 	mutex_init(&priv->lock);
7558 
	/* If a specific clk_csr value is passed from the platform, the CSR
	 * Clock Range selection is fixed and cannot be changed at run-time.
	 * Otherwise, the driver will try to set the MDC clock dynamically
	 * according to the actual CSR clock input.
	 */
7565 	if (priv->plat->clk_csr >= 0)
7566 		priv->clk_csr = priv->plat->clk_csr;
7567 	else
7568 		stmmac_clk_csr_set(priv);
7569 
7570 	stmmac_check_pcs_mode(priv);
7571 
7572 	pm_runtime_get_noresume(device);
7573 	pm_runtime_set_active(device);
7574 	if (!pm_runtime_enabled(device))
7575 		pm_runtime_enable(device);
7576 
7577 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7578 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7579 		/* MDIO bus Registration */
7580 		ret = stmmac_mdio_register(ndev);
7581 		if (ret < 0) {
7582 			dev_err_probe(priv->device, ret,
7583 				      "%s: MDIO bus (id: %d) registration failed\n",
7584 				      __func__, priv->plat->bus_id);
7585 			goto error_mdio_register;
7586 		}
7587 	}
7588 
7589 	if (priv->plat->speed_mode_2500)
7590 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7591 
7592 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7593 		ret = stmmac_xpcs_setup(priv->mii);
7594 		if (ret)
7595 			goto error_xpcs_setup;
7596 	}
7597 
7598 	ret = stmmac_phy_setup(priv);
7599 	if (ret) {
7600 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7601 		goto error_phy_setup;
7602 	}
7603 
7604 	ret = register_netdev(ndev);
7605 	if (ret) {
7606 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7607 			__func__, ret);
7608 		goto error_netdev_register;
7609 	}
7610 
7611 #ifdef CONFIG_DEBUG_FS
7612 	stmmac_init_fs(ndev);
7613 #endif
7614 
7615 	if (priv->plat->dump_debug_regs)
7616 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7617 
7618 	/* Let pm_runtime_put() disable the clocks.
7619 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7620 	 */
7621 	pm_runtime_put(device);
7622 
7623 	return ret;
7624 
7625 error_netdev_register:
7626 	phylink_destroy(priv->phylink);
7627 error_xpcs_setup:
7628 error_phy_setup:
7629 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7630 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7631 		stmmac_mdio_unregister(ndev);
7632 error_mdio_register:
7633 	stmmac_napi_del(ndev);
7634 error_hw_init:
7635 	destroy_workqueue(priv->wq);
7636 error_wq_init:
7637 	bitmap_free(priv->af_xdp_zc_qps);
7638 
7639 	return ret;
7640 }
7641 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7642 
7643 /**
7644  * stmmac_dvr_remove
7645  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
7648  */
7649 void stmmac_dvr_remove(struct device *dev)
7650 {
7651 	struct net_device *ndev = dev_get_drvdata(dev);
7652 	struct stmmac_priv *priv = netdev_priv(ndev);
7653 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7655 
7656 	pm_runtime_get_sync(dev);
7657 
7658 	stmmac_stop_all_dma(priv);
7659 	stmmac_mac_set(priv, priv->ioaddr, false);
7660 	netif_carrier_off(ndev);
7661 	unregister_netdev(ndev);
7662 
7663 #ifdef CONFIG_DEBUG_FS
7664 	stmmac_exit_fs(ndev);
7665 #endif
7666 	phylink_destroy(priv->phylink);
7667 	if (priv->plat->stmmac_rst)
7668 		reset_control_assert(priv->plat->stmmac_rst);
7669 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7670 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7671 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7672 		stmmac_mdio_unregister(ndev);
7673 	destroy_workqueue(priv->wq);
7674 	mutex_destroy(&priv->lock);
7675 	bitmap_free(priv->af_xdp_zc_qps);
7676 
7677 	pm_runtime_disable(dev);
7678 	pm_runtime_put_noidle(dev);
7679 }
7680 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7681 
7682 /**
7683  * stmmac_suspend - suspend callback
7684  * @dev: device pointer
 * Description: this is the function to suspend the device; it is called by
 * the platform driver to stop the network queues, release the resources,
 * program the PMT register (for WoL) and clean up the driver state.
7688  */
7689 int stmmac_suspend(struct device *dev)
7690 {
7691 	struct net_device *ndev = dev_get_drvdata(dev);
7692 	struct stmmac_priv *priv = netdev_priv(ndev);
7693 	u32 chan;
7694 
7695 	if (!ndev || !netif_running(ndev))
7696 		return 0;
7697 
7698 	mutex_lock(&priv->lock);
7699 
7700 	netif_device_detach(ndev);
7701 
7702 	stmmac_disable_all_queues(priv);
7703 
7704 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7705 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7706 
7707 	if (priv->eee_enabled) {
7708 		priv->tx_path_in_lpi_mode = false;
7709 		del_timer_sync(&priv->eee_ctrl_timer);
7710 	}
7711 
7712 	/* Stop TX/RX DMA */
7713 	stmmac_stop_all_dma(priv);
7714 
7715 	if (priv->plat->serdes_powerdown)
7716 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7717 
7718 	/* Enable Power down mode by programming the PMT regs */
7719 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7720 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7721 		priv->irq_wake = 1;
7722 	} else {
7723 		stmmac_mac_set(priv, priv->ioaddr, false);
7724 		pinctrl_pm_select_sleep_state(priv->device);
7725 	}
7726 
7727 	mutex_unlock(&priv->lock);
7728 
7729 	rtnl_lock();
7730 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7731 		phylink_suspend(priv->phylink, true);
7732 	} else {
7733 		if (device_may_wakeup(priv->device))
7734 			phylink_speed_down(priv->phylink, false);
7735 		phylink_suspend(priv->phylink, false);
7736 	}
7737 	rtnl_unlock();
7738 
7739 	if (priv->dma_cap.fpesel) {
7740 		/* Disable FPE */
7741 		stmmac_fpe_configure(priv, priv->ioaddr,
7742 				     priv->plat->tx_queues_to_use,
7743 				     priv->plat->rx_queues_to_use, false);
7744 
7745 		stmmac_fpe_handshake(priv, false);
7746 		stmmac_fpe_stop_wq(priv);
7747 	}
7748 
7749 	priv->speed = SPEED_UNKNOWN;
7750 	return 0;
7751 }
7752 EXPORT_SYMBOL_GPL(stmmac_suspend);
7753 
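/* Reset the software ring indices so that RX/TX processing restarts from
 * descriptor 0, e.g. when the rings are re-configured on resume.
 */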
7754 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7755 {
7756 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7757 
7758 	rx_q->cur_rx = 0;
7759 	rx_q->dirty_rx = 0;
7760 }
7761 
7762 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7763 {
7764 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7765 
7766 	tx_q->cur_tx = 0;
7767 	tx_q->dirty_tx = 0;
7768 	tx_q->mss = 0;
7769 
7770 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7771 }
7772 
7773 /**
7774  * stmmac_reset_queues_param - reset queue parameters
7775  * @priv: device pointer
7776  */
7777 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7778 {
7779 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7780 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7781 	u32 queue;
7782 
7783 	for (queue = 0; queue < rx_cnt; queue++)
7784 		stmmac_reset_rx_queue(priv, queue);
7785 
7786 	for (queue = 0; queue < tx_cnt; queue++)
7787 		stmmac_reset_tx_queue(priv, queue);
7788 }
7789 
7790 /**
7791  * stmmac_resume - resume callback
7792  * @dev: device pointer
 * Description: on resume, this function is invoked to set up the DMA and the
 * core in a usable state.
7795  */
7796 int stmmac_resume(struct device *dev)
7797 {
7798 	struct net_device *ndev = dev_get_drvdata(dev);
7799 	struct stmmac_priv *priv = netdev_priv(ndev);
7800 	int ret;
7801 
7802 	if (!netif_running(ndev))
7803 		return 0;
7804 
	/* The Power Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received. Anyway,
	 * it's better to clear this bit manually because it can cause
	 * problems when the resume is triggered by another device
	 * (e.g. a serial console).
	 */
7811 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7812 		mutex_lock(&priv->lock);
7813 		stmmac_pmt(priv, priv->hw, 0);
7814 		mutex_unlock(&priv->lock);
7815 		priv->irq_wake = 0;
7816 	} else {
7817 		pinctrl_pm_select_default_state(priv->device);
7818 		/* reset the phy so that it's ready */
7819 		if (priv->mii)
7820 			stmmac_mdio_reset(priv->mii);
7821 	}
7822 
7823 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7824 	    priv->plat->serdes_powerup) {
7825 		ret = priv->plat->serdes_powerup(ndev,
7826 						 priv->plat->bsp_priv);
7827 
7828 		if (ret < 0)
7829 			return ret;
7830 	}
7831 
7832 	rtnl_lock();
7833 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7834 		phylink_resume(priv->phylink);
7835 	} else {
7836 		phylink_resume(priv->phylink);
7837 		if (device_may_wakeup(priv->device))
7838 			phylink_speed_up(priv->phylink);
7839 	}
7840 	rtnl_unlock();
7841 
7842 	rtnl_lock();
7843 	mutex_lock(&priv->lock);
7844 
7845 	stmmac_reset_queues_param(priv);
7846 
7847 	stmmac_free_tx_skbufs(priv);
7848 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7849 
7850 	stmmac_hw_setup(ndev, false);
7851 	stmmac_init_coalesce(priv);
7852 	stmmac_set_rx_mode(ndev);
7853 
7854 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7855 
7856 	stmmac_enable_all_queues(priv);
7857 	stmmac_enable_all_dma_irq(priv);
7858 
7859 	mutex_unlock(&priv->lock);
7860 	rtnl_unlock();
7861 
7862 	netif_device_attach(ndev);
7863 
7864 	return 0;
7865 }
7866 EXPORT_SYMBOL_GPL(stmmac_resume);
7867 
7868 #ifndef MODULE
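/* When the driver is built-in, options can be passed on the kernel command
 * line as a comma-separated list of key:value pairs parsed below,
 * for example: stmmaceth=watchdog:4000,debug:16
 */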
7869 static int __init stmmac_cmdline_opt(char *str)
7870 {
7871 	char *opt;
7872 
7873 	if (!str || !*str)
7874 		return 1;
7875 	while ((opt = strsep(&str, ",")) != NULL) {
7876 		if (!strncmp(opt, "debug:", 6)) {
7877 			if (kstrtoint(opt + 6, 0, &debug))
7878 				goto err;
7879 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7880 			if (kstrtoint(opt + 8, 0, &phyaddr))
7881 				goto err;
7882 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7883 			if (kstrtoint(opt + 7, 0, &buf_sz))
7884 				goto err;
7885 		} else if (!strncmp(opt, "tc:", 3)) {
7886 			if (kstrtoint(opt + 3, 0, &tc))
7887 				goto err;
7888 		} else if (!strncmp(opt, "watchdog:", 9)) {
7889 			if (kstrtoint(opt + 9, 0, &watchdog))
7890 				goto err;
7891 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7892 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7893 				goto err;
7894 		} else if (!strncmp(opt, "pause:", 6)) {
7895 			if (kstrtoint(opt + 6, 0, &pause))
7896 				goto err;
7897 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7898 			if (kstrtoint(opt + 10, 0, &eee_timer))
7899 				goto err;
7900 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7901 			if (kstrtoint(opt + 11, 0, &chain_mode))
7902 				goto err;
7903 		}
7904 	}
7905 	return 1;
7906 
7907 err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7909 	return 1;
7910 }
7911 
7912 __setup("stmmaceth=", stmmac_cmdline_opt);
7913 #endif /* MODULE */
7914 
7915 static int __init stmmac_init(void)
7916 {
7917 #ifdef CONFIG_DEBUG_FS
7918 	/* Create debugfs main directory if it doesn't exist yet */
7919 	if (!stmmac_fs_dir)
7920 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7921 	register_netdevice_notifier(&stmmac_notifier);
7922 #endif
7923 
7924 	return 0;
7925 }
7926 
7927 static void __exit stmmac_exit(void)
7928 {
7929 #ifdef CONFIG_DEBUG_FS
7930 	unregister_netdevice_notifier(&stmmac_notifier);
7931 	debugfs_remove_recursive(stmmac_fs_dir);
7932 #endif
7933 }
7934 
7935 module_init(stmmac_init)
7936 module_exit(stmmac_exit)
7937 
7938 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7939 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7940 MODULE_LICENSE("GPL");
7941