1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
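/* STMMAC_ALIGN() rounds a size up to the cache line size and then to a
 * 16-byte boundary. TSO payloads are split into buffers of at most
 * SZ_16K - 1 bytes, presumably the largest length the descriptor
 * buffer-size field can encode.
 */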
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
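/* Verdict flags OR-ed together to record how an XDP buffer was handled:
 * passed to the network stack, consumed (dropped), transmitted on the
 * XDP TX path, or redirected to another device/map.
 */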
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allows the user to force the use of chain mode instead of ring mode
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
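/* Convert a coalescing interval given in microseconds into the ktime
 * value used when arming the TX coalescing timer.
 */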
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
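/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: enables or disables the stmmac and pclk bus clocks, plus any
 * additional platform clocks handled by the clks_config() callback.
 * Return: 0 on success, a negative errno otherwise.
 */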
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
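/* Disable NAPI on every channel. Queues running AF_XDP zero-copy use the
 * combined rxtx NAPI instance instead of the separate rx/tx ones.
 */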
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
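/* Queue the service task unless the interface is going down or the task has
 * already been scheduled.
 */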
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
	/* The platform-provided default clk_csr is assumed valid for all
	 * other cases except the ones listed below. For clock rates higher
	 * than the IEEE 802.3 specified frequency we cannot estimate the
	 * proper divider, as the frequency of clk_csr_i is not known, so we
	 * do not change the default divider.
	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
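	/* One descriptor slot is always kept unused so that a completely full
	 * ring can be told apart from an empty one, hence the "- 1" below.
	 */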
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
 * stmmac_rx_dirty - get the number of dirty RX descriptors awaiting refill
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function checks that all TX queues have completed their
 * work and, if so, puts the MAC transmitter into the LPI state.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
 * Description: this function exits the LPI state and disables EEE. It is
 * called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
 *  if there is no data transfer and we are not already in the LPI state,
 *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
	/* When using the PCS we cannot access the PHY registers at this
	 * stage, so we do not support extra features like EEE.
	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing(TX)
614  *  and incoming(RX) packets time stamping based on user input.
615  *  Return Value:
 *  0 on success and an appropriate negative error code on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
 *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
 * It is rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
	/* Calculate the default addend value:
	 * addend = ((1e9 / sec_inc) * 2^32) / clk_ptp_rate
	 * i.e. 2^32 divided by the ratio between the PTP clock rate and the
	 * frequency corresponding to the programmed sub-second increment.
	 */
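	/* Illustrative numbers only: if sec_inc were 40 ns and clk_ptp_rate
	 * were 50 MHz, then 1e9 / sec_inc = 25,000,000 and
	 * addend = (25e6 << 32) / 50e6 = 0x80000000.
	 */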
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
 *  @duplex: duplex mode of the link
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
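/* On link up with the FPE handshake enabled, start verification by sending a
 * verify mPacket; otherwise reset both the local and link-partner FPE states.
 */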
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 					MPACKET_VERIFY);
969 	} else {
970 		*lo_state = FPE_STATE_OFF;
971 		*lp_state = FPE_STATE_OFF;
972 	}
973 }
974 
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 				 unsigned int mode, phy_interface_t interface)
977 {
978 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979 
980 	stmmac_mac_set(priv, priv->ioaddr, false);
981 	priv->eee_active = false;
982 	priv->tx_lpi_enabled = false;
983 	priv->eee_enabled = stmmac_eee_init(priv);
984 	stmmac_set_eee_pls(priv, priv->hw, false);
985 
986 	if (priv->dma_cap.fpesel)
987 		stmmac_fpe_link_state_handle(priv, false);
988 }
989 
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 			       struct phy_device *phy,
992 			       unsigned int mode, phy_interface_t interface,
993 			       int speed, int duplex,
994 			       bool tx_pause, bool rx_pause)
995 {
996 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 	u32 old_ctrl, ctrl;
998 
999 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 	    priv->plat->serdes_powerup)
1001 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002 
1003 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005 
1006 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 		switch (speed) {
1008 		case SPEED_10000:
1009 			ctrl |= priv->hw->link.xgmii.speed10000;
1010 			break;
1011 		case SPEED_5000:
1012 			ctrl |= priv->hw->link.xgmii.speed5000;
1013 			break;
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.xgmii.speed2500;
1016 			break;
1017 		default:
1018 			return;
1019 		}
1020 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 		switch (speed) {
1022 		case SPEED_100000:
1023 			ctrl |= priv->hw->link.xlgmii.speed100000;
1024 			break;
1025 		case SPEED_50000:
1026 			ctrl |= priv->hw->link.xlgmii.speed50000;
1027 			break;
1028 		case SPEED_40000:
1029 			ctrl |= priv->hw->link.xlgmii.speed40000;
1030 			break;
1031 		case SPEED_25000:
1032 			ctrl |= priv->hw->link.xlgmii.speed25000;
1033 			break;
1034 		case SPEED_10000:
1035 			ctrl |= priv->hw->link.xgmii.speed10000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.speed2500;
1039 			break;
1040 		case SPEED_1000:
1041 			ctrl |= priv->hw->link.speed1000;
1042 			break;
1043 		default:
1044 			return;
1045 		}
1046 	} else {
1047 		switch (speed) {
1048 		case SPEED_2500:
1049 			ctrl |= priv->hw->link.speed2500;
1050 			break;
1051 		case SPEED_1000:
1052 			ctrl |= priv->hw->link.speed1000;
1053 			break;
1054 		case SPEED_100:
1055 			ctrl |= priv->hw->link.speed100;
1056 			break;
1057 		case SPEED_10:
1058 			ctrl |= priv->hw->link.speed10;
1059 			break;
1060 		default:
1061 			return;
1062 		}
1063 	}
1064 
1065 	priv->speed = speed;
1066 
1067 	if (priv->plat->fix_mac_speed)
1068 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069 
1070 	if (!duplex)
1071 		ctrl &= ~priv->hw->link.duplex;
1072 	else
1073 		ctrl |= priv->hw->link.duplex;
1074 
1075 	/* Flow Control operation */
1076 	if (rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_AUTO;
1078 	else if (rx_pause && !tx_pause)
1079 		priv->flow_ctrl = FLOW_RX;
1080 	else if (!rx_pause && tx_pause)
1081 		priv->flow_ctrl = FLOW_TX;
1082 	else
1083 		priv->flow_ctrl = FLOW_OFF;
1084 
1085 	stmmac_mac_flow_ctrl(priv, duplex);
1086 
1087 	if (ctrl != old_ctrl)
1088 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 
1090 	stmmac_mac_set(priv, priv->ioaddr, true);
1091 	if (phy && priv->dma_cap.eee) {
1092 		priv->eee_active =
1093 			phy_init_eee(phy, !(priv->plat->flags &
1094 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 		priv->eee_enabled = stmmac_eee_init(priv);
1096 		priv->tx_lpi_enabled = priv->eee_enabled;
1097 		stmmac_set_eee_pls(priv, priv->hw, true);
1098 	}
1099 
1100 	if (priv->dma_cap.fpesel)
1101 		stmmac_fpe_link_state_handle(priv, true);
1102 
1103 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 		stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106 
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
 * Description: this is to verify if the HW supports the Physical Coding
 * Sublayer (PCS), which can be used when the MAC is configured for the
 * RGMII or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
	/* Some DT bindings do not set up the PHY handle. Let's try to
	 * parse it manually.
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
	/* Half-duplex can only work with a single TX queue */
1204 	if (priv->plat->tx_queues_to_use > 1)
1205 		priv->phylink_config.mac_capabilities &=
1206 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 	else
1208 		priv->phylink_config.mac_capabilities |=
1209 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211 
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 	struct stmmac_mdio_bus_data *mdio_bus_data;
1215 	int mode = priv->plat->phy_interface;
1216 	struct fwnode_handle *fwnode;
1217 	struct phylink *phylink;
1218 	int max_speed;
1219 
1220 	priv->phylink_config.dev = &priv->dev->dev;
1221 	priv->phylink_config.type = PHYLINK_NETDEV;
1222 	priv->phylink_config.mac_managed_pm = true;
1223 
1224 	mdio_bus_data = priv->plat->mdio_bus_data;
1225 	if (mdio_bus_data)
1226 		priv->phylink_config.ovr_an_inband =
1227 			mdio_bus_data->xpcs_an_inband;
1228 
1229 	/* Set the platform/firmware specified interface mode. Note, phylink
1230 	 * deals with the PHY interface mode, not the MAC interface mode.
1231 	 */
1232 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1233 
1234 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 	if (priv->hw->xpcs)
1236 		xpcs_get_interfaces(priv->hw->xpcs,
1237 				    priv->phylink_config.supported_interfaces);
1238 
1239 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 						MAC_10FD | MAC_100FD |
1241 						MAC_1000FD;
1242 
1243 	stmmac_set_half_duplex(priv);
1244 
1245 	/* Get the MAC specific capabilities */
1246 	stmmac_mac_phylink_get_caps(priv);
1247 
1248 	max_speed = priv->plat->max_speed;
1249 	if (max_speed)
1250 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251 
1252 	fwnode = priv->plat->port_node;
1253 	if (!fwnode)
1254 		fwnode = dev_fwnode(priv->device);
1255 
1256 	phylink = phylink_create(&priv->phylink_config, fwnode,
1257 				 mode, &stmmac_phylink_mac_ops);
1258 	if (IS_ERR(phylink))
1259 		return PTR_ERR(phylink);
1260 
1261 	priv->phylink = phylink;
1262 	return 0;
1263 }
1264 
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 				    struct stmmac_dma_conf *dma_conf)
1267 {
1268 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 	unsigned int desc_size;
1270 	void *head_rx;
1271 	u32 queue;
1272 
1273 	/* Display RX rings */
1274 	for (queue = 0; queue < rx_cnt; queue++) {
1275 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276 
1277 		pr_info("\tRX Queue %u rings\n", queue);
1278 
1279 		if (priv->extend_desc) {
1280 			head_rx = (void *)rx_q->dma_erx;
1281 			desc_size = sizeof(struct dma_extended_desc);
1282 		} else {
1283 			head_rx = (void *)rx_q->dma_rx;
1284 			desc_size = sizeof(struct dma_desc);
1285 		}
1286 
1287 		/* Display RX ring */
1288 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 				    rx_q->dma_rx_phy, desc_size);
1290 	}
1291 }
1292 
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 				    struct stmmac_dma_conf *dma_conf)
1295 {
1296 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 	unsigned int desc_size;
1298 	void *head_tx;
1299 	u32 queue;
1300 
1301 	/* Display TX rings */
1302 	for (queue = 0; queue < tx_cnt; queue++) {
1303 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304 
1305 		pr_info("\tTX Queue %d rings\n", queue);
1306 
1307 		if (priv->extend_desc) {
1308 			head_tx = (void *)tx_q->dma_etx;
1309 			desc_size = sizeof(struct dma_extended_desc);
1310 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 			head_tx = (void *)tx_q->dma_entx;
1312 			desc_size = sizeof(struct dma_edesc);
1313 		} else {
1314 			head_tx = (void *)tx_q->dma_tx;
1315 			desc_size = sizeof(struct dma_desc);
1316 		}
1317 
1318 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 				    tx_q->dma_tx_phy, desc_size);
1320 	}
1321 }
1322 
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 				 struct stmmac_dma_conf *dma_conf)
1325 {
1326 	/* Display RX ring */
1327 	stmmac_display_rx_rings(priv, dma_conf);
1328 
1329 	/* Display TX ring */
1330 	stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332 
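/* Map the MTU to the DMA buffer size programmed into the RX descriptors:
 * MTUs fitting the default 1536-byte buffer keep it, larger ones are bumped
 * to the next supported size (2 KiB, 4 KiB, 8 KiB or 16 KiB).
 */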
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 	int ret = bufsize;
1336 
1337 	if (mtu >= BUF_SIZE_8KiB)
1338 		ret = BUF_SIZE_16KiB;
1339 	else if (mtu >= BUF_SIZE_4KiB)
1340 		ret = BUF_SIZE_8KiB;
1341 	else if (mtu >= BUF_SIZE_2KiB)
1342 		ret = BUF_SIZE_4KiB;
1343 	else if (mtu > DEFAULT_BUFSIZE)
1344 		ret = BUF_SIZE_2KiB;
1345 	else
1346 		ret = DEFAULT_BUFSIZE;
1347 
1348 	return ret;
1349 }
1350 
1351 /**
1352  * stmmac_clear_rx_descriptors - clear RX descriptors
1353  * @priv: driver private structure
1354  * @dma_conf: structure to take the dma data
1355  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
1358  */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 					struct stmmac_dma_conf *dma_conf,
1361 					u32 queue)
1362 {
1363 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 	int i;
1365 
1366 	/* Clear the RX descriptors */
1367 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 		if (priv->extend_desc)
1369 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 					priv->use_riwt, priv->mode,
1371 					(i == dma_conf->dma_rx_size - 1),
1372 					dma_conf->dma_buf_sz);
1373 		else
1374 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 					priv->use_riwt, priv->mode,
1376 					(i == dma_conf->dma_rx_size - 1),
1377 					dma_conf->dma_buf_sz);
1378 }
1379 
1380 /**
1381  * stmmac_clear_tx_descriptors - clear tx descriptors
1382  * @priv: driver private structure
1383  * @dma_conf: structure to take the dma data
1384  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
1387  */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 					struct stmmac_dma_conf *dma_conf,
1390 					u32 queue)
1391 {
1392 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 	int i;
1394 
1395 	/* Clear the TX descriptors */
1396 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 		int last = (i == (dma_conf->dma_tx_size - 1));
1398 		struct dma_desc *p;
1399 
1400 		if (priv->extend_desc)
1401 			p = &tx_q->dma_etx[i].basic;
1402 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 			p = &tx_q->dma_entx[i].basic;
1404 		else
1405 			p = &tx_q->dma_tx[i];
1406 
1407 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 	}
1409 }
1410 
1411 /**
1412  * stmmac_clear_descriptors - clear descriptors
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
1417  */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 				     struct stmmac_dma_conf *dma_conf)
1420 {
1421 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 	u32 queue;
1424 
1425 	/* Clear the RX descriptors */
1426 	for (queue = 0; queue < rx_queue_cnt; queue++)
1427 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428 
1429 	/* Clear the TX descriptors */
1430 	for (queue = 0; queue < tx_queue_cnt; queue++)
1431 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433 
1434 /**
1435  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436  * @priv: driver private structure
1437  * @dma_conf: structure to take the dma data
1438  * @p: descriptor pointer
1439  * @i: descriptor index
1440  * @flags: gfp flag
1441  * @queue: RX queue index
1442  * Description: this function is called to allocate a receive buffer, perform
1443  * the DMA mapping and init the descriptor.
1444  */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 				  struct stmmac_dma_conf *dma_conf,
1447 				  struct dma_desc *p,
1448 				  int i, gfp_t flags, u32 queue)
1449 {
1450 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453 
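	/* Hosts with a DMA addressing width of 32 bits or less must get
	 * their RX pages from the 32-bit addressable zone.
	 */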
1454 	if (priv->dma_cap.host_dma_width <= 32)
1455 		gfp |= GFP_DMA32;
1456 
1457 	if (!buf->page) {
1458 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 		if (!buf->page)
1460 			return -ENOMEM;
1461 		buf->page_offset = stmmac_rx_offset(priv);
1462 	}
1463 
1464 	if (priv->sph && !buf->sec_page) {
1465 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 		if (!buf->sec_page)
1467 			return -ENOMEM;
1468 
1469 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 	} else {
1472 		buf->sec_page = NULL;
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 	}
1475 
1476 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477 
1478 	stmmac_set_desc_addr(priv, p, buf->addr);
1479 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 		stmmac_init_desc3(priv, p);
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * stmmac_free_rx_buffer - free RX dma buffers
1487  * @priv: private structure
1488  * @rx_q: RX queue
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 				  struct stmmac_rx_queue *rx_q,
1493 				  int i)
1494 {
1495 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496 
1497 	if (buf->page)
1498 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 	buf->page = NULL;
1500 
1501 	if (buf->sec_page)
1502 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 	buf->sec_page = NULL;
1504 }
1505 
/**
 * stmmac_free_tx_buffer - free a TX dma buffer
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 				  struct stmmac_dma_conf *dma_conf,
1515 				  u32 queue, int i)
1516 {
1517 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518 
1519 	if (tx_q->tx_skbuff_dma[i].buf &&
1520 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 			dma_unmap_page(priv->device,
1523 				       tx_q->tx_skbuff_dma[i].buf,
1524 				       tx_q->tx_skbuff_dma[i].len,
1525 				       DMA_TO_DEVICE);
1526 		else
1527 			dma_unmap_single(priv->device,
1528 					 tx_q->tx_skbuff_dma[i].buf,
1529 					 tx_q->tx_skbuff_dma[i].len,
1530 					 DMA_TO_DEVICE);
1531 	}
1532 
1533 	if (tx_q->xdpf[i] &&
1534 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 		xdp_return_frame(tx_q->xdpf[i]);
1537 		tx_q->xdpf[i] = NULL;
1538 	}
1539 
1540 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 		tx_q->xsk_frames_done++;
1542 
1543 	if (tx_q->tx_skbuff[i] &&
1544 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 		tx_q->tx_skbuff[i] = NULL;
1547 	}
1548 
1549 	tx_q->tx_skbuff_dma[i].buf = 0;
1550 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552 
1553 /**
1554  * dma_free_rx_skbufs - free RX dma buffers
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: RX queue index
1558  */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 			       struct stmmac_dma_conf *dma_conf,
1561 			       u32 queue)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 		stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569 
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 				   struct stmmac_dma_conf *dma_conf,
1572 				   u32 queue, gfp_t flags)
1573 {
1574 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 	int i;
1576 
1577 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 		struct dma_desc *p;
1579 		int ret;
1580 
1581 		if (priv->extend_desc)
1582 			p = &((rx_q->dma_erx + i)->basic);
1583 		else
1584 			p = rx_q->dma_rx + i;
1585 
1586 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 					     queue);
1588 		if (ret)
1589 			return ret;
1590 
1591 		rx_q->buf_alloc_num++;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599  * @priv: private structure
1600  * @dma_conf: structure to take the dma data
1601  * @queue: RX queue index
1602  */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 				struct stmmac_dma_conf *dma_conf,
1605 				u32 queue)
1606 {
1607 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 	int i;
1609 
1610 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612 
1613 		if (!buf->xdp)
1614 			continue;
1615 
1616 		xsk_buff_free(buf->xdp);
1617 		buf->xdp = NULL;
1618 	}
1619 }
1620 
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 				      struct stmmac_dma_conf *dma_conf,
1623 				      u32 queue)
1624 {
1625 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 	int i;
1627 
1628 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1629 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure there are no size violations.
1631 	 */
1632 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633 
1634 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 		struct stmmac_rx_buffer *buf;
1636 		dma_addr_t dma_addr;
1637 		struct dma_desc *p;
1638 
1639 		if (priv->extend_desc)
1640 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 		else
1642 			p = rx_q->dma_rx + i;
1643 
1644 		buf = &rx_q->buf_pool[i];
1645 
1646 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 		if (!buf->xdp)
1648 			return -ENOMEM;
1649 
1650 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 		stmmac_set_desc_addr(priv, p, dma_addr);
1652 		rx_q->buf_alloc_num++;
1653 	}
1654 
1655 	return 0;
1656 }
1657 
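/* Return the XSK buffer pool bound to @queue when XDP zero-copy is active on
 * that queue, NULL otherwise.
 */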
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 		return NULL;
1662 
1663 	return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665 
1666 /**
1667  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668  * @priv: driver private structure
1669  * @dma_conf: structure to take the dma data
1670  * @queue: RX queue index
1671  * @flags: gfp flag.
1672  * Description: this function initializes the DMA RX descriptors
1673  * and allocates the socket buffers. It supports the chained and ring
1674  * modes.
1675  */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 				    struct stmmac_dma_conf *dma_conf,
1678 				    u32 queue, gfp_t flags)
1679 {
1680 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 	int ret;
1682 
1683 	netif_dbg(priv, probe, priv->dev,
1684 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 		  (u32)rx_q->dma_rx_phy);
1686 
1687 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688 
1689 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690 
1691 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692 
1693 	if (rx_q->xsk_pool) {
1694 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 						   MEM_TYPE_XSK_BUFF_POOL,
1696 						   NULL));
1697 		netdev_info(priv->dev,
1698 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 			    rx_q->queue_index);
1700 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 	} else {
1702 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 						   MEM_TYPE_PAGE_POOL,
1704 						   rx_q->page_pool));
1705 		netdev_info(priv->dev,
1706 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 			    rx_q->queue_index);
1708 	}
1709 
1710 	if (rx_q->xsk_pool) {
1711 		/* RX XDP ZC buffer pool may not be populated, e.g.
1712 		 * xdpsock TX-only.
1713 		 */
1714 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 	} else {
1716 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 		if (ret < 0)
1718 			return -ENOMEM;
1719 	}
1720 
1721 	/* Setup the chained descriptor addresses */
1722 	if (priv->mode == STMMAC_CHAIN_MODE) {
1723 		if (priv->extend_desc)
1724 			stmmac_mode_init(priv, rx_q->dma_erx,
1725 					 rx_q->dma_rx_phy,
1726 					 dma_conf->dma_rx_size, 1);
1727 		else
1728 			stmmac_mode_init(priv, rx_q->dma_rx,
1729 					 rx_q->dma_rx_phy,
1730 					 dma_conf->dma_rx_size, 0);
1731 	}
1732 
1733 	return 0;
1734 }
1735 
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 				  struct stmmac_dma_conf *dma_conf,
1738 				  gfp_t flags)
1739 {
1740 	struct stmmac_priv *priv = netdev_priv(dev);
1741 	u32 rx_count = priv->plat->rx_queues_to_use;
1742 	int queue;
1743 	int ret;
1744 
1745 	/* RX INITIALIZATION */
1746 	netif_dbg(priv, probe, priv->dev,
1747 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748 
1749 	for (queue = 0; queue < rx_count; queue++) {
1750 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 		if (ret)
1752 			goto err_init_rx_buffers;
1753 	}
1754 
1755 	return 0;
1756 
1757 err_init_rx_buffers:
1758 	while (queue >= 0) {
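		/* Unwind: release the buffers of every queue initialized so
		 * far, including the one that failed part-way through.
		 */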
1759 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760 
1761 		if (rx_q->xsk_pool)
1762 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 		else
1764 			dma_free_rx_skbufs(priv, dma_conf, queue);
1765 
1766 		rx_q->buf_alloc_num = 0;
1767 		rx_q->xsk_pool = NULL;
1768 
1769 		queue--;
1770 	}
1771 
1772 	return ret;
1773 }
1774 
1775 /**
1776  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777  * @priv: driver private structure
1778  * @dma_conf: structure to take the dma data
1779  * @queue: TX queue index
1780  * Description: this function initializes the DMA TX descriptors
1781  * and allocates the socket buffers. It supports the chained and ring
1782  * modes.
1783  */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 				    struct stmmac_dma_conf *dma_conf,
1786 				    u32 queue)
1787 {
1788 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 	int i;
1790 
1791 	netif_dbg(priv, probe, priv->dev,
1792 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 		  (u32)tx_q->dma_tx_phy);
1794 
1795 	/* Setup the chained descriptor addresses */
1796 	if (priv->mode == STMMAC_CHAIN_MODE) {
1797 		if (priv->extend_desc)
1798 			stmmac_mode_init(priv, tx_q->dma_etx,
1799 					 tx_q->dma_tx_phy,
1800 					 dma_conf->dma_tx_size, 1);
1801 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 			stmmac_mode_init(priv, tx_q->dma_tx,
1803 					 tx_q->dma_tx_phy,
1804 					 dma_conf->dma_tx_size, 0);
1805 	}
1806 
1807 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808 
1809 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 		struct dma_desc *p;
1811 
1812 		if (priv->extend_desc)
1813 			p = &((tx_q->dma_etx + i)->basic);
1814 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 			p = &((tx_q->dma_entx + i)->basic);
1816 		else
1817 			p = tx_q->dma_tx + i;
1818 
1819 		stmmac_clear_desc(priv, p);
1820 
1821 		tx_q->tx_skbuff_dma[i].buf = 0;
1822 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 		tx_q->tx_skbuff_dma[i].len = 0;
1824 		tx_q->tx_skbuff_dma[i].last_segment = false;
1825 		tx_q->tx_skbuff[i] = NULL;
1826 	}
1827 
1828 	return 0;
1829 }
1830 
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 				  struct stmmac_dma_conf *dma_conf)
1833 {
1834 	struct stmmac_priv *priv = netdev_priv(dev);
1835 	u32 tx_queue_cnt;
1836 	u32 queue;
1837 
1838 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1839 
1840 	for (queue = 0; queue < tx_queue_cnt; queue++)
1841 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1842 
1843 	return 0;
1844 }
1845 
1846 /**
1847  * init_dma_desc_rings - init the RX/TX descriptor rings
1848  * @dev: net device structure
1849  * @dma_conf: structure to take the dma data
1850  * @flags: gfp flag.
1851  * Description: this function initializes the DMA RX/TX descriptors
1852  * and allocates the socket buffers. It supports the chained and ring
1853  * modes.
1854  */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 			       struct stmmac_dma_conf *dma_conf,
1857 			       gfp_t flags)
1858 {
1859 	struct stmmac_priv *priv = netdev_priv(dev);
1860 	int ret;
1861 
1862 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 	if (ret)
1864 		return ret;
1865 
1866 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1867 
1868 	stmmac_clear_descriptors(priv, dma_conf);
1869 
1870 	if (netif_msg_hw(priv))
1871 		stmmac_display_rings(priv, dma_conf);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * dma_free_tx_skbufs - free TX dma buffers
1878  * @priv: private structure
1879  * @dma_conf: structure to take the dma data
1880  * @queue: TX queue index
1881  */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 			       struct stmmac_dma_conf *dma_conf,
1884 			       u32 queue)
1885 {
1886 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 	int i;
1888 
1889 	tx_q->xsk_frames_done = 0;
1890 
1891 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893 
1894 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 		tx_q->xsk_frames_done = 0;
1897 		tx_q->xsk_pool = NULL;
1898 	}
1899 }
1900 
1901 /**
1902  * stmmac_free_tx_skbufs - free TX skb buffers
1903  * @priv: private structure
1904  */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 	u32 queue;
1909 
1910 	for (queue = 0; queue < tx_queue_cnt; queue++)
1911 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913 
1914 /**
1915  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916  * @priv: private structure
1917  * @dma_conf: structure to take the dma data
1918  * @queue: RX queue index
1919  */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 					 struct stmmac_dma_conf *dma_conf,
1922 					 u32 queue)
1923 {
1924 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925 
1926 	/* Release the DMA RX socket buffers */
1927 	if (rx_q->xsk_pool)
1928 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 	else
1930 		dma_free_rx_skbufs(priv, dma_conf, queue);
1931 
1932 	rx_q->buf_alloc_num = 0;
1933 	rx_q->xsk_pool = NULL;
1934 
1935 	/* Free DMA regions of consistent memory previously allocated */
1936 	if (!priv->extend_desc)
1937 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 				  sizeof(struct dma_desc),
1939 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1940 	else
1941 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 				  sizeof(struct dma_extended_desc),
1943 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1944 
1945 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947 
1948 	kfree(rx_q->buf_pool);
1949 	if (rx_q->page_pool)
1950 		page_pool_destroy(rx_q->page_pool);
1951 }
1952 
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 				       struct stmmac_dma_conf *dma_conf)
1955 {
1956 	u32 rx_count = priv->plat->rx_queues_to_use;
1957 	u32 queue;
1958 
1959 	/* Free RX queue resources */
1960 	for (queue = 0; queue < rx_count; queue++)
1961 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963 
1964 /**
1965  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 					 struct stmmac_dma_conf *dma_conf,
1972 					 u32 queue)
1973 {
1974 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 	size_t size;
1976 	void *addr;
1977 
1978 	/* Release the DMA TX socket buffers */
1979 	dma_free_tx_skbufs(priv, dma_conf, queue);
1980 
1981 	if (priv->extend_desc) {
1982 		size = sizeof(struct dma_extended_desc);
1983 		addr = tx_q->dma_etx;
1984 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 		size = sizeof(struct dma_edesc);
1986 		addr = tx_q->dma_entx;
1987 	} else {
1988 		size = sizeof(struct dma_desc);
1989 		addr = tx_q->dma_tx;
1990 	}
1991 
1992 	size *= dma_conf->dma_tx_size;
1993 
1994 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995 
1996 	kfree(tx_q->tx_skbuff_dma);
1997 	kfree(tx_q->tx_skbuff);
1998 }
1999 
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 				       struct stmmac_dma_conf *dma_conf)
2002 {
2003 	u32 tx_count = priv->plat->tx_queues_to_use;
2004 	u32 queue;
2005 
2006 	/* Free TX queue resources */
2007 	for (queue = 0; queue < tx_count; queue++)
2008 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010 
2011 /**
2012  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: RX queue index
2016  * Description: according to which descriptor can be used (extended or basic)
2017  * this function allocates the RX resources for the given queue: the page
2018  * pool, the buffer bookkeeping array, the descriptor ring and the XDP
2019  * RxQ info.
2020  */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 					 struct stmmac_dma_conf *dma_conf,
2023 					 u32 queue)
2024 {
2025 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 	struct stmmac_channel *ch = &priv->channel[queue];
2027 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 	struct page_pool_params pp_params = { 0 };
2029 	unsigned int num_pages;
2030 	unsigned int napi_id;
2031 	int ret;
2032 
2033 	rx_q->queue_index = queue;
2034 	rx_q->priv_data = priv;
2035 
2036 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 	pp_params.pool_size = dma_conf->dma_rx_size;
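	/* A single RX buffer may span more than one page; size the pool
	 * page order accordingly.
	 */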
2038 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 	pp_params.order = ilog2(num_pages);
2040 	pp_params.nid = dev_to_node(priv->device);
2041 	pp_params.dev = priv->device;
2042 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 	pp_params.offset = stmmac_rx_offset(priv);
2044 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045 
2046 	rx_q->page_pool = page_pool_create(&pp_params);
2047 	if (IS_ERR(rx_q->page_pool)) {
2048 		ret = PTR_ERR(rx_q->page_pool);
2049 		rx_q->page_pool = NULL;
2050 		return ret;
2051 	}
2052 
2053 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 				 sizeof(*rx_q->buf_pool),
2055 				 GFP_KERNEL);
2056 	if (!rx_q->buf_pool)
2057 		return -ENOMEM;
2058 
2059 	if (priv->extend_desc) {
2060 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 						   dma_conf->dma_rx_size *
2062 						   sizeof(struct dma_extended_desc),
2063 						   &rx_q->dma_rx_phy,
2064 						   GFP_KERNEL);
2065 		if (!rx_q->dma_erx)
2066 			return -ENOMEM;
2067 
2068 	} else {
2069 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 						  dma_conf->dma_rx_size *
2071 						  sizeof(struct dma_desc),
2072 						  &rx_q->dma_rx_phy,
2073 						  GFP_KERNEL);
2074 		if (!rx_q->dma_rx)
2075 			return -ENOMEM;
2076 	}
2077 
2078 	if (stmmac_xdp_is_enabled(priv) &&
2079 	    test_bit(queue, priv->af_xdp_zc_qps))
2080 		napi_id = ch->rxtx_napi.napi_id;
2081 	else
2082 		napi_id = ch->rx_napi.napi_id;
2083 
2084 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 			       rx_q->queue_index,
2086 			       napi_id);
2087 	if (ret) {
2088 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 		return -EINVAL;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 				       struct stmmac_dma_conf *dma_conf)
2097 {
2098 	u32 rx_count = priv->plat->rx_queues_to_use;
2099 	u32 queue;
2100 	int ret;
2101 
2102 	/* RX queues buffers and DMA */
2103 	for (queue = 0; queue < rx_count; queue++) {
2104 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 		if (ret)
2106 			goto err_dma;
2107 	}
2108 
2109 	return 0;
2110 
2111 err_dma:
2112 	free_dma_rx_desc_resources(priv, dma_conf);
2113 
2114 	return ret;
2115 }
2116 
2117 /**
2118  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
2122  * Description: according to which descriptor can be used (extended, enhanced
2123  * or basic) this function allocates the TX resources for the given queue:
2124  * the buffer bookkeeping arrays (tx_skbuff and tx_skbuff_dma) and the
2125  * descriptor ring.
2126  */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 					 struct stmmac_dma_conf *dma_conf,
2129 					 u32 queue)
2130 {
2131 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 	size_t size;
2133 	void *addr;
2134 
2135 	tx_q->queue_index = queue;
2136 	tx_q->priv_data = priv;
2137 
2138 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 				      sizeof(*tx_q->tx_skbuff_dma),
2140 				      GFP_KERNEL);
2141 	if (!tx_q->tx_skbuff_dma)
2142 		return -ENOMEM;
2143 
2144 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 				  sizeof(struct sk_buff *),
2146 				  GFP_KERNEL);
2147 	if (!tx_q->tx_skbuff)
2148 		return -ENOMEM;
2149 
2150 	if (priv->extend_desc)
2151 		size = sizeof(struct dma_extended_desc);
2152 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 		size = sizeof(struct dma_edesc);
2154 	else
2155 		size = sizeof(struct dma_desc);
2156 
2157 	size *= dma_conf->dma_tx_size;
2158 
2159 	addr = dma_alloc_coherent(priv->device, size,
2160 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2161 	if (!addr)
2162 		return -ENOMEM;
2163 
2164 	if (priv->extend_desc)
2165 		tx_q->dma_etx = addr;
2166 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 		tx_q->dma_entx = addr;
2168 	else
2169 		tx_q->dma_tx = addr;
2170 
2171 	return 0;
2172 }
2173 
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 				       struct stmmac_dma_conf *dma_conf)
2176 {
2177 	u32 tx_count = priv->plat->tx_queues_to_use;
2178 	u32 queue;
2179 	int ret;
2180 
2181 	/* TX queues buffers and DMA */
2182 	for (queue = 0; queue < tx_count; queue++) {
2183 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 		if (ret)
2185 			goto err_dma;
2186 	}
2187 
2188 	return 0;
2189 
2190 err_dma:
2191 	free_dma_tx_desc_resources(priv, dma_conf);
2192 	return ret;
2193 }
2194 
2195 /**
2196  * alloc_dma_desc_resources - alloc TX/RX resources.
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
2199  * Description: according to which descriptor can be used (extended or basic)
2200  * this function allocates the resources for the TX and RX paths. In case of
2201  * reception, for example, it pre-allocates the RX buffers in order to
2202  * allow the zero-copy mechanism.
2203  */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 				    struct stmmac_dma_conf *dma_conf)
2206 {
2207 	/* RX Allocation */
2208 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209 
2210 	if (ret)
2211 		return ret;
2212 
2213 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214 
2215 	return ret;
2216 }
2217 
2218 /**
2219  * free_dma_desc_resources - free dma desc resources
2220  * @priv: private structure
2221  * @dma_conf: structure to take the dma data
2222  */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 				    struct stmmac_dma_conf *dma_conf)
2225 {
2226 	/* Release the DMA TX socket buffers */
2227 	free_dma_tx_desc_resources(priv, dma_conf);
2228 
2229 	/* Release the DMA RX socket buffers later
2230 	 * to ensure all pending XDP_TX buffers are returned.
2231 	 */
2232 	free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234 
2235 /**
2236  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237  *  @priv: driver private structure
2238  *  Description: It is used for enabling the rx queues in the MAC
2239  */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 	int queue;
2244 	u8 mode;
2245 
2246 	for (queue = 0; queue < rx_queues_count; queue++) {
2247 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 	}
2250 }
2251 
2252 /**
2253  * stmmac_start_rx_dma - start RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
2257  * This starts an RX DMA channel
2258  */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 	stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264 
2265 /**
2266  * stmmac_start_tx_dma - start TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This starts a TX DMA channel
2271  */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 	stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277 
2278 /**
2279  * stmmac_stop_rx_dma - stop RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
2283  * This stops an RX DMA channel
2284  */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290 
2291 /**
2292  * stmmac_stop_tx_dma - stop TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This stops a TX DMA channel
2297  */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303 
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 	u32 chan;
2310 
2311 	for (chan = 0; chan < dma_csr_ch; chan++) {
2312 		struct stmmac_channel *ch = &priv->channel[chan];
2313 		unsigned long flags;
2314 
2315 		spin_lock_irqsave(&ch->lock, flags);
2316 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 		spin_unlock_irqrestore(&ch->lock, flags);
2318 	}
2319 }
2320 
2321 /**
2322  * stmmac_start_all_dma - start all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This starts all the RX and TX DMA channels
2326  */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 	u32 chan = 0;
2332 
2333 	for (chan = 0; chan < rx_channels_count; chan++)
2334 		stmmac_start_rx_dma(priv, chan);
2335 
2336 	for (chan = 0; chan < tx_channels_count; chan++)
2337 		stmmac_start_tx_dma(priv, chan);
2338 }
2339 
2340 /**
2341  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342  * @priv: driver private structure
2343  * Description:
2344  * This stops the RX and TX DMA channels
2345  */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 	u32 chan = 0;
2351 
2352 	for (chan = 0; chan < rx_channels_count; chan++)
2353 		stmmac_stop_rx_dma(priv, chan);
2354 
2355 	for (chan = 0; chan < tx_channels_count; chan++)
2356 		stmmac_stop_tx_dma(priv, chan);
2357 }
2358 
2359 /**
2360  *  stmmac_dma_operation_mode - HW DMA operation mode
2361  *  @priv: driver private structure
2362  *  Description: it is used for configuring the DMA operation mode register in
2363  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364  */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 	int rxfifosz = priv->plat->rx_fifo_size;
2370 	int txfifosz = priv->plat->tx_fifo_size;
2371 	u32 txmode = 0;
2372 	u32 rxmode = 0;
2373 	u32 chan = 0;
2374 	u8 qmode = 0;
2375 
2376 	if (rxfifosz == 0)
2377 		rxfifosz = priv->dma_cap.rx_fifo_size;
2378 	if (txfifosz == 0)
2379 		txfifosz = priv->dma_cap.tx_fifo_size;
2380 
2381 	/* Adjust for real per queue fifo size */
2382 	rxfifosz /= rx_channels_count;
2383 	txfifosz /= tx_channels_count;
2384 
2385 	if (priv->plat->force_thresh_dma_mode) {
2386 		txmode = tc;
2387 		rxmode = tc;
2388 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389 		/*
2390 		 * In case of GMAC, SF mode can be enabled
2391 		 * to perform the TX COE in HW. This depends on:
2392 		 * 1) TX COE being actually supported
2393 		 * 2) there being no buggy Jumbo frame support
2394 		 *    that requires not inserting the csum in the TDES.
2395 		 */
2396 		txmode = SF_DMA_MODE;
2397 		rxmode = SF_DMA_MODE;
2398 		priv->xstats.threshold = SF_DMA_MODE;
2399 	} else {
2400 		txmode = tc;
2401 		rxmode = SF_DMA_MODE;
2402 	}
2403 
2404 	/* configure all channels */
2405 	for (chan = 0; chan < rx_channels_count; chan++) {
2406 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 		u32 buf_size;
2408 
2409 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410 
2411 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 				rxfifosz, qmode);
2413 
2414 		if (rx_q->xsk_pool) {
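			/* Zero-copy: the DMA buffer size must match the
			 * frame size of the XSK pool.
			 */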
2415 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 					      buf_size,
2418 					      chan);
2419 		} else {
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      priv->dma_conf.dma_buf_sz,
2422 					      chan);
2423 		}
2424 	}
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++) {
2427 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428 
2429 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 				txfifosz, qmode);
2431 	}
2432 }
2433 
2434 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2435 {
2436 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2437 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2438 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2439 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2440 	unsigned int entry = tx_q->cur_tx;
2441 	struct dma_desc *tx_desc = NULL;
2442 	struct xdp_desc xdp_desc;
2443 	bool work_done = true;
2444 	u32 tx_set_ic_bit = 0;
2445 	unsigned long flags;
2446 
2447 	/* Avoids TX time-out as we are sharing with slow path */
2448 	txq_trans_cond_update(nq);
2449 
2450 	budget = min(budget, stmmac_tx_avail(priv, queue));
2451 
2452 	while (budget-- > 0) {
2453 		dma_addr_t dma_addr;
2454 		bool set_ic;
2455 
2456 		/* We share the ring with the slow path, so stop XSK TX desc
2457 		 * submission when the available TX ring drops below the threshold.
2458 		 */
2459 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2460 		    !netif_carrier_ok(priv->dev)) {
2461 			work_done = false;
2462 			break;
2463 		}
2464 
2465 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2466 			break;
2467 
2468 		if (likely(priv->extend_desc))
2469 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2470 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2471 			tx_desc = &tx_q->dma_entx[entry].basic;
2472 		else
2473 			tx_desc = tx_q->dma_tx + entry;
2474 
2475 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2476 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2477 
2478 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2479 
2480 		/* To return the XDP buffer to the XSK pool, we simply call
2481 		 * xsk_tx_completed(), so we don't need to fill up
2482 		 * 'buf' and 'xdpf'.
2483 		 */
2484 		tx_q->tx_skbuff_dma[entry].buf = 0;
2485 		tx_q->xdpf[entry] = NULL;
2486 
2487 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2488 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2489 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2490 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2491 
2492 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2493 
2494 		tx_q->tx_count_frames++;
2495 
2496 		if (!priv->tx_coal_frames[queue])
2497 			set_ic = false;
2498 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2499 			set_ic = true;
2500 		else
2501 			set_ic = false;
2502 
2503 		if (set_ic) {
2504 			tx_q->tx_count_frames = 0;
2505 			stmmac_set_tx_ic(priv, tx_desc);
2506 			tx_set_ic_bit++;
2507 		}
2508 
2509 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2510 				       true, priv->mode, true, true,
2511 				       xdp_desc.len);
2512 
2513 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2514 
2515 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2516 		entry = tx_q->cur_tx;
2517 	}
2518 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2519 	txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2520 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2521 
2522 	if (tx_desc) {
2523 		stmmac_flush_tx_descriptors(priv, queue);
2524 		xsk_tx_release(pool);
2525 	}
2526 
2527 	/* Return true if both of the following conditions are met
2528 	 *  a) TX budget is still available
2529 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2530 	 *     pending XSK TX for transmission)
2531 	 */
2532 	return !!budget && work_done;
2533 }
2534 
2535 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2536 {
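	/* Bump the TX DMA threshold by 64 on each call (while tc <= 256),
	 * unless the path is already in store-and-forward mode.
	 */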
2537 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2538 		tc += 64;
2539 
2540 		if (priv->plat->force_thresh_dma_mode)
2541 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2542 		else
2543 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2544 						      chan);
2545 
2546 		priv->xstats.threshold = tc;
2547 	}
2548 }
2549 
2550 /**
2551  * stmmac_tx_clean - to manage the transmission completion
2552  * @priv: driver private structure
2553  * @budget: napi budget limiting this functions packet handling
2554  * @queue: TX queue index
2555  * Description: it reclaims the transmit resources after transmission completes.
2556  */
2557 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2558 {
2559 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2560 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2561 	unsigned int bytes_compl = 0, pkts_compl = 0;
2562 	unsigned int entry, xmits = 0, count = 0;
2563 	u32 tx_packets = 0, tx_errors = 0;
2564 	unsigned long flags;
2565 
2566 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2567 
2568 	tx_q->xsk_frames_done = 0;
2569 
2570 	entry = tx_q->dirty_tx;
2571 
2572 	/* Try to clean all TX complete frame in 1 shot */
2573 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2574 		struct xdp_frame *xdpf;
2575 		struct sk_buff *skb;
2576 		struct dma_desc *p;
2577 		int status;
2578 
2579 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2580 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2581 			xdpf = tx_q->xdpf[entry];
2582 			skb = NULL;
2583 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2584 			xdpf = NULL;
2585 			skb = tx_q->tx_skbuff[entry];
2586 		} else {
2587 			xdpf = NULL;
2588 			skb = NULL;
2589 		}
2590 
2591 		if (priv->extend_desc)
2592 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2593 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2594 			p = &tx_q->dma_entx[entry].basic;
2595 		else
2596 			p = tx_q->dma_tx + entry;
2597 
2598 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2599 		/* Check if the descriptor is owned by the DMA */
2600 		if (unlikely(status & tx_dma_own))
2601 			break;
2602 
2603 		count++;
2604 
2605 		/* Make sure descriptor fields are read after reading
2606 		 * the own bit.
2607 		 */
2608 		dma_rmb();
2609 
2610 		/* Just consider the last segment and ...*/
2611 		if (likely(!(status & tx_not_ls))) {
2612 			/* ... verify the status error condition */
2613 			if (unlikely(status & tx_err)) {
2614 				tx_errors++;
2615 				if (unlikely(status & tx_err_bump_tc))
2616 					stmmac_bump_dma_threshold(priv, queue);
2617 			} else {
2618 				tx_packets++;
2619 			}
2620 			if (skb)
2621 				stmmac_get_tx_hwtstamp(priv, p, skb);
2622 		}
2623 
2624 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2625 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2626 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2627 				dma_unmap_page(priv->device,
2628 					       tx_q->tx_skbuff_dma[entry].buf,
2629 					       tx_q->tx_skbuff_dma[entry].len,
2630 					       DMA_TO_DEVICE);
2631 			else
2632 				dma_unmap_single(priv->device,
2633 						 tx_q->tx_skbuff_dma[entry].buf,
2634 						 tx_q->tx_skbuff_dma[entry].len,
2635 						 DMA_TO_DEVICE);
2636 			tx_q->tx_skbuff_dma[entry].buf = 0;
2637 			tx_q->tx_skbuff_dma[entry].len = 0;
2638 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2639 		}
2640 
2641 		stmmac_clean_desc3(priv, tx_q, p);
2642 
2643 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2644 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2645 
2646 		if (xdpf &&
2647 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2648 			xdp_return_frame_rx_napi(xdpf);
2649 			tx_q->xdpf[entry] = NULL;
2650 		}
2651 
2652 		if (xdpf &&
2653 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2654 			xdp_return_frame(xdpf);
2655 			tx_q->xdpf[entry] = NULL;
2656 		}
2657 
2658 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2659 			tx_q->xsk_frames_done++;
2660 
2661 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2662 			if (likely(skb)) {
2663 				pkts_compl++;
2664 				bytes_compl += skb->len;
2665 				dev_consume_skb_any(skb);
2666 				tx_q->tx_skbuff[entry] = NULL;
2667 			}
2668 		}
2669 
2670 		stmmac_release_tx_desc(priv, p, priv->mode);
2671 
2672 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2673 	}
2674 	tx_q->dirty_tx = entry;
2675 
2676 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2677 				  pkts_compl, bytes_compl);
2678 
2679 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2680 								queue))) &&
2681 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2682 
2683 		netif_dbg(priv, tx_done, priv->dev,
2684 			  "%s: restart transmit\n", __func__);
2685 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2686 	}
2687 
2688 	if (tx_q->xsk_pool) {
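		/* Zero-copy TX: report reclaimed frames back to the XSK
		 * pool and try to transmit more frames from it.
		 */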
2689 		bool work_done;
2690 
2691 		if (tx_q->xsk_frames_done)
2692 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2693 
2694 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2695 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2696 
2697 		/* For XSK TX, we try to send as many as possible.
2698 		 * If XSK work done (XSK TX desc empty and budget still
2699 		 * available), return "budget - 1" to reenable TX IRQ.
2700 		 * Else, return "budget" to make NAPI continue polling.
2701 		 */
2702 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2703 					       STMMAC_XSK_TX_BUDGET_MAX);
2704 		if (work_done)
2705 			xmits = budget - 1;
2706 		else
2707 			xmits = budget;
2708 	}
2709 
2710 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2711 	    priv->eee_sw_timer_en) {
2712 		if (stmmac_enable_eee_mode(priv))
2713 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2714 	}
2715 
2716 	/* We still have pending packets, let's call for a new scheduling */
2717 	if (tx_q->dirty_tx != tx_q->cur_tx)
2718 		stmmac_tx_timer_arm(priv, queue);
2719 
2720 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2721 	txq_stats->tx_packets += tx_packets;
2722 	txq_stats->tx_pkt_n += tx_packets;
2723 	txq_stats->tx_clean++;
2724 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2725 
2726 	priv->xstats.tx_errors += tx_errors;
2727 
2728 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2729 
2730 	/* Combine decisions from TX clean and XSK TX */
2731 	return max(count, xmits);
2732 }
2733 
2734 /**
2735  * stmmac_tx_err - to manage the tx error
2736  * @priv: driver private structure
2737  * @chan: channel index
2738  * Description: it cleans the descriptors and restarts the transmission
2739  * in case of transmission errors.
2740  */
2741 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2742 {
2743 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2744 
2745 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2746 
2747 	stmmac_stop_tx_dma(priv, chan);
2748 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2749 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2750 	stmmac_reset_tx_queue(priv, chan);
2751 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2752 			    tx_q->dma_tx_phy, chan);
2753 	stmmac_start_tx_dma(priv, chan);
2754 
2755 	priv->xstats.tx_errors++;
2756 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2757 }
2758 
2759 /**
2760  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2761  *  @priv: driver private structure
2762  *  @txmode: TX operating mode
2763  *  @rxmode: RX operating mode
2764  *  @chan: channel index
2765  *  Description: it is used for configuring the DMA operation mode at
2766  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2767  *  mode.
2768  */
2769 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2770 					  u32 rxmode, u32 chan)
2771 {
2772 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2773 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2774 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2775 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2776 	int rxfifosz = priv->plat->rx_fifo_size;
2777 	int txfifosz = priv->plat->tx_fifo_size;
2778 
2779 	if (rxfifosz == 0)
2780 		rxfifosz = priv->dma_cap.rx_fifo_size;
2781 	if (txfifosz == 0)
2782 		txfifosz = priv->dma_cap.tx_fifo_size;
2783 
2784 	/* Adjust for real per queue fifo size */
2785 	rxfifosz /= rx_channels_count;
2786 	txfifosz /= tx_channels_count;
2787 
2788 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2789 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2790 }
2791 
2792 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2793 {
2794 	int ret;
2795 
2796 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2797 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2798 	if (ret && (ret != -EINVAL)) {
2799 		stmmac_global_err(priv);
2800 		return true;
2801 	}
2802 
2803 	return false;
2804 }
2805 
2806 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2807 {
2808 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2809 						 &priv->xstats, chan, dir);
2810 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2811 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2812 	struct stmmac_channel *ch = &priv->channel[chan];
2813 	struct napi_struct *rx_napi;
2814 	struct napi_struct *tx_napi;
2815 	unsigned long flags;
2816 
2817 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2818 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2819 
2820 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2821 		if (napi_schedule_prep(rx_napi)) {
2822 			spin_lock_irqsave(&ch->lock, flags);
2823 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2824 			spin_unlock_irqrestore(&ch->lock, flags);
2825 			__napi_schedule(rx_napi);
2826 		}
2827 	}
2828 
2829 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2830 		if (napi_schedule_prep(tx_napi)) {
2831 			spin_lock_irqsave(&ch->lock, flags);
2832 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2833 			spin_unlock_irqrestore(&ch->lock, flags);
2834 			__napi_schedule(tx_napi);
2835 		}
2836 	}
2837 
2838 	return status;
2839 }
2840 
2841 /**
2842  * stmmac_dma_interrupt - DMA ISR
2843  * @priv: driver private structure
2844  * Description: this is the DMA ISR. It is called by the main ISR.
2845  * It calls the dwmac dma routine and schedules the poll method in case
2846  * some work can be done.
2847  */
2848 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2849 {
2850 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2851 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2852 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2853 				tx_channel_count : rx_channel_count;
2854 	u32 chan;
2855 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2856 
2857 	/* Make sure we never check beyond our status buffer. */
2858 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2859 		channels_to_check = ARRAY_SIZE(status);
2860 
2861 	for (chan = 0; chan < channels_to_check; chan++)
2862 		status[chan] = stmmac_napi_check(priv, chan,
2863 						 DMA_DIR_RXTX);
2864 
2865 	for (chan = 0; chan < tx_channel_count; chan++) {
2866 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2867 			/* Try to bump up the dma threshold on this failure */
2868 			stmmac_bump_dma_threshold(priv, chan);
2869 		} else if (unlikely(status[chan] == tx_hard_error)) {
2870 			stmmac_tx_err(priv, chan);
2871 		}
2872 	}
2873 }
2874 
2875 /**
2876  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2877  * @priv: driver private structure
2878  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2879  */
2880 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2881 {
2882 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2883 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2884 
2885 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2886 
2887 	if (priv->dma_cap.rmon) {
2888 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2889 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2890 	} else
2891 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2892 }
2893 
2894 /**
2895  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2896  * @priv: driver private structure
2897  * Description:
2898  *  newer GMAC chip generations have a register that indicates the
2899  *  presence of optional features/functions.
2900  *  It can also be used to override the values passed through the
2901  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2902  */
2903 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2904 {
2905 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2906 }
2907 
2908 /**
2909  * stmmac_check_ether_addr - check if the MAC addr is valid
2910  * @priv: driver private structure
2911  * Description:
2912  * it verifies whether the MAC address is valid. If it is not, the address
2913  * is read from the HW and, failing that, a random MAC address is generated.
2914  */
2915 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2916 {
2917 	u8 addr[ETH_ALEN];
2918 
2919 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2920 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2921 		if (is_valid_ether_addr(addr))
2922 			eth_hw_addr_set(priv->dev, addr);
2923 		else
2924 			eth_hw_addr_random(priv->dev);
2925 		dev_info(priv->device, "device MAC address %pM\n",
2926 			 priv->dev->dev_addr);
2927 	}
2928 }
2929 
2930 /**
2931  * stmmac_init_dma_engine - DMA init.
2932  * @priv: driver private structure
2933  * Description:
2934  * It inits the DMA invoking the specific MAC/GMAC callback.
2935  * Some DMA parameters can be passed from the platform;
2936  * in case these are not passed, a default is kept for the MAC or GMAC.
2937  */
2938 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2939 {
2940 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2941 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2942 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2943 	struct stmmac_rx_queue *rx_q;
2944 	struct stmmac_tx_queue *tx_q;
2945 	u32 chan = 0;
2946 	int atds = 0;
2947 	int ret = 0;
2948 
2949 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2950 		dev_err(priv->device, "Invalid DMA configuration\n");
2951 		return -EINVAL;
2952 	}
2953 
2954 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2955 		atds = 1;
2956 
2957 	ret = stmmac_reset(priv, priv->ioaddr);
2958 	if (ret) {
2959 		dev_err(priv->device, "Failed to reset the dma\n");
2960 		return ret;
2961 	}
2962 
2963 	/* DMA Configuration */
2964 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2965 
2966 	if (priv->plat->axi)
2967 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2968 
2969 	/* DMA CSR Channel configuration */
2970 	for (chan = 0; chan < dma_csr_ch; chan++) {
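		/* Common CSR setup for each channel, with the per-channel
		 * DMA interrupts kept masked for now.
		 */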
2971 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2972 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2973 	}
2974 
2975 	/* DMA RX Channel Configuration */
2976 	for (chan = 0; chan < rx_channels_count; chan++) {
2977 		rx_q = &priv->dma_conf.rx_queue[chan];
2978 
2979 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2980 				    rx_q->dma_rx_phy, chan);
2981 
2982 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2983 				     (rx_q->buf_alloc_num *
2984 				      sizeof(struct dma_desc));
2985 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2986 				       rx_q->rx_tail_addr, chan);
2987 	}
2988 
2989 	/* DMA TX Channel Configuration */
2990 	for (chan = 0; chan < tx_channels_count; chan++) {
2991 		tx_q = &priv->dma_conf.tx_queue[chan];
2992 
2993 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2994 				    tx_q->dma_tx_phy, chan);
2995 
2996 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2997 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2998 				       tx_q->tx_tail_addr, chan);
2999 	}
3000 
3001 	return ret;
3002 }
3003 
3004 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3005 {
3006 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3007 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3008 
3009 	if (!tx_coal_timer)
3010 		return;
3011 
3012 	hrtimer_start(&tx_q->txtimer,
3013 		      STMMAC_COAL_TIMER(tx_coal_timer),
3014 		      HRTIMER_MODE_REL);
3015 }
3016 
3017 /**
3018  * stmmac_tx_timer - mitigation sw timer for tx.
3019  * @t: hrtimer embedded in the TX queue structure
3020  * Description:
3021  * This is the timer handler used to schedule the TX NAPI that runs stmmac_tx_clean.
3022  */
3023 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3024 {
3025 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3026 	struct stmmac_priv *priv = tx_q->priv_data;
3027 	struct stmmac_channel *ch;
3028 	struct napi_struct *napi;
3029 
3030 	ch = &priv->channel[tx_q->queue_index];
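	/* Queues backed by an XSK pool are serviced by the combined
	 * rxtx NAPI rather than the TX-only one.
	 */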
3031 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3032 
3033 	if (likely(napi_schedule_prep(napi))) {
3034 		unsigned long flags;
3035 
3036 		spin_lock_irqsave(&ch->lock, flags);
3037 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3038 		spin_unlock_irqrestore(&ch->lock, flags);
3039 		__napi_schedule(napi);
3040 	}
3041 
3042 	return HRTIMER_NORESTART;
3043 }
3044 
3045 /**
3046  * stmmac_init_coalesce - init mitigation options.
3047  * @priv: driver private structure
3048  * Description:
3049  * This inits the coalesce parameters: i.e. timer rate,
3050  * timer handler and default threshold used for enabling the
3051  * interrupt on completion bit.
3052  */
3053 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3054 {
3055 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3056 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3057 	u32 chan;
3058 
3059 	for (chan = 0; chan < tx_channel_count; chan++) {
3060 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3061 
3062 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3063 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3064 
3065 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3066 		tx_q->txtimer.function = stmmac_tx_timer;
3067 	}
3068 
3069 	for (chan = 0; chan < rx_channel_count; chan++)
3070 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3071 }
3072 
3073 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3074 {
3075 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3076 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3077 	u32 chan;
3078 
3079 	/* set TX ring length */
3080 	for (chan = 0; chan < tx_channels_count; chan++)
3081 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3082 				       (priv->dma_conf.dma_tx_size - 1), chan);
3083 
3084 	/* set RX ring length */
3085 	for (chan = 0; chan < rx_channels_count; chan++)
3086 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3087 				       (priv->dma_conf.dma_rx_size - 1), chan);
3088 }
3089 
3090 /**
3091  *  stmmac_set_tx_queue_weight - Set TX queue weight
3092  *  @priv: driver private structure
3093  *  Description: It is used for setting the TX queue weights
3094  */
3095 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3096 {
3097 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3098 	u32 weight;
3099 	u32 queue;
3100 
3101 	for (queue = 0; queue < tx_queues_count; queue++) {
3102 		weight = priv->plat->tx_queues_cfg[queue].weight;
3103 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3104 	}
3105 }
3106 
3107 /**
3108  *  stmmac_configure_cbs - Configure CBS in TX queue
3109  *  @priv: driver private structure
3110  *  Description: It is used for configuring CBS in AVB TX queues
3111  */
3112 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3113 {
3114 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3115 	u32 mode_to_use;
3116 	u32 queue;
3117 
3118 	/* queue 0 is reserved for legacy traffic */
3119 	for (queue = 1; queue < tx_queues_count; queue++) {
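		/* CBS only applies to AVB queues; queues left in DCB mode
		 * are skipped.
		 */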
3120 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3121 		if (mode_to_use == MTL_QUEUE_DCB)
3122 			continue;
3123 
3124 		stmmac_config_cbs(priv, priv->hw,
3125 				priv->plat->tx_queues_cfg[queue].send_slope,
3126 				priv->plat->tx_queues_cfg[queue].idle_slope,
3127 				priv->plat->tx_queues_cfg[queue].high_credit,
3128 				priv->plat->tx_queues_cfg[queue].low_credit,
3129 				queue);
3130 	}
3131 }
3132 
3133 /**
3134  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3135  *  @priv: driver private structure
3136  *  Description: It is used for mapping RX queues to RX dma channels
3137  */
3138 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3139 {
3140 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3141 	u32 queue;
3142 	u32 chan;
3143 
3144 	for (queue = 0; queue < rx_queues_count; queue++) {
3145 		chan = priv->plat->rx_queues_cfg[queue].chan;
3146 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3147 	}
3148 }
3149 
3150 /**
3151  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3152  *  @priv: driver private structure
3153  *  Description: It is used for configuring the RX Queue Priority
3154  */
3155 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3156 {
3157 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3158 	u32 queue;
3159 	u32 prio;
3160 
3161 	for (queue = 0; queue < rx_queues_count; queue++) {
3162 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3163 			continue;
3164 
3165 		prio = priv->plat->rx_queues_cfg[queue].prio;
3166 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3167 	}
3168 }
3169 
3170 /**
3171  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3172  *  @priv: driver private structure
3173  *  Description: It is used for configuring the TX Queue Priority
3174  */
3175 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3176 {
3177 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3178 	u32 queue;
3179 	u32 prio;
3180 
3181 	for (queue = 0; queue < tx_queues_count; queue++) {
3182 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3183 			continue;
3184 
3185 		prio = priv->plat->tx_queues_cfg[queue].prio;
3186 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3187 	}
3188 }
3189 
3190 /**
3191  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3192  *  @priv: driver private structure
3193  *  Description: It is used for configuring the RX queue routing
3194  */
3195 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3196 {
3197 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3198 	u32 queue;
3199 	u8 packet;
3200 
3201 	for (queue = 0; queue < rx_queues_count; queue++) {
3202 		/* no specific packet type routing specified for the queue */
3203 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3204 			continue;
3205 
3206 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3207 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3208 	}
3209 }
3210 
3211 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3212 {
3213 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3214 		priv->rss.enable = false;
3215 		return;
3216 	}
3217 
3218 	if (priv->dev->features & NETIF_F_RXHASH)
3219 		priv->rss.enable = true;
3220 	else
3221 		priv->rss.enable = false;
3222 
3223 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3224 			     priv->plat->rx_queues_to_use);
3225 }
3226 
3227 /**
3228  *  stmmac_mtl_configuration - Configure MTL
3229  *  @priv: driver private structure
3230  *  Description: It is used for configuring the MTL
3231  */
3232 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3233 {
3234 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3235 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3236 
3237 	if (tx_queues_count > 1)
3238 		stmmac_set_tx_queue_weight(priv);
3239 
3240 	/* Configure MTL RX algorithms */
3241 	if (rx_queues_count > 1)
3242 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3243 				priv->plat->rx_sched_algorithm);
3244 
3245 	/* Configure MTL TX algorithms */
3246 	if (tx_queues_count > 1)
3247 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3248 				priv->plat->tx_sched_algorithm);
3249 
3250 	/* Configure CBS in AVB TX queues */
3251 	if (tx_queues_count > 1)
3252 		stmmac_configure_cbs(priv);
3253 
3254 	/* Map RX MTL to DMA channels */
3255 	stmmac_rx_queue_dma_chan_map(priv);
3256 
3257 	/* Enable MAC RX Queues */
3258 	stmmac_mac_enable_rx_queues(priv);
3259 
3260 	/* Set RX priorities */
3261 	if (rx_queues_count > 1)
3262 		stmmac_mac_config_rx_queues_prio(priv);
3263 
3264 	/* Set TX priorities */
3265 	if (tx_queues_count > 1)
3266 		stmmac_mac_config_tx_queues_prio(priv);
3267 
3268 	/* Set RX routing */
3269 	if (rx_queues_count > 1)
3270 		stmmac_mac_config_rx_queues_routing(priv);
3271 
3272 	/* Receive Side Scaling */
3273 	if (rx_queues_count > 1)
3274 		stmmac_mac_config_rss(priv);
3275 }
3276 
3277 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3278 {
3279 	if (priv->dma_cap.asp) {
3280 		netdev_info(priv->dev, "Enabling Safety Features\n");
3281 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3282 					  priv->plat->safety_feat_cfg);
3283 	} else {
3284 		netdev_info(priv->dev, "No Safety Features support found\n");
3285 	}
3286 }
3287 
3288 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3289 {
3290 	char *name;
3291 
3292 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3293 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3294 
3295 	name = priv->wq_name;
3296 	sprintf(name, "%s-fpe", priv->dev->name);
3297 
3298 	priv->fpe_wq = create_singlethread_workqueue(name);
3299 	if (!priv->fpe_wq) {
3300 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3301 
3302 		return -ENOMEM;
3303 	}
3304 	netdev_info(priv->dev, "FPE workqueue start\n");
3305 
3306 	return 0;
3307 }
3308 
3309 /**
3310  * stmmac_hw_setup - setup mac in a usable state.
3311  *  @dev : pointer to the device structure.
3312  *  @ptp_register: register PTP if set
3313  *  Description:
3314  *  this is the main function to set up the HW in a usable state: the DMA
3315  *  engine is reset, the core registers are configured (e.g. AXI,
3316  *  checksum features, timers) and the DMA is ready to start receiving
3317  *  and transmitting.
3318  *  Return value:
3319  *  0 on success and an appropriate negative error code as defined in
3320  *  errno.h on failure.
3321  */
3322 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3323 {
3324 	struct stmmac_priv *priv = netdev_priv(dev);
3325 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3326 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3327 	bool sph_en;
3328 	u32 chan;
3329 	int ret;
3330 
3331 	/* DMA initialization and SW reset */
3332 	ret = stmmac_init_dma_engine(priv);
3333 	if (ret < 0) {
3334 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3335 			   __func__);
3336 		return ret;
3337 	}
3338 
3339 	/* Copy the MAC addr into the HW  */
3340 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3341 
3342 	/* PS and related bits will be programmed according to the speed */
3343 	if (priv->hw->pcs) {
3344 		int speed = priv->plat->mac_port_sel_speed;
3345 
3346 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3347 		    (speed == SPEED_1000)) {
3348 			priv->hw->ps = speed;
3349 		} else {
3350 			dev_warn(priv->device, "invalid port speed\n");
3351 			priv->hw->ps = 0;
3352 		}
3353 	}
3354 
3355 	/* Initialize the MAC Core */
3356 	stmmac_core_init(priv, priv->hw, dev);
3357 
3358 	/* Initialize MTL*/
3359 	stmmac_mtl_configuration(priv);
3360 
3361 	/* Initialize Safety Features */
3362 	stmmac_safety_feat_configuration(priv);
3363 
3364 	ret = stmmac_rx_ipc(priv, priv->hw);
3365 	if (!ret) {
3366 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3367 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3368 		priv->hw->rx_csum = 0;
3369 	}
3370 
3371 	/* Enable the MAC Rx/Tx */
3372 	stmmac_mac_set(priv, priv->ioaddr, true);
3373 
3374 	/* Set the HW DMA mode and the COE */
3375 	stmmac_dma_operation_mode(priv);
3376 
3377 	stmmac_mmc_setup(priv);
3378 
3379 	if (ptp_register) {
3380 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3381 		if (ret < 0)
3382 			netdev_warn(priv->dev,
3383 				    "failed to enable PTP reference clock: %pe\n",
3384 				    ERR_PTR(ret));
3385 	}
3386 
3387 	ret = stmmac_init_ptp(priv);
3388 	if (ret == -EOPNOTSUPP)
3389 		netdev_info(priv->dev, "PTP not supported by HW\n");
3390 	else if (ret)
3391 		netdev_warn(priv->dev, "PTP init failed\n");
3392 	else if (ptp_register)
3393 		stmmac_ptp_register(priv);
3394 
3395 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3396 
3397 	/* Convert the timer from msec to usec */
3398 	if (!priv->tx_lpi_timer)
3399 		priv->tx_lpi_timer = eee_timer * 1000;
3400 
3401 	if (priv->use_riwt) {
3402 		u32 queue;
3403 
3404 		for (queue = 0; queue < rx_cnt; queue++) {
3405 			if (!priv->rx_riwt[queue])
3406 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3407 
3408 			stmmac_rx_watchdog(priv, priv->ioaddr,
3409 					   priv->rx_riwt[queue], queue);
3410 		}
3411 	}
3412 
3413 	if (priv->hw->pcs)
3414 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3415 
3416 	/* set TX and RX rings length */
3417 	stmmac_set_rings_length(priv);
3418 
3419 	/* Enable TSO */
3420 	if (priv->tso) {
3421 		for (chan = 0; chan < tx_cnt; chan++) {
3422 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3423 
3424 			/* TSO and TBS cannot co-exist */
3425 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3426 				continue;
3427 
3428 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3429 		}
3430 	}
3431 
3432 	/* Enable Split Header */
3433 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3434 	for (chan = 0; chan < rx_cnt; chan++)
3435 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3436 
3438 	/* VLAN Tag Insertion */
3439 	if (priv->dma_cap.vlins)
3440 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3441 
3442 	/* TBS */
3443 	for (chan = 0; chan < tx_cnt; chan++) {
3444 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3445 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3446 
3447 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3448 	}
3449 
3450 	/* Configure real RX and TX queues */
3451 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3452 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3453 
3454 	/* Start the ball rolling... */
3455 	stmmac_start_all_dma(priv);
3456 
3457 	if (priv->dma_cap.fpesel) {
3458 		stmmac_fpe_start_wq(priv);
3459 
3460 		if (priv->plat->fpe_cfg->enable)
3461 			stmmac_fpe_handshake(priv, true);
3462 	}
3463 
3464 	return 0;
3465 }
3466 
3467 static void stmmac_hw_teardown(struct net_device *dev)
3468 {
3469 	struct stmmac_priv *priv = netdev_priv(dev);
3470 
3471 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3472 }
3473 
3474 static void stmmac_free_irq(struct net_device *dev,
3475 			    enum request_irq_err irq_err, int irq_idx)
3476 {
3477 	struct stmmac_priv *priv = netdev_priv(dev);
3478 	int j;
3479 
3480 	switch (irq_err) {
3481 	case REQ_IRQ_ERR_ALL:
3482 		irq_idx = priv->plat->tx_queues_to_use;
3483 		fallthrough;
3484 	case REQ_IRQ_ERR_TX:
3485 		for (j = irq_idx - 1; j >= 0; j--) {
3486 			if (priv->tx_irq[j] > 0) {
3487 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3488 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3489 			}
3490 		}
3491 		irq_idx = priv->plat->rx_queues_to_use;
3492 		fallthrough;
3493 	case REQ_IRQ_ERR_RX:
3494 		for (j = irq_idx - 1; j >= 0; j--) {
3495 			if (priv->rx_irq[j] > 0) {
3496 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3497 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3498 			}
3499 		}
3500 
3501 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3502 			free_irq(priv->sfty_ue_irq, dev);
3503 		fallthrough;
3504 	case REQ_IRQ_ERR_SFTY_UE:
3505 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3506 			free_irq(priv->sfty_ce_irq, dev);
3507 		fallthrough;
3508 	case REQ_IRQ_ERR_SFTY_CE:
3509 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3510 			free_irq(priv->lpi_irq, dev);
3511 		fallthrough;
3512 	case REQ_IRQ_ERR_LPI:
3513 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3514 			free_irq(priv->wol_irq, dev);
3515 		fallthrough;
3516 	case REQ_IRQ_ERR_WOL:
3517 		free_irq(dev->irq, dev);
3518 		fallthrough;
3519 	case REQ_IRQ_ERR_MAC:
3520 	case REQ_IRQ_ERR_NO:
		/* If the MAC IRQ request failed, there are no more IRQs to free */
3522 		break;
3523 	}
3524 }
3525 
3526 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3527 {
3528 	struct stmmac_priv *priv = netdev_priv(dev);
3529 	enum request_irq_err irq_err;
3530 	cpumask_t cpu_mask;
3531 	int irq_idx = 0;
3532 	char *int_name;
3533 	int ret;
3534 	int i;
3535 
3536 	/* For common interrupt */
3537 	int_name = priv->int_name_mac;
3538 	sprintf(int_name, "%s:%s", dev->name, "mac");
3539 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3540 			  0, int_name, dev);
3541 	if (unlikely(ret < 0)) {
3542 		netdev_err(priv->dev,
3543 			   "%s: alloc mac MSI %d (error: %d)\n",
3544 			   __func__, dev->irq, ret);
3545 		irq_err = REQ_IRQ_ERR_MAC;
3546 		goto irq_error;
3547 	}
3548 
	/* Request the Wake-on-LAN IRQ in case another line is used for WoL */
3552 	priv->wol_irq_disabled = true;
3553 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3554 		int_name = priv->int_name_wol;
3555 		sprintf(int_name, "%s:%s", dev->name, "wol");
3556 		ret = request_irq(priv->wol_irq,
3557 				  stmmac_mac_interrupt,
3558 				  0, int_name, dev);
3559 		if (unlikely(ret < 0)) {
3560 			netdev_err(priv->dev,
3561 				   "%s: alloc wol MSI %d (error: %d)\n",
3562 				   __func__, priv->wol_irq, ret);
3563 			irq_err = REQ_IRQ_ERR_WOL;
3564 			goto irq_error;
3565 		}
3566 	}
3567 
	/* Request the LPI IRQ in case another line is used for LPI */
3571 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3572 		int_name = priv->int_name_lpi;
3573 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3574 		ret = request_irq(priv->lpi_irq,
3575 				  stmmac_mac_interrupt,
3576 				  0, int_name, dev);
3577 		if (unlikely(ret < 0)) {
3578 			netdev_err(priv->dev,
3579 				   "%s: alloc lpi MSI %d (error: %d)\n",
3580 				   __func__, priv->lpi_irq, ret);
3581 			irq_err = REQ_IRQ_ERR_LPI;
3582 			goto irq_error;
3583 		}
3584 	}
3585 
	/* Request the Safety Feature Correctable Error line in case another
	 * line is used for it
	 */
3589 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3590 		int_name = priv->int_name_sfty_ce;
3591 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3592 		ret = request_irq(priv->sfty_ce_irq,
3593 				  stmmac_safety_interrupt,
3594 				  0, int_name, dev);
3595 		if (unlikely(ret < 0)) {
3596 			netdev_err(priv->dev,
3597 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3598 				   __func__, priv->sfty_ce_irq, ret);
3599 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3600 			goto irq_error;
3601 		}
3602 	}
3603 
	/* Request the Safety Feature Uncorrectable Error line in case another
	 * line is used for it
	 */
3607 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3608 		int_name = priv->int_name_sfty_ue;
3609 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3610 		ret = request_irq(priv->sfty_ue_irq,
3611 				  stmmac_safety_interrupt,
3612 				  0, int_name, dev);
3613 		if (unlikely(ret < 0)) {
3614 			netdev_err(priv->dev,
3615 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3616 				   __func__, priv->sfty_ue_irq, ret);
3617 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3618 			goto irq_error;
3619 		}
3620 	}
3621 
3622 	/* Request Rx MSI irq */
3623 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3624 		if (i >= MTL_MAX_RX_QUEUES)
3625 			break;
3626 		if (priv->rx_irq[i] == 0)
3627 			continue;
3628 
3629 		int_name = priv->int_name_rx_irq[i];
3630 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3631 		ret = request_irq(priv->rx_irq[i],
3632 				  stmmac_msi_intr_rx,
3633 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3634 		if (unlikely(ret < 0)) {
3635 			netdev_err(priv->dev,
3636 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3637 				   __func__, i, priv->rx_irq[i], ret);
3638 			irq_err = REQ_IRQ_ERR_RX;
3639 			irq_idx = i;
3640 			goto irq_error;
3641 		}
3642 		cpumask_clear(&cpu_mask);
3643 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3644 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3645 	}
3646 
3647 	/* Request Tx MSI irq */
3648 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3649 		if (i >= MTL_MAX_TX_QUEUES)
3650 			break;
3651 		if (priv->tx_irq[i] == 0)
3652 			continue;
3653 
3654 		int_name = priv->int_name_tx_irq[i];
3655 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3656 		ret = request_irq(priv->tx_irq[i],
3657 				  stmmac_msi_intr_tx,
3658 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3659 		if (unlikely(ret < 0)) {
3660 			netdev_err(priv->dev,
3661 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3662 				   __func__, i, priv->tx_irq[i], ret);
3663 			irq_err = REQ_IRQ_ERR_TX;
3664 			irq_idx = i;
3665 			goto irq_error;
3666 		}
3667 		cpumask_clear(&cpu_mask);
3668 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3669 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3670 	}
3671 
3672 	return 0;
3673 
3674 irq_error:
3675 	stmmac_free_irq(dev, irq_err, irq_idx);
3676 	return ret;
3677 }
3678 
3679 static int stmmac_request_irq_single(struct net_device *dev)
3680 {
3681 	struct stmmac_priv *priv = netdev_priv(dev);
3682 	enum request_irq_err irq_err;
3683 	int ret;
3684 
3685 	ret = request_irq(dev->irq, stmmac_interrupt,
3686 			  IRQF_SHARED, dev->name, dev);
3687 	if (unlikely(ret < 0)) {
3688 		netdev_err(priv->dev,
3689 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3690 			   __func__, dev->irq, ret);
3691 		irq_err = REQ_IRQ_ERR_MAC;
3692 		goto irq_error;
3693 	}
3694 
	/* Request the Wake-on-LAN IRQ in case another line is used for WoL */
3698 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3699 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3700 				  IRQF_SHARED, dev->name, dev);
3701 		if (unlikely(ret < 0)) {
3702 			netdev_err(priv->dev,
3703 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3704 				   __func__, priv->wol_irq, ret);
3705 			irq_err = REQ_IRQ_ERR_WOL;
3706 			goto irq_error;
3707 		}
3708 	}
3709 
	/* Request the LPI IRQ in case another line is used for LPI */
3711 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3712 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3713 				  IRQF_SHARED, dev->name, dev);
3714 		if (unlikely(ret < 0)) {
3715 			netdev_err(priv->dev,
3716 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3717 				   __func__, priv->lpi_irq, ret);
3718 			irq_err = REQ_IRQ_ERR_LPI;
3719 			goto irq_error;
3720 		}
3721 	}
3722 
3723 	return 0;
3724 
3725 irq_error:
3726 	stmmac_free_irq(dev, irq_err, 0);
3727 	return ret;
3728 }
3729 
3730 static int stmmac_request_irq(struct net_device *dev)
3731 {
3732 	struct stmmac_priv *priv = netdev_priv(dev);
3733 	int ret;
3734 
3735 	/* Request the IRQ lines */
3736 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3737 		ret = stmmac_request_irq_multi_msi(dev);
3738 	else
3739 		ret = stmmac_request_irq_single(dev);
3740 
3741 	return ret;
3742 }
3743 
3744 /**
3745  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3746  *  @priv: driver private structure
3747  *  @mtu: MTU to setup the dma queue and buf with
 *  Description: Allocate and populate a dma_conf structure based on the
 *  provided MTU. Allocate the Tx/Rx DMA queues and initialize them.
 *  Return value:
 *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3752  */
3753 static struct stmmac_dma_conf *
3754 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3755 {
3756 	struct stmmac_dma_conf *dma_conf;
3757 	int chan, bfsize, ret;
3758 
3759 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3760 	if (!dma_conf) {
3761 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3762 			   __func__);
3763 		return ERR_PTR(-ENOMEM);
3764 	}
3765 
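	/* Select the RX buffer size: use 16KiB buffers when the ring mode
	 * requires them for this MTU, otherwise derive the buffer size from
	 * the MTU.
	 */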
3766 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3767 	if (bfsize < 0)
3768 		bfsize = 0;
3769 
3770 	if (bfsize < BUF_SIZE_16KiB)
3771 		bfsize = stmmac_set_bfsize(mtu, 0);
3772 
3773 	dma_conf->dma_buf_sz = bfsize;
	/* Take the TX/RX ring sizes from the ones already defined in the
	 * priv struct, if set.
	 */
3777 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3778 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3779 
3780 	if (!dma_conf->dma_tx_size)
3781 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3782 	if (!dma_conf->dma_rx_size)
3783 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3784 
3785 	/* Earlier check for TBS */
3786 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3787 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3788 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3789 
3790 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3791 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3792 	}
3793 
3794 	ret = alloc_dma_desc_resources(priv, dma_conf);
3795 	if (ret < 0) {
3796 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3797 			   __func__);
3798 		goto alloc_error;
3799 	}
3800 
3801 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3802 	if (ret < 0) {
3803 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3804 			   __func__);
3805 		goto init_error;
3806 	}
3807 
3808 	return dma_conf;
3809 
3810 init_error:
3811 	free_dma_desc_resources(priv, dma_conf);
3812 alloc_error:
3813 	kfree(dma_conf);
3814 	return ERR_PTR(ret);
3815 }
3816 
3817 /**
3818  *  __stmmac_open - open entry point of the driver
3819  *  @dev : pointer to the device structure.
 *  @dma_conf: structure holding the DMA configuration to use
3821  *  Description:
3822  *  This function is the open entry point of the driver.
3823  *  Return value:
 *  0 on success and a negative errno code on failure.
3826  */
3827 static int __stmmac_open(struct net_device *dev,
3828 			 struct stmmac_dma_conf *dma_conf)
3829 {
3830 	struct stmmac_priv *priv = netdev_priv(dev);
3831 	int mode = priv->plat->phy_interface;
3832 	u32 chan;
3833 	int ret;
3834 
3835 	ret = pm_runtime_resume_and_get(priv->device);
3836 	if (ret < 0)
3837 		return ret;
3838 
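	/* Attach a PHY only when the link is not fully handled by a PCS
	 * (TBI/RTBI, XPCS in C73 auto-negotiation mode, or a Lynx PCS).
	 */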
3839 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3840 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3841 	    (!priv->hw->xpcs ||
3842 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3843 	    !priv->hw->lynx_pcs) {
3844 		ret = stmmac_init_phy(dev);
3845 		if (ret) {
3846 			netdev_err(priv->dev,
3847 				   "%s: Cannot attach to PHY (error: %d)\n",
3848 				   __func__, ret);
3849 			goto init_phy_error;
3850 		}
3851 	}
3852 
3853 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3854 
3855 	buf_sz = dma_conf->dma_buf_sz;
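	/* Preserve the per-queue TBS enable state before overwriting
	 * priv->dma_conf with the new configuration.
	 */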
3856 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3857 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3858 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3859 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3860 
3861 	stmmac_reset_queues_param(priv);
3862 
3863 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3864 	    priv->plat->serdes_powerup) {
3865 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3866 		if (ret < 0) {
3867 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3868 				   __func__);
3869 			goto init_error;
3870 		}
3871 	}
3872 
3873 	ret = stmmac_hw_setup(dev, true);
3874 	if (ret < 0) {
3875 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3876 		goto init_error;
3877 	}
3878 
3879 	stmmac_init_coalesce(priv);
3880 
3881 	phylink_start(priv->phylink);
3882 	/* We may have called phylink_speed_down before */
3883 	phylink_speed_up(priv->phylink);
3884 
3885 	ret = stmmac_request_irq(dev);
3886 	if (ret)
3887 		goto irq_error;
3888 
3889 	stmmac_enable_all_queues(priv);
3890 	netif_tx_start_all_queues(priv->dev);
3891 	stmmac_enable_all_dma_irq(priv);
3892 
3893 	return 0;
3894 
3895 irq_error:
3896 	phylink_stop(priv->phylink);
3897 
3898 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3899 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3900 
3901 	stmmac_hw_teardown(dev);
3902 init_error:
3903 	phylink_disconnect_phy(priv->phylink);
3904 init_phy_error:
3905 	pm_runtime_put(priv->device);
3906 	return ret;
3907 }
3908 
3909 static int stmmac_open(struct net_device *dev)
3910 {
3911 	struct stmmac_priv *priv = netdev_priv(dev);
3912 	struct stmmac_dma_conf *dma_conf;
3913 	int ret;
3914 
3915 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3916 	if (IS_ERR(dma_conf))
3917 		return PTR_ERR(dma_conf);
3918 
3919 	ret = __stmmac_open(dev, dma_conf);
3920 	if (ret)
3921 		free_dma_desc_resources(priv, dma_conf);
3922 
3923 	kfree(dma_conf);
3924 	return ret;
3925 }
3926 
3927 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3928 {
3929 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3930 
3931 	if (priv->fpe_wq)
3932 		destroy_workqueue(priv->fpe_wq);
3933 
3934 	netdev_info(priv->dev, "FPE workqueue stop");
3935 }
3936 
3937 /**
3938  *  stmmac_release - close entry point of the driver
3939  *  @dev : device pointer.
3940  *  Description:
3941  *  This is the stop entry point of the driver.
3942  */
3943 static int stmmac_release(struct net_device *dev)
3944 {
3945 	struct stmmac_priv *priv = netdev_priv(dev);
3946 	u32 chan;
3947 
3948 	if (device_may_wakeup(priv->device))
3949 		phylink_speed_down(priv->phylink, false);
3950 	/* Stop and disconnect the PHY */
3951 	phylink_stop(priv->phylink);
3952 	phylink_disconnect_phy(priv->phylink);
3953 
3954 	stmmac_disable_all_queues(priv);
3955 
3956 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3957 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3958 
3959 	netif_tx_disable(dev);
3960 
3961 	/* Free the IRQ lines */
3962 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3963 
3964 	if (priv->eee_enabled) {
3965 		priv->tx_path_in_lpi_mode = false;
3966 		del_timer_sync(&priv->eee_ctrl_timer);
3967 	}
3968 
3969 	/* Stop TX/RX DMA and clear the descriptors */
3970 	stmmac_stop_all_dma(priv);
3971 
3972 	/* Release and free the Rx/Tx resources */
3973 	free_dma_desc_resources(priv, &priv->dma_conf);
3974 
3975 	/* Disable the MAC Rx/Tx */
3976 	stmmac_mac_set(priv, priv->ioaddr, false);
3977 
3978 	/* Powerdown Serdes if there is */
3979 	if (priv->plat->serdes_powerdown)
3980 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3981 
3982 	netif_carrier_off(dev);
3983 
3984 	stmmac_release_ptp(priv);
3985 
3986 	pm_runtime_put(priv->device);
3987 
3988 	if (priv->dma_cap.fpesel)
3989 		stmmac_fpe_stop_wq(priv);
3990 
3991 	return 0;
3992 }
3993 
3994 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3995 			       struct stmmac_tx_queue *tx_q)
3996 {
3997 	u16 tag = 0x0, inner_tag = 0x0;
3998 	u32 inner_type = 0x0;
3999 	struct dma_desc *p;
4000 
4001 	if (!priv->dma_cap.vlins)
4002 		return false;
4003 	if (!skb_vlan_tag_present(skb))
4004 		return false;
4005 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4006 		inner_tag = skb_vlan_tag_get(skb);
4007 		inner_type = STMMAC_VLAN_INSERT;
4008 	}
4009 
4010 	tag = skb_vlan_tag_get(skb);
4011 
4012 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4013 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4014 	else
4015 		p = &tx_q->dma_tx[tx_q->cur_tx];
4016 
4017 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4018 		return false;
4019 
4020 	stmmac_set_tx_owner(priv, p);
4021 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4022 	return true;
4023 }
4024 
4025 /**
 *  stmmac_tso_allocator - Program the descriptors for the TSO payload
4027  *  @priv: driver private structure
4028  *  @des: buffer start address
4029  *  @total_len: total length to fill in descriptors
4030  *  @last_segment: condition for the last descriptor
4031  *  @queue: TX queue index
4032  *  Description:
 *  This function fills the descriptors and takes further ring entries as
 *  needed, according to the length of the buffer to fill.
4035  */
4036 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4037 				 int total_len, bool last_segment, u32 queue)
4038 {
4039 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4040 	struct dma_desc *desc;
4041 	u32 buff_size;
4042 	int tmp_len;
4043 
4044 	tmp_len = total_len;
4045 
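	/* Split the payload across as many descriptors as needed, each
	 * carrying at most TSO_MAX_BUFF_SIZE bytes.
	 */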
4046 	while (tmp_len > 0) {
4047 		dma_addr_t curr_addr;
4048 
4049 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4050 						priv->dma_conf.dma_tx_size);
4051 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4052 
4053 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4054 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4055 		else
4056 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4057 
4058 		curr_addr = des + (total_len - tmp_len);
4059 		if (priv->dma_cap.addr64 <= 32)
4060 			desc->des0 = cpu_to_le32(curr_addr);
4061 		else
4062 			stmmac_set_desc_addr(priv, desc, curr_addr);
4063 
4064 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4065 			    TSO_MAX_BUFF_SIZE : tmp_len;
4066 
4067 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4068 				0, 1,
4069 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4070 				0, 0);
4071 
4072 		tmp_len -= TSO_MAX_BUFF_SIZE;
4073 	}
4074 }
4075 
4076 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4077 {
4078 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4079 	int desc_size;
4080 
4081 	if (likely(priv->extend_desc))
4082 		desc_size = sizeof(struct dma_extended_desc);
4083 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4084 		desc_size = sizeof(struct dma_edesc);
4085 	else
4086 		desc_size = sizeof(struct dma_desc);
4087 
	/* The OWN bit must be the last thing set when preparing the
	 * descriptor, and a barrier is needed to make sure everything
	 * is coherent before handing control over to the DMA engine.
	 */
4092 	wmb();
4093 
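	/* Move the tail pointer past the last prepared descriptor so the DMA
	 * engine starts fetching the new descriptors.
	 */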
4094 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4095 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4096 }
4097 
4098 /**
4099  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4100  *  @skb : the socket buffer
4101  *  @dev : device pointer
4102  *  Description: this is the transmit function that is called on TSO frames
4103  *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming in case of TSO frames:
4105  *
4106  *  First Descriptor
4107  *   --------
4108  *   | DES0 |---> buffer1 = L2/L3/L4 header
4109  *   | DES1 |---> TCP Payload (can continue on next descr...)
4110  *   | DES2 |---> buffer 1 and 2 len
4111  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4112  *   --------
4113  *	|
4114  *     ...
4115  *	|
4116  *   --------
4117  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4118  *   | DES1 | --|
4119  *   | DES2 | --> buffer 1 and 2 len
4120  *   | DES3 |
4121  *   --------
4122  *
 * The MSS is fixed while TSO is enabled, so the TDES3 context field does not
 * need to be reprogrammed for every frame.
4124  */
4125 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4126 {
4127 	struct dma_desc *desc, *first, *mss_desc = NULL;
4128 	struct stmmac_priv *priv = netdev_priv(dev);
4129 	int nfrags = skb_shinfo(skb)->nr_frags;
4130 	u32 queue = skb_get_queue_mapping(skb);
4131 	unsigned int first_entry, tx_packets;
4132 	struct stmmac_txq_stats *txq_stats;
4133 	int tmp_pay_len = 0, first_tx;
4134 	struct stmmac_tx_queue *tx_q;
4135 	bool has_vlan, set_ic;
4136 	u8 proto_hdr_len, hdr;
4137 	unsigned long flags;
4138 	u32 pay_len, mss;
4139 	dma_addr_t des;
4140 	int i;
4141 
4142 	tx_q = &priv->dma_conf.tx_queue[queue];
4143 	txq_stats = &priv->xstats.txq_stats[queue];
4144 	first_tx = tx_q->cur_tx;
4145 
4146 	/* Compute header lengths */
4147 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4148 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4149 		hdr = sizeof(struct udphdr);
4150 	} else {
4151 		proto_hdr_len = skb_tcp_all_headers(skb);
4152 		hdr = tcp_hdrlen(skb);
4153 	}
4154 
	/* Descriptor availability based on the threshold should be safe enough */
4156 	if (unlikely(stmmac_tx_avail(priv, queue) <
4157 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4158 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4159 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4160 								queue));
4161 			/* This is a hard error, log it. */
4162 			netdev_err(priv->dev,
4163 				   "%s: Tx Ring full when queue awake\n",
4164 				   __func__);
4165 		}
4166 		return NETDEV_TX_BUSY;
4167 	}
4168 
4169 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4170 
4171 	mss = skb_shinfo(skb)->gso_size;
4172 
4173 	/* set new MSS value if needed */
4174 	if (mss != tx_q->mss) {
4175 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4176 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4177 		else
4178 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4179 
4180 		stmmac_set_mss(priv, mss_desc, mss);
4181 		tx_q->mss = mss;
4182 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4183 						priv->dma_conf.dma_tx_size);
4184 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4185 	}
4186 
4187 	if (netif_msg_tx_queued(priv)) {
4188 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4189 			__func__, hdr, proto_hdr_len, pay_len, mss);
4190 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4191 			skb->data_len);
4192 	}
4193 
4194 	/* Check if VLAN can be inserted by HW */
4195 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4196 
4197 	first_entry = tx_q->cur_tx;
4198 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4199 
4200 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4201 		desc = &tx_q->dma_entx[first_entry].basic;
4202 	else
4203 		desc = &tx_q->dma_tx[first_entry];
4204 	first = desc;
4205 
4206 	if (has_vlan)
4207 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4208 
4209 	/* first descriptor: fill Headers on Buf1 */
4210 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4211 			     DMA_TO_DEVICE);
4212 	if (dma_mapping_error(priv->device, des))
4213 		goto dma_map_err;
4214 
4215 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4216 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4217 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4218 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4219 
4220 	if (priv->dma_cap.addr64 <= 32) {
4221 		first->des0 = cpu_to_le32(des);
4222 
4223 		/* Fill start of payload in buff2 of first descriptor */
4224 		if (pay_len)
4225 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4226 
4227 		/* If needed take extra descriptors to fill the remaining payload */
4228 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4229 	} else {
4230 		stmmac_set_desc_addr(priv, first, des);
4231 		tmp_pay_len = pay_len;
4232 		des += proto_hdr_len;
4233 		pay_len = 0;
4234 	}
4235 
4236 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4237 
4238 	/* Prepare fragments */
4239 	for (i = 0; i < nfrags; i++) {
4240 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4241 
4242 		des = skb_frag_dma_map(priv->device, frag, 0,
4243 				       skb_frag_size(frag),
4244 				       DMA_TO_DEVICE);
4245 		if (dma_mapping_error(priv->device, des))
4246 			goto dma_map_err;
4247 
4248 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4249 				     (i == nfrags - 1), queue);
4250 
4251 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4252 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4253 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4254 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4255 	}
4256 
4257 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4258 
4259 	/* Only the last descriptor gets to point to the skb. */
4260 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4261 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4262 
4263 	/* Manage tx mitigation */
4264 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4265 	tx_q->tx_count_frames += tx_packets;
4266 
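	/* Decide whether this descriptor must raise a TX completion interrupt:
	 * always for HW-timestamped frames, otherwise only when the TX
	 * coalescing frame count is reached.
	 */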
4267 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4268 		set_ic = true;
4269 	else if (!priv->tx_coal_frames[queue])
4270 		set_ic = false;
4271 	else if (tx_packets > priv->tx_coal_frames[queue])
4272 		set_ic = true;
4273 	else if ((tx_q->tx_count_frames %
4274 		  priv->tx_coal_frames[queue]) < tx_packets)
4275 		set_ic = true;
4276 	else
4277 		set_ic = false;
4278 
4279 	if (set_ic) {
4280 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4281 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4282 		else
4283 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4284 
4285 		tx_q->tx_count_frames = 0;
4286 		stmmac_set_tx_ic(priv, desc);
4287 	}
4288 
4289 	/* We've used all descriptors we need for this skb, however,
4290 	 * advance cur_tx so that it references a fresh descriptor.
4291 	 * ndo_start_xmit will fill this descriptor the next time it's
4292 	 * called and stmmac_tx_clean may clean up to this descriptor.
4293 	 */
4294 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4295 
4296 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4297 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4298 			  __func__);
4299 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4300 	}
4301 
4302 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4303 	txq_stats->tx_bytes += skb->len;
4304 	txq_stats->tx_tso_frames++;
4305 	txq_stats->tx_tso_nfrags += nfrags;
4306 	if (set_ic)
4307 		txq_stats->tx_set_ic_bit++;
4308 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4309 
4310 	if (priv->sarc_type)
4311 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4312 
4313 	skb_tx_timestamp(skb);
4314 
4315 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4316 		     priv->hwts_tx_en)) {
4317 		/* declare that device is doing timestamping */
4318 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4319 		stmmac_enable_tx_timestamp(priv, first);
4320 	}
4321 
4322 	/* Complete the first descriptor before granting the DMA */
4323 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4324 			proto_hdr_len,
4325 			pay_len,
4326 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4327 			hdr / 4, (skb->len - proto_hdr_len));
4328 
4329 	/* If context desc is used to change MSS */
4330 	if (mss_desc) {
		/* Make sure that the first descriptor has been completely
		 * written, including its OWN bit. The MSS descriptor actually
		 * precedes the first descriptor, so its OWN bit must be the
		 * last thing written.
		 */
4336 		dma_wmb();
4337 		stmmac_set_tx_owner(priv, mss_desc);
4338 	}
4339 
4340 	if (netif_msg_pktdata(priv)) {
4341 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4342 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4343 			tx_q->cur_tx, first, nfrags);
4344 		pr_info(">>> frame to be transmitted: ");
4345 		print_pkt(skb->data, skb_headlen(skb));
4346 	}
4347 
4348 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4349 
4350 	stmmac_flush_tx_descriptors(priv, queue);
4351 	stmmac_tx_timer_arm(priv, queue);
4352 
4353 	return NETDEV_TX_OK;
4354 
4355 dma_map_err:
4356 	dev_err(priv->device, "Tx dma map failed\n");
4357 	dev_kfree_skb(skb);
4358 	priv->xstats.tx_dropped++;
4359 	return NETDEV_TX_OK;
4360 }
4361 
4362 /**
4363  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4364  * @skb: socket buffer to check
4365  *
4366  * Check if a packet has an ethertype that will trigger the IP header checks
4367  * and IP/TCP checksum engine of the stmmac core.
4368  *
4369  * Return: true if the ethertype can trigger the checksum engine, false
4370  * otherwise
4371  */
4372 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4373 {
4374 	int depth = 0;
4375 	__be16 proto;
4376 
4377 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4378 				    &depth);
4379 
4380 	return (depth <= ETH_HLEN) &&
4381 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4382 }
4383 
4384 /**
4385  *  stmmac_xmit - Tx entry point of the driver
4386  *  @skb : the socket buffer
4387  *  @dev : device pointer
4388  *  Description : this is the tx entry point of the driver.
4389  *  It programs the chain or the ring and supports oversized frames
4390  *  and SG feature.
4391  */
4392 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4393 {
4394 	unsigned int first_entry, tx_packets, enh_desc;
4395 	struct stmmac_priv *priv = netdev_priv(dev);
4396 	unsigned int nopaged_len = skb_headlen(skb);
4397 	int i, csum_insertion = 0, is_jumbo = 0;
4398 	u32 queue = skb_get_queue_mapping(skb);
4399 	int nfrags = skb_shinfo(skb)->nr_frags;
4400 	int gso = skb_shinfo(skb)->gso_type;
4401 	struct stmmac_txq_stats *txq_stats;
4402 	struct dma_edesc *tbs_desc = NULL;
4403 	struct dma_desc *desc, *first;
4404 	struct stmmac_tx_queue *tx_q;
4405 	bool has_vlan, set_ic;
4406 	int entry, first_tx;
4407 	unsigned long flags;
4408 	dma_addr_t des;
4409 
4410 	tx_q = &priv->dma_conf.tx_queue[queue];
4411 	txq_stats = &priv->xstats.txq_stats[queue];
4412 	first_tx = tx_q->cur_tx;
4413 
4414 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4415 		stmmac_disable_eee_mode(priv);
4416 
4417 	/* Manage oversized TCP frames for GMAC4 device */
4418 	if (skb_is_gso(skb) && priv->tso) {
4419 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4420 			return stmmac_tso_xmit(skb, dev);
4421 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4422 			return stmmac_tso_xmit(skb, dev);
4423 	}
4424 
4425 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4426 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4427 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4428 								queue));
4429 			/* This is a hard error, log it. */
4430 			netdev_err(priv->dev,
4431 				   "%s: Tx Ring full when queue awake\n",
4432 				   __func__);
4433 		}
4434 		return NETDEV_TX_BUSY;
4435 	}
4436 
4437 	/* Check if VLAN can be inserted by HW */
4438 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4439 
4440 	entry = tx_q->cur_tx;
4441 	first_entry = entry;
4442 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4443 
4444 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
	/* DWMAC IPs can be synthesized to support TX COE only for a few TX
	 * queues. In that case, checksum offloading for queues that don't
	 * support TX COE needs to fall back to software checksum calculation.
	 *
	 * Packets that won't trigger the COE, e.g. most DSA-tagged packets,
	 * will also have to be checksummed in software.
	 */
4452 	if (csum_insertion &&
4453 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4454 	     !stmmac_has_ip_ethertype(skb))) {
4455 		if (unlikely(skb_checksum_help(skb)))
4456 			goto dma_map_err;
4457 		csum_insertion = !csum_insertion;
4458 	}
4459 
4460 	if (likely(priv->extend_desc))
4461 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4462 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4463 		desc = &tx_q->dma_entx[entry].basic;
4464 	else
4465 		desc = tx_q->dma_tx + entry;
4466 
4467 	first = desc;
4468 
4469 	if (has_vlan)
4470 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4471 
4472 	enh_desc = priv->plat->enh_desc;
4473 	/* To program the descriptors according to the size of the frame */
4474 	if (enh_desc)
4475 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4476 
4477 	if (unlikely(is_jumbo)) {
4478 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4479 		if (unlikely(entry < 0) && (entry != -EINVAL))
4480 			goto dma_map_err;
4481 	}
4482 
4483 	for (i = 0; i < nfrags; i++) {
4484 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4485 		int len = skb_frag_size(frag);
4486 		bool last_segment = (i == (nfrags - 1));
4487 
4488 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4489 		WARN_ON(tx_q->tx_skbuff[entry]);
4490 
4491 		if (likely(priv->extend_desc))
4492 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4493 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4494 			desc = &tx_q->dma_entx[entry].basic;
4495 		else
4496 			desc = tx_q->dma_tx + entry;
4497 
4498 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4499 				       DMA_TO_DEVICE);
4500 		if (dma_mapping_error(priv->device, des))
4501 			goto dma_map_err; /* should reuse desc w/o issues */
4502 
4503 		tx_q->tx_skbuff_dma[entry].buf = des;
4504 
4505 		stmmac_set_desc_addr(priv, desc, des);
4506 
4507 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4508 		tx_q->tx_skbuff_dma[entry].len = len;
4509 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4510 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4511 
4512 		/* Prepare the descriptor and set the own bit too */
4513 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4514 				priv->mode, 1, last_segment, skb->len);
4515 	}
4516 
4517 	/* Only the last descriptor gets to point to the skb. */
4518 	tx_q->tx_skbuff[entry] = skb;
4519 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4520 
	/* According to the coalesce parameter, the IC bit for the last
	 * segment is set or left clear and the timer is restarted to clean
	 * the TX status. This approach takes care of the fragments: desc is
	 * the first element when there is no SG.
	 */
4526 	tx_packets = (entry + 1) - first_tx;
4527 	tx_q->tx_count_frames += tx_packets;
4528 
4529 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4530 		set_ic = true;
4531 	else if (!priv->tx_coal_frames[queue])
4532 		set_ic = false;
4533 	else if (tx_packets > priv->tx_coal_frames[queue])
4534 		set_ic = true;
4535 	else if ((tx_q->tx_count_frames %
4536 		  priv->tx_coal_frames[queue]) < tx_packets)
4537 		set_ic = true;
4538 	else
4539 		set_ic = false;
4540 
4541 	if (set_ic) {
4542 		if (likely(priv->extend_desc))
4543 			desc = &tx_q->dma_etx[entry].basic;
4544 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4545 			desc = &tx_q->dma_entx[entry].basic;
4546 		else
4547 			desc = &tx_q->dma_tx[entry];
4548 
4549 		tx_q->tx_count_frames = 0;
4550 		stmmac_set_tx_ic(priv, desc);
4551 	}
4552 
4553 	/* We've used all descriptors we need for this skb, however,
4554 	 * advance cur_tx so that it references a fresh descriptor.
4555 	 * ndo_start_xmit will fill this descriptor the next time it's
4556 	 * called and stmmac_tx_clean may clean up to this descriptor.
4557 	 */
4558 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4559 	tx_q->cur_tx = entry;
4560 
4561 	if (netif_msg_pktdata(priv)) {
4562 		netdev_dbg(priv->dev,
4563 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4564 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4565 			   entry, first, nfrags);
4566 
4567 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4568 		print_pkt(skb->data, skb->len);
4569 	}
4570 
4571 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4572 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4573 			  __func__);
4574 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4575 	}
4576 
4577 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4578 	txq_stats->tx_bytes += skb->len;
4579 	if (set_ic)
4580 		txq_stats->tx_set_ic_bit++;
4581 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4582 
4583 	if (priv->sarc_type)
4584 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4585 
4586 	skb_tx_timestamp(skb);
4587 
	/* The first descriptor can now be filled and its OWN bit set without
	 * any problem, because all the other descriptors are already ready
	 * to be passed to the DMA engine.
	 */
4592 	if (likely(!is_jumbo)) {
4593 		bool last_segment = (nfrags == 0);
4594 
4595 		des = dma_map_single(priv->device, skb->data,
4596 				     nopaged_len, DMA_TO_DEVICE);
4597 		if (dma_mapping_error(priv->device, des))
4598 			goto dma_map_err;
4599 
4600 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4601 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4602 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4603 
4604 		stmmac_set_desc_addr(priv, first, des);
4605 
4606 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4607 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4608 
4609 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4610 			     priv->hwts_tx_en)) {
4611 			/* declare that device is doing timestamping */
4612 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4613 			stmmac_enable_tx_timestamp(priv, first);
4614 		}
4615 
4616 		/* Prepare the first descriptor setting the OWN bit too */
4617 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4618 				csum_insertion, priv->mode, 0, last_segment,
4619 				skb->len);
4620 	}
4621 
4622 	if (tx_q->tbs & STMMAC_TBS_EN) {
4623 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4624 
4625 		tbs_desc = &tx_q->dma_entx[first_entry];
4626 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4627 	}
4628 
4629 	stmmac_set_tx_owner(priv, first);
4630 
4631 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4632 
4633 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4634 
4635 	stmmac_flush_tx_descriptors(priv, queue);
4636 	stmmac_tx_timer_arm(priv, queue);
4637 
4638 	return NETDEV_TX_OK;
4639 
4640 dma_map_err:
4641 	netdev_err(priv->dev, "Tx DMA map failed\n");
4642 	dev_kfree_skb(skb);
4643 	priv->xstats.tx_dropped++;
4644 	return NETDEV_TX_OK;
4645 }
4646 
4647 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4648 {
4649 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4650 	__be16 vlan_proto = veth->h_vlan_proto;
4651 	u16 vlanid;
4652 
4653 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4654 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4655 	    (vlan_proto == htons(ETH_P_8021AD) &&
4656 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4657 		/* pop the vlan tag */
4658 		vlanid = ntohs(veth->h_vlan_TCI);
4659 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4660 		skb_pull(skb, VLAN_HLEN);
4661 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4662 	}
4663 }
4664 
4665 /**
4666  * stmmac_rx_refill - refill used skb preallocated buffers
4667  * @priv: driver private structure
4668  * @queue: RX queue index
 * Description: this reallocates the RX buffers used by the zero-copy
 * reception process.
4671  */
4672 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4673 {
4674 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4675 	int dirty = stmmac_rx_dirty(priv, queue);
4676 	unsigned int entry = rx_q->dirty_rx;
4677 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4678 
4679 	if (priv->dma_cap.host_dma_width <= 32)
4680 		gfp |= GFP_DMA32;
4681 
4682 	while (dirty-- > 0) {
4683 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4684 		struct dma_desc *p;
4685 		bool use_rx_wd;
4686 
4687 		if (priv->extend_desc)
4688 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4689 		else
4690 			p = rx_q->dma_rx + entry;
4691 
4692 		if (!buf->page) {
4693 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4694 			if (!buf->page)
4695 				break;
4696 		}
4697 
4698 		if (priv->sph && !buf->sec_page) {
4699 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4700 			if (!buf->sec_page)
4701 				break;
4702 
4703 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4704 		}
4705 
4706 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4707 
4708 		stmmac_set_desc_addr(priv, p, buf->addr);
4709 		if (priv->sph)
4710 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4711 		else
4712 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4713 		stmmac_refill_desc3(priv, rx_q, p);
4714 
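		/* Use the RX watchdog (mitigation timer) instead of a
		 * per-descriptor completion interrupt whenever RIWT is
		 * enabled and frame coalescing does not require an
		 * immediate interrupt.
		 */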
4715 		rx_q->rx_count_frames++;
4716 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4717 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4718 			rx_q->rx_count_frames = 0;
4719 
4720 		use_rx_wd = !priv->rx_coal_frames[queue];
4721 		use_rx_wd |= rx_q->rx_count_frames > 0;
4722 		if (!priv->use_riwt)
4723 			use_rx_wd = false;
4724 
4725 		dma_wmb();
4726 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4727 
4728 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4729 	}
4730 	rx_q->dirty_rx = entry;
4731 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4732 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4733 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4734 }
4735 
4736 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4737 				       struct dma_desc *p,
4738 				       int status, unsigned int len)
4739 {
4740 	unsigned int plen = 0, hlen = 0;
4741 	int coe = priv->hw->rx_csum;
4742 
4743 	/* Not first descriptor, buffer is always zero */
4744 	if (priv->sph && len)
4745 		return 0;
4746 
4747 	/* First descriptor, get split header length */
4748 	stmmac_get_rx_header_len(priv, p, &hlen);
4749 	if (priv->sph && hlen) {
4750 		priv->xstats.rx_split_hdr_pkt_n++;
4751 		return hlen;
4752 	}
4753 
4754 	/* First descriptor, not last descriptor and not split header */
4755 	if (status & rx_not_ls)
4756 		return priv->dma_conf.dma_buf_sz;
4757 
4758 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4759 
4760 	/* First descriptor and last descriptor and not split header */
4761 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4762 }
4763 
4764 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4765 				       struct dma_desc *p,
4766 				       int status, unsigned int len)
4767 {
4768 	int coe = priv->hw->rx_csum;
4769 	unsigned int plen = 0;
4770 
4771 	/* Not split header, buffer is not available */
4772 	if (!priv->sph)
4773 		return 0;
4774 
4775 	/* Not last descriptor */
4776 	if (status & rx_not_ls)
4777 		return priv->dma_conf.dma_buf_sz;
4778 
4779 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4780 
4781 	/* Last descriptor */
4782 	return plen - len;
4783 }
4784 
4785 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4786 				struct xdp_frame *xdpf, bool dma_map)
4787 {
4788 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4789 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4790 	unsigned int entry = tx_q->cur_tx;
4791 	struct dma_desc *tx_desc;
4792 	dma_addr_t dma_addr;
4793 	bool set_ic;
4794 
4795 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4796 		return STMMAC_XDP_CONSUMED;
4797 
4798 	if (likely(priv->extend_desc))
4799 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4800 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4801 		tx_desc = &tx_q->dma_entx[entry].basic;
4802 	else
4803 		tx_desc = tx_q->dma_tx + entry;
4804 
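	/* For frames coming via ndo_xdp_xmit (dma_map == true) the data must
	 * be DMA-mapped; for XDP_TX the buffer already belongs to the page
	 * pool, so it only needs to be synced for the device.
	 */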
4805 	if (dma_map) {
4806 		dma_addr = dma_map_single(priv->device, xdpf->data,
4807 					  xdpf->len, DMA_TO_DEVICE);
4808 		if (dma_mapping_error(priv->device, dma_addr))
4809 			return STMMAC_XDP_CONSUMED;
4810 
4811 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4812 	} else {
4813 		struct page *page = virt_to_page(xdpf->data);
4814 
4815 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4816 			   xdpf->headroom;
4817 		dma_sync_single_for_device(priv->device, dma_addr,
4818 					   xdpf->len, DMA_BIDIRECTIONAL);
4819 
4820 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4821 	}
4822 
4823 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4824 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4825 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4826 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4827 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4828 
4829 	tx_q->xdpf[entry] = xdpf;
4830 
4831 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4832 
4833 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4834 			       true, priv->mode, true, true,
4835 			       xdpf->len);
4836 
4837 	tx_q->tx_count_frames++;
4838 
4839 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4840 		set_ic = true;
4841 	else
4842 		set_ic = false;
4843 
4844 	if (set_ic) {
		unsigned long flags;

		tx_q->tx_count_frames = 0;
4847 		stmmac_set_tx_ic(priv, tx_desc);
4848 		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4849 		txq_stats->tx_set_ic_bit++;
4850 		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4851 	}
4852 
4853 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4854 
4855 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4856 	tx_q->cur_tx = entry;
4857 
4858 	return STMMAC_XDP_TX;
4859 }
4860 
4861 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4862 				   int cpu)
4863 {
4864 	int index = cpu;
4865 
4866 	if (unlikely(index < 0))
4867 		index = 0;
4868 
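	/* Map the CPU number onto a valid TX queue index; equivalent to
	 * index % tx_queues_to_use.
	 */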
4869 	while (index >= priv->plat->tx_queues_to_use)
4870 		index -= priv->plat->tx_queues_to_use;
4871 
4872 	return index;
4873 }
4874 
4875 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4876 				struct xdp_buff *xdp)
4877 {
4878 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4879 	int cpu = smp_processor_id();
4880 	struct netdev_queue *nq;
4881 	int queue;
4882 	int res;
4883 
4884 	if (unlikely(!xdpf))
4885 		return STMMAC_XDP_CONSUMED;
4886 
4887 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4888 	nq = netdev_get_tx_queue(priv->dev, queue);
4889 
4890 	__netif_tx_lock(nq, cpu);
4891 	/* Avoids TX time-out as we are sharing with slow path */
4892 	txq_trans_cond_update(nq);
4893 
4894 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4895 	if (res == STMMAC_XDP_TX)
4896 		stmmac_flush_tx_descriptors(priv, queue);
4897 
4898 	__netif_tx_unlock(nq);
4899 
4900 	return res;
4901 }
4902 
4903 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4904 				 struct bpf_prog *prog,
4905 				 struct xdp_buff *xdp)
4906 {
4907 	u32 act;
4908 	int res;
4909 
4910 	act = bpf_prog_run_xdp(prog, xdp);
4911 	switch (act) {
4912 	case XDP_PASS:
4913 		res = STMMAC_XDP_PASS;
4914 		break;
4915 	case XDP_TX:
4916 		res = stmmac_xdp_xmit_back(priv, xdp);
4917 		break;
4918 	case XDP_REDIRECT:
4919 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4920 			res = STMMAC_XDP_CONSUMED;
4921 		else
4922 			res = STMMAC_XDP_REDIRECT;
4923 		break;
4924 	default:
4925 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4926 		fallthrough;
4927 	case XDP_ABORTED:
4928 		trace_xdp_exception(priv->dev, prog, act);
4929 		fallthrough;
4930 	case XDP_DROP:
4931 		res = STMMAC_XDP_CONSUMED;
4932 		break;
4933 	}
4934 
4935 	return res;
4936 }
4937 
4938 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4939 					   struct xdp_buff *xdp)
4940 {
4941 	struct bpf_prog *prog;
4942 	int res;
4943 
4944 	prog = READ_ONCE(priv->xdp_prog);
4945 	if (!prog) {
4946 		res = STMMAC_XDP_PASS;
4947 		goto out;
4948 	}
4949 
4950 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4951 out:
4952 	return ERR_PTR(-res);
4953 }
4954 
4955 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4956 				   int xdp_status)
4957 {
4958 	int cpu = smp_processor_id();
4959 	int queue;
4960 
4961 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4962 
4963 	if (xdp_status & STMMAC_XDP_TX)
4964 		stmmac_tx_timer_arm(priv, queue);
4965 
4966 	if (xdp_status & STMMAC_XDP_REDIRECT)
4967 		xdp_do_flush();
4968 }
4969 
4970 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4971 					       struct xdp_buff *xdp)
4972 {
4973 	unsigned int metasize = xdp->data - xdp->data_meta;
4974 	unsigned int datasize = xdp->data_end - xdp->data;
4975 	struct sk_buff *skb;
4976 
4977 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4978 			       xdp->data_end - xdp->data_hard_start,
4979 			       GFP_ATOMIC | __GFP_NOWARN);
4980 	if (unlikely(!skb))
4981 		return NULL;
4982 
4983 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4984 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4985 	if (metasize)
4986 		skb_metadata_set(skb, metasize);
4987 
4988 	return skb;
4989 }
4990 
4991 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4992 				   struct dma_desc *p, struct dma_desc *np,
4993 				   struct xdp_buff *xdp)
4994 {
4995 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4996 	struct stmmac_channel *ch = &priv->channel[queue];
4997 	unsigned int len = xdp->data_end - xdp->data;
4998 	enum pkt_hash_types hash_type;
4999 	int coe = priv->hw->rx_csum;
5000 	unsigned long flags;
5001 	struct sk_buff *skb;
5002 	u32 hash;
5003 
5004 	skb = stmmac_construct_skb_zc(ch, xdp);
5005 	if (!skb) {
5006 		priv->xstats.rx_dropped++;
5007 		return;
5008 	}
5009 
5010 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5011 	stmmac_rx_vlan(priv->dev, skb);
5012 	skb->protocol = eth_type_trans(skb, priv->dev);
5013 
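	/* Only trust the HW checksum when RX COE is enabled and the ethertype
	 * is one the checksum engine actually handles.
	 */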
5014 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5015 		skb_checksum_none_assert(skb);
5016 	else
5017 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5018 
5019 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5020 		skb_set_hash(skb, hash, hash_type);
5021 
5022 	skb_record_rx_queue(skb, queue);
5023 	napi_gro_receive(&ch->rxtx_napi, skb);
5024 
5025 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5026 	rxq_stats->rx_pkt_n++;
5027 	rxq_stats->rx_bytes += len;
5028 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5029 }
5030 
5031 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5032 {
5033 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5034 	unsigned int entry = rx_q->dirty_rx;
5035 	struct dma_desc *rx_desc = NULL;
5036 	bool ret = true;
5037 
5038 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5039 
5040 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5041 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5042 		dma_addr_t dma_addr;
5043 		bool use_rx_wd;
5044 
5045 		if (!buf->xdp) {
5046 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5047 			if (!buf->xdp) {
5048 				ret = false;
5049 				break;
5050 			}
5051 		}
5052 
5053 		if (priv->extend_desc)
5054 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5055 		else
5056 			rx_desc = rx_q->dma_rx + entry;
5057 
5058 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5059 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5060 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5061 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5062 
5063 		rx_q->rx_count_frames++;
5064 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5065 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5066 			rx_q->rx_count_frames = 0;
5067 
5068 		use_rx_wd = !priv->rx_coal_frames[queue];
5069 		use_rx_wd |= rx_q->rx_count_frames > 0;
5070 		if (!priv->use_riwt)
5071 			use_rx_wd = false;
5072 
5073 		dma_wmb();
5074 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5075 
5076 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5077 	}
5078 
5079 	if (rx_desc) {
5080 		rx_q->dirty_rx = entry;
5081 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5082 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5083 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5084 	}
5085 
5086 	return ret;
5087 }
5088 
5089 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5090 {
5091 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5092 	 * to represent incoming packet, whereas cb field in the same structure
5093 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5094 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5095 	 */
5096 	return (struct stmmac_xdp_buff *)xdp;
5097 }
5098 
5099 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5100 {
5101 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5102 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5103 	unsigned int count = 0, error = 0, len = 0;
5104 	int dirty = stmmac_rx_dirty(priv, queue);
5105 	unsigned int next_entry = rx_q->cur_rx;
5106 	u32 rx_errors = 0, rx_dropped = 0;
5107 	unsigned int desc_size;
5108 	struct bpf_prog *prog;
5109 	bool failure = false;
5110 	unsigned long flags;
5111 	int xdp_status = 0;
5112 	int status = 0;
5113 
5114 	if (netif_msg_rx_status(priv)) {
5115 		void *rx_head;
5116 
5117 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5118 		if (priv->extend_desc) {
5119 			rx_head = (void *)rx_q->dma_erx;
5120 			desc_size = sizeof(struct dma_extended_desc);
5121 		} else {
5122 			rx_head = (void *)rx_q->dma_rx;
5123 			desc_size = sizeof(struct dma_desc);
5124 		}
5125 
5126 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5127 				    rx_q->dma_rx_phy, desc_size);
5128 	}
5129 	while (count < limit) {
5130 		struct stmmac_rx_buffer *buf;
5131 		struct stmmac_xdp_buff *ctx;
5132 		unsigned int buf1_len = 0;
5133 		struct dma_desc *np, *p;
5134 		int entry;
5135 		int res;
5136 
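		/* Resume a frame that spanned the previous NAPI poll, if any;
		 * otherwise start with a clean per-frame state.
		 */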
5137 		if (!count && rx_q->state_saved) {
5138 			error = rx_q->state.error;
5139 			len = rx_q->state.len;
5140 		} else {
5141 			rx_q->state_saved = false;
5142 			error = 0;
5143 			len = 0;
5144 		}
5145 
5146 		if (count >= limit)
5147 			break;
5148 
5149 read_again:
5150 		buf1_len = 0;
5151 		entry = next_entry;
5152 		buf = &rx_q->buf_pool[entry];
5153 
5154 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5155 			failure = failure ||
5156 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5157 			dirty = 0;
5158 		}
5159 
5160 		if (priv->extend_desc)
5161 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5162 		else
5163 			p = rx_q->dma_rx + entry;
5164 
5165 		/* read the status of the incoming frame */
5166 		status = stmmac_rx_status(priv, &priv->xstats, p);
		/* stop if the descriptor is still owned by the DMA */
5168 		if (unlikely(status & dma_own))
5169 			break;
5170 
5171 		/* Prefetch the next RX descriptor */
5172 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5173 						priv->dma_conf.dma_rx_size);
5174 		next_entry = rx_q->cur_rx;
5175 
5176 		if (priv->extend_desc)
5177 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5178 		else
5179 			np = rx_q->dma_rx + next_entry;
5180 
5181 		prefetch(np);
5182 
		/* Ensure a valid XSK buffer before proceeding */
5184 		if (!buf->xdp)
5185 			break;
5186 
5187 		if (priv->extend_desc)
5188 			stmmac_rx_extended_status(priv, &priv->xstats,
5189 						  rx_q->dma_erx + entry);
5190 		if (unlikely(status == discard_frame)) {
5191 			xsk_buff_free(buf->xdp);
5192 			buf->xdp = NULL;
5193 			dirty++;
5194 			error = 1;
5195 			if (!priv->hwts_rx_en)
5196 				rx_errors++;
5197 		}
5198 
5199 		if (unlikely(error && (status & rx_not_ls)))
5200 			goto read_again;
5201 		if (unlikely(error)) {
5202 			count++;
5203 			continue;
5204 		}
5205 
5206 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5207 		if (likely(status & rx_not_ls)) {
5208 			xsk_buff_free(buf->xdp);
5209 			buf->xdp = NULL;
5210 			dirty++;
5211 			count++;
5212 			goto read_again;
5213 		}
5214 
5215 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5216 		ctx->priv = priv;
5217 		ctx->desc = p;
5218 		ctx->ndesc = np;
5219 
		/* XDP ZC frames only support primary buffers for now */
5221 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5222 		len += buf1_len;
5223 
5224 		/* ACS is disabled; strip manually. */
5225 		if (likely(!(status & rx_not_ls))) {
5226 			buf1_len -= ETH_FCS_LEN;
5227 			len -= ETH_FCS_LEN;
5228 		}
5229 
5230 		/* RX buffer is good and fits into an XSK pool buffer */
5231 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5232 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5233 
5234 		prog = READ_ONCE(priv->xdp_prog);
5235 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5236 
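		/* Act on the XDP verdict: PASS frames are turned into SKBs,
		 * CONSUMED frames are dropped, and TX/REDIRECT results are
		 * flushed later by stmmac_finalize_xdp_rx().
		 */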
5237 		switch (res) {
5238 		case STMMAC_XDP_PASS:
5239 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5240 			xsk_buff_free(buf->xdp);
5241 			break;
5242 		case STMMAC_XDP_CONSUMED:
5243 			xsk_buff_free(buf->xdp);
5244 			rx_dropped++;
5245 			break;
5246 		case STMMAC_XDP_TX:
5247 		case STMMAC_XDP_REDIRECT:
5248 			xdp_status |= res;
5249 			break;
5250 		}
5251 
5252 		buf->xdp = NULL;
5253 		dirty++;
5254 		count++;
5255 	}
5256 
5257 	if (status & rx_not_ls) {
5258 		rx_q->state_saved = true;
5259 		rx_q->state.error = error;
5260 		rx_q->state.len = len;
5261 	}
5262 
5263 	stmmac_finalize_xdp_rx(priv, xdp_status);
5264 
5265 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5266 	rxq_stats->rx_pkt_n += count;
5267 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5268 
5269 	priv->xstats.rx_dropped += rx_dropped;
5270 	priv->xstats.rx_errors += rx_errors;
5271 
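	/* With the need_wakeup protocol, tell user space whether it has to
	 * kick the kernel again so the RX ring can be refilled.
	 */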
5272 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5273 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5274 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5275 		else
5276 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5277 
5278 		return (int)count;
5279 	}
5280 
5281 	return failure ? limit : (int)count;
5282 }
5283 
5284 /**
5285  * stmmac_rx - manage the receive process
5286  * @priv: driver private structure
5287  * @limit: NAPI budget
5288  * @queue: RX queue index.
5289  * Description: this is the function called by the NAPI poll method.
5290  * It gets all the frames inside the ring.
5291  */
5292 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5293 {
5294 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5295 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5296 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5297 	struct stmmac_channel *ch = &priv->channel[queue];
5298 	unsigned int count = 0, error = 0, len = 0;
5299 	int status = 0, coe = priv->hw->rx_csum;
5300 	unsigned int next_entry = rx_q->cur_rx;
5301 	enum dma_data_direction dma_dir;
5302 	unsigned int desc_size;
5303 	struct sk_buff *skb = NULL;
5304 	struct stmmac_xdp_buff ctx;
5305 	unsigned long flags;
5306 	int xdp_status = 0;
5307 	int buf_sz;
5308 
5309 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
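	/* xdp_init_buff() takes the full frame size, so round the DMA buffer
	 * size up to a whole number of pages.
	 */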
5310 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5311 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5312 
5313 	if (netif_msg_rx_status(priv)) {
5314 		void *rx_head;
5315 
5316 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5317 		if (priv->extend_desc) {
5318 			rx_head = (void *)rx_q->dma_erx;
5319 			desc_size = sizeof(struct dma_extended_desc);
5320 		} else {
5321 			rx_head = (void *)rx_q->dma_rx;
5322 			desc_size = sizeof(struct dma_desc);
5323 		}
5324 
5325 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5326 				    rx_q->dma_rx_phy, desc_size);
5327 	}
5328 	while (count < limit) {
5329 		unsigned int buf1_len = 0, buf2_len = 0;
5330 		enum pkt_hash_types hash_type;
5331 		struct stmmac_rx_buffer *buf;
5332 		struct dma_desc *np, *p;
5333 		int entry;
5334 		u32 hash;
5335 
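		/* Resume a frame whose descriptors spanned the previous NAPI
		 * run; its partial state was saved at the end of that run.
		 */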
5336 		if (!count && rx_q->state_saved) {
5337 			skb = rx_q->state.skb;
5338 			error = rx_q->state.error;
5339 			len = rx_q->state.len;
5340 		} else {
5341 			rx_q->state_saved = false;
5342 			skb = NULL;
5343 			error = 0;
5344 			len = 0;
5345 		}
5346 
5347 read_again:
5348 		if (count >= limit)
5349 			break;
5350 
5351 		buf1_len = 0;
5352 		buf2_len = 0;
5353 		entry = next_entry;
5354 		buf = &rx_q->buf_pool[entry];
5355 
5356 		if (priv->extend_desc)
5357 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5358 		else
5359 			p = rx_q->dma_rx + entry;
5360 
5361 		/* read the status of the incoming frame */
5362 		status = stmmac_rx_status(priv, &priv->xstats, p);
5363 		/* check if the descriptor is still owned by the DMA; otherwise go ahead */
5364 		if (unlikely(status & dma_own))
5365 			break;
5366 
5367 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5368 						priv->dma_conf.dma_rx_size);
5369 		next_entry = rx_q->cur_rx;
5370 
5371 		if (priv->extend_desc)
5372 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5373 		else
5374 			np = rx_q->dma_rx + next_entry;
5375 
5376 		prefetch(np);
5377 
5378 		if (priv->extend_desc)
5379 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5380 		if (unlikely(status == discard_frame)) {
5381 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5382 			buf->page = NULL;
5383 			error = 1;
5384 			if (!priv->hwts_rx_en)
5385 				rx_errors++;
5386 		}
5387 
5388 		if (unlikely(error && (status & rx_not_ls)))
5389 			goto read_again;
5390 		if (unlikely(error)) {
5391 			dev_kfree_skb(skb);
5392 			skb = NULL;
5393 			count++;
5394 			continue;
5395 		}
5396 
5397 		/* Buffer is good. Go on. */
5398 
5399 		prefetch(page_address(buf->page) + buf->page_offset);
5400 		if (buf->sec_page)
5401 			prefetch(page_address(buf->sec_page));
5402 
5403 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5404 		len += buf1_len;
5405 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5406 		len += buf2_len;
5407 
5408 		/* ACS is disabled; strip manually. */
5409 		if (likely(!(status & rx_not_ls))) {
5410 			if (buf2_len) {
5411 				buf2_len -= ETH_FCS_LEN;
5412 				len -= ETH_FCS_LEN;
5413 			} else if (buf1_len) {
5414 				buf1_len -= ETH_FCS_LEN;
5415 				len -= ETH_FCS_LEN;
5416 			}
5417 		}
5418 
5419 		if (!skb) {
5420 			unsigned int pre_len, sync_len;
5421 
5422 			dma_sync_single_for_cpu(priv->device, buf->addr,
5423 						buf1_len, dma_dir);
5424 
5425 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5426 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5427 					 buf->page_offset, buf1_len, true);
5428 
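			/* Remember how much data is visible before running XDP
			 * so the later DMA sync covers at least this length.
			 */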
5429 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5430 				  buf->page_offset;
5431 
5432 			ctx.priv = priv;
5433 			ctx.desc = p;
5434 			ctx.ndesc = np;
5435 
5436 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5437 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5438 			 * cover the maximum length the CPU touched.
5439 			 */
5440 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5441 				   buf->page_offset;
5442 			sync_len = max(sync_len, pre_len);
5443 
5444 			/* For non-XDP_PASS verdicts */
5445 			if (IS_ERR(skb)) {
5446 				unsigned int xdp_res = -PTR_ERR(skb);
5447 
5448 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5449 					page_pool_put_page(rx_q->page_pool,
5450 							   virt_to_head_page(ctx.xdp.data),
5451 							   sync_len, true);
5452 					buf->page = NULL;
5453 					rx_dropped++;
5454 
5455 					/* Clear skb: it carried the XDP
5456 					 * verdict as an error pointer.
5457 					 */
5458 					skb = NULL;
5459 
5460 					if (unlikely((status & rx_not_ls)))
5461 						goto read_again;
5462 
5463 					count++;
5464 					continue;
5465 				} else if (xdp_res & (STMMAC_XDP_TX |
5466 						      STMMAC_XDP_REDIRECT)) {
5467 					xdp_status |= xdp_res;
5468 					buf->page = NULL;
5469 					skb = NULL;
5470 					count++;
5471 					continue;
5472 				}
5473 			}
5474 		}
5475 
5476 		if (!skb) {
5477 			/* XDP program may expand or reduce tail */
5478 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5479 
5480 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5481 			if (!skb) {
5482 				rx_dropped++;
5483 				count++;
5484 				goto drain_data;
5485 			}
5486 
5487 			/* XDP program may adjust header */
5488 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5489 			skb_put(skb, buf1_len);
5490 
5491 			/* Data payload copied into SKB, page ready for recycle */
5492 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5493 			buf->page = NULL;
5494 		} else if (buf1_len) {
5495 			dma_sync_single_for_cpu(priv->device, buf->addr,
5496 						buf1_len, dma_dir);
5497 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5498 					buf->page, buf->page_offset, buf1_len,
5499 					priv->dma_conf.dma_buf_sz);
5500 
5501 			/* Data payload appended into SKB */
5502 			skb_mark_for_recycle(skb);
5503 			buf->page = NULL;
5504 		}
5505 
5506 		if (buf2_len) {
5507 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5508 						buf2_len, dma_dir);
5509 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5510 					buf->sec_page, 0, buf2_len,
5511 					priv->dma_conf.dma_buf_sz);
5512 
5513 			/* Data payload appended into SKB */
5514 			skb_mark_for_recycle(skb);
5515 			buf->sec_page = NULL;
5516 		}
5517 
5518 drain_data:
5519 		if (likely(status & rx_not_ls))
5520 			goto read_again;
5521 		if (!skb)
5522 			continue;
5523 
5524 		/* Got entire packet into SKB. Finish it. */
5525 
5526 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5527 		stmmac_rx_vlan(priv->dev, skb);
5528 		skb->protocol = eth_type_trans(skb, priv->dev);
5529 
5530 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5531 			skb_checksum_none_assert(skb);
5532 		else
5533 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5534 
5535 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5536 			skb_set_hash(skb, hash, hash_type);
5537 
5538 		skb_record_rx_queue(skb, queue);
5539 		napi_gro_receive(&ch->rx_napi, skb);
5540 		skb = NULL;
5541 
5542 		rx_packets++;
5543 		rx_bytes += len;
5544 		count++;
5545 	}
5546 
5547 	if (status & rx_not_ls || skb) {
5548 		rx_q->state_saved = true;
5549 		rx_q->state.skb = skb;
5550 		rx_q->state.error = error;
5551 		rx_q->state.len = len;
5552 	}
5553 
5554 	stmmac_finalize_xdp_rx(priv, xdp_status);
5555 
5556 	stmmac_rx_refill(priv, queue);
5557 
5558 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5559 	rxq_stats->rx_packets += rx_packets;
5560 	rxq_stats->rx_bytes += rx_bytes;
5561 	rxq_stats->rx_pkt_n += count;
5562 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5563 
5564 	priv->xstats.rx_dropped += rx_dropped;
5565 	priv->xstats.rx_errors += rx_errors;
5566 
5567 	return count;
5568 }
5569 
5570 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5571 {
5572 	struct stmmac_channel *ch =
5573 		container_of(napi, struct stmmac_channel, rx_napi);
5574 	struct stmmac_priv *priv = ch->priv_data;
5575 	struct stmmac_rxq_stats *rxq_stats;
5576 	u32 chan = ch->index;
5577 	unsigned long flags;
5578 	int work_done;
5579 
5580 	rxq_stats = &priv->xstats.rxq_stats[chan];
5581 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5582 	rxq_stats->napi_poll++;
5583 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5584 
5585 	work_done = stmmac_rx(priv, budget, chan);
5586 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5587 		unsigned long flags;
5588 
5589 		spin_lock_irqsave(&ch->lock, flags);
5590 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5591 		spin_unlock_irqrestore(&ch->lock, flags);
5592 	}
5593 
5594 	return work_done;
5595 }
5596 
5597 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5598 {
5599 	struct stmmac_channel *ch =
5600 		container_of(napi, struct stmmac_channel, tx_napi);
5601 	struct stmmac_priv *priv = ch->priv_data;
5602 	struct stmmac_txq_stats *txq_stats;
5603 	u32 chan = ch->index;
5604 	unsigned long flags;
5605 	int work_done;
5606 
5607 	txq_stats = &priv->xstats.txq_stats[chan];
5608 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5609 	txq_stats->napi_poll++;
5610 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5611 
5612 	work_done = stmmac_tx_clean(priv, budget, chan);
5613 	work_done = min(work_done, budget);
5614 
5615 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5616 		unsigned long flags;
5617 
5618 		spin_lock_irqsave(&ch->lock, flags);
5619 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5620 		spin_unlock_irqrestore(&ch->lock, flags);
5621 	}
5622 
5623 	return work_done;
5624 }
5625 
5626 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5627 {
5628 	struct stmmac_channel *ch =
5629 		container_of(napi, struct stmmac_channel, rxtx_napi);
5630 	struct stmmac_priv *priv = ch->priv_data;
5631 	int rx_done, tx_done, rxtx_done;
5632 	struct stmmac_rxq_stats *rxq_stats;
5633 	struct stmmac_txq_stats *txq_stats;
5634 	u32 chan = ch->index;
5635 	unsigned long flags;
5636 
5637 	rxq_stats = &priv->xstats.rxq_stats[chan];
5638 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5639 	rxq_stats->napi_poll++;
5640 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5641 
5642 	txq_stats = &priv->xstats.txq_stats[chan];
5643 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5644 	txq_stats->napi_poll++;
5645 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5646 
5647 	tx_done = stmmac_tx_clean(priv, budget, chan);
5648 	tx_done = min(tx_done, budget);
5649 
5650 	rx_done = stmmac_rx_zc(priv, budget, chan);
5651 
5652 	rxtx_done = max(tx_done, rx_done);
5653 
5654 	/* If either TX or RX work is not complete, return the budget
5655 	 * and keep polling.
5656 	 */
5657 	if (rxtx_done >= budget)
5658 		return budget;
5659 
5660 	/* all work done, exit the polling mode */
5661 	if (napi_complete_done(napi, rxtx_done)) {
5662 		unsigned long flags;
5663 
5664 		spin_lock_irqsave(&ch->lock, flags);
5665 		/* Both RX and TX work are complete,
5666 		 * so enable both RX & TX IRQs.
5667 		 */
5668 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5669 		spin_unlock_irqrestore(&ch->lock, flags);
5670 	}
5671 
5672 	return min(rxtx_done, budget - 1);
5673 }
5674 
5675 /**
5676  *  stmmac_tx_timeout
5677  *  @dev : Pointer to net device structure
5678  *  @txqueue: the index of the hanging transmit queue
5679  *  Description: this function is called when a packet transmission fails to
5680  *   complete within a reasonable time. The driver will mark the error in the
5681  *   netdev structure and arrange for the device to be reset to a sane state
5682  *   in order to transmit a new packet.
5683  */
5684 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5685 {
5686 	struct stmmac_priv *priv = netdev_priv(dev);
5687 
5688 	stmmac_global_err(priv);
5689 }
5690 
5691 /**
5692  *  stmmac_set_rx_mode - entry point for multicast addressing
5693  *  @dev : pointer to the device structure
5694  *  Description:
5695  *  This function is a driver entry point which gets called by the kernel
5696  *  whenever multicast addresses must be enabled/disabled.
5697  *  Return value:
5698  *  void.
5699  */
5700 static void stmmac_set_rx_mode(struct net_device *dev)
5701 {
5702 	struct stmmac_priv *priv = netdev_priv(dev);
5703 
5704 	stmmac_set_filter(priv, priv->hw, dev);
5705 }
5706 
5707 /**
5708  *  stmmac_change_mtu - entry point to change MTU size for the device.
5709  *  @dev : device pointer.
5710  *  @new_mtu : the new MTU size for the device.
5711  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5712  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5713  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5714  *  Return value:
5715  *  0 on success and an appropriate negative error code as defined in
5716  *  errno.h on failure.
5717  */
5718 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5719 {
5720 	struct stmmac_priv *priv = netdev_priv(dev);
5721 	int txfifosz = priv->plat->tx_fifo_size;
5722 	struct stmmac_dma_conf *dma_conf;
5723 	const int mtu = new_mtu;
5724 	int ret;
5725 
5726 	if (txfifosz == 0)
5727 		txfifosz = priv->dma_cap.tx_fifo_size;
5728 
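	/* The TX FIFO is split evenly across the TX queues in use; the new
	 * MTU must fit into a single per-queue share.
	 */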
5729 	txfifosz /= priv->plat->tx_queues_to_use;
5730 
5731 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5732 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5733 		return -EINVAL;
5734 	}
5735 
5736 	new_mtu = STMMAC_ALIGN(new_mtu);
5737 
5738 	/* If this condition is true, the FIFO is too small or the MTU is too large */
5739 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5740 		return -EINVAL;
5741 
5742 	if (netif_running(dev)) {
5743 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5744 		/* Try to allocate the new DMA conf with the new mtu */
5745 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5746 		if (IS_ERR(dma_conf)) {
5747 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5748 				   mtu);
5749 			return PTR_ERR(dma_conf);
5750 		}
5751 
5752 		stmmac_release(dev);
5753 
5754 		ret = __stmmac_open(dev, dma_conf);
5755 		if (ret) {
5756 			free_dma_desc_resources(priv, dma_conf);
5757 			kfree(dma_conf);
5758 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5759 			return ret;
5760 		}
5761 
5762 		kfree(dma_conf);
5763 
5764 		stmmac_set_rx_mode(dev);
5765 	}
5766 
5767 	dev->mtu = mtu;
5768 	netdev_update_features(dev);
5769 
5770 	return 0;
5771 }
5772 
5773 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5774 					     netdev_features_t features)
5775 {
5776 	struct stmmac_priv *priv = netdev_priv(dev);
5777 
5778 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5779 		features &= ~NETIF_F_RXCSUM;
5780 
5781 	if (!priv->plat->tx_coe)
5782 		features &= ~NETIF_F_CSUM_MASK;
5783 
5784 	/* Some GMAC devices have a bugged Jumbo frame support that
5785 	 * needs to have the Tx COE disabled for oversized frames
5786 	 * (due to limited buffer sizes). In this case we disable
5787 	 * the TX csum insertion in the TDES and do not use SF.
5788 	 */
5789 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5790 		features &= ~NETIF_F_CSUM_MASK;
5791 
5792 	/* Disable TSO if asked by ethtool */
5793 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5794 		if (features & NETIF_F_TSO)
5795 			priv->tso = true;
5796 		else
5797 			priv->tso = false;
5798 	}
5799 
5800 	return features;
5801 }
5802 
5803 static int stmmac_set_features(struct net_device *netdev,
5804 			       netdev_features_t features)
5805 {
5806 	struct stmmac_priv *priv = netdev_priv(netdev);
5807 
5808 	/* Keep the COE type if checksum offload is supported */
5809 	if (features & NETIF_F_RXCSUM)
5810 		priv->hw->rx_csum = priv->plat->rx_coe;
5811 	else
5812 		priv->hw->rx_csum = 0;
5813 	/* No check needed because rx_coe has already been set and will be
5814 	 * fixed up if there is an issue.
5815 	 */
5816 	stmmac_rx_ipc(priv, priv->hw);
5817 
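	/* Split Header depends on RX checksum offload, so re-evaluate it
	 * whenever the RXCSUM feature changes.
	 */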
5818 	if (priv->sph_cap) {
5819 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5820 		u32 chan;
5821 
5822 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5823 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5824 	}
5825 
5826 	return 0;
5827 }
5828 
5829 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5830 {
5831 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5832 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5833 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5834 	bool *hs_enable = &fpe_cfg->hs_enable;
5835 
5836 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5837 		return;
5838 
5839 	/* If LP has sent verify mPacket, LP is FPE capable */
5840 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5841 		if (*lp_state < FPE_STATE_CAPABLE)
5842 			*lp_state = FPE_STATE_CAPABLE;
5843 
5844 		/* If the user has requested FPE enable, respond quickly */
5845 		if (*hs_enable)
5846 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5847 						fpe_cfg,
5848 						MPACKET_RESPONSE);
5849 	}
5850 
5851 	/* If Local has sent verify mPacket, Local is FPE capable */
5852 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5853 		if (*lo_state < FPE_STATE_CAPABLE)
5854 			*lo_state = FPE_STATE_CAPABLE;
5855 	}
5856 
5857 	/* If LP has sent response mPacket, LP is entering FPE ON */
5858 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5859 		*lp_state = FPE_STATE_ENTERING_ON;
5860 
5861 	/* If Local has sent response mPacket, Local is entering FPE ON */
5862 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5863 		*lo_state = FPE_STATE_ENTERING_ON;
5864 
5865 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5866 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5867 	    priv->fpe_wq) {
5868 		queue_work(priv->fpe_wq, &priv->fpe_task);
5869 	}
5870 }
5871 
5872 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5873 {
5874 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5875 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5876 	u32 queues_count;
5877 	u32 queue;
5878 	bool xmac;
5879 
5880 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5881 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5882 
5883 	if (priv->irq_wake)
5884 		pm_wakeup_event(priv->device, 0);
5885 
5886 	if (priv->dma_cap.estsel)
5887 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5888 				      &priv->xstats, tx_cnt);
5889 
5890 	if (priv->dma_cap.fpesel) {
5891 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5892 						   priv->dev);
5893 
5894 		stmmac_fpe_event_status(priv, status);
5895 	}
5896 
5897 	/* To handle the GMAC's own interrupts */
5898 	if ((priv->plat->has_gmac) || xmac) {
5899 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5900 
5901 		if (unlikely(status)) {
5902 			/* For LPI we need to save the tx status */
5903 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5904 				priv->tx_path_in_lpi_mode = true;
5905 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5906 				priv->tx_path_in_lpi_mode = false;
5907 		}
5908 
5909 		for (queue = 0; queue < queues_count; queue++) {
5910 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5911 							    queue);
5912 		}
5913 
5914 		/* PCS link status */
5915 		if (priv->hw->pcs &&
5916 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5917 			if (priv->xstats.pcs_link)
5918 				netif_carrier_on(priv->dev);
5919 			else
5920 				netif_carrier_off(priv->dev);
5921 		}
5922 
5923 		stmmac_timestamp_interrupt(priv, priv);
5924 	}
5925 }
5926 
5927 /**
5928  *  stmmac_interrupt - main ISR
5929  *  @irq: interrupt number.
5930  *  @dev_id: to pass the net device pointer.
5931  *  Description: this is the main driver interrupt service routine.
5932  *  It can call:
5933  *  o DMA service routine (to manage incoming frame reception and transmission
5934  *    status)
5935  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5936  *    interrupts.
5937  */
5938 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5939 {
5940 	struct net_device *dev = (struct net_device *)dev_id;
5941 	struct stmmac_priv *priv = netdev_priv(dev);
5942 
5943 	/* Check if adapter is up */
5944 	if (test_bit(STMMAC_DOWN, &priv->state))
5945 		return IRQ_HANDLED;
5946 
5947 	/* Check if a fatal error happened */
5948 	if (stmmac_safety_feat_interrupt(priv))
5949 		return IRQ_HANDLED;
5950 
5951 	/* To handle Common interrupts */
5952 	stmmac_common_interrupt(priv);
5953 
5954 	/* To handle DMA interrupts */
5955 	stmmac_dma_interrupt(priv);
5956 
5957 	return IRQ_HANDLED;
5958 }
5959 
5960 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5961 {
5962 	struct net_device *dev = (struct net_device *)dev_id;
5963 	struct stmmac_priv *priv = netdev_priv(dev);
5964 
5965 	if (unlikely(!dev)) {
5966 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5967 		return IRQ_NONE;
5968 	}
5969 
5970 	/* Check if adapter is up */
5971 	if (test_bit(STMMAC_DOWN, &priv->state))
5972 		return IRQ_HANDLED;
5973 
5974 	/* To handle Common interrupts */
5975 	stmmac_common_interrupt(priv);
5976 
5977 	return IRQ_HANDLED;
5978 }
5979 
5980 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5981 {
5982 	struct net_device *dev = (struct net_device *)dev_id;
5983 	struct stmmac_priv *priv = netdev_priv(dev);
5984 
5985 	if (unlikely(!dev)) {
5986 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5987 		return IRQ_NONE;
5988 	}
5989 
5990 	/* Check if adapter is up */
5991 	if (test_bit(STMMAC_DOWN, &priv->state))
5992 		return IRQ_HANDLED;
5993 
5994 	/* Check if a fatal error happened */
5995 	stmmac_safety_feat_interrupt(priv);
5996 
5997 	return IRQ_HANDLED;
5998 }
5999 
6000 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6001 {
6002 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6003 	struct stmmac_dma_conf *dma_conf;
6004 	int chan = tx_q->queue_index;
6005 	struct stmmac_priv *priv;
6006 	int status;
6007 
6008 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6009 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6010 
6011 	if (unlikely(!data)) {
6012 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6013 		return IRQ_NONE;
6014 	}
6015 
6016 	/* Check if adapter is up */
6017 	if (test_bit(STMMAC_DOWN, &priv->state))
6018 		return IRQ_HANDLED;
6019 
6020 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6021 
6022 	if (unlikely(status & tx_hard_error_bump_tc)) {
6023 		/* Try to bump up the dma threshold on this failure */
6024 		stmmac_bump_dma_threshold(priv, chan);
6025 	} else if (unlikely(status == tx_hard_error)) {
6026 		stmmac_tx_err(priv, chan);
6027 	}
6028 
6029 	return IRQ_HANDLED;
6030 }
6031 
6032 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6033 {
6034 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6035 	struct stmmac_dma_conf *dma_conf;
6036 	int chan = rx_q->queue_index;
6037 	struct stmmac_priv *priv;
6038 
6039 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6040 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6041 
6042 	if (unlikely(!data)) {
6043 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6044 		return IRQ_NONE;
6045 	}
6046 
6047 	/* Check if adapter is up */
6048 	if (test_bit(STMMAC_DOWN, &priv->state))
6049 		return IRQ_HANDLED;
6050 
6051 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6052 
6053 	return IRQ_HANDLED;
6054 }
6055 
6056 /**
6057  *  stmmac_ioctl - Entry point for the Ioctl
6058  *  @dev: Device pointer.
6059  *  @rq: An IOCTL-specific structure that can contain a pointer to
6060  *  a proprietary structure used to pass information to the driver.
6061  *  @cmd: IOCTL command
6062  *  Description:
6063  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6064  */
6065 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6066 {
6067 	struct stmmac_priv *priv = netdev_priv(dev);
6068 	int ret = -EOPNOTSUPP;
6069 
6070 	if (!netif_running(dev))
6071 		return -EINVAL;
6072 
6073 	switch (cmd) {
6074 	case SIOCGMIIPHY:
6075 	case SIOCGMIIREG:
6076 	case SIOCSMIIREG:
6077 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6078 		break;
6079 	case SIOCSHWTSTAMP:
6080 		ret = stmmac_hwtstamp_set(dev, rq);
6081 		break;
6082 	case SIOCGHWTSTAMP:
6083 		ret = stmmac_hwtstamp_get(dev, rq);
6084 		break;
6085 	default:
6086 		break;
6087 	}
6088 
6089 	return ret;
6090 }
6091 
6092 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6093 				    void *cb_priv)
6094 {
6095 	struct stmmac_priv *priv = cb_priv;
6096 	int ret = -EOPNOTSUPP;
6097 
6098 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6099 		return ret;
6100 
6101 	__stmmac_disable_all_queues(priv);
6102 
6103 	switch (type) {
6104 	case TC_SETUP_CLSU32:
6105 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6106 		break;
6107 	case TC_SETUP_CLSFLOWER:
6108 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6109 		break;
6110 	default:
6111 		break;
6112 	}
6113 
6114 	stmmac_enable_all_queues(priv);
6115 	return ret;
6116 }
6117 
6118 static LIST_HEAD(stmmac_block_cb_list);
6119 
6120 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6121 			   void *type_data)
6122 {
6123 	struct stmmac_priv *priv = netdev_priv(ndev);
6124 
6125 	switch (type) {
6126 	case TC_QUERY_CAPS:
6127 		return stmmac_tc_query_caps(priv, priv, type_data);
6128 	case TC_SETUP_BLOCK:
6129 		return flow_block_cb_setup_simple(type_data,
6130 						  &stmmac_block_cb_list,
6131 						  stmmac_setup_tc_block_cb,
6132 						  priv, priv, true);
6133 	case TC_SETUP_QDISC_CBS:
6134 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6135 	case TC_SETUP_QDISC_TAPRIO:
6136 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6137 	case TC_SETUP_QDISC_ETF:
6138 		return stmmac_tc_setup_etf(priv, priv, type_data);
6139 	default:
6140 		return -EOPNOTSUPP;
6141 	}
6142 }
6143 
6144 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6145 			       struct net_device *sb_dev)
6146 {
6147 	int gso = skb_shinfo(skb)->gso_type;
6148 
6149 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6150 		/*
6151 		 * There is no way to determine the number of TSO/USO
6152 		 * capable queues. Let's always use queue 0,
6153 		 * because if TSO/USO is supported then at least this
6154 		 * one will be capable.
6155 		 */
6156 		return 0;
6157 	}
6158 
6159 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6160 }
6161 
6162 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6163 {
6164 	struct stmmac_priv *priv = netdev_priv(ndev);
6165 	int ret = 0;
6166 
6167 	ret = pm_runtime_resume_and_get(priv->device);
6168 	if (ret < 0)
6169 		return ret;
6170 
6171 	ret = eth_mac_addr(ndev, addr);
6172 	if (ret)
6173 		goto set_mac_error;
6174 
6175 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6176 
6177 set_mac_error:
6178 	pm_runtime_put(priv->device);
6179 
6180 	return ret;
6181 }
6182 
6183 #ifdef CONFIG_DEBUG_FS
6184 static struct dentry *stmmac_fs_dir;
6185 
6186 static void sysfs_display_ring(void *head, int size, int extend_desc,
6187 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6188 {
6189 	int i;
6190 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6191 	struct dma_desc *p = (struct dma_desc *)head;
6192 	dma_addr_t dma_addr;
6193 
6194 	for (i = 0; i < size; i++) {
6195 		if (extend_desc) {
6196 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6197 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6198 				   i, &dma_addr,
6199 				   le32_to_cpu(ep->basic.des0),
6200 				   le32_to_cpu(ep->basic.des1),
6201 				   le32_to_cpu(ep->basic.des2),
6202 				   le32_to_cpu(ep->basic.des3));
6203 			ep++;
6204 		} else {
6205 			dma_addr = dma_phy_addr + i * sizeof(*p);
6206 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6207 				   i, &dma_addr,
6208 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6209 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6210 			p++;
6211 		}
6212 		seq_printf(seq, "\n");
6213 	}
6214 }
6215 
6216 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6217 {
6218 	struct net_device *dev = seq->private;
6219 	struct stmmac_priv *priv = netdev_priv(dev);
6220 	u32 rx_count = priv->plat->rx_queues_to_use;
6221 	u32 tx_count = priv->plat->tx_queues_to_use;
6222 	u32 queue;
6223 
6224 	if ((dev->flags & IFF_UP) == 0)
6225 		return 0;
6226 
6227 	for (queue = 0; queue < rx_count; queue++) {
6228 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6229 
6230 		seq_printf(seq, "RX Queue %d:\n", queue);
6231 
6232 		if (priv->extend_desc) {
6233 			seq_printf(seq, "Extended descriptor ring:\n");
6234 			sysfs_display_ring((void *)rx_q->dma_erx,
6235 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6236 		} else {
6237 			seq_printf(seq, "Descriptor ring:\n");
6238 			sysfs_display_ring((void *)rx_q->dma_rx,
6239 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6240 		}
6241 	}
6242 
6243 	for (queue = 0; queue < tx_count; queue++) {
6244 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6245 
6246 		seq_printf(seq, "TX Queue %d:\n", queue);
6247 
6248 		if (priv->extend_desc) {
6249 			seq_printf(seq, "Extended descriptor ring:\n");
6250 			sysfs_display_ring((void *)tx_q->dma_etx,
6251 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6252 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6253 			seq_printf(seq, "Descriptor ring:\n");
6254 			sysfs_display_ring((void *)tx_q->dma_tx,
6255 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6256 		}
6257 	}
6258 
6259 	return 0;
6260 }
6261 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6262 
6263 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6264 {
6265 	static const char * const dwxgmac_timestamp_source[] = {
6266 		"None",
6267 		"Internal",
6268 		"External",
6269 		"Both",
6270 	};
6271 	static const char * const dwxgmac_safety_feature_desc[] = {
6272 		"No",
6273 		"All Safety Features with ECC and Parity",
6274 		"All Safety Features without ECC or Parity",
6275 		"All Safety Features with Parity Only",
6276 		"ECC Only",
6277 		"UNDEFINED",
6278 		"UNDEFINED",
6279 		"UNDEFINED",
6280 	};
6281 	struct net_device *dev = seq->private;
6282 	struct stmmac_priv *priv = netdev_priv(dev);
6283 
6284 	if (!priv->hw_cap_support) {
6285 		seq_printf(seq, "DMA HW features not supported\n");
6286 		return 0;
6287 	}
6288 
6289 	seq_printf(seq, "==============================\n");
6290 	seq_printf(seq, "\tDMA HW features\n");
6291 	seq_printf(seq, "==============================\n");
6292 
6293 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6294 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6295 	seq_printf(seq, "\t1000 Mbps: %s\n",
6296 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6297 	seq_printf(seq, "\tHalf duplex: %s\n",
6298 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6299 	if (priv->plat->has_xgmac) {
6300 		seq_printf(seq,
6301 			   "\tNumber of Additional MAC address registers: %d\n",
6302 			   priv->dma_cap.multi_addr);
6303 	} else {
6304 		seq_printf(seq, "\tHash Filter: %s\n",
6305 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6306 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6307 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6308 	}
6309 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6310 		   (priv->dma_cap.pcs) ? "Y" : "N");
6311 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6312 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6313 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6314 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6315 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6316 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6317 	seq_printf(seq, "\tRMON module: %s\n",
6318 		   (priv->dma_cap.rmon) ? "Y" : "N");
6319 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6320 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6321 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6322 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6323 	if (priv->plat->has_xgmac)
6324 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6325 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6326 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6327 		   (priv->dma_cap.eee) ? "Y" : "N");
6328 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6329 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6330 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6331 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6332 	    priv->plat->has_xgmac) {
6333 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6334 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6335 	} else {
6336 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6337 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6338 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6339 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6340 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6341 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6342 	}
6343 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6344 		   priv->dma_cap.number_rx_channel);
6345 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6346 		   priv->dma_cap.number_tx_channel);
6347 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6348 		   priv->dma_cap.number_rx_queues);
6349 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6350 		   priv->dma_cap.number_tx_queues);
6351 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6352 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6353 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6354 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6355 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6356 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6357 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6358 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6359 		   priv->dma_cap.pps_out_num);
6360 	seq_printf(seq, "\tSafety Features: %s\n",
6361 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6362 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6363 		   priv->dma_cap.frpsel ? "Y" : "N");
6364 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6365 		   priv->dma_cap.host_dma_width);
6366 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6367 		   priv->dma_cap.rssen ? "Y" : "N");
6368 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6369 		   priv->dma_cap.vlhash ? "Y" : "N");
6370 	seq_printf(seq, "\tSplit Header: %s\n",
6371 		   priv->dma_cap.sphen ? "Y" : "N");
6372 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6373 		   priv->dma_cap.vlins ? "Y" : "N");
6374 	seq_printf(seq, "\tDouble VLAN: %s\n",
6375 		   priv->dma_cap.dvlan ? "Y" : "N");
6376 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6377 		   priv->dma_cap.l3l4fnum);
6378 	seq_printf(seq, "\tARP Offloading: %s\n",
6379 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6380 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6381 		   priv->dma_cap.estsel ? "Y" : "N");
6382 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6383 		   priv->dma_cap.fpesel ? "Y" : "N");
6384 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6385 		   priv->dma_cap.tbssel ? "Y" : "N");
6386 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6387 		   priv->dma_cap.tbs_ch_num);
6388 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6389 		   priv->dma_cap.sgfsel ? "Y" : "N");
6390 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6391 		   BIT(priv->dma_cap.ttsfd) >> 1);
6392 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6393 		   priv->dma_cap.numtc);
6394 	seq_printf(seq, "\tDCB Feature: %s\n",
6395 		   priv->dma_cap.dcben ? "Y" : "N");
6396 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6397 		   priv->dma_cap.advthword ? "Y" : "N");
6398 	seq_printf(seq, "\tPTP Offload: %s\n",
6399 		   priv->dma_cap.ptoen ? "Y" : "N");
6400 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6401 		   priv->dma_cap.osten ? "Y" : "N");
6402 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6403 		   priv->dma_cap.pfcen ? "Y" : "N");
6404 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6405 		   BIT(priv->dma_cap.frpes) << 6);
6406 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6407 		   BIT(priv->dma_cap.frpbs) << 6);
6408 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6409 		   priv->dma_cap.frppipe_num);
6410 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6411 		   priv->dma_cap.nrvf_num ?
6412 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6413 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6414 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6415 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6416 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6417 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6418 		   priv->dma_cap.cbtisel ? "Y" : "N");
6419 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6420 		   priv->dma_cap.aux_snapshot_n);
6421 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6422 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6423 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6424 		   priv->dma_cap.edma ? "Y" : "N");
6425 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6426 		   priv->dma_cap.ediffc ? "Y" : "N");
6427 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6428 		   priv->dma_cap.vxn ? "Y" : "N");
6429 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6430 		   priv->dma_cap.dbgmem ? "Y" : "N");
6431 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6432 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6433 	return 0;
6434 }
6435 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6436 
6437 /* Use network device events to rename debugfs file entries. */
6439 static int stmmac_device_event(struct notifier_block *unused,
6440 			       unsigned long event, void *ptr)
6441 {
6442 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6443 	struct stmmac_priv *priv = netdev_priv(dev);
6444 
6445 	if (dev->netdev_ops != &stmmac_netdev_ops)
6446 		goto done;
6447 
6448 	switch (event) {
6449 	case NETDEV_CHANGENAME:
6450 		if (priv->dbgfs_dir)
6451 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6452 							 priv->dbgfs_dir,
6453 							 stmmac_fs_dir,
6454 							 dev->name);
6455 		break;
6456 	}
6457 done:
6458 	return NOTIFY_DONE;
6459 }
6460 
6461 static struct notifier_block stmmac_notifier = {
6462 	.notifier_call = stmmac_device_event,
6463 };
6464 
6465 static void stmmac_init_fs(struct net_device *dev)
6466 {
6467 	struct stmmac_priv *priv = netdev_priv(dev);
6468 
6469 	rtnl_lock();
6470 
6471 	/* Create per netdev entries */
6472 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6473 
6474 	/* Entry to report DMA RX/TX rings */
6475 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6476 			    &stmmac_rings_status_fops);
6477 
6478 	/* Entry to report the DMA HW features */
6479 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6480 			    &stmmac_dma_cap_fops);
6481 
6482 	rtnl_unlock();
6483 }
6484 
6485 static void stmmac_exit_fs(struct net_device *dev)
6486 {
6487 	struct stmmac_priv *priv = netdev_priv(dev);
6488 
6489 	debugfs_remove_recursive(priv->dbgfs_dir);
6490 }
6491 #endif /* CONFIG_DEBUG_FS */
6492 
6493 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6494 {
6495 	unsigned char *data = (unsigned char *)&vid_le;
6496 	unsigned char data_byte = 0;
6497 	u32 crc = ~0x0;
6498 	u32 temp = 0;
6499 	int i, bits;
6500 
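	/* Bitwise CRC-32 (reflected polynomial 0xEDB88320) over the 12 VID
	 * bits of the little-endian tag.
	 */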
6501 	bits = get_bitmask_order(VLAN_VID_MASK);
6502 	for (i = 0; i < bits; i++) {
6503 		if ((i % 8) == 0)
6504 			data_byte = data[i / 8];
6505 
6506 		temp = ((crc & 1) ^ data_byte) & 1;
6507 		crc >>= 1;
6508 		data_byte >>= 1;
6509 
6510 		if (temp)
6511 			crc ^= 0xedb88320;
6512 	}
6513 
6514 	return crc;
6515 }
6516 
6517 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6518 {
6519 	u32 crc, hash = 0;
6520 	__le16 pmatch = 0;
6521 	int count = 0;
6522 	u16 vid = 0;
6523 
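	/* Build the 16-bit VLAN hash filter: the 4-bit reversed CRC of each
	 * active VID selects one bit in the hash.
	 */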
6524 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6525 		__le16 vid_le = cpu_to_le16(vid);
6526 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6527 		hash |= (1 << crc);
6528 		count++;
6529 	}
6530 
6531 	if (!priv->dma_cap.vlhash) {
6532 		if (count > 2) /* VID = 0 always passes filter */
6533 			return -EOPNOTSUPP;
6534 
6535 		pmatch = cpu_to_le16(vid);
6536 		hash = 0;
6537 	}
6538 
6539 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6540 }
6541 
6542 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6543 {
6544 	struct stmmac_priv *priv = netdev_priv(ndev);
6545 	bool is_double = false;
6546 	int ret;
6547 
6548 	ret = pm_runtime_resume_and_get(priv->device);
6549 	if (ret < 0)
6550 		return ret;
6551 
6552 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6553 		is_double = true;
6554 
6555 	set_bit(vid, priv->active_vlans);
6556 	ret = stmmac_vlan_update(priv, is_double);
6557 	if (ret) {
6558 		clear_bit(vid, priv->active_vlans);
6559 		goto err_pm_put;
6560 	}
6561 
6562 	if (priv->hw->num_vlan) {
6563 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6564 		if (ret)
6565 			goto err_pm_put;
6566 	}
6567 err_pm_put:
6568 	pm_runtime_put(priv->device);
6569 
6570 	return ret;
6571 }
6572 
6573 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6574 {
6575 	struct stmmac_priv *priv = netdev_priv(ndev);
6576 	bool is_double = false;
6577 	int ret;
6578 
6579 	ret = pm_runtime_resume_and_get(priv->device);
6580 	if (ret < 0)
6581 		return ret;
6582 
6583 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6584 		is_double = true;
6585 
6586 	clear_bit(vid, priv->active_vlans);
6587 
6588 	if (priv->hw->num_vlan) {
6589 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6590 		if (ret)
6591 			goto del_vlan_error;
6592 	}
6593 
6594 	ret = stmmac_vlan_update(priv, is_double);
6595 
6596 del_vlan_error:
6597 	pm_runtime_put(priv->device);
6598 
6599 	return ret;
6600 }
6601 
6602 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6603 {
6604 	struct stmmac_priv *priv = netdev_priv(dev);
6605 
6606 	switch (bpf->command) {
6607 	case XDP_SETUP_PROG:
6608 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6609 	case XDP_SETUP_XSK_POOL:
6610 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6611 					     bpf->xsk.queue_id);
6612 	default:
6613 		return -EOPNOTSUPP;
6614 	}
6615 }
6616 
6617 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6618 			   struct xdp_frame **frames, u32 flags)
6619 {
6620 	struct stmmac_priv *priv = netdev_priv(dev);
6621 	int cpu = smp_processor_id();
6622 	struct netdev_queue *nq;
6623 	int i, nxmit = 0;
6624 	int queue;
6625 
6626 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6627 		return -ENETDOWN;
6628 
6629 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6630 		return -EINVAL;
6631 
6632 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6633 	nq = netdev_get_tx_queue(priv->dev, queue);
6634 
6635 	__netif_tx_lock(nq, cpu);
6636 	/* Avoid TX time-outs, as we are sharing the queue with the slow path */
6637 	txq_trans_cond_update(nq);
6638 
6639 	for (i = 0; i < num_frames; i++) {
6640 		int res;
6641 
6642 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6643 		if (res == STMMAC_XDP_CONSUMED)
6644 			break;
6645 
6646 		nxmit++;
6647 	}
6648 
6649 	if (flags & XDP_XMIT_FLUSH) {
6650 		stmmac_flush_tx_descriptors(priv, queue);
6651 		stmmac_tx_timer_arm(priv, queue);
6652 	}
6653 
6654 	__netif_tx_unlock(nq);
6655 
6656 	return nxmit;
6657 }
6658 
6659 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6660 {
6661 	struct stmmac_channel *ch = &priv->channel[queue];
6662 	unsigned long flags;
6663 
6664 	spin_lock_irqsave(&ch->lock, flags);
6665 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6666 	spin_unlock_irqrestore(&ch->lock, flags);
6667 
6668 	stmmac_stop_rx_dma(priv, queue);
6669 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6670 }
6671 
6672 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6673 {
6674 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6675 	struct stmmac_channel *ch = &priv->channel[queue];
6676 	unsigned long flags;
6677 	u32 buf_size;
6678 	int ret;
6679 
6680 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6681 	if (ret) {
6682 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6683 		return;
6684 	}
6685 
6686 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6687 	if (ret) {
6688 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6689 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6690 		return;
6691 	}
6692 
6693 	stmmac_reset_rx_queue(priv, queue);
6694 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6695 
6696 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6697 			    rx_q->dma_rx_phy, rx_q->queue_index);
6698 
6699 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6700 			     sizeof(struct dma_desc));
6701 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6702 			       rx_q->rx_tail_addr, rx_q->queue_index);
6703 
6704 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6705 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6706 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6707 				      buf_size,
6708 				      rx_q->queue_index);
6709 	} else {
6710 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6711 				      priv->dma_conf.dma_buf_sz,
6712 				      rx_q->queue_index);
6713 	}
6714 
6715 	stmmac_start_rx_dma(priv, queue);
6716 
6717 	spin_lock_irqsave(&ch->lock, flags);
6718 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6719 	spin_unlock_irqrestore(&ch->lock, flags);
6720 }
6721 
6722 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6723 {
6724 	struct stmmac_channel *ch = &priv->channel[queue];
6725 	unsigned long flags;
6726 
6727 	spin_lock_irqsave(&ch->lock, flags);
6728 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6729 	spin_unlock_irqrestore(&ch->lock, flags);
6730 
6731 	stmmac_stop_tx_dma(priv, queue);
6732 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6733 }
6734 
6735 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6736 {
6737 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6738 	struct stmmac_channel *ch = &priv->channel[queue];
6739 	unsigned long flags;
6740 	int ret;
6741 
6742 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6743 	if (ret) {
6744 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6745 		return;
6746 	}
6747 
6748 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6749 	if (ret) {
6750 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6751 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6752 		return;
6753 	}
6754 
6755 	stmmac_reset_tx_queue(priv, queue);
6756 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6757 
6758 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6759 			    tx_q->dma_tx_phy, tx_q->queue_index);
6760 
6761 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6762 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6763 
6764 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6765 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6766 			       tx_q->tx_tail_addr, tx_q->queue_index);
6767 
6768 	stmmac_start_tx_dma(priv, queue);
6769 
6770 	spin_lock_irqsave(&ch->lock, flags);
6771 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6772 	spin_unlock_irqrestore(&ch->lock, flags);
6773 }
6774 
6775 void stmmac_xdp_release(struct net_device *dev)
6776 {
6777 	struct stmmac_priv *priv = netdev_priv(dev);
6778 	u32 chan;
6779 
6780 	/* Ensure tx function is not running */
6781 	netif_tx_disable(dev);
6782 
6783 	/* Disable NAPI process */
6784 	stmmac_disable_all_queues(priv);
6785 
6786 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6787 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6788 
6789 	/* Free the IRQ lines */
6790 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6791 
6792 	/* Stop TX/RX DMA channels */
6793 	stmmac_stop_all_dma(priv);
6794 
6795 	/* Release and free the Rx/Tx resources */
6796 	free_dma_desc_resources(priv, &priv->dma_conf);
6797 
6798 	/* Disable the MAC Rx/Tx */
6799 	stmmac_mac_set(priv, priv->ioaddr, false);
6800 
6801 	/* set trans_start so we don't get spurious
6802 	 * watchdogs during reset
6803 	 */
6804 	netif_trans_update(dev);
6805 	netif_carrier_off(dev);
6806 }
6807 
6808 int stmmac_xdp_open(struct net_device *dev)
6809 {
6810 	struct stmmac_priv *priv = netdev_priv(dev);
6811 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6812 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6813 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6814 	struct stmmac_rx_queue *rx_q;
6815 	struct stmmac_tx_queue *tx_q;
6816 	u32 buf_size;
6817 	bool sph_en;
6818 	u32 chan;
6819 	int ret;
6820 
6821 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6822 	if (ret < 0) {
6823 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6824 			   __func__);
6825 		goto dma_desc_error;
6826 	}
6827 
6828 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6829 	if (ret < 0) {
6830 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6831 			   __func__);
6832 		goto init_error;
6833 	}
6834 
6835 	stmmac_reset_queues_param(priv);
6836 
6837 	/* DMA CSR Channel configuration */
6838 	for (chan = 0; chan < dma_csr_ch; chan++) {
6839 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6840 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6841 	}
6842 
6843 	/* Adjust Split header */
6844 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6845 
6846 	/* DMA RX Channel Configuration */
6847 	for (chan = 0; chan < rx_cnt; chan++) {
6848 		rx_q = &priv->dma_conf.rx_queue[chan];
6849 
6850 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6851 				    rx_q->dma_rx_phy, chan);
6852 
6853 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6854 				     (rx_q->buf_alloc_num *
6855 				      sizeof(struct dma_desc));
6856 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6857 				       rx_q->rx_tail_addr, chan);
6858 
6859 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6860 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6861 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6862 					      buf_size,
6863 					      rx_q->queue_index);
6864 		} else {
6865 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6866 					      priv->dma_conf.dma_buf_sz,
6867 					      rx_q->queue_index);
6868 		}
6869 
6870 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6871 	}
6872 
6873 	/* DMA TX Channel Configuration */
6874 	for (chan = 0; chan < tx_cnt; chan++) {
6875 		tx_q = &priv->dma_conf.tx_queue[chan];
6876 
6877 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6878 				    tx_q->dma_tx_phy, chan);
6879 
6880 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6881 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6882 				       tx_q->tx_tail_addr, chan);
6883 
6884 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6885 		tx_q->txtimer.function = stmmac_tx_timer;
6886 	}
6887 
6888 	/* Enable the MAC Rx/Tx */
6889 	stmmac_mac_set(priv, priv->ioaddr, true);
6890 
6891 	/* Start Rx & Tx DMA Channels */
6892 	stmmac_start_all_dma(priv);
6893 
6894 	ret = stmmac_request_irq(dev);
6895 	if (ret)
6896 		goto irq_error;
6897 
6898 	/* Enable NAPI process */
6899 	stmmac_enable_all_queues(priv);
6900 	netif_carrier_on(dev);
6901 	netif_tx_start_all_queues(dev);
6902 	stmmac_enable_all_dma_irq(priv);
6903 
6904 	return 0;
6905 
6906 irq_error:
6907 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6908 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6909 
6910 	stmmac_hw_teardown(dev);
6911 init_error:
6912 	free_dma_desc_resources(priv, &priv->dma_conf);
6913 dma_desc_error:
6914 	return ret;
6915 }
6916 
6917 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6918 {
6919 	struct stmmac_priv *priv = netdev_priv(dev);
6920 	struct stmmac_rx_queue *rx_q;
6921 	struct stmmac_tx_queue *tx_q;
6922 	struct stmmac_channel *ch;
6923 
6924 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6925 	    !netif_carrier_ok(priv->dev))
6926 		return -ENETDOWN;
6927 
6928 	if (!stmmac_xdp_is_enabled(priv))
6929 		return -EINVAL;
6930 
6931 	if (queue >= priv->plat->rx_queues_to_use ||
6932 	    queue >= priv->plat->tx_queues_to_use)
6933 		return -EINVAL;
6934 
6935 	rx_q = &priv->dma_conf.rx_queue[queue];
6936 	tx_q = &priv->dma_conf.tx_queue[queue];
6937 	ch = &priv->channel[queue];
6938 
6939 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6940 		return -EINVAL;
6941 
6942 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6943 		/* EQoS does not have per-DMA channel SW interrupt,
6944 		 * so we schedule NAPI straight away.
6945 		 */
6946 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6947 			__napi_schedule(&ch->rxtx_napi);
6948 	}
6949 
6950 	return 0;
6951 }
6952 
6953 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6954 {
6955 	struct stmmac_priv *priv = netdev_priv(dev);
6956 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6957 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6958 	unsigned int start;
6959 	int q;
6960 
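	/* Fold the per-queue counters into the netdev stats; the u64_stats
	 * seqcount retry loop gives consistent 64-bit reads on 32-bit hosts.
	 */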
6961 	for (q = 0; q < tx_cnt; q++) {
6962 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6963 		u64 tx_packets;
6964 		u64 tx_bytes;
6965 
6966 		do {
6967 			start = u64_stats_fetch_begin(&txq_stats->syncp);
6968 			tx_packets = txq_stats->tx_packets;
6969 			tx_bytes   = txq_stats->tx_bytes;
6970 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6971 
6972 		stats->tx_packets += tx_packets;
6973 		stats->tx_bytes += tx_bytes;
6974 	}
6975 
6976 	for (q = 0; q < rx_cnt; q++) {
6977 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6978 		u64 rx_packets;
6979 		u64 rx_bytes;
6980 
6981 		do {
6982 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
6983 			rx_packets = rxq_stats->rx_packets;
6984 			rx_bytes   = rxq_stats->rx_bytes;
6985 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6986 
6987 		stats->rx_packets += rx_packets;
6988 		stats->rx_bytes += rx_bytes;
6989 	}
6990 
6991 	stats->rx_dropped = priv->xstats.rx_dropped;
6992 	stats->rx_errors = priv->xstats.rx_errors;
6993 	stats->tx_dropped = priv->xstats.tx_dropped;
6994 	stats->tx_errors = priv->xstats.tx_errors;
6995 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6996 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6997 	stats->rx_length_errors = priv->xstats.rx_length;
6998 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6999 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7000 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7001 }
7002 
7003 static const struct net_device_ops stmmac_netdev_ops = {
7004 	.ndo_open = stmmac_open,
7005 	.ndo_start_xmit = stmmac_xmit,
7006 	.ndo_stop = stmmac_release,
7007 	.ndo_change_mtu = stmmac_change_mtu,
7008 	.ndo_fix_features = stmmac_fix_features,
7009 	.ndo_set_features = stmmac_set_features,
7010 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7011 	.ndo_tx_timeout = stmmac_tx_timeout,
7012 	.ndo_eth_ioctl = stmmac_ioctl,
7013 	.ndo_get_stats64 = stmmac_get_stats64,
7014 	.ndo_setup_tc = stmmac_setup_tc,
7015 	.ndo_select_queue = stmmac_select_queue,
7016 	.ndo_set_mac_address = stmmac_set_mac_address,
7017 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7018 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7019 	.ndo_bpf = stmmac_bpf,
7020 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7021 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7022 };
7023 
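/**
 * stmmac_reset_subtask - reset the adapter from the service task
 * @priv: driver private structure
 * Description: if a reset has been requested and the interface is not
 * already down, close and re-open the device under the rtnl lock to
 * recover the adapter.
 */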
7024 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7025 {
7026 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7027 		return;
7028 	if (test_bit(STMMAC_DOWN, &priv->state))
7029 		return;
7030 
7031 	netdev_err(priv->dev, "Reset adapter.\n");
7032 
7033 	rtnl_lock();
7034 	netif_trans_update(priv->dev);
7035 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7036 		usleep_range(1000, 2000);
7037 
7038 	set_bit(STMMAC_DOWN, &priv->state);
7039 	dev_close(priv->dev);
7040 	dev_open(priv->dev, NULL);
7041 	clear_bit(STMMAC_DOWN, &priv->state);
7042 	clear_bit(STMMAC_RESETING, &priv->state);
7043 	rtnl_unlock();
7044 }
7045 
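/**
 * stmmac_service_task - deferred work handler
 * @work: work_struct embedded in the driver private structure
 * Description: runs the reset subtask and clears the service-scheduled flag.
 */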
7046 static void stmmac_service_task(struct work_struct *work)
7047 {
7048 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7049 			service_task);
7050 
7051 	stmmac_reset_subtask(priv);
7052 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7053 }
7054 
7055 /**
7056  *  stmmac_hw_init - Init the MAC device
7057  *  @priv: driver private structure
 *  Description: this function configures the MAC device according to the
 *  platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain mode and to set up either enhanced or
 *  normal descriptors.
7062  */
7063 static int stmmac_hw_init(struct stmmac_priv *priv)
7064 {
7065 	int ret;
7066 
	/* dwmac-sun8i only works in chain mode */
7068 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7069 		chain_mode = 1;
7070 	priv->chain_mode = chain_mode;
7071 
7072 	/* Initialize HW Interface */
7073 	ret = stmmac_hwif_init(priv);
7074 	if (ret)
7075 		return ret;
7076 
	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7078 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7079 	if (priv->hw_cap_support) {
7080 		dev_info(priv->device, "DMA HW capability register supported\n");
7081 
		/* Some GMAC/DMA configuration fields passed through the
		 * platform data (e.g. enh_desc, tx_coe) can be overridden
		 * with the values from the HW capability register, if
		 * supported.
		 */
7087 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7088 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7089 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7090 		priv->hw->pmt = priv->plat->pmt;
7091 		if (priv->dma_cap.hash_tb_sz) {
7092 			priv->hw->multicast_filter_bins =
7093 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7094 			priv->hw->mcast_bits_log2 =
7095 					ilog2(priv->hw->multicast_filter_bins);
7096 		}
7097 
7098 		/* TXCOE doesn't work in thresh DMA mode */
7099 		if (priv->plat->force_thresh_dma_mode)
7100 			priv->plat->tx_coe = 0;
7101 		else
7102 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7103 
		/* For GMAC4, rx_coe comes from the HW capability register. */
7105 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7106 
7107 		if (priv->dma_cap.rx_coe_type2)
7108 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7109 		else if (priv->dma_cap.rx_coe_type1)
7110 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7111 
7112 	} else {
7113 		dev_info(priv->device, "No HW DMA feature register supported\n");
7114 	}
7115 
7116 	if (priv->plat->rx_coe) {
7117 		priv->hw->rx_csum = priv->plat->rx_coe;
7118 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7119 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7120 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7121 	}
7122 	if (priv->plat->tx_coe)
7123 		dev_info(priv->device, "TX Checksum insertion supported\n");
7124 
7125 	if (priv->plat->pmt) {
7126 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7127 		device_set_wakeup_capable(priv->device, 1);
7128 	}
7129 
7130 	if (priv->dma_cap.tsoen)
7131 		dev_info(priv->device, "TSO supported\n");
7132 
7133 	priv->hw->vlan_fail_q_en =
7134 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7135 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7136 
7137 	/* Run HW quirks, if any */
7138 	if (priv->hwif_quirks) {
7139 		ret = priv->hwif_quirks(priv);
7140 		if (ret)
7141 			return ret;
7142 	}
7143 
	/* The Rx Watchdog is available in cores newer than 3.40.
	 * In some cases, for example on buggy HW, this feature has
	 * to be disabled; this can be done by setting the riwt_off
	 * field in the platform data.
	 */
7149 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7150 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7151 		priv->use_riwt = 1;
7152 		dev_info(priv->device,
7153 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7154 	}
7155 
7156 	return 0;
7157 }
7158 
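/**
 * stmmac_napi_add - register the NAPI instances of every channel
 * @dev: network device pointer
 * Description: adds an RX NAPI, a TX NAPI and, when a channel has both an
 * RX and a TX queue, a combined rxtx NAPI (used for the XSK zero-copy path).
 */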
7159 static void stmmac_napi_add(struct net_device *dev)
7160 {
7161 	struct stmmac_priv *priv = netdev_priv(dev);
7162 	u32 queue, maxq;
7163 
7164 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7165 
7166 	for (queue = 0; queue < maxq; queue++) {
7167 		struct stmmac_channel *ch = &priv->channel[queue];
7168 
7169 		ch->priv_data = priv;
7170 		ch->index = queue;
7171 		spin_lock_init(&ch->lock);
7172 
7173 		if (queue < priv->plat->rx_queues_to_use) {
7174 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7175 		}
7176 		if (queue < priv->plat->tx_queues_to_use) {
7177 			netif_napi_add_tx(dev, &ch->tx_napi,
7178 					  stmmac_napi_poll_tx);
7179 		}
7180 		if (queue < priv->plat->rx_queues_to_use &&
7181 		    queue < priv->plat->tx_queues_to_use) {
7182 			netif_napi_add(dev, &ch->rxtx_napi,
7183 				       stmmac_napi_poll_rxtx);
7184 		}
7185 	}
7186 }
7187 
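/**
 * stmmac_napi_del - unregister the NAPI instances of every channel
 * @dev: network device pointer
 */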
7188 static void stmmac_napi_del(struct net_device *dev)
7189 {
7190 	struct stmmac_priv *priv = netdev_priv(dev);
7191 	u32 queue, maxq;
7192 
7193 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7194 
7195 	for (queue = 0; queue < maxq; queue++) {
7196 		struct stmmac_channel *ch = &priv->channel[queue];
7197 
7198 		if (queue < priv->plat->rx_queues_to_use)
7199 			netif_napi_del(&ch->rx_napi);
7200 		if (queue < priv->plat->tx_queues_to_use)
7201 			netif_napi_del(&ch->tx_napi);
7202 		if (queue < priv->plat->rx_queues_to_use &&
7203 		    queue < priv->plat->tx_queues_to_use) {
7204 			netif_napi_del(&ch->rxtx_napi);
7205 		}
7206 	}
7207 }
7208 
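/**
 * stmmac_reinit_queues - reconfigure the number of RX/TX queues
 * @dev: network device pointer
 * @rx_cnt: number of RX queues to use
 * @tx_cnt: number of TX queues to use
 * Description: closes the interface if it is running, re-creates the NAPI
 * instances and the default RSS table for the new queue counts, and then
 * re-opens the interface.
 * Return: 0 on success, otherwise a negative errno.
 */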
7209 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7210 {
7211 	struct stmmac_priv *priv = netdev_priv(dev);
7212 	int ret = 0, i;
7213 
7214 	if (netif_running(dev))
7215 		stmmac_release(dev);
7216 
7217 	stmmac_napi_del(dev);
7218 
7219 	priv->plat->rx_queues_to_use = rx_cnt;
7220 	priv->plat->tx_queues_to_use = tx_cnt;
7221 	if (!netif_is_rxfh_configured(dev))
7222 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7223 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7224 									rx_cnt);
7225 
7226 	stmmac_set_half_duplex(priv);
7227 	stmmac_napi_add(dev);
7228 
7229 	if (netif_running(dev))
7230 		ret = stmmac_open(dev);
7231 
7232 	return ret;
7233 }
7234 
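/**
 * stmmac_reinit_ringparam - reconfigure the RX/TX ring sizes
 * @dev: network device pointer
 * @rx_size: new number of RX descriptors per ring
 * @tx_size: new number of TX descriptors per ring
 * Description: closes the interface if it is running, updates the DMA ring
 * sizes and re-opens the interface.
 * Return: 0 on success, otherwise a negative errno.
 */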
7235 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7236 {
7237 	struct stmmac_priv *priv = netdev_priv(dev);
7238 	int ret = 0;
7239 
7240 	if (netif_running(dev))
7241 		stmmac_release(dev);
7242 
7243 	priv->dma_conf.dma_rx_size = rx_size;
7244 	priv->dma_conf.dma_tx_size = tx_size;
7245 
7246 	if (netif_running(dev))
7247 		ret = stmmac_open(dev);
7248 
7249 	return ret;
7250 }
7251 
#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
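/**
 * stmmac_fpe_lp_task - FPE link-partner handshake worker
 * @work: work_struct embedded in the driver private structure
 * Description: polls the local and link-partner FPE states and, once both
 * sides are entering the ON state, programs the hardware FPE configuration.
 * While the partner is not yet ON, verify mPackets are retransmitted, with
 * a bounded number of retries.
 */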
7253 static void stmmac_fpe_lp_task(struct work_struct *work)
7254 {
7255 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7256 						fpe_task);
7257 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7258 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7259 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7260 	bool *hs_enable = &fpe_cfg->hs_enable;
7261 	bool *enable = &fpe_cfg->enable;
7262 	int retries = 20;
7263 
7264 	while (retries-- > 0) {
7265 		/* Bail out immediately if FPE handshake is OFF */
7266 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7267 			break;
7268 
7269 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7270 		    *lp_state == FPE_STATE_ENTERING_ON) {
7271 			stmmac_fpe_configure(priv, priv->ioaddr,
7272 					     fpe_cfg,
7273 					     priv->plat->tx_queues_to_use,
7274 					     priv->plat->rx_queues_to_use,
7275 					     *enable);
7276 
7277 			netdev_info(priv->dev, "configured FPE\n");
7278 
7279 			*lo_state = FPE_STATE_ON;
7280 			*lp_state = FPE_STATE_ON;
7281 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7282 			break;
7283 		}
7284 
7285 		if ((*lo_state == FPE_STATE_CAPABLE ||
7286 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7287 		     *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7289 				    *lo_state, *lp_state);
7290 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7291 						fpe_cfg,
7292 						MPACKET_VERIFY);
7293 		}
7294 		/* Sleep then retry */
7295 		msleep(500);
7296 	}
7297 
7298 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7299 }
7300 
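/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to stop it
 * Description: when enabling, a verify mPacket is sent to the link partner;
 * when disabling, the local and link-partner FPE states are reset to OFF.
 */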
7301 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7302 {
7303 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7304 		if (enable) {
7305 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7306 						priv->plat->fpe_cfg,
7307 						MPACKET_VERIFY);
7308 		} else {
7309 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7310 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7311 		}
7312 
7313 		priv->plat->fpe_cfg->hs_enable = enable;
7314 	}
7315 }
7316 
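/**
 * stmmac_xdp_rx_timestamp - XDP RX hardware timestamp hint
 * @_ctx: XDP metadata context (struct stmmac_xdp_buff)
 * @timestamp: filled with the RX hardware timestamp, adjusted for the CDC
 * error, when available
 * Return: 0 on success, -ENODATA if RX timestamping is disabled or no
 * timestamp is available for this descriptor.
 */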
7317 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7318 {
7319 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7320 	struct dma_desc *desc_contains_ts = ctx->desc;
7321 	struct stmmac_priv *priv = ctx->priv;
7322 	struct dma_desc *ndesc = ctx->ndesc;
7323 	struct dma_desc *desc = ctx->desc;
7324 	u64 ns = 0;
7325 
7326 	if (!priv->hwts_rx_en)
7327 		return -ENODATA;
7328 
7329 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7330 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7331 		desc_contains_ts = ndesc;
7332 
7333 	/* Check if timestamp is available */
7334 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7335 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7336 		ns -= priv->plat->cdc_error_adj;
7337 		*timestamp = ns_to_ktime(ns);
7338 		return 0;
7339 	}
7340 
7341 	return -ENODATA;
7342 }
7343 
7344 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7345 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7346 };
7347 
7348 /**
7349  * stmmac_dvr_probe
7350  * @device: device pointer
7351  * @plat_dat: platform data pointer
7352  * @res: stmmac resource pointer
 * Description: this is the main probe function: it allocates the net_device
 * and the driver private structure, initializes the HW and registers the
 * network device.
7355  * Return:
7356  * returns 0 on success, otherwise errno.
7357  */
7358 int stmmac_dvr_probe(struct device *device,
7359 		     struct plat_stmmacenet_data *plat_dat,
7360 		     struct stmmac_resources *res)
7361 {
7362 	struct net_device *ndev = NULL;
7363 	struct stmmac_priv *priv;
7364 	u32 rxq;
7365 	int i, ret = 0;
7366 
7367 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7368 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7369 	if (!ndev)
7370 		return -ENOMEM;
7371 
7372 	SET_NETDEV_DEV(ndev, device);
7373 
7374 	priv = netdev_priv(ndev);
7375 	priv->device = device;
7376 	priv->dev = ndev;
7377 
7378 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7379 		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
7380 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7381 		u64_stats_init(&priv->xstats.txq_stats[i].syncp);
7382 
7383 	stmmac_set_ethtool_ops(ndev);
7384 	priv->pause = pause;
7385 	priv->plat = plat_dat;
7386 	priv->ioaddr = res->addr;
7387 	priv->dev->base_addr = (unsigned long)res->addr;
7388 	priv->plat->dma_cfg->multi_msi_en =
7389 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7390 
7391 	priv->dev->irq = res->irq;
7392 	priv->wol_irq = res->wol_irq;
7393 	priv->lpi_irq = res->lpi_irq;
7394 	priv->sfty_ce_irq = res->sfty_ce_irq;
7395 	priv->sfty_ue_irq = res->sfty_ue_irq;
7396 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7397 		priv->rx_irq[i] = res->rx_irq[i];
7398 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7399 		priv->tx_irq[i] = res->tx_irq[i];
7400 
7401 	if (!is_zero_ether_addr(res->mac))
7402 		eth_hw_addr_set(priv->dev, res->mac);
7403 
7404 	dev_set_drvdata(device, priv->dev);
7405 
7406 	/* Verify driver arguments */
7407 	stmmac_verify_args();
7408 
7409 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7410 	if (!priv->af_xdp_zc_qps)
7411 		return -ENOMEM;
7412 
7413 	/* Allocate workqueue */
7414 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7415 	if (!priv->wq) {
7416 		dev_err(priv->device, "failed to create workqueue\n");
7417 		ret = -ENOMEM;
7418 		goto error_wq_init;
7419 	}
7420 
7421 	INIT_WORK(&priv->service_task, stmmac_service_task);
7422 
7423 	/* Initialize Link Partner FPE workqueue */
7424 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7425 
7426 	/* Override with kernel parameters if supplied XXX CRS XXX
7427 	 * this needs to have multiple instances
7428 	 */
7429 	if ((phyaddr >= 0) && (phyaddr <= 31))
7430 		priv->plat->phy_addr = phyaddr;
7431 
7432 	if (priv->plat->stmmac_rst) {
7433 		ret = reset_control_assert(priv->plat->stmmac_rst);
7434 		reset_control_deassert(priv->plat->stmmac_rst);
7435 		/* Some reset controllers have only reset callback instead of
7436 		 * assert + deassert callbacks pair.
7437 		 */
7438 		if (ret == -ENOTSUPP)
7439 			reset_control_reset(priv->plat->stmmac_rst);
7440 	}
7441 
7442 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7443 	if (ret == -ENOTSUPP)
7444 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7445 			ERR_PTR(ret));
7446 
7447 	/* Wait a bit for the reset to take effect */
7448 	udelay(10);
7449 
7450 	/* Init MAC and get the capabilities */
7451 	ret = stmmac_hw_init(priv);
7452 	if (ret)
7453 		goto error_hw_init;
7454 
7455 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7456 	 */
7457 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7458 		priv->plat->dma_cfg->dche = false;
7459 
7460 	stmmac_check_ether_addr(priv);
7461 
7462 	ndev->netdev_ops = &stmmac_netdev_ops;
7463 
7464 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7465 
7466 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7467 			    NETIF_F_RXCSUM;
7468 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7469 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7470 
7471 	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;
7475 
7476 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7477 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7478 		if (priv->plat->has_gmac4)
7479 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7480 		priv->tso = true;
7481 		dev_info(priv->device, "TSO feature enabled\n");
7482 	}
7483 
7484 	if (priv->dma_cap.sphen &&
7485 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7486 		ndev->hw_features |= NETIF_F_GRO;
7487 		priv->sph_cap = true;
7488 		priv->sph = priv->sph_cap;
7489 		dev_info(priv->device, "SPH feature enabled\n");
7490 	}
7491 
7492 	/* Ideally our host DMA address width is the same as for the
7493 	 * device. However, it may differ and then we have to use our
7494 	 * host DMA width for allocation and the device DMA width for
7495 	 * register handling.
7496 	 */
7497 	if (priv->plat->host_dma_width)
7498 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7499 	else
7500 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7501 
7502 	if (priv->dma_cap.host_dma_width) {
7503 		ret = dma_set_mask_and_coherent(device,
7504 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7505 		if (!ret) {
7506 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7507 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7508 
7509 			/*
7510 			 * If more than 32 bits can be addressed, make sure to
7511 			 * enable enhanced addressing mode.
7512 			 */
7513 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7514 				priv->plat->dma_cfg->eame = true;
7515 		} else {
7516 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7517 			if (ret) {
7518 				dev_err(priv->device, "Failed to set DMA Mask\n");
7519 				goto error_hw_init;
7520 			}
7521 
7522 			priv->dma_cap.host_dma_width = 32;
7523 		}
7524 	}
7525 
7526 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7527 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7528 #ifdef STMMAC_VLAN_TAG_USED
7529 	/* Both mac100 and gmac support receive VLAN tag detection */
7530 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7531 	if (priv->dma_cap.vlhash) {
7532 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7533 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7534 	}
7535 	if (priv->dma_cap.vlins) {
7536 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7537 		if (priv->dma_cap.dvlan)
7538 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7539 	}
7540 #endif
7541 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7542 
7543 	priv->xstats.threshold = tc;
7544 
7545 	/* Initialize RSS */
7546 	rxq = priv->plat->rx_queues_to_use;
7547 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7548 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7549 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7550 
7551 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7552 		ndev->features |= NETIF_F_RXHASH;
7553 
7554 	ndev->vlan_features |= ndev->features;
7555 	/* TSO doesn't work on VLANs yet */
7556 	ndev->vlan_features &= ~NETIF_F_TSO;
7557 
7558 	/* MTU range: 46 - hw-specific max */
7559 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7560 	if (priv->plat->has_xgmac)
7561 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7562 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7563 		ndev->max_mtu = JUMBO_LEN;
7564 	else
7565 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
	 */
7569 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7570 	    (priv->plat->maxmtu >= ndev->min_mtu))
7571 		ndev->max_mtu = priv->plat->maxmtu;
7572 	else if (priv->plat->maxmtu < ndev->min_mtu)
7573 		dev_warn(priv->device,
7574 			 "%s: warning: maxmtu having invalid value (%d)\n",
7575 			 __func__, priv->plat->maxmtu);
7576 
7577 	if (flow_ctrl)
7578 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7579 
7580 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7581 
7582 	/* Setup channels NAPI */
7583 	stmmac_napi_add(ndev);
7584 
7585 	mutex_init(&priv->lock);
7586 
	/* If a specific clk_csr value is passed from the platform, the
	 * CSR Clock Range selection is fixed and cannot be changed at
	 * run-time. Otherwise, the driver tries to set the MDC clock
	 * dynamically according to the actual CSR clock input.
	 */
7593 	if (priv->plat->clk_csr >= 0)
7594 		priv->clk_csr = priv->plat->clk_csr;
7595 	else
7596 		stmmac_clk_csr_set(priv);
7597 
7598 	stmmac_check_pcs_mode(priv);
7599 
7600 	pm_runtime_get_noresume(device);
7601 	pm_runtime_set_active(device);
7602 	if (!pm_runtime_enabled(device))
7603 		pm_runtime_enable(device);
7604 
7605 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7606 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7607 		/* MDIO bus Registration */
7608 		ret = stmmac_mdio_register(ndev);
7609 		if (ret < 0) {
7610 			dev_err_probe(priv->device, ret,
7611 				      "%s: MDIO bus (id: %d) registration failed\n",
7612 				      __func__, priv->plat->bus_id);
7613 			goto error_mdio_register;
7614 		}
7615 	}
7616 
7617 	if (priv->plat->speed_mode_2500)
7618 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7619 
7620 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7621 		ret = stmmac_xpcs_setup(priv->mii);
7622 		if (ret)
7623 			goto error_xpcs_setup;
7624 	}
7625 
7626 	ret = stmmac_phy_setup(priv);
7627 	if (ret) {
7628 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7629 		goto error_phy_setup;
7630 	}
7631 
7632 	ret = register_netdev(ndev);
7633 	if (ret) {
7634 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7635 			__func__, ret);
7636 		goto error_netdev_register;
7637 	}
7638 
7639 #ifdef CONFIG_DEBUG_FS
7640 	stmmac_init_fs(ndev);
7641 #endif
7642 
7643 	if (priv->plat->dump_debug_regs)
7644 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7645 
7646 	/* Let pm_runtime_put() disable the clocks.
7647 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7648 	 */
7649 	pm_runtime_put(device);
7650 
7651 	return ret;
7652 
7653 error_netdev_register:
7654 	phylink_destroy(priv->phylink);
7655 error_xpcs_setup:
7656 error_phy_setup:
7657 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7658 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7659 		stmmac_mdio_unregister(ndev);
7660 error_mdio_register:
7661 	stmmac_napi_del(ndev);
7662 error_hw_init:
7663 	destroy_workqueue(priv->wq);
7664 error_wq_init:
7665 	bitmap_free(priv->af_xdp_zc_qps);
7666 
7667 	return ret;
7668 }
7669 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7670 
7671 /**
7672  * stmmac_dvr_remove
7673  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
7676  */
7677 void stmmac_dvr_remove(struct device *dev)
7678 {
7679 	struct net_device *ndev = dev_get_drvdata(dev);
7680 	struct stmmac_priv *priv = netdev_priv(ndev);
7681 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7683 
7684 	pm_runtime_get_sync(dev);
7685 
7686 	stmmac_stop_all_dma(priv);
7687 	stmmac_mac_set(priv, priv->ioaddr, false);
7688 	netif_carrier_off(ndev);
7689 	unregister_netdev(ndev);
7690 
7691 #ifdef CONFIG_DEBUG_FS
7692 	stmmac_exit_fs(ndev);
7693 #endif
7694 	phylink_destroy(priv->phylink);
7695 	if (priv->plat->stmmac_rst)
7696 		reset_control_assert(priv->plat->stmmac_rst);
7697 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7698 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7699 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7700 		stmmac_mdio_unregister(ndev);
7701 	destroy_workqueue(priv->wq);
7702 	mutex_destroy(&priv->lock);
7703 	bitmap_free(priv->af_xdp_zc_qps);
7704 
7705 	pm_runtime_disable(dev);
7706 	pm_runtime_put_noidle(dev);
7707 }
7708 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7709 
7710 /**
7711  * stmmac_suspend - suspend callback
7712  * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queues, program the PMT register
 * (for WoL) and release the driver resources.
7716  */
7717 int stmmac_suspend(struct device *dev)
7718 {
7719 	struct net_device *ndev = dev_get_drvdata(dev);
7720 	struct stmmac_priv *priv = netdev_priv(ndev);
7721 	u32 chan;
7722 
7723 	if (!ndev || !netif_running(ndev))
7724 		return 0;
7725 
7726 	mutex_lock(&priv->lock);
7727 
7728 	netif_device_detach(ndev);
7729 
7730 	stmmac_disable_all_queues(priv);
7731 
7732 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7733 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7734 
7735 	if (priv->eee_enabled) {
7736 		priv->tx_path_in_lpi_mode = false;
7737 		del_timer_sync(&priv->eee_ctrl_timer);
7738 	}
7739 
7740 	/* Stop TX/RX DMA */
7741 	stmmac_stop_all_dma(priv);
7742 
7743 	if (priv->plat->serdes_powerdown)
7744 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7745 
7746 	/* Enable Power down mode by programming the PMT regs */
7747 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7748 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7749 		priv->irq_wake = 1;
7750 	} else {
7751 		stmmac_mac_set(priv, priv->ioaddr, false);
7752 		pinctrl_pm_select_sleep_state(priv->device);
7753 	}
7754 
7755 	mutex_unlock(&priv->lock);
7756 
7757 	rtnl_lock();
7758 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7759 		phylink_suspend(priv->phylink, true);
7760 	} else {
7761 		if (device_may_wakeup(priv->device))
7762 			phylink_speed_down(priv->phylink, false);
7763 		phylink_suspend(priv->phylink, false);
7764 	}
7765 	rtnl_unlock();
7766 
7767 	if (priv->dma_cap.fpesel) {
7768 		/* Disable FPE */
7769 		stmmac_fpe_configure(priv, priv->ioaddr,
7770 				     priv->plat->fpe_cfg,
7771 				     priv->plat->tx_queues_to_use,
7772 				     priv->plat->rx_queues_to_use, false);
7773 
7774 		stmmac_fpe_handshake(priv, false);
7775 		stmmac_fpe_stop_wq(priv);
7776 	}
7777 
7778 	priv->speed = SPEED_UNKNOWN;
7779 	return 0;
7780 }
7781 EXPORT_SYMBOL_GPL(stmmac_suspend);
7782 
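/**
 * stmmac_reset_rx_queue - reset the SW state of an RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 */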
7783 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7784 {
7785 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7786 
7787 	rx_q->cur_rx = 0;
7788 	rx_q->dirty_rx = 0;
7789 }
7790 
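/**
 * stmmac_reset_tx_queue - reset the SW state of a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 */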
7791 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7792 {
7793 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7794 
7795 	tx_q->cur_tx = 0;
7796 	tx_q->dirty_tx = 0;
7797 	tx_q->mss = 0;
7798 
7799 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7800 }
7801 
7802 /**
7803  * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
7805  */
7806 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7807 {
7808 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7809 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7810 	u32 queue;
7811 
7812 	for (queue = 0; queue < rx_cnt; queue++)
7813 		stmmac_reset_rx_queue(priv, queue);
7814 
7815 	for (queue = 0; queue < tx_cnt; queue++)
7816 		stmmac_reset_tx_queue(priv, queue);
7817 }
7818 
7819 /**
7820  * stmmac_resume - resume callback
7821  * @dev: device pointer
 * Description: on resume, this function is invoked to set up the DMA and the
 * core in a usable state.
7824  */
7825 int stmmac_resume(struct device *dev)
7826 {
7827 	struct net_device *ndev = dev_get_drvdata(dev);
7828 	struct stmmac_priv *priv = netdev_priv(ndev);
7829 	int ret;
7830 
7831 	if (!netif_running(ndev))
7832 		return 0;
7833 
	/* The Power Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Nevertheless, it is better to clear this bit manually because it
	 * can cause problems while resuming from other devices (e.g. a
	 * serial console).
	 */
7840 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7841 		mutex_lock(&priv->lock);
7842 		stmmac_pmt(priv, priv->hw, 0);
7843 		mutex_unlock(&priv->lock);
7844 		priv->irq_wake = 0;
7845 	} else {
7846 		pinctrl_pm_select_default_state(priv->device);
7847 		/* reset the phy so that it's ready */
7848 		if (priv->mii)
7849 			stmmac_mdio_reset(priv->mii);
7850 	}
7851 
7852 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7853 	    priv->plat->serdes_powerup) {
7854 		ret = priv->plat->serdes_powerup(ndev,
7855 						 priv->plat->bsp_priv);
7856 
7857 		if (ret < 0)
7858 			return ret;
7859 	}
7860 
7861 	rtnl_lock();
7862 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7863 		phylink_resume(priv->phylink);
7864 	} else {
7865 		phylink_resume(priv->phylink);
7866 		if (device_may_wakeup(priv->device))
7867 			phylink_speed_up(priv->phylink);
7868 	}
7869 	rtnl_unlock();
7870 
7871 	rtnl_lock();
7872 	mutex_lock(&priv->lock);
7873 
7874 	stmmac_reset_queues_param(priv);
7875 
7876 	stmmac_free_tx_skbufs(priv);
7877 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7878 
7879 	stmmac_hw_setup(ndev, false);
7880 	stmmac_init_coalesce(priv);
7881 	stmmac_set_rx_mode(ndev);
7882 
7883 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7884 
7885 	stmmac_enable_all_queues(priv);
7886 	stmmac_enable_all_dma_irq(priv);
7887 
7888 	mutex_unlock(&priv->lock);
7889 	rtnl_unlock();
7890 
7891 	netif_device_attach(ndev);
7892 
7893 	return 0;
7894 }
7895 EXPORT_SYMBOL_GPL(stmmac_resume);
7896 
7897 #ifndef MODULE
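/**
 * stmmac_cmdline_opt - parse the built-in "stmmaceth=" command line options
 * @str: comma-separated list of "name:value" pairs
 * Description: overrides the module parameters (debug, phyaddr, buf_sz, tc,
 * watchdog, flow_ctrl, pause, eee_timer, chain_mode) when the driver is
 * built into the kernel.
 */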
7898 static int __init stmmac_cmdline_opt(char *str)
7899 {
7900 	char *opt;
7901 
7902 	if (!str || !*str)
7903 		return 1;
7904 	while ((opt = strsep(&str, ",")) != NULL) {
7905 		if (!strncmp(opt, "debug:", 6)) {
7906 			if (kstrtoint(opt + 6, 0, &debug))
7907 				goto err;
7908 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7909 			if (kstrtoint(opt + 8, 0, &phyaddr))
7910 				goto err;
7911 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7912 			if (kstrtoint(opt + 7, 0, &buf_sz))
7913 				goto err;
7914 		} else if (!strncmp(opt, "tc:", 3)) {
7915 			if (kstrtoint(opt + 3, 0, &tc))
7916 				goto err;
7917 		} else if (!strncmp(opt, "watchdog:", 9)) {
7918 			if (kstrtoint(opt + 9, 0, &watchdog))
7919 				goto err;
7920 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7921 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7922 				goto err;
7923 		} else if (!strncmp(opt, "pause:", 6)) {
7924 			if (kstrtoint(opt + 6, 0, &pause))
7925 				goto err;
7926 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7927 			if (kstrtoint(opt + 10, 0, &eee_timer))
7928 				goto err;
7929 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7930 			if (kstrtoint(opt + 11, 0, &chain_mode))
7931 				goto err;
7932 		}
7933 	}
7934 	return 1;
7935 
7936 err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7938 	return 1;
7939 }
7940 
7941 __setup("stmmaceth=", stmmac_cmdline_opt);
7942 #endif /* MODULE */
7943 
7944 static int __init stmmac_init(void)
7945 {
7946 #ifdef CONFIG_DEBUG_FS
7947 	/* Create debugfs main directory if it doesn't exist yet */
7948 	if (!stmmac_fs_dir)
7949 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7950 	register_netdevice_notifier(&stmmac_notifier);
7951 #endif
7952 
7953 	return 0;
7954 }
7955 
7956 static void __exit stmmac_exit(void)
7957 {
7958 #ifdef CONFIG_DEBUG_FS
7959 	unregister_netdevice_notifier(&stmmac_notifier);
7960 	debugfs_remove_recursive(stmmac_fs_dir);
7961 #endif
7962 }
7963 
7964 module_init(stmmac_init)
7965 module_exit(stmmac_exit)
7966 
7967 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7968 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7969 MODULE_LICENSE("GPL");
7970