1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
	Copyright(C) 2007-2011 STMicroelectronics Ltd

9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
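/* STMMAC_ALIGN() rounds a length up to the L1 cache line size and then to a
 * 16 byte boundary; TSO_MAX_BUFF_SIZE is the maximum buffer size used per
 * descriptor on the TSO transmit path.
 */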
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
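/* Verdicts returned by the XDP handling path: pass the frame to the network
 * stack, consume (drop) it, transmit it on the XDP TX queue, or redirect it
 * to another device or AF_XDP socket.
 */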
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
/* By default the driver uses ring mode to manage TX and RX descriptors,
 * but the user can force chain mode instead of ring mode.
 */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
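/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to prepare and enable the clocks, false to disable them
 * Description: enables/disables the stmmac and pclk bus clocks and invokes
 * the optional platform clks_config() callback. On an enable failure, any
 * clock that was already enabled is disabled again and the error is
 * returned.
 */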
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
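/* Disable the NAPI contexts of every channel. Queues running in AF_XDP
 * zero-copy mode only use the combined rxtx NAPI instance, so only that one
 * is disabled for them.
 */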
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and is fixed (as reported in the driver
 *	documentation). Otherwise, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
	/* The platform-provided default clk_csr is assumed valid for all
	 * cases except the ones handled below. For clock rates higher than
	 * the IEEE 802.3 specified frequency range we cannot estimate the
	 * proper divider because the frequency of clk_csr_i is unknown, so
	 * the default divider is left unchanged.
	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
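/**
 * stmmac_tx_avail - get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: returns the number of free slots in the TX ring, keeping one
 * slot unused so that cur_tx == dirty_tx always means "empty" rather than
 * "full". E.g. with a 512-entry ring, cur_tx = 10 and dirty_tx = 5, 506
 * descriptors are available.
 */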
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
/**
 * stmmac_rx_dirty - get the number of dirty RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: returns how many RX descriptors have been consumed and still
 * need to be refilled by the driver.
 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: verifies that all TX queues are idle and, if so, puts the
 * MAC into LPI mode when EEE is enabled. Returns -EBUSY if TX work is
 * still pending.
 */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: exits LPI mode and disables EEE. This is called from the
 * xmit path.
 */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and we are not already in LPI state,
 *  then the MAC transmitter can be moved to LPI state.
 */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
	/* When using the PCS we cannot access the PHY registers at this
	 * stage, so extra features like EEE are not supported.
	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description:
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description:
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL-specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and a negative error code on failure.
 */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
			/* do not timestamp any incoming packet */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL-specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * It is rerun after resuming from suspend, in which case the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
	/* Calculate the default addend value:
	 * addend = 2^32 * target_freq / clk_ptp_rate
	 * where target_freq = 1e9 / sec_inc is the frequency (in Hz) the
	 * counter must run at to advance by sec_inc ns on every tick.
	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this verifies whether the HW supports PTPv1 or PTPv2
 * by looking at the HW capability register.
 * This function also registers the ptp driver.
 */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode of the link
 *  Description: It is used for configuring the flow control in all queues
 */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
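/* Return the XPCS or Lynx PCS instance when one is present, or NULL when no
 * separate PCS is attached to the MAC.
 */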
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
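/* On link up with the handshake enabled, start FPE verification by sending a
 * verify mPacket; otherwise reset the local and link-partner FPE states to
 * off.
 */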
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
968 	} else {
969 		*lo_state = FPE_STATE_OFF;
970 		*lp_state = FPE_STATE_OFF;
971 	}
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	priv->eee_active = false;
981 	priv->tx_lpi_enabled = false;
982 	priv->eee_enabled = stmmac_eee_init(priv);
983 	stmmac_set_eee_pls(priv, priv->hw, false);
984 
985 	if (priv->dma_cap.fpesel)
986 		stmmac_fpe_link_state_handle(priv, false);
987 }
988 
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 			       struct phy_device *phy,
991 			       unsigned int mode, phy_interface_t interface,
992 			       int speed, int duplex,
993 			       bool tx_pause, bool rx_pause)
994 {
995 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 	u32 old_ctrl, ctrl;
997 
998 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 	    priv->plat->serdes_powerup)
1000 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001 
1002 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004 
1005 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 		switch (speed) {
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_5000:
1011 			ctrl |= priv->hw->link.xgmii.speed5000;
1012 			break;
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.xgmii.speed2500;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 		switch (speed) {
1021 		case SPEED_100000:
1022 			ctrl |= priv->hw->link.xlgmii.speed100000;
1023 			break;
1024 		case SPEED_50000:
1025 			ctrl |= priv->hw->link.xlgmii.speed50000;
1026 			break;
1027 		case SPEED_40000:
1028 			ctrl |= priv->hw->link.xlgmii.speed40000;
1029 			break;
1030 		case SPEED_25000:
1031 			ctrl |= priv->hw->link.xlgmii.speed25000;
1032 			break;
1033 		case SPEED_10000:
1034 			ctrl |= priv->hw->link.xgmii.speed10000;
1035 			break;
1036 		case SPEED_2500:
1037 			ctrl |= priv->hw->link.speed2500;
1038 			break;
1039 		case SPEED_1000:
1040 			ctrl |= priv->hw->link.speed1000;
1041 			break;
1042 		default:
1043 			return;
1044 		}
1045 	} else {
1046 		switch (speed) {
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		case SPEED_100:
1054 			ctrl |= priv->hw->link.speed100;
1055 			break;
1056 		case SPEED_10:
1057 			ctrl |= priv->hw->link.speed10;
1058 			break;
1059 		default:
1060 			return;
1061 		}
1062 	}
1063 
1064 	priv->speed = speed;
1065 
1066 	if (priv->plat->fix_mac_speed)
1067 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068 
1069 	if (!duplex)
1070 		ctrl &= ~priv->hw->link.duplex;
1071 	else
1072 		ctrl |= priv->hw->link.duplex;
1073 
1074 	/* Flow Control operation */
1075 	if (rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_AUTO;
1077 	else if (rx_pause && !tx_pause)
1078 		priv->flow_ctrl = FLOW_RX;
1079 	else if (!rx_pause && tx_pause)
1080 		priv->flow_ctrl = FLOW_TX;
1081 	else
1082 		priv->flow_ctrl = FLOW_OFF;
1083 
1084 	stmmac_mac_flow_ctrl(priv, duplex);
1085 
1086 	if (ctrl != old_ctrl)
1087 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088 
1089 	stmmac_mac_set(priv, priv->ioaddr, true);
1090 	if (phy && priv->dma_cap.eee) {
1091 		priv->eee_active =
1092 			phy_init_eee(phy, !(priv->plat->flags &
1093 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 		priv->eee_enabled = stmmac_eee_init(priv);
1095 		priv->tx_lpi_enabled = priv->eee_enabled;
1096 		stmmac_set_eee_pls(priv, priv->hw, true);
1097 	}
1098 
1099 	if (priv->dma_cap.fpesel)
1100 		stmmac_fpe_link_state_handle(priv, true);
1101 
1102 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 		stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105 
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 	.mac_select_pcs = stmmac_mac_select_pcs,
1108 	.mac_config = stmmac_mac_config,
1109 	.mac_link_down = stmmac_mac_link_down,
1110 	.mac_link_up = stmmac_mac_link_up,
1111 };
1112 
/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this verifies whether the HW supports the PCS.
 * The Physical Coding Sublayer (PCS) is an interface that can be used when
 * the MAC is configured for the TBI, RTBI, or SGMII PHY interface.
 */
1120 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1121 {
1122 	int interface = priv->plat->mac_interface;
1123 
1124 	if (priv->dma_cap.pcs) {
1125 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1129 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_RGMII;
1131 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1132 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1133 			priv->hw->pcs = STMMAC_PCS_SGMII;
1134 		}
1135 	}
1136 }
1137 
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state and attaches the PHY
 * to the MAC driver.
 *  Return value:
 *  0 on success, or a negative error code on failure.
 */
1146 static int stmmac_init_phy(struct net_device *dev)
1147 {
1148 	struct stmmac_priv *priv = netdev_priv(dev);
1149 	struct fwnode_handle *phy_fwnode;
1150 	struct fwnode_handle *fwnode;
1151 	int ret;
1152 
1153 	if (!phylink_expects_phy(priv->phylink))
1154 		return 0;
1155 
1156 	fwnode = priv->plat->port_node;
1157 	if (!fwnode)
1158 		fwnode = dev_fwnode(priv->device);
1159 
1160 	if (fwnode)
1161 		phy_fwnode = fwnode_get_phy_node(fwnode);
1162 	else
1163 		phy_fwnode = NULL;
1164 
	/* Some DT bindings do not set up the PHY handle. Let's try to
	 * parse it manually.
	 */
1168 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1169 		int addr = priv->plat->phy_addr;
1170 		struct phy_device *phydev;
1171 
1172 		if (addr < 0) {
1173 			netdev_err(priv->dev, "no phy found\n");
1174 			return -ENODEV;
1175 		}
1176 
1177 		phydev = mdiobus_get_phy(priv->mii, addr);
1178 		if (!phydev) {
1179 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1180 			return -ENODEV;
1181 		}
1182 
1183 		ret = phylink_connect_phy(priv->phylink, phydev);
1184 	} else {
1185 		fwnode_handle_put(phy_fwnode);
1186 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 	}
1188 
1189 	if (!priv->plat->pmt) {
1190 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191 
1192 		phylink_ethtool_get_wol(priv->phylink, &wol);
1193 		device_set_wakeup_capable(priv->device, !!wol.supported);
1194 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1201 {
	/* Half-duplex can only work with a single TX queue */
1203 	if (priv->plat->tx_queues_to_use > 1)
1204 		priv->phylink_config.mac_capabilities &=
1205 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1206 	else
1207 		priv->phylink_config.mac_capabilities |=
1208 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1209 }
1210 
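/* Create the phylink instance: set up the supported PHY interfaces and the
 * MAC capabilities, apply the platform speed limit and register the phylink
 * MAC operations.
 */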
1211 static int stmmac_phy_setup(struct stmmac_priv *priv)
1212 {
1213 	struct stmmac_mdio_bus_data *mdio_bus_data;
1214 	int mode = priv->plat->phy_interface;
1215 	struct fwnode_handle *fwnode;
1216 	struct phylink *phylink;
1217 	int max_speed;
1218 
1219 	priv->phylink_config.dev = &priv->dev->dev;
1220 	priv->phylink_config.type = PHYLINK_NETDEV;
1221 	priv->phylink_config.mac_managed_pm = true;
1222 
1223 	mdio_bus_data = priv->plat->mdio_bus_data;
1224 	if (mdio_bus_data)
1225 		priv->phylink_config.ovr_an_inband =
1226 			mdio_bus_data->xpcs_an_inband;
1227 
1228 	/* Set the platform/firmware specified interface mode. Note, phylink
1229 	 * deals with the PHY interface mode, not the MAC interface mode.
1230 	 */
1231 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1232 
1233 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1234 	if (priv->hw->xpcs)
1235 		xpcs_get_interfaces(priv->hw->xpcs,
1236 				    priv->phylink_config.supported_interfaces);
1237 
1238 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1239 						MAC_10FD | MAC_100FD |
1240 						MAC_1000FD;
1241 
1242 	stmmac_set_half_duplex(priv);
1243 
1244 	/* Get the MAC specific capabilities */
1245 	stmmac_mac_phylink_get_caps(priv);
1246 
1247 	max_speed = priv->plat->max_speed;
1248 	if (max_speed)
1249 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1250 
1251 	fwnode = priv->plat->port_node;
1252 	if (!fwnode)
1253 		fwnode = dev_fwnode(priv->device);
1254 
1255 	phylink = phylink_create(&priv->phylink_config, fwnode,
1256 				 mode, &stmmac_phylink_mac_ops);
1257 	if (IS_ERR(phylink))
1258 		return PTR_ERR(phylink);
1259 
1260 	priv->phylink = phylink;
1261 	return 0;
1262 }
1263 
1264 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1265 				    struct stmmac_dma_conf *dma_conf)
1266 {
1267 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1268 	unsigned int desc_size;
1269 	void *head_rx;
1270 	u32 queue;
1271 
1272 	/* Display RX rings */
1273 	for (queue = 0; queue < rx_cnt; queue++) {
1274 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1275 
1276 		pr_info("\tRX Queue %u rings\n", queue);
1277 
1278 		if (priv->extend_desc) {
1279 			head_rx = (void *)rx_q->dma_erx;
1280 			desc_size = sizeof(struct dma_extended_desc);
1281 		} else {
1282 			head_rx = (void *)rx_q->dma_rx;
1283 			desc_size = sizeof(struct dma_desc);
1284 		}
1285 
1286 		/* Display RX ring */
1287 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1288 				    rx_q->dma_rx_phy, desc_size);
1289 	}
1290 }
1291 
1292 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1293 				    struct stmmac_dma_conf *dma_conf)
1294 {
1295 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1296 	unsigned int desc_size;
1297 	void *head_tx;
1298 	u32 queue;
1299 
1300 	/* Display TX rings */
1301 	for (queue = 0; queue < tx_cnt; queue++) {
1302 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1303 
1304 		pr_info("\tTX Queue %d rings\n", queue);
1305 
1306 		if (priv->extend_desc) {
1307 			head_tx = (void *)tx_q->dma_etx;
1308 			desc_size = sizeof(struct dma_extended_desc);
1309 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1310 			head_tx = (void *)tx_q->dma_entx;
1311 			desc_size = sizeof(struct dma_edesc);
1312 		} else {
1313 			head_tx = (void *)tx_q->dma_tx;
1314 			desc_size = sizeof(struct dma_desc);
1315 		}
1316 
1317 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1318 				    tx_q->dma_tx_phy, desc_size);
1319 	}
1320 }
1321 
1322 static void stmmac_display_rings(struct stmmac_priv *priv,
1323 				 struct stmmac_dma_conf *dma_conf)
1324 {
1325 	/* Display RX ring */
1326 	stmmac_display_rx_rings(priv, dma_conf);
1327 
1328 	/* Display TX ring */
1329 	stmmac_display_tx_rings(priv, dma_conf);
1330 }
1331 
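/* Pick the DMA RX buffer size bucket for the given MTU: the default 1536
 * bytes, or 2K/4K/8K/16K for larger MTUs.
 */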
1332 static int stmmac_set_bfsize(int mtu, int bufsize)
1333 {
1334 	int ret = bufsize;
1335 
1336 	if (mtu >= BUF_SIZE_8KiB)
1337 		ret = BUF_SIZE_16KiB;
1338 	else if (mtu >= BUF_SIZE_4KiB)
1339 		ret = BUF_SIZE_8KiB;
1340 	else if (mtu >= BUF_SIZE_2KiB)
1341 		ret = BUF_SIZE_4KiB;
1342 	else if (mtu > DEFAULT_BUFSIZE)
1343 		ret = BUF_SIZE_2KiB;
1344 	else
1345 		ret = DEFAULT_BUFSIZE;
1346 
1347 	return ret;
1348 }
1349 
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors;
 * it handles both basic and extended descriptors.
 */
1358 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1359 					struct stmmac_dma_conf *dma_conf,
1360 					u32 queue)
1361 {
1362 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1363 	int i;
1364 
1365 	/* Clear the RX descriptors */
1366 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1367 		if (priv->extend_desc)
1368 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1369 					priv->use_riwt, priv->mode,
1370 					(i == dma_conf->dma_rx_size - 1),
1371 					dma_conf->dma_buf_sz);
1372 		else
1373 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1374 					priv->use_riwt, priv->mode,
1375 					(i == dma_conf->dma_rx_size - 1),
1376 					dma_conf->dma_buf_sz);
1377 }
1378 
/**
 * stmmac_clear_tx_descriptors - clear TX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors;
 * it handles both basic and extended descriptors.
 */
1387 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1388 					struct stmmac_dma_conf *dma_conf,
1389 					u32 queue)
1390 {
1391 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1392 	int i;
1393 
1394 	/* Clear the TX descriptors */
1395 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1396 		int last = (i == (dma_conf->dma_tx_size - 1));
1397 		struct dma_desc *p;
1398 
1399 		if (priv->extend_desc)
1400 			p = &tx_q->dma_etx[i].basic;
1401 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1402 			p = &tx_q->dma_entx[i].basic;
1403 		else
1404 			p = &tx_q->dma_tx[i];
1405 
1406 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1407 	}
1408 }
1409 
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * Description: this function is called to clear the TX and RX descriptors;
 * it handles both basic and extended descriptors.
 */
1417 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1418 				     struct stmmac_dma_conf *dma_conf)
1419 {
1420 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1421 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1422 	u32 queue;
1423 
1424 	/* Clear the RX descriptors */
1425 	for (queue = 0; queue < rx_queue_cnt; queue++)
1426 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1427 
1428 	/* Clear the TX descriptors */
1429 	for (queue = 0; queue < tx_queue_cnt; queue++)
1430 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1431 }
1432 
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and initialize the descriptor.
 */
1444 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1445 				  struct stmmac_dma_conf *dma_conf,
1446 				  struct dma_desc *p,
1447 				  int i, gfp_t flags, u32 queue)
1448 {
1449 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1450 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1451 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1452 
1453 	if (priv->dma_cap.host_dma_width <= 32)
1454 		gfp |= GFP_DMA32;
1455 
1456 	if (!buf->page) {
1457 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1458 		if (!buf->page)
1459 			return -ENOMEM;
1460 		buf->page_offset = stmmac_rx_offset(priv);
1461 	}
1462 
1463 	if (priv->sph && !buf->sec_page) {
1464 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1465 		if (!buf->sec_page)
1466 			return -ENOMEM;
1467 
1468 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1469 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1470 	} else {
1471 		buf->sec_page = NULL;
1472 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1473 	}
1474 
1475 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1476 
1477 	stmmac_set_desc_addr(priv, p, buf->addr);
1478 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1479 		stmmac_init_desc3(priv, p);
1480 
1481 	return 0;
1482 }
1483 
/**
 * stmmac_free_rx_buffer - free an RX DMA buffer
 * @priv: private structure
 * @rx_q: RX queue
 * @i: buffer index.
 */
1490 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1491 				  struct stmmac_rx_queue *rx_q,
1492 				  int i)
1493 {
1494 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1495 
1496 	if (buf->page)
1497 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1498 	buf->page = NULL;
1499 
1500 	if (buf->sec_page)
1501 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1502 	buf->sec_page = NULL;
1503 }
1504 
/**
 * stmmac_free_tx_buffer - free a TX DMA buffer
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
1512 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1513 				  struct stmmac_dma_conf *dma_conf,
1514 				  u32 queue, int i)
1515 {
1516 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1517 
1518 	if (tx_q->tx_skbuff_dma[i].buf &&
1519 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1520 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1521 			dma_unmap_page(priv->device,
1522 				       tx_q->tx_skbuff_dma[i].buf,
1523 				       tx_q->tx_skbuff_dma[i].len,
1524 				       DMA_TO_DEVICE);
1525 		else
1526 			dma_unmap_single(priv->device,
1527 					 tx_q->tx_skbuff_dma[i].buf,
1528 					 tx_q->tx_skbuff_dma[i].len,
1529 					 DMA_TO_DEVICE);
1530 	}
1531 
1532 	if (tx_q->xdpf[i] &&
1533 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1534 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1535 		xdp_return_frame(tx_q->xdpf[i]);
1536 		tx_q->xdpf[i] = NULL;
1537 	}
1538 
1539 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1540 		tx_q->xsk_frames_done++;
1541 
1542 	if (tx_q->tx_skbuff[i] &&
1543 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1544 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1545 		tx_q->tx_skbuff[i] = NULL;
1546 	}
1547 
1548 	tx_q->tx_skbuff_dma[i].buf = 0;
1549 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1550 }
1551 
1552 /**
1553  * dma_free_rx_skbufs - free RX dma buffers
1554  * @priv: private structure
1555  * @dma_conf: structure to take the dma data
1556  * @queue: RX queue index
1557  */
1558 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1559 			       struct stmmac_dma_conf *dma_conf,
1560 			       u32 queue)
1561 {
1562 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1563 	int i;
1564 
1565 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1566 		stmmac_free_rx_buffer(priv, rx_q, i);
1567 }
1568 
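/**
 * stmmac_alloc_rx_buffers - allocate page pool backed RX buffers
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag
 * Description: allocates a page pool backed buffer for every descriptor of
 * the RX ring and writes its DMA address into the descriptor.
 */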
1569 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1570 				   struct stmmac_dma_conf *dma_conf,
1571 				   u32 queue, gfp_t flags)
1572 {
1573 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1574 	int i;
1575 
1576 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1577 		struct dma_desc *p;
1578 		int ret;
1579 
1580 		if (priv->extend_desc)
1581 			p = &((rx_q->dma_erx + i)->basic);
1582 		else
1583 			p = rx_q->dma_rx + i;
1584 
1585 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1586 					     queue);
1587 		if (ret)
1588 			return ret;
1589 
1590 		rx_q->buf_alloc_num++;
1591 	}
1592 
1593 	return 0;
1594 }
1595 
1596 /**
1597  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1598  * @priv: private structure
1599  * @dma_conf: structure to take the dma data
1600  * @queue: RX queue index
1601  */
1602 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1603 				struct stmmac_dma_conf *dma_conf,
1604 				u32 queue)
1605 {
1606 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1607 	int i;
1608 
1609 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1610 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1611 
1612 		if (!buf->xdp)
1613 			continue;
1614 
1615 		xsk_buff_free(buf->xdp);
1616 		buf->xdp = NULL;
1617 	}
1618 }
1619 
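/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: fills the RX ring with zero-copy buffers taken from the
 * queue's XSK buffer pool and programs their DMA addresses into the
 * descriptors.
 */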
1620 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1621 				      struct stmmac_dma_conf *dma_conf,
1622 				      u32 queue)
1623 {
1624 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1625 	int i;
1626 
	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver-specific information. Use
	 * this macro to make sure there are no size violations.
	 */
1631 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1632 
1633 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1634 		struct stmmac_rx_buffer *buf;
1635 		dma_addr_t dma_addr;
1636 		struct dma_desc *p;
1637 
1638 		if (priv->extend_desc)
1639 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1640 		else
1641 			p = rx_q->dma_rx + i;
1642 
1643 		buf = &rx_q->buf_pool[i];
1644 
1645 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1646 		if (!buf->xdp)
1647 			return -ENOMEM;
1648 
1649 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1650 		stmmac_set_desc_addr(priv, p, dma_addr);
1651 		rx_q->buf_alloc_num++;
1652 	}
1653 
1654 	return 0;
1655 }
1656 
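/* Return the XSK buffer pool bound to @queue when XDP is enabled and the
 * queue runs in zero-copy mode, otherwise NULL.
 */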
1657 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1658 {
1659 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1660 		return NULL;
1661 
1662 	return xsk_get_pool_from_qid(priv->dev, queue);
1663 }
1664 
1665 /**
1666  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1667  * @priv: driver private structure
1668  * @dma_conf: structure to take the dma data
1669  * @queue: RX queue index
1670  * @flags: gfp flag.
1671  * Description: this function initializes the DMA RX descriptors
1672  * and allocates the socket buffers. It supports the chained and ring
1673  * modes.
1674  */
1675 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1676 				    struct stmmac_dma_conf *dma_conf,
1677 				    u32 queue, gfp_t flags)
1678 {
1679 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1680 	int ret;
1681 
1682 	netif_dbg(priv, probe, priv->dev,
1683 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1684 		  (u32)rx_q->dma_rx_phy);
1685 
1686 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1687 
1688 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1689 
1690 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1691 
1692 	if (rx_q->xsk_pool) {
1693 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1694 						   MEM_TYPE_XSK_BUFF_POOL,
1695 						   NULL));
1696 		netdev_info(priv->dev,
1697 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1698 			    rx_q->queue_index);
1699 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1700 	} else {
1701 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1702 						   MEM_TYPE_PAGE_POOL,
1703 						   rx_q->page_pool));
1704 		netdev_info(priv->dev,
1705 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1706 			    rx_q->queue_index);
1707 	}
1708 
1709 	if (rx_q->xsk_pool) {
1710 		/* RX XDP ZC buffer pool may not be populated, e.g.
1711 		 * xdpsock TX-only.
1712 		 */
1713 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1714 	} else {
1715 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1716 		if (ret < 0)
1717 			return -ENOMEM;
1718 	}
1719 
1720 	/* Setup the chained descriptor addresses */
1721 	if (priv->mode == STMMAC_CHAIN_MODE) {
1722 		if (priv->extend_desc)
1723 			stmmac_mode_init(priv, rx_q->dma_erx,
1724 					 rx_q->dma_rx_phy,
1725 					 dma_conf->dma_rx_size, 1);
1726 		else
1727 			stmmac_mode_init(priv, rx_q->dma_rx,
1728 					 rx_q->dma_rx_phy,
1729 					 dma_conf->dma_rx_size, 0);
1730 	}
1731 
1732 	return 0;
1733 }
1734 
1735 static int init_dma_rx_desc_rings(struct net_device *dev,
1736 				  struct stmmac_dma_conf *dma_conf,
1737 				  gfp_t flags)
1738 {
1739 	struct stmmac_priv *priv = netdev_priv(dev);
1740 	u32 rx_count = priv->plat->rx_queues_to_use;
1741 	int queue;
1742 	int ret;
1743 
1744 	/* RX INITIALIZATION */
1745 	netif_dbg(priv, probe, priv->dev,
1746 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1747 
1748 	for (queue = 0; queue < rx_count; queue++) {
1749 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1750 		if (ret)
1751 			goto err_init_rx_buffers;
1752 	}
1753 
1754 	return 0;
1755 
1756 err_init_rx_buffers:
1757 	while (queue >= 0) {
1758 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1759 
1760 		if (rx_q->xsk_pool)
1761 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1762 		else
1763 			dma_free_rx_skbufs(priv, dma_conf, queue);
1764 
1765 		rx_q->buf_alloc_num = 0;
1766 		rx_q->xsk_pool = NULL;
1767 
1768 		queue--;
1769 	}
1770 
1771 	return ret;
1772 }
1773 
1774 /**
1775  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1776  * @priv: driver private structure
1777  * @dma_conf: structure to take the dma data
1778  * @queue: TX queue index
1779  * Description: this function initializes the DMA TX descriptors
1780  * and clears the per-descriptor TX buffer bookkeeping. It supports the
1781  * chained and ring modes.
1782  */
1783 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1784 				    struct stmmac_dma_conf *dma_conf,
1785 				    u32 queue)
1786 {
1787 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1788 	int i;
1789 
1790 	netif_dbg(priv, probe, priv->dev,
1791 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1792 		  (u32)tx_q->dma_tx_phy);
1793 
1794 	/* Setup the chained descriptor addresses */
1795 	if (priv->mode == STMMAC_CHAIN_MODE) {
1796 		if (priv->extend_desc)
1797 			stmmac_mode_init(priv, tx_q->dma_etx,
1798 					 tx_q->dma_tx_phy,
1799 					 dma_conf->dma_tx_size, 1);
1800 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1801 			stmmac_mode_init(priv, tx_q->dma_tx,
1802 					 tx_q->dma_tx_phy,
1803 					 dma_conf->dma_tx_size, 0);
1804 	}
1805 
1806 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1807 
1808 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1809 		struct dma_desc *p;
1810 
1811 		if (priv->extend_desc)
1812 			p = &((tx_q->dma_etx + i)->basic);
1813 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1814 			p = &((tx_q->dma_entx + i)->basic);
1815 		else
1816 			p = tx_q->dma_tx + i;
1817 
1818 		stmmac_clear_desc(priv, p);
1819 
1820 		tx_q->tx_skbuff_dma[i].buf = 0;
1821 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1822 		tx_q->tx_skbuff_dma[i].len = 0;
1823 		tx_q->tx_skbuff_dma[i].last_segment = false;
1824 		tx_q->tx_skbuff[i] = NULL;
1825 	}
1826 
1827 	return 0;
1828 }
1829 
1830 static int init_dma_tx_desc_rings(struct net_device *dev,
1831 				  struct stmmac_dma_conf *dma_conf)
1832 {
1833 	struct stmmac_priv *priv = netdev_priv(dev);
1834 	u32 tx_queue_cnt;
1835 	u32 queue;
1836 
1837 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1838 
1839 	for (queue = 0; queue < tx_queue_cnt; queue++)
1840 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1841 
1842 	return 0;
1843 }
1844 
1845 /**
1846  * init_dma_desc_rings - init the RX/TX descriptor rings
1847  * @dev: net device structure
1848  * @dma_conf: structure to take the dma data
1849  * @flags: gfp flag.
1850  * Description: this function initializes the DMA RX/TX descriptors
1851  * and allocates the socket buffers. It supports the chained and ring
1852  * modes.
1853  */
1854 static int init_dma_desc_rings(struct net_device *dev,
1855 			       struct stmmac_dma_conf *dma_conf,
1856 			       gfp_t flags)
1857 {
1858 	struct stmmac_priv *priv = netdev_priv(dev);
1859 	int ret;
1860 
1861 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1862 	if (ret)
1863 		return ret;
1864 
1865 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1866 
1867 	stmmac_clear_descriptors(priv, dma_conf);
1868 
1869 	if (netif_msg_hw(priv))
1870 		stmmac_display_rings(priv, dma_conf);
1871 
1872 	return ret;
1873 }
1874 
1875 /**
1876  * dma_free_tx_skbufs - free TX dma buffers
1877  * @priv: private structure
1878  * @dma_conf: structure to take the dma data
1879  * @queue: TX queue index
1880  */
1881 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1882 			       struct stmmac_dma_conf *dma_conf,
1883 			       u32 queue)
1884 {
1885 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1886 	int i;
1887 
1888 	tx_q->xsk_frames_done = 0;
1889 
1890 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1891 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1892 
1893 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1894 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1895 		tx_q->xsk_frames_done = 0;
1896 		tx_q->xsk_pool = NULL;
1897 	}
1898 }
1899 
1900 /**
1901  * stmmac_free_tx_skbufs - free TX skb buffers
1902  * @priv: private structure
1903  */
1904 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1905 {
1906 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1907 	u32 queue;
1908 
1909 	for (queue = 0; queue < tx_queue_cnt; queue++)
1910 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1911 }
1912 
1913 /**
1914  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1915  * @priv: private structure
1916  * @dma_conf: structure to take the dma data
1917  * @queue: RX queue index
1918  */
1919 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1920 					 struct stmmac_dma_conf *dma_conf,
1921 					 u32 queue)
1922 {
1923 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1924 
1925 	/* Release the DMA RX socket buffers */
1926 	if (rx_q->xsk_pool)
1927 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1928 	else
1929 		dma_free_rx_skbufs(priv, dma_conf, queue);
1930 
1931 	rx_q->buf_alloc_num = 0;
1932 	rx_q->xsk_pool = NULL;
1933 
1934 	/* Free DMA regions of consistent memory previously allocated */
1935 	if (!priv->extend_desc)
1936 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1937 				  sizeof(struct dma_desc),
1938 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1939 	else
1940 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1941 				  sizeof(struct dma_extended_desc),
1942 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1943 
1944 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1945 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1946 
1947 	kfree(rx_q->buf_pool);
1948 	if (rx_q->page_pool)
1949 		page_pool_destroy(rx_q->page_pool);
1950 }
1951 
1952 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1953 				       struct stmmac_dma_conf *dma_conf)
1954 {
1955 	u32 rx_count = priv->plat->rx_queues_to_use;
1956 	u32 queue;
1957 
1958 	/* Free RX queue resources */
1959 	for (queue = 0; queue < rx_count; queue++)
1960 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1961 }
1962 
1963 /**
1964  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1965  * @priv: private structure
1966  * @dma_conf: structure to take the dma data
1967  * @queue: TX queue index
1968  */
1969 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1970 					 struct stmmac_dma_conf *dma_conf,
1971 					 u32 queue)
1972 {
1973 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1974 	size_t size;
1975 	void *addr;
1976 
1977 	/* Release the DMA TX socket buffers */
1978 	dma_free_tx_skbufs(priv, dma_conf, queue);
1979 
1980 	if (priv->extend_desc) {
1981 		size = sizeof(struct dma_extended_desc);
1982 		addr = tx_q->dma_etx;
1983 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1984 		size = sizeof(struct dma_edesc);
1985 		addr = tx_q->dma_entx;
1986 	} else {
1987 		size = sizeof(struct dma_desc);
1988 		addr = tx_q->dma_tx;
1989 	}
1990 
1991 	size *= dma_conf->dma_tx_size;
1992 
1993 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1994 
1995 	kfree(tx_q->tx_skbuff_dma);
1996 	kfree(tx_q->tx_skbuff);
1997 }
1998 
1999 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2000 				       struct stmmac_dma_conf *dma_conf)
2001 {
2002 	u32 tx_count = priv->plat->tx_queues_to_use;
2003 	u32 queue;
2004 
2005 	/* Free TX queue resources */
2006 	for (queue = 0; queue < tx_count; queue++)
2007 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2008 }
2009 
2010 /**
2011  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2012  * @priv: private structure
2013  * @dma_conf: structure to take the dma data
2014  * @queue: RX queue index
2015  * Description: according to which descriptor type is in use (extended or
2016  * basic), this function allocates the resources for the RX path of the
2017  * given queue: the descriptor ring, the buffer bookkeeping array and the
2018  * page pool used to pre-allocate the RX buffers for the zero-copy mechanism.
2019  */
2020 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2021 					 struct stmmac_dma_conf *dma_conf,
2022 					 u32 queue)
2023 {
2024 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2025 	struct stmmac_channel *ch = &priv->channel[queue];
2026 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2027 	struct page_pool_params pp_params = { 0 };
2028 	unsigned int num_pages;
2029 	unsigned int napi_id;
2030 	int ret;
2031 
2032 	rx_q->queue_index = queue;
2033 	rx_q->priv_data = priv;
2034 
2035 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2036 	pp_params.pool_size = dma_conf->dma_rx_size;
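	/* Size the page order so that one pool buffer spans all the pages
	 * needed for a dma_buf_sz-sized frame.
	 */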
2037 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2038 	pp_params.order = ilog2(num_pages);
2039 	pp_params.nid = dev_to_node(priv->device);
2040 	pp_params.dev = priv->device;
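	/* With an XDP program attached, frames may be transmitted straight out
	 * of these RX buffers, so they must be mapped bidirectionally.
	 */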
2041 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2042 	pp_params.offset = stmmac_rx_offset(priv);
2043 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2044 
2045 	rx_q->page_pool = page_pool_create(&pp_params);
2046 	if (IS_ERR(rx_q->page_pool)) {
2047 		ret = PTR_ERR(rx_q->page_pool);
2048 		rx_q->page_pool = NULL;
2049 		return ret;
2050 	}
2051 
2052 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2053 				 sizeof(*rx_q->buf_pool),
2054 				 GFP_KERNEL);
2055 	if (!rx_q->buf_pool)
2056 		return -ENOMEM;
2057 
2058 	if (priv->extend_desc) {
2059 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2060 						   dma_conf->dma_rx_size *
2061 						   sizeof(struct dma_extended_desc),
2062 						   &rx_q->dma_rx_phy,
2063 						   GFP_KERNEL);
2064 		if (!rx_q->dma_erx)
2065 			return -ENOMEM;
2066 
2067 	} else {
2068 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2069 						  dma_conf->dma_rx_size *
2070 						  sizeof(struct dma_desc),
2071 						  &rx_q->dma_rx_phy,
2072 						  GFP_KERNEL);
2073 		if (!rx_q->dma_rx)
2074 			return -ENOMEM;
2075 	}
2076 
2077 	if (stmmac_xdp_is_enabled(priv) &&
2078 	    test_bit(queue, priv->af_xdp_zc_qps))
2079 		napi_id = ch->rxtx_napi.napi_id;
2080 	else
2081 		napi_id = ch->rx_napi.napi_id;
2082 
2083 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2084 			       rx_q->queue_index,
2085 			       napi_id);
2086 	if (ret) {
2087 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2088 		return -EINVAL;
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2095 				       struct stmmac_dma_conf *dma_conf)
2096 {
2097 	u32 rx_count = priv->plat->rx_queues_to_use;
2098 	u32 queue;
2099 	int ret;
2100 
2101 	/* RX queues buffers and DMA */
2102 	for (queue = 0; queue < rx_count; queue++) {
2103 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2104 		if (ret)
2105 			goto err_dma;
2106 	}
2107 
2108 	return 0;
2109 
2110 err_dma:
2111 	free_dma_rx_desc_resources(priv, dma_conf);
2112 
2113 	return ret;
2114 }
2115 
2116 /**
2117  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2118  * @priv: private structure
2119  * @dma_conf: structure to take the dma data
2120  * @queue: TX queue index
2121  * Description: according to which descriptor type is in use (extended or
2122  * basic), this function allocates the resources for the TX path of the
2123  * given queue: the TX descriptor ring and the related buffer bookkeeping
2124  * arrays.
2125  */
2126 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2127 					 struct stmmac_dma_conf *dma_conf,
2128 					 u32 queue)
2129 {
2130 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2131 	size_t size;
2132 	void *addr;
2133 
2134 	tx_q->queue_index = queue;
2135 	tx_q->priv_data = priv;
2136 
2137 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2138 				      sizeof(*tx_q->tx_skbuff_dma),
2139 				      GFP_KERNEL);
2140 	if (!tx_q->tx_skbuff_dma)
2141 		return -ENOMEM;
2142 
2143 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2144 				  sizeof(struct sk_buff *),
2145 				  GFP_KERNEL);
2146 	if (!tx_q->tx_skbuff)
2147 		return -ENOMEM;
2148 
2149 	if (priv->extend_desc)
2150 		size = sizeof(struct dma_extended_desc);
2151 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152 		size = sizeof(struct dma_edesc);
2153 	else
2154 		size = sizeof(struct dma_desc);
2155 
2156 	size *= dma_conf->dma_tx_size;
2157 
2158 	addr = dma_alloc_coherent(priv->device, size,
2159 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2160 	if (!addr)
2161 		return -ENOMEM;
2162 
2163 	if (priv->extend_desc)
2164 		tx_q->dma_etx = addr;
2165 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2166 		tx_q->dma_entx = addr;
2167 	else
2168 		tx_q->dma_tx = addr;
2169 
2170 	return 0;
2171 }
2172 
2173 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2174 				       struct stmmac_dma_conf *dma_conf)
2175 {
2176 	u32 tx_count = priv->plat->tx_queues_to_use;
2177 	u32 queue;
2178 	int ret;
2179 
2180 	/* TX queues buffers and DMA */
2181 	for (queue = 0; queue < tx_count; queue++) {
2182 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2183 		if (ret)
2184 			goto err_dma;
2185 	}
2186 
2187 	return 0;
2188 
2189 err_dma:
2190 	free_dma_tx_desc_resources(priv, dma_conf);
2191 	return ret;
2192 }
2193 
2194 /**
2195  * alloc_dma_desc_resources - alloc TX/RX resources.
2196  * @priv: private structure
2197  * @dma_conf: structure to take the dma data
2198  * Description: according to which descriptor type is in use (extended or
2199  * basic), this function allocates the resources for the TX and RX paths.
2200  * In case of reception, for example, it pre-allocates the RX buffers in
2201  * order to allow a zero-copy mechanism.
2202  */
2203 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2204 				    struct stmmac_dma_conf *dma_conf)
2205 {
2206 	/* RX Allocation */
2207 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2208 
2209 	if (ret)
2210 		return ret;
2211 
2212 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2213 
2214 	return ret;
2215 }
2216 
2217 /**
2218  * free_dma_desc_resources - free dma desc resources
2219  * @priv: private structure
2220  * @dma_conf: structure to take the dma data
2221  */
2222 static void free_dma_desc_resources(struct stmmac_priv *priv,
2223 				    struct stmmac_dma_conf *dma_conf)
2224 {
2225 	/* Release the DMA TX socket buffers */
2226 	free_dma_tx_desc_resources(priv, dma_conf);
2227 
2228 	/* Release the DMA RX socket buffers later
2229 	 * to ensure all pending XDP_TX buffers are returned.
2230 	 */
2231 	free_dma_rx_desc_resources(priv, dma_conf);
2232 }
2233 
2234 /**
2235  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2236  *  @priv: driver private structure
2237  *  Description: It is used for enabling the rx queues in the MAC
2238  */
2239 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2240 {
2241 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2242 	int queue;
2243 	u8 mode;
2244 
2245 	for (queue = 0; queue < rx_queues_count; queue++) {
2246 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2247 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2248 	}
2249 }
2250 
2251 /**
2252  * stmmac_start_rx_dma - start RX DMA channel
2253  * @priv: driver private structure
2254  * @chan: RX channel index
2255  * Description:
2256  * This starts an RX DMA channel
2257  */
2258 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2259 {
2260 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2261 	stmmac_start_rx(priv, priv->ioaddr, chan);
2262 }
2263 
2264 /**
2265  * stmmac_start_tx_dma - start TX DMA channel
2266  * @priv: driver private structure
2267  * @chan: TX channel index
2268  * Description:
2269  * This starts a TX DMA channel
2270  */
2271 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2272 {
2273 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2274 	stmmac_start_tx(priv, priv->ioaddr, chan);
2275 }
2276 
2277 /**
2278  * stmmac_stop_rx_dma - stop RX DMA channel
2279  * @priv: driver private structure
2280  * @chan: RX channel index
2281  * Description:
2282  * This stops an RX DMA channel
2283  */
2284 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2285 {
2286 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2287 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2288 }
2289 
2290 /**
2291  * stmmac_stop_tx_dma - stop TX DMA channel
2292  * @priv: driver private structure
2293  * @chan: TX channel index
2294  * Description:
2295  * This stops a TX DMA channel
2296  */
2297 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2298 {
2299 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2300 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2301 }
2302 
2303 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2304 {
2305 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2306 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
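	/* The DMA CSR channels cover both directions, so walk the larger of
	 * the RX and TX channel counts.
	 */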
2307 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2308 	u32 chan;
2309 
2310 	for (chan = 0; chan < dma_csr_ch; chan++) {
2311 		struct stmmac_channel *ch = &priv->channel[chan];
2312 		unsigned long flags;
2313 
2314 		spin_lock_irqsave(&ch->lock, flags);
2315 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2316 		spin_unlock_irqrestore(&ch->lock, flags);
2317 	}
2318 }
2319 
2320 /**
2321  * stmmac_start_all_dma - start all RX and TX DMA channels
2322  * @priv: driver private structure
2323  * Description:
2324  * This starts all the RX and TX DMA channels
2325  */
2326 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2327 {
2328 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2329 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2330 	u32 chan = 0;
2331 
2332 	for (chan = 0; chan < rx_channels_count; chan++)
2333 		stmmac_start_rx_dma(priv, chan);
2334 
2335 	for (chan = 0; chan < tx_channels_count; chan++)
2336 		stmmac_start_tx_dma(priv, chan);
2337 }
2338 
2339 /**
2340  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2341  * @priv: driver private structure
2342  * Description:
2343  * This stops the RX and TX DMA channels
2344  */
2345 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2346 {
2347 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2348 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2349 	u32 chan = 0;
2350 
2351 	for (chan = 0; chan < rx_channels_count; chan++)
2352 		stmmac_stop_rx_dma(priv, chan);
2353 
2354 	for (chan = 0; chan < tx_channels_count; chan++)
2355 		stmmac_stop_tx_dma(priv, chan);
2356 }
2357 
2358 /**
2359  *  stmmac_dma_operation_mode - HW DMA operation mode
2360  *  @priv: driver private structure
2361  *  Description: it is used for configuring the DMA operation mode register in
2362  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2363  */
2364 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2365 {
2366 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2367 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2368 	int rxfifosz = priv->plat->rx_fifo_size;
2369 	int txfifosz = priv->plat->tx_fifo_size;
2370 	u32 txmode = 0;
2371 	u32 rxmode = 0;
2372 	u32 chan = 0;
2373 	u8 qmode = 0;
2374 
2375 	if (rxfifosz == 0)
2376 		rxfifosz = priv->dma_cap.rx_fifo_size;
2377 	if (txfifosz == 0)
2378 		txfifosz = priv->dma_cap.tx_fifo_size;
2379 
2380 	/* Adjust for real per queue fifo size */
2381 	rxfifosz /= rx_channels_count;
2382 	txfifosz /= tx_channels_count;
2383 
2384 	if (priv->plat->force_thresh_dma_mode) {
2385 		txmode = tc;
2386 		rxmode = tc;
2387 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2388 		/*
2389 		 * In case of GMAC, SF mode can be enabled
2390 		 * to perform the TX COE in HW. This depends on:
2391 		 * 1) TX COE being actually supported;
2392 		 * 2) there being no buggy Jumbo frame support
2393 		 *    that forces skipping csum insertion in the TDES.
2394 		 */
2395 		txmode = SF_DMA_MODE;
2396 		rxmode = SF_DMA_MODE;
2397 		priv->xstats.threshold = SF_DMA_MODE;
2398 	} else {
2399 		txmode = tc;
2400 		rxmode = SF_DMA_MODE;
2401 	}
2402 
2403 	/* configure all channels */
2404 	for (chan = 0; chan < rx_channels_count; chan++) {
2405 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2406 		u32 buf_size;
2407 
2408 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2409 
2410 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2411 				rxfifosz, qmode);
2412 
2413 		if (rx_q->xsk_pool) {
2414 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2415 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2416 					      buf_size,
2417 					      chan);
2418 		} else {
2419 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2420 					      priv->dma_conf.dma_buf_sz,
2421 					      chan);
2422 		}
2423 	}
2424 
2425 	for (chan = 0; chan < tx_channels_count; chan++) {
2426 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2427 
2428 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2429 				txfifosz, qmode);
2430 	}
2431 }
2432 
2433 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2434 {
2435 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2436 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2437 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2438 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2439 	unsigned int entry = tx_q->cur_tx;
2440 	struct dma_desc *tx_desc = NULL;
2441 	struct xdp_desc xdp_desc;
2442 	bool work_done = true;
2443 	u32 tx_set_ic_bit = 0;
2444 	unsigned long flags;
2445 
2446 	/* Avoid a TX time-out since the queue is shared with the slow path */
2447 	txq_trans_cond_update(nq);
2448 
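	/* Never submit more frames than there are free TX descriptors */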
2449 	budget = min(budget, stmmac_tx_avail(priv, queue));
2450 
2451 	while (budget-- > 0) {
2452 		dma_addr_t dma_addr;
2453 		bool set_ic;
2454 
2455 		/* The ring is shared with the slow path, so stop XSK TX desc
2456 		 * submission when free TX descriptors drop below the threshold.
2457 		 */
2458 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2459 		    !netif_carrier_ok(priv->dev)) {
2460 			work_done = false;
2461 			break;
2462 		}
2463 
2464 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2465 			break;
2466 
2467 		if (likely(priv->extend_desc))
2468 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2469 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2470 			tx_desc = &tx_q->dma_entx[entry].basic;
2471 		else
2472 			tx_desc = tx_q->dma_tx + entry;
2473 
2474 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2475 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2476 
2477 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2478 
2479 		/* To return the XDP buffer to the XSK pool, we simply call
2480 		 * xsk_tx_completed(), so we don't need to fill up
2481 		 * 'buf' and 'xdpf'.
2482 		 */
2483 		tx_q->tx_skbuff_dma[entry].buf = 0;
2484 		tx_q->xdpf[entry] = NULL;
2485 
2486 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2487 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2488 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2489 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2490 
2491 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2492 
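		/* Interrupt-on-completion coalescing: request a TX completion
		 * IRQ only once every tx_coal_frames descriptors.
		 */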
2493 		tx_q->tx_count_frames++;
2494 
2495 		if (!priv->tx_coal_frames[queue])
2496 			set_ic = false;
2497 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2498 			set_ic = true;
2499 		else
2500 			set_ic = false;
2501 
2502 		if (set_ic) {
2503 			tx_q->tx_count_frames = 0;
2504 			stmmac_set_tx_ic(priv, tx_desc);
2505 			tx_set_ic_bit++;
2506 		}
2507 
2508 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2509 				       true, priv->mode, true, true,
2510 				       xdp_desc.len);
2511 
2512 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2513 
2514 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2515 		entry = tx_q->cur_tx;
2516 	}
2517 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2518 	txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2519 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2520 
2521 	if (tx_desc) {
2522 		stmmac_flush_tx_descriptors(priv, queue);
2523 		xsk_tx_release(pool);
2524 	}
2525 
2526 	/* Return true if both of the following conditions are met:
2527 	 *  a) TX budget is still available;
2528 	 *  b) work_done == true, i.e. the XSK TX desc peek came back empty
2529 	 *     (no more pending XSK TX frames to transmit).
2530 	 */
2531 	return !!budget && work_done;
2532 }
2533 
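/* Step the DMA threshold up by 64 after an error that suggests the current
 * value is too low; this is a no-op once Store-and-Forward mode is in use.
 */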
2534 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2535 {
2536 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2537 		tc += 64;
2538 
2539 		if (priv->plat->force_thresh_dma_mode)
2540 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2541 		else
2542 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2543 						      chan);
2544 
2545 		priv->xstats.threshold = tc;
2546 	}
2547 }
2548 
2549 /**
2550  * stmmac_tx_clean - to manage the transmission completion
2551  * @priv: driver private structure
2552  * @budget: napi budget limiting this function's packet handling
2553  * @queue: TX queue index
2554  * Description: it reclaims the transmit resources after transmission completes.
2555  */
2556 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2557 {
2558 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2559 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2560 	unsigned int bytes_compl = 0, pkts_compl = 0;
2561 	unsigned int entry, xmits = 0, count = 0;
2562 	u32 tx_packets = 0, tx_errors = 0;
2563 	unsigned long flags;
2564 
2565 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2566 
2567 	tx_q->xsk_frames_done = 0;
2568 
2569 	entry = tx_q->dirty_tx;
2570 
2571 	/* Try to clean all completed TX frames in one shot */
2572 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2573 		struct xdp_frame *xdpf;
2574 		struct sk_buff *skb;
2575 		struct dma_desc *p;
2576 		int status;
2577 
2578 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2579 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2580 			xdpf = tx_q->xdpf[entry];
2581 			skb = NULL;
2582 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2583 			xdpf = NULL;
2584 			skb = tx_q->tx_skbuff[entry];
2585 		} else {
2586 			xdpf = NULL;
2587 			skb = NULL;
2588 		}
2589 
2590 		if (priv->extend_desc)
2591 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2592 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2593 			p = &tx_q->dma_entx[entry].basic;
2594 		else
2595 			p = tx_q->dma_tx + entry;
2596 
2597 		status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2598 		/* Check if the descriptor is owned by the DMA */
2599 		if (unlikely(status & tx_dma_own))
2600 			break;
2601 
2602 		count++;
2603 
2604 		/* Make sure descriptor fields are read after reading
2605 		 * the own bit.
2606 		 */
2607 		dma_rmb();
2608 
2609 		/* Just consider the last segment and ... */
2610 		if (likely(!(status & tx_not_ls))) {
2611 			/* ... verify the status error condition */
2612 			if (unlikely(status & tx_err)) {
2613 				tx_errors++;
2614 				if (unlikely(status & tx_err_bump_tc))
2615 					stmmac_bump_dma_threshold(priv, queue);
2616 			} else {
2617 				tx_packets++;
2618 			}
2619 			if (skb)
2620 				stmmac_get_tx_hwtstamp(priv, p, skb);
2621 		}
2622 
2623 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2624 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2625 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2626 				dma_unmap_page(priv->device,
2627 					       tx_q->tx_skbuff_dma[entry].buf,
2628 					       tx_q->tx_skbuff_dma[entry].len,
2629 					       DMA_TO_DEVICE);
2630 			else
2631 				dma_unmap_single(priv->device,
2632 						 tx_q->tx_skbuff_dma[entry].buf,
2633 						 tx_q->tx_skbuff_dma[entry].len,
2634 						 DMA_TO_DEVICE);
2635 			tx_q->tx_skbuff_dma[entry].buf = 0;
2636 			tx_q->tx_skbuff_dma[entry].len = 0;
2637 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2638 		}
2639 
2640 		stmmac_clean_desc3(priv, tx_q, p);
2641 
2642 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2643 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2644 
2645 		if (xdpf &&
2646 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2647 			xdp_return_frame_rx_napi(xdpf);
2648 			tx_q->xdpf[entry] = NULL;
2649 		}
2650 
2651 		if (xdpf &&
2652 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2653 			xdp_return_frame(xdpf);
2654 			tx_q->xdpf[entry] = NULL;
2655 		}
2656 
2657 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2658 			tx_q->xsk_frames_done++;
2659 
2660 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2661 			if (likely(skb)) {
2662 				pkts_compl++;
2663 				bytes_compl += skb->len;
2664 				dev_consume_skb_any(skb);
2665 				tx_q->tx_skbuff[entry] = NULL;
2666 			}
2667 		}
2668 
2669 		stmmac_release_tx_desc(priv, p, priv->mode);
2670 
2671 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2672 	}
2673 	tx_q->dirty_tx = entry;
2674 
2675 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2676 				  pkts_compl, bytes_compl);
2677 
2678 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2679 								queue))) &&
2680 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2681 
2682 		netif_dbg(priv, tx_done, priv->dev,
2683 			  "%s: restart transmit\n", __func__);
2684 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2685 	}
2686 
2687 	if (tx_q->xsk_pool) {
2688 		bool work_done;
2689 
2690 		if (tx_q->xsk_frames_done)
2691 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2692 
2693 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2694 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2695 
2696 		/* For XSK TX, we try to send as many frames as possible.
2697 		 * If the XSK work is done (XSK TX desc empty and budget still
2698 		 * available), return "budget - 1" to re-enable the TX IRQ.
2699 		 * Else, return "budget" to make NAPI continue polling.
2700 		 */
2701 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2702 					       STMMAC_XSK_TX_BUDGET_MAX);
2703 		if (work_done)
2704 			xmits = budget - 1;
2705 		else
2706 			xmits = budget;
2707 	}
2708 
2709 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2710 	    priv->eee_sw_timer_en) {
2711 		if (stmmac_enable_eee_mode(priv))
2712 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2713 	}
2714 
2715 	/* We still have pending packets, let's call for a new scheduling */
2716 	if (tx_q->dirty_tx != tx_q->cur_tx)
2717 		stmmac_tx_timer_arm(priv, queue);
2718 
2719 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2720 	txq_stats->tx_packets += tx_packets;
2721 	txq_stats->tx_pkt_n += tx_packets;
2722 	txq_stats->tx_clean++;
2723 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2724 
2725 	priv->xstats.tx_errors += tx_errors;
2726 
2727 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2728 
2729 	/* Combine decisions from TX clean and XSK TX */
2730 	return max(count, xmits);
2731 }
2732 
2733 /**
2734  * stmmac_tx_err - to manage the tx error
2735  * @priv: driver private structure
2736  * @chan: channel index
2737  * Description: it cleans the descriptors and restarts the transmission
2738  * in case of transmission errors.
2739  */
2740 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2741 {
2742 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2743 
2744 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2745 
2746 	stmmac_stop_tx_dma(priv, chan);
2747 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2748 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2749 	stmmac_reset_tx_queue(priv, chan);
2750 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2751 			    tx_q->dma_tx_phy, chan);
2752 	stmmac_start_tx_dma(priv, chan);
2753 
2754 	priv->xstats.tx_errors++;
2755 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2756 }
2757 
2758 /**
2759  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2760  *  @priv: driver private structure
2761  *  @txmode: TX operating mode
2762  *  @rxmode: RX operating mode
2763  *  @chan: channel index
2764  *  Description: it is used for configuring the DMA operation mode at
2765  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2766  *  mode.
2767  */
2768 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2769 					  u32 rxmode, u32 chan)
2770 {
2771 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2772 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2773 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2774 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2775 	int rxfifosz = priv->plat->rx_fifo_size;
2776 	int txfifosz = priv->plat->tx_fifo_size;
2777 
2778 	if (rxfifosz == 0)
2779 		rxfifosz = priv->dma_cap.rx_fifo_size;
2780 	if (txfifosz == 0)
2781 		txfifosz = priv->dma_cap.tx_fifo_size;
2782 
2783 	/* Adjust for real per queue fifo size */
2784 	rxfifosz /= rx_channels_count;
2785 	txfifosz /= tx_channels_count;
2786 
2787 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2788 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2789 }
2790 
2791 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2792 {
2793 	int ret;
2794 
2795 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2796 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2797 	if (ret && (ret != -EINVAL)) {
2798 		stmmac_global_err(priv);
2799 		return true;
2800 	}
2801 
2802 	return false;
2803 }
2804 
2805 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2806 {
2807 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2808 						 &priv->xstats, chan, dir);
2809 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2810 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2811 	struct stmmac_channel *ch = &priv->channel[chan];
2812 	struct napi_struct *rx_napi;
2813 	struct napi_struct *tx_napi;
2814 	unsigned long flags;
2815 
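	/* Queues backed by an XSK pool are serviced by the combined rxtx NAPI
	 * instance instead of the separate rx/tx ones.
	 */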
2816 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2817 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2818 
2819 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2820 		if (napi_schedule_prep(rx_napi)) {
2821 			spin_lock_irqsave(&ch->lock, flags);
2822 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2823 			spin_unlock_irqrestore(&ch->lock, flags);
2824 			__napi_schedule(rx_napi);
2825 		}
2826 	}
2827 
2828 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2829 		if (napi_schedule_prep(tx_napi)) {
2830 			spin_lock_irqsave(&ch->lock, flags);
2831 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2832 			spin_unlock_irqrestore(&ch->lock, flags);
2833 			__napi_schedule(tx_napi);
2834 		}
2835 	}
2836 
2837 	return status;
2838 }
2839 
2840 /**
2841  * stmmac_dma_interrupt - DMA ISR
2842  * @priv: driver private structure
2843  * Description: this is the DMA ISR. It is called by the main ISR.
2844  * It calls the dwmac dma routine and schedules the poll method when there
2845  * is work that can be done.
2846  */
2847 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2848 {
2849 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2850 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2851 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2852 				tx_channel_count : rx_channel_count;
2853 	u32 chan;
2854 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2855 
2856 	/* Make sure we never check beyond our status buffer. */
2857 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2858 		channels_to_check = ARRAY_SIZE(status);
2859 
2860 	for (chan = 0; chan < channels_to_check; chan++)
2861 		status[chan] = stmmac_napi_check(priv, chan,
2862 						 DMA_DIR_RXTX);
2863 
2864 	for (chan = 0; chan < tx_channel_count; chan++) {
2865 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2866 			/* Try to bump up the dma threshold on this failure */
2867 			stmmac_bump_dma_threshold(priv, chan);
2868 		} else if (unlikely(status[chan] == tx_hard_error)) {
2869 			stmmac_tx_err(priv, chan);
2870 		}
2871 	}
2872 }
2873 
2874 /**
2875  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2876  * @priv: driver private structure
2877  * Description: this masks the MMC irq since the counters are managed in SW.
2878  */
2879 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2880 {
2881 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2882 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2883 
2884 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2885 
2886 	if (priv->dma_cap.rmon) {
2887 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2888 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2889 	} else
2890 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2891 }
2892 
2893 /**
2894  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2895  * @priv: driver private structure
2896  * Description:
2897  *  newer GMAC chip generations have a dedicated register to indicate the
2898  *  presence of optional features/functions.
2899  *  This can also be used to override the values passed through the
2900  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2901  */
2902 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2903 {
2904 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2905 }
2906 
2907 /**
2908  * stmmac_check_ether_addr - check if the MAC addr is valid
2909  * @priv: driver private structure
2910  * Description:
2911  * it verifies that the MAC address is valid; in case it is not, it
2912  * generates a random MAC address
2913  */
2914 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2915 {
2916 	u8 addr[ETH_ALEN];
2917 
2918 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2919 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2920 		if (is_valid_ether_addr(addr))
2921 			eth_hw_addr_set(priv->dev, addr);
2922 		else
2923 			eth_hw_addr_random(priv->dev);
2924 		dev_info(priv->device, "device MAC address %pM\n",
2925 			 priv->dev->dev_addr);
2926 	}
2927 }
2928 
2929 /**
2930  * stmmac_init_dma_engine - DMA init.
2931  * @priv: driver private structure
2932  * Description:
2933  * It inits the DMA by invoking the specific MAC/GMAC callback.
2934  * Some DMA parameters can be passed from the platform;
2935  * if they are not passed, a default is kept for the MAC or GMAC.
2936  */
2937 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2938 {
2939 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2940 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2941 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2942 	struct stmmac_rx_queue *rx_q;
2943 	struct stmmac_tx_queue *tx_q;
2944 	u32 chan = 0;
2945 	int atds = 0;
2946 	int ret = 0;
2947 
2948 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2949 		dev_err(priv->device, "Invalid DMA configuration\n");
2950 		return -EINVAL;
2951 	}
2952 
2953 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2954 		atds = 1;
2955 
2956 	ret = stmmac_reset(priv, priv->ioaddr);
2957 	if (ret) {
2958 		dev_err(priv->device, "Failed to reset the dma\n");
2959 		return ret;
2960 	}
2961 
2962 	/* DMA Configuration */
2963 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2964 
2965 	if (priv->plat->axi)
2966 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2967 
2968 	/* DMA CSR Channel configuration */
2969 	for (chan = 0; chan < dma_csr_ch; chan++) {
2970 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2971 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2972 	}
2973 
2974 	/* DMA RX Channel Configuration */
2975 	for (chan = 0; chan < rx_channels_count; chan++) {
2976 		rx_q = &priv->dma_conf.rx_queue[chan];
2977 
2978 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2979 				    rx_q->dma_rx_phy, chan);
2980 
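		/* Start the RX tail pointer right after the last
		 * pre-allocated descriptor.
		 */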
2981 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2982 				     (rx_q->buf_alloc_num *
2983 				      sizeof(struct dma_desc));
2984 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2985 				       rx_q->rx_tail_addr, chan);
2986 	}
2987 
2988 	/* DMA TX Channel Configuration */
2989 	for (chan = 0; chan < tx_channels_count; chan++) {
2990 		tx_q = &priv->dma_conf.tx_queue[chan];
2991 
2992 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2993 				    tx_q->dma_tx_phy, chan);
2994 
2995 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2996 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2997 				       tx_q->tx_tail_addr, chan);
2998 	}
2999 
3000 	return ret;
3001 }
3002 
3003 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3004 {
3005 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3006 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3007 
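	/* A zero coalesce timer means SW TX coalescing is disabled for this
	 * queue, so never arm the hrtimer.
	 */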
3008 	if (!tx_coal_timer)
3009 		return;
3010 
3011 	hrtimer_start(&tx_q->txtimer,
3012 		      STMMAC_COAL_TIMER(tx_coal_timer),
3013 		      HRTIMER_MODE_REL);
3014 }
3015 
3016 /**
3017  * stmmac_tx_timer - mitigation sw timer for tx.
3018  * @t: pointer to the expiring hrtimer
3019  * Description:
3020  * This is the timer handler used to directly invoke stmmac_tx_clean().
3021  */
3022 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3023 {
3024 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3025 	struct stmmac_priv *priv = tx_q->priv_data;
3026 	struct stmmac_channel *ch;
3027 	struct napi_struct *napi;
3028 
3029 	ch = &priv->channel[tx_q->queue_index];
3030 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3031 
3032 	if (likely(napi_schedule_prep(napi))) {
3033 		unsigned long flags;
3034 
3035 		spin_lock_irqsave(&ch->lock, flags);
3036 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3037 		spin_unlock_irqrestore(&ch->lock, flags);
3038 		__napi_schedule(napi);
3039 	}
3040 
3041 	return HRTIMER_NORESTART;
3042 }
3043 
3044 /**
3045  * stmmac_init_coalesce - init mitigation options.
3046  * @priv: driver private structure
3047  * Description:
3048  * This inits the coalesce parameters, i.e. the timer rate,
3049  * the timer handler and the default frame threshold used for
3050  * setting the interrupt-on-completion bit.
3051  */
3052 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3053 {
3054 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3055 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3056 	u32 chan;
3057 
3058 	for (chan = 0; chan < tx_channel_count; chan++) {
3059 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3060 
3061 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3062 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3063 
3064 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3065 		tx_q->txtimer.function = stmmac_tx_timer;
3066 	}
3067 
3068 	for (chan = 0; chan < rx_channel_count; chan++)
3069 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3070 }
3071 
3072 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3073 {
3074 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3075 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3076 	u32 chan;
3077 
3078 	/* set TX ring length */
3079 	for (chan = 0; chan < tx_channels_count; chan++)
3080 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3081 				       (priv->dma_conf.dma_tx_size - 1), chan);
3082 
3083 	/* set RX ring length */
3084 	for (chan = 0; chan < rx_channels_count; chan++)
3085 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3086 				       (priv->dma_conf.dma_rx_size - 1), chan);
3087 }
3088 
3089 /**
3090  *  stmmac_set_tx_queue_weight - Set TX queue weight
3091  *  @priv: driver private structure
3092  *  Description: It is used for setting the TX queue weights
3093  */
3094 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3095 {
3096 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3097 	u32 weight;
3098 	u32 queue;
3099 
3100 	for (queue = 0; queue < tx_queues_count; queue++) {
3101 		weight = priv->plat->tx_queues_cfg[queue].weight;
3102 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3103 	}
3104 }
3105 
3106 /**
3107  *  stmmac_configure_cbs - Configure CBS in TX queue
3108  *  @priv: driver private structure
3109  *  Description: It is used for configuring CBS in AVB TX queues
3110  */
3111 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3112 {
3113 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3114 	u32 mode_to_use;
3115 	u32 queue;
3116 
3117 	/* queue 0 is reserved for legacy traffic */
3118 	for (queue = 1; queue < tx_queues_count; queue++) {
3119 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3120 		if (mode_to_use == MTL_QUEUE_DCB)
3121 			continue;
3122 
3123 		stmmac_config_cbs(priv, priv->hw,
3124 				priv->plat->tx_queues_cfg[queue].send_slope,
3125 				priv->plat->tx_queues_cfg[queue].idle_slope,
3126 				priv->plat->tx_queues_cfg[queue].high_credit,
3127 				priv->plat->tx_queues_cfg[queue].low_credit,
3128 				queue);
3129 	}
3130 }
3131 
3132 /**
3133  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3134  *  @priv: driver private structure
3135  *  Description: It is used for mapping RX queues to RX dma channels
3136  */
3137 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3138 {
3139 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3140 	u32 queue;
3141 	u32 chan;
3142 
3143 	for (queue = 0; queue < rx_queues_count; queue++) {
3144 		chan = priv->plat->rx_queues_cfg[queue].chan;
3145 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3146 	}
3147 }
3148 
3149 /**
3150  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3151  *  @priv: driver private structure
3152  *  Description: It is used for configuring the RX Queue Priority
3153  */
3154 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3155 {
3156 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3157 	u32 queue;
3158 	u32 prio;
3159 
3160 	for (queue = 0; queue < rx_queues_count; queue++) {
3161 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3162 			continue;
3163 
3164 		prio = priv->plat->rx_queues_cfg[queue].prio;
3165 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3166 	}
3167 }
3168 
3169 /**
3170  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3171  *  @priv: driver private structure
3172  *  Description: It is used for configuring the TX Queue Priority
3173  */
3174 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3175 {
3176 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3177 	u32 queue;
3178 	u32 prio;
3179 
3180 	for (queue = 0; queue < tx_queues_count; queue++) {
3181 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3182 			continue;
3183 
3184 		prio = priv->plat->tx_queues_cfg[queue].prio;
3185 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3186 	}
3187 }
3188 
3189 /**
3190  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3191  *  @priv: driver private structure
3192  *  Description: It is used for configuring the RX queue routing
3193  */
3194 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3195 {
3196 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3197 	u32 queue;
3198 	u8 packet;
3199 
3200 	for (queue = 0; queue < rx_queues_count; queue++) {
3201 		/* no specific packet type routing specified for the queue */
3202 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3203 			continue;
3204 
3205 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3206 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3207 	}
3208 }
3209 
3210 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3211 {
3212 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3213 		priv->rss.enable = false;
3214 		return;
3215 	}
3216 
3217 	if (priv->dev->features & NETIF_F_RXHASH)
3218 		priv->rss.enable = true;
3219 	else
3220 		priv->rss.enable = false;
3221 
3222 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3223 			     priv->plat->rx_queues_to_use);
3224 }
3225 
3226 /**
3227  *  stmmac_mtl_configuration - Configure MTL
3228  *  @priv: driver private structure
3229  *  Description: It is used for configuring MTL
3230  */
3231 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3232 {
3233 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3234 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3235 
3236 	if (tx_queues_count > 1)
3237 		stmmac_set_tx_queue_weight(priv);
3238 
3239 	/* Configure MTL RX algorithms */
3240 	if (rx_queues_count > 1)
3241 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3242 				priv->plat->rx_sched_algorithm);
3243 
3244 	/* Configure MTL TX algorithms */
3245 	if (tx_queues_count > 1)
3246 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3247 				priv->plat->tx_sched_algorithm);
3248 
3249 	/* Configure CBS in AVB TX queues */
3250 	if (tx_queues_count > 1)
3251 		stmmac_configure_cbs(priv);
3252 
3253 	/* Map RX MTL to DMA channels */
3254 	stmmac_rx_queue_dma_chan_map(priv);
3255 
3256 	/* Enable MAC RX Queues */
3257 	stmmac_mac_enable_rx_queues(priv);
3258 
3259 	/* Set RX priorities */
3260 	if (rx_queues_count > 1)
3261 		stmmac_mac_config_rx_queues_prio(priv);
3262 
3263 	/* Set TX priorities */
3264 	if (tx_queues_count > 1)
3265 		stmmac_mac_config_tx_queues_prio(priv);
3266 
3267 	/* Set RX routing */
3268 	if (rx_queues_count > 1)
3269 		stmmac_mac_config_rx_queues_routing(priv);
3270 
3271 	/* Receive Side Scaling */
3272 	if (rx_queues_count > 1)
3273 		stmmac_mac_config_rss(priv);
3274 }
3275 
3276 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3277 {
3278 	if (priv->dma_cap.asp) {
3279 		netdev_info(priv->dev, "Enabling Safety Features\n");
3280 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3281 					  priv->plat->safety_feat_cfg);
3282 	} else {
3283 		netdev_info(priv->dev, "No Safety Features support found\n");
3284 	}
3285 }
3286 
3287 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3288 {
3289 	char *name;
3290 
3291 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3292 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3293 
3294 	name = priv->wq_name;
3295 	sprintf(name, "%s-fpe", priv->dev->name);
3296 
3297 	priv->fpe_wq = create_singlethread_workqueue(name);
3298 	if (!priv->fpe_wq) {
3299 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3300 
3301 		return -ENOMEM;
3302 	}
3303 	netdev_info(priv->dev, "FPE workqueue start\n");
3304 
3305 	return 0;
3306 }
3307 
3308 /**
3309  * stmmac_hw_setup - setup mac in a usable state.
3310  *  @dev : pointer to the device structure.
3311  *  @ptp_register: register PTP if set
3312  *  Description:
3313  *  this is the main function used to set up the HW in a usable state: the
3314  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3315  *  checksum features, timers) and the DMA is then ready to start receiving
3316  *  and transmitting.
3317  *  Return value:
3318  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3319  *  file on failure.
3320  */
3321 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3322 {
3323 	struct stmmac_priv *priv = netdev_priv(dev);
3324 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3325 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3326 	bool sph_en;
3327 	u32 chan;
3328 	int ret;
3329 
3330 	/* DMA initialization and SW reset */
3331 	ret = stmmac_init_dma_engine(priv);
3332 	if (ret < 0) {
3333 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3334 			   __func__);
3335 		return ret;
3336 	}
3337 
3338 	/* Copy the MAC addr into the HW */
3339 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3340 
3341 	/* PS and related bits will be programmed according to the speed */
3342 	if (priv->hw->pcs) {
3343 		int speed = priv->plat->mac_port_sel_speed;
3344 
3345 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3346 		    (speed == SPEED_1000)) {
3347 			priv->hw->ps = speed;
3348 		} else {
3349 			dev_warn(priv->device, "invalid port speed\n");
3350 			priv->hw->ps = 0;
3351 		}
3352 	}
3353 
3354 	/* Initialize the MAC Core */
3355 	stmmac_core_init(priv, priv->hw, dev);
3356 
3357 	/* Initialize MTL */
3358 	stmmac_mtl_configuration(priv);
3359 
3360 	/* Initialize Safety Features */
3361 	stmmac_safety_feat_configuration(priv);
3362 
3363 	ret = stmmac_rx_ipc(priv, priv->hw);
3364 	if (!ret) {
3365 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3366 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3367 		priv->hw->rx_csum = 0;
3368 	}
3369 
3370 	/* Enable the MAC Rx/Tx */
3371 	stmmac_mac_set(priv, priv->ioaddr, true);
3372 
3373 	/* Set the HW DMA mode and the COE */
3374 	stmmac_dma_operation_mode(priv);
3375 
3376 	stmmac_mmc_setup(priv);
3377 
3378 	if (ptp_register) {
3379 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3380 		if (ret < 0)
3381 			netdev_warn(priv->dev,
3382 				    "failed to enable PTP reference clock: %pe\n",
3383 				    ERR_PTR(ret));
3384 	}
3385 
3386 	ret = stmmac_init_ptp(priv);
3387 	if (ret == -EOPNOTSUPP)
3388 		netdev_info(priv->dev, "PTP not supported by HW\n");
3389 	else if (ret)
3390 		netdev_warn(priv->dev, "PTP init failed\n");
3391 	else if (ptp_register)
3392 		stmmac_ptp_register(priv);
3393 
3394 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3395 
3396 	/* Convert the timer from msec to usec */
3397 	if (!priv->tx_lpi_timer)
3398 		priv->tx_lpi_timer = eee_timer * 1000;
3399 
3400 	if (priv->use_riwt) {
3401 		u32 queue;
3402 
3403 		for (queue = 0; queue < rx_cnt; queue++) {
3404 			if (!priv->rx_riwt[queue])
3405 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3406 
3407 			stmmac_rx_watchdog(priv, priv->ioaddr,
3408 					   priv->rx_riwt[queue], queue);
3409 		}
3410 	}
3411 
3412 	if (priv->hw->pcs)
3413 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3414 
3415 	/* set TX and RX rings length */
3416 	stmmac_set_rings_length(priv);
3417 
3418 	/* Enable TSO */
3419 	if (priv->tso) {
3420 		for (chan = 0; chan < tx_cnt; chan++) {
3421 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3422 
3423 			/* TSO and TBS cannot co-exist */
3424 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3425 				continue;
3426 
3427 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3428 		}
3429 	}
3430 
3431 	/* Enable Split Header */
3432 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3433 	for (chan = 0; chan < rx_cnt; chan++)
3434 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3435 
3437 	/* VLAN Tag Insertion */
3438 	if (priv->dma_cap.vlins)
3439 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3440 
3441 	/* TBS */
3442 	for (chan = 0; chan < tx_cnt; chan++) {
3443 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3444 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3445 
3446 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3447 	}
3448 
3449 	/* Configure real RX and TX queues */
3450 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3451 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3452 
3453 	/* Start the ball rolling... */
3454 	stmmac_start_all_dma(priv);
3455 
3456 	if (priv->dma_cap.fpesel) {
3457 		stmmac_fpe_start_wq(priv);
3458 
3459 		if (priv->plat->fpe_cfg->enable)
3460 			stmmac_fpe_handshake(priv, true);
3461 	}
3462 
3463 	return 0;
3464 }
3465 
3466 static void stmmac_hw_teardown(struct net_device *dev)
3467 {
3468 	struct stmmac_priv *priv = netdev_priv(dev);
3469 
3470 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3471 }
3472 
3473 static void stmmac_free_irq(struct net_device *dev,
3474 			    enum request_irq_err irq_err, int irq_idx)
3475 {
3476 	struct stmmac_priv *priv = netdev_priv(dev);
3477 	int j;
3478 
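	/* Free the IRQs in the reverse order in which they were requested;
	 * each case falls through so that a partially failed request path
	 * frees only the IRQs that were actually obtained.
	 */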
3479 	switch (irq_err) {
3480 	case REQ_IRQ_ERR_ALL:
3481 		irq_idx = priv->plat->tx_queues_to_use;
3482 		fallthrough;
3483 	case REQ_IRQ_ERR_TX:
3484 		for (j = irq_idx - 1; j >= 0; j--) {
3485 			if (priv->tx_irq[j] > 0) {
3486 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3487 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3488 			}
3489 		}
3490 		irq_idx = priv->plat->rx_queues_to_use;
3491 		fallthrough;
3492 	case REQ_IRQ_ERR_RX:
3493 		for (j = irq_idx - 1; j >= 0; j--) {
3494 			if (priv->rx_irq[j] > 0) {
3495 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3496 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3497 			}
3498 		}
3499 
3500 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3501 			free_irq(priv->sfty_ue_irq, dev);
3502 		fallthrough;
3503 	case REQ_IRQ_ERR_SFTY_UE:
3504 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3505 			free_irq(priv->sfty_ce_irq, dev);
3506 		fallthrough;
3507 	case REQ_IRQ_ERR_SFTY_CE:
3508 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3509 			free_irq(priv->lpi_irq, dev);
3510 		fallthrough;
3511 	case REQ_IRQ_ERR_LPI:
3512 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3513 			free_irq(priv->wol_irq, dev);
3514 		fallthrough;
3515 	case REQ_IRQ_ERR_WOL:
3516 		free_irq(dev->irq, dev);
3517 		fallthrough;
3518 	case REQ_IRQ_ERR_MAC:
3519 	case REQ_IRQ_ERR_NO:
3520 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3521 		break;
3522 	}
3523 }
3524 
3525 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3526 {
3527 	struct stmmac_priv *priv = netdev_priv(dev);
3528 	enum request_irq_err irq_err;
3529 	cpumask_t cpu_mask;
3530 	int irq_idx = 0;
3531 	char *int_name;
3532 	int ret;
3533 	int i;
3534 
3535 	/* For common interrupt */
3536 	int_name = priv->int_name_mac;
3537 	sprintf(int_name, "%s:%s", dev->name, "mac");
3538 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3539 			  0, int_name, dev);
3540 	if (unlikely(ret < 0)) {
3541 		netdev_err(priv->dev,
3542 			   "%s: alloc mac MSI %d (error: %d)\n",
3543 			   __func__, dev->irq, ret);
3544 		irq_err = REQ_IRQ_ERR_MAC;
3545 		goto irq_error;
3546 	}
3547 
3548 	/* Request the Wake IRQ in case another line
3549 	 * is used for WoL
3550 	 */
3551 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3552 		int_name = priv->int_name_wol;
3553 		sprintf(int_name, "%s:%s", dev->name, "wol");
3554 		ret = request_irq(priv->wol_irq,
3555 				  stmmac_mac_interrupt,
3556 				  0, int_name, dev);
3557 		if (unlikely(ret < 0)) {
3558 			netdev_err(priv->dev,
3559 				   "%s: alloc wol MSI %d (error: %d)\n",
3560 				   __func__, priv->wol_irq, ret);
3561 			irq_err = REQ_IRQ_ERR_WOL;
3562 			goto irq_error;
3563 		}
3564 	}
3565 
3566 	/* Request the LPI IRQ in case another line
3567 	 * is used for LPI
3568 	 */
3569 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3570 		int_name = priv->int_name_lpi;
3571 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3572 		ret = request_irq(priv->lpi_irq,
3573 				  stmmac_mac_interrupt,
3574 				  0, int_name, dev);
3575 		if (unlikely(ret < 0)) {
3576 			netdev_err(priv->dev,
3577 				   "%s: alloc lpi MSI %d (error: %d)\n",
3578 				   __func__, priv->lpi_irq, ret);
3579 			irq_err = REQ_IRQ_ERR_LPI;
3580 			goto irq_error;
3581 		}
3582 	}
3583 
3584 	/* Request the Safety Feature Correctable Error line in
3585 	 * case another line is used
3586 	 */
3587 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3588 		int_name = priv->int_name_sfty_ce;
3589 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3590 		ret = request_irq(priv->sfty_ce_irq,
3591 				  stmmac_safety_interrupt,
3592 				  0, int_name, dev);
3593 		if (unlikely(ret < 0)) {
3594 			netdev_err(priv->dev,
3595 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3596 				   __func__, priv->sfty_ce_irq, ret);
3597 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3598 			goto irq_error;
3599 		}
3600 	}
3601 
3602 	/* Request the Safety Feature Uncorrectable Error line in
3603 	 * case another line is used
3604 	 */
3605 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3606 		int_name = priv->int_name_sfty_ue;
3607 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3608 		ret = request_irq(priv->sfty_ue_irq,
3609 				  stmmac_safety_interrupt,
3610 				  0, int_name, dev);
3611 		if (unlikely(ret < 0)) {
3612 			netdev_err(priv->dev,
3613 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3614 				   __func__, priv->sfty_ue_irq, ret);
3615 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3616 			goto irq_error;
3617 		}
3618 	}
3619 
3620 	/* Request Rx MSI irq */
3621 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3622 		if (i >= MTL_MAX_RX_QUEUES)
3623 			break;
3624 		if (priv->rx_irq[i] == 0)
3625 			continue;
3626 
3627 		int_name = priv->int_name_rx_irq[i];
3628 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3629 		ret = request_irq(priv->rx_irq[i],
3630 				  stmmac_msi_intr_rx,
3631 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3632 		if (unlikely(ret < 0)) {
3633 			netdev_err(priv->dev,
3634 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3635 				   __func__, i, priv->rx_irq[i], ret);
3636 			irq_err = REQ_IRQ_ERR_RX;
3637 			irq_idx = i;
3638 			goto irq_error;
3639 		}
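		/* Spread the RX queue interrupts across the online CPUs with a
		 * round-robin affinity hint.
		 */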
3640 		cpumask_clear(&cpu_mask);
3641 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3642 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3643 	}
3644 
3645 	/* Request Tx MSI irq */
3646 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3647 		if (i >= MTL_MAX_TX_QUEUES)
3648 			break;
3649 		if (priv->tx_irq[i] == 0)
3650 			continue;
3651 
3652 		int_name = priv->int_name_tx_irq[i];
3653 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3654 		ret = request_irq(priv->tx_irq[i],
3655 				  stmmac_msi_intr_tx,
3656 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3657 		if (unlikely(ret < 0)) {
3658 			netdev_err(priv->dev,
3659 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3660 				   __func__, i, priv->tx_irq[i], ret);
3661 			irq_err = REQ_IRQ_ERR_TX;
3662 			irq_idx = i;
3663 			goto irq_error;
3664 		}
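		/* Same round-robin affinity hint for the TX queue interrupts */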
3665 		cpumask_clear(&cpu_mask);
3666 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3667 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3668 	}
3669 
3670 	return 0;
3671 
3672 irq_error:
3673 	stmmac_free_irq(dev, irq_err, irq_idx);
3674 	return ret;
3675 }
3676 
3677 static int stmmac_request_irq_single(struct net_device *dev)
3678 {
3679 	struct stmmac_priv *priv = netdev_priv(dev);
3680 	enum request_irq_err irq_err;
3681 	int ret;
3682 
3683 	ret = request_irq(dev->irq, stmmac_interrupt,
3684 			  IRQF_SHARED, dev->name, dev);
3685 	if (unlikely(ret < 0)) {
3686 		netdev_err(priv->dev,
3687 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3688 			   __func__, dev->irq, ret);
3689 		irq_err = REQ_IRQ_ERR_MAC;
3690 		goto irq_error;
3691 	}
3692 
3693 	/* Request the Wake IRQ in case another line
3694 	 * is used for WoL
3695 	 */
3696 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3697 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3698 				  IRQF_SHARED, dev->name, dev);
3699 		if (unlikely(ret < 0)) {
3700 			netdev_err(priv->dev,
3701 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3702 				   __func__, priv->wol_irq, ret);
3703 			irq_err = REQ_IRQ_ERR_WOL;
3704 			goto irq_error;
3705 		}
3706 	}
3707 
3708 	/* Request the LPI IRQ in case another line is used for LPI */
3709 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3710 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3711 				  IRQF_SHARED, dev->name, dev);
3712 		if (unlikely(ret < 0)) {
3713 			netdev_err(priv->dev,
3714 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3715 				   __func__, priv->lpi_irq, ret);
3716 			irq_err = REQ_IRQ_ERR_LPI;
3717 			goto irq_error;
3718 		}
3719 	}
3720 
3721 	return 0;
3722 
3723 irq_error:
3724 	stmmac_free_irq(dev, irq_err, 0);
3725 	return ret;
3726 }
3727 
3728 static int stmmac_request_irq(struct net_device *dev)
3729 {
3730 	struct stmmac_priv *priv = netdev_priv(dev);
3731 	int ret;
3732 
3733 	/* Request the IRQ lines */
3734 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3735 		ret = stmmac_request_irq_multi_msi(dev);
3736 	else
3737 		ret = stmmac_request_irq_single(dev);
3738 
3739 	return ret;
3740 }
3741 
3742 /**
3743  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3744  *  @priv: driver private structure
3745  *  @mtu: MTU to setup the dma queue and buf with
3746  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3747  *  Allocate the Tx/Rx DMA queues and initialize them.
3748  *  Return value:
3749  *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3750  */
3751 static struct stmmac_dma_conf *
3752 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3753 {
3754 	struct stmmac_dma_conf *dma_conf;
3755 	int chan, bfsize, ret;
3756 
3757 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3758 	if (!dma_conf) {
3759 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3760 			   __func__);
3761 		return ERR_PTR(-ENOMEM);
3762 	}
3763 
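	/* Pick the RX buffer size: prefer the 16 KiB setting when the helper
	 * provides one, otherwise derive the size from the MTU.
	 */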
3764 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3765 	if (bfsize < 0)
3766 		bfsize = 0;
3767 
3768 	if (bfsize < BUF_SIZE_16KiB)
3769 		bfsize = stmmac_set_bfsize(mtu, 0);
3770 
3771 	dma_conf->dma_buf_sz = bfsize;
3772 	/* Choose the tx/rx ring sizes from the ones already defined in the
3773 	 * priv struct, if any.
3774 	 */
3775 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3776 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3777 
3778 	if (!dma_conf->dma_tx_size)
3779 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3780 	if (!dma_conf->dma_rx_size)
3781 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3782 
3783 	/* Earlier check for TBS */
3784 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3785 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3786 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3787 
3788 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3789 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3790 	}
3791 
3792 	ret = alloc_dma_desc_resources(priv, dma_conf);
3793 	if (ret < 0) {
3794 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3795 			   __func__);
3796 		goto alloc_error;
3797 	}
3798 
3799 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3800 	if (ret < 0) {
3801 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3802 			   __func__);
3803 		goto init_error;
3804 	}
3805 
3806 	return dma_conf;
3807 
3808 init_error:
3809 	free_dma_desc_resources(priv, dma_conf);
3810 alloc_error:
3811 	kfree(dma_conf);
3812 	return ERR_PTR(ret);
3813 }
3814 
3815 /**
3816  *  __stmmac_open - open entry point of the driver
3817  *  @dev : pointer to the device structure.
3818  *  @dma_conf :  structure to take the dma data
3819  *  Description:
3820  *  This function is the open entry point of the driver.
3821  *  Return value:
3822  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3823  *  file on failure.
3824  */
3825 static int __stmmac_open(struct net_device *dev,
3826 			 struct stmmac_dma_conf *dma_conf)
3827 {
3828 	struct stmmac_priv *priv = netdev_priv(dev);
3829 	int mode = priv->plat->phy_interface;
3830 	u32 chan;
3831 	int ret;
3832 
3833 	ret = pm_runtime_resume_and_get(priv->device);
3834 	if (ret < 0)
3835 		return ret;
3836 
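	/* Attach a PHY only when the link is not fully handled by a PCS
	 * (TBI/RTBI, a C73-mode XPCS or a Lynx PCS).
	 */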
3837 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3838 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3839 	    (!priv->hw->xpcs ||
3840 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3841 	    !priv->hw->lynx_pcs) {
3842 		ret = stmmac_init_phy(dev);
3843 		if (ret) {
3844 			netdev_err(priv->dev,
3845 				   "%s: Cannot attach to PHY (error: %d)\n",
3846 				   __func__, ret);
3847 			goto init_phy_error;
3848 		}
3849 	}
3850 
3851 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3852 
3853 	buf_sz = dma_conf->dma_buf_sz;
3854 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3855 
3856 	stmmac_reset_queues_param(priv);
3857 
3858 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3859 	    priv->plat->serdes_powerup) {
3860 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3861 		if (ret < 0) {
3862 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3863 				   __func__);
3864 			goto init_error;
3865 		}
3866 	}
3867 
3868 	ret = stmmac_hw_setup(dev, true);
3869 	if (ret < 0) {
3870 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3871 		goto init_error;
3872 	}
3873 
3874 	stmmac_init_coalesce(priv);
3875 
3876 	phylink_start(priv->phylink);
3877 	/* We may have called phylink_speed_down before */
3878 	phylink_speed_up(priv->phylink);
3879 
3880 	ret = stmmac_request_irq(dev);
3881 	if (ret)
3882 		goto irq_error;
3883 
3884 	stmmac_enable_all_queues(priv);
3885 	netif_tx_start_all_queues(priv->dev);
3886 	stmmac_enable_all_dma_irq(priv);
3887 
3888 	return 0;
3889 
3890 irq_error:
3891 	phylink_stop(priv->phylink);
3892 
3893 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3894 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3895 
3896 	stmmac_hw_teardown(dev);
3897 init_error:
3898 	phylink_disconnect_phy(priv->phylink);
3899 init_phy_error:
3900 	pm_runtime_put(priv->device);
3901 	return ret;
3902 }
3903 
3904 static int stmmac_open(struct net_device *dev)
3905 {
3906 	struct stmmac_priv *priv = netdev_priv(dev);
3907 	struct stmmac_dma_conf *dma_conf;
3908 	int ret;
3909 
3910 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3911 	if (IS_ERR(dma_conf))
3912 		return PTR_ERR(dma_conf);
3913 
3914 	ret = __stmmac_open(dev, dma_conf);
3915 	if (ret)
3916 		free_dma_desc_resources(priv, dma_conf);
3917 
3918 	kfree(dma_conf);
3919 	return ret;
3920 }
3921 
3922 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3923 {
3924 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3925 
3926 	if (priv->fpe_wq)
3927 		destroy_workqueue(priv->fpe_wq);
3928 
3929 	netdev_info(priv->dev, "FPE workqueue stop");
3930 }
3931 
3932 /**
3933  *  stmmac_release - close entry point of the driver
3934  *  @dev : device pointer.
3935  *  Description:
3936  *  This is the stop entry point of the driver.
3937  */
3938 static int stmmac_release(struct net_device *dev)
3939 {
3940 	struct stmmac_priv *priv = netdev_priv(dev);
3941 	u32 chan;
3942 
3943 	if (device_may_wakeup(priv->device))
3944 		phylink_speed_down(priv->phylink, false);
3945 	/* Stop and disconnect the PHY */
3946 	phylink_stop(priv->phylink);
3947 	phylink_disconnect_phy(priv->phylink);
3948 
3949 	stmmac_disable_all_queues(priv);
3950 
3951 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3952 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3953 
3954 	netif_tx_disable(dev);
3955 
3956 	/* Free the IRQ lines */
3957 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3958 
3959 	if (priv->eee_enabled) {
3960 		priv->tx_path_in_lpi_mode = false;
3961 		del_timer_sync(&priv->eee_ctrl_timer);
3962 	}
3963 
3964 	/* Stop TX/RX DMA and clear the descriptors */
3965 	stmmac_stop_all_dma(priv);
3966 
3967 	/* Release and free the Rx/Tx resources */
3968 	free_dma_desc_resources(priv, &priv->dma_conf);
3969 
3970 	/* Disable the MAC Rx/Tx */
3971 	stmmac_mac_set(priv, priv->ioaddr, false);
3972 
3973 	/* Powerdown Serdes if there is */
3974 	if (priv->plat->serdes_powerdown)
3975 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3976 
3977 	netif_carrier_off(dev);
3978 
3979 	stmmac_release_ptp(priv);
3980 
3981 	pm_runtime_put(priv->device);
3982 
3983 	if (priv->dma_cap.fpesel)
3984 		stmmac_fpe_stop_wq(priv);
3985 
3986 	return 0;
3987 }
3988 
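/* Program hardware VLAN tag insertion through a TX context descriptor.
 * Returns true when a descriptor was consumed for the tag.
 */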
3989 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3990 			       struct stmmac_tx_queue *tx_q)
3991 {
3992 	u16 tag = 0x0, inner_tag = 0x0;
3993 	u32 inner_type = 0x0;
3994 	struct dma_desc *p;
3995 
3996 	if (!priv->dma_cap.vlins)
3997 		return false;
3998 	if (!skb_vlan_tag_present(skb))
3999 		return false;
4000 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4001 		inner_tag = skb_vlan_tag_get(skb);
4002 		inner_type = STMMAC_VLAN_INSERT;
4003 	}
4004 
4005 	tag = skb_vlan_tag_get(skb);
4006 
4007 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4008 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4009 	else
4010 		p = &tx_q->dma_tx[tx_q->cur_tx];
4011 
4012 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4013 		return false;
4014 
4015 	stmmac_set_tx_owner(priv, p);
4016 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4017 	return true;
4018 }
4019 
4020 /**
4021  *  stmmac_tso_allocator - Fill TSO descriptors for a payload buffer
4022  *  @priv: driver private structure
4023  *  @des: buffer start address
4024  *  @total_len: total length to fill in descriptors
4025  *  @last_segment: condition for the last descriptor
4026  *  @queue: TX queue index
4027  *  Description:
4028  *  This function fills descriptors and requests new descriptors according to
4029  *  the buffer length to fill.
4030  */
4031 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4032 				 int total_len, bool last_segment, u32 queue)
4033 {
4034 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4035 	struct dma_desc *desc;
4036 	u32 buff_size;
4037 	int tmp_len;
4038 
4039 	tmp_len = total_len;
4040 
4041 	while (tmp_len > 0) {
4042 		dma_addr_t curr_addr;
4043 
4044 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4045 						priv->dma_conf.dma_tx_size);
4046 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4047 
4048 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4049 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4050 		else
4051 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4052 
4053 		curr_addr = des + (total_len - tmp_len);
4054 		if (priv->dma_cap.addr64 <= 32)
4055 			desc->des0 = cpu_to_le32(curr_addr);
4056 		else
4057 			stmmac_set_desc_addr(priv, desc, curr_addr);
4058 
4059 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4060 			    TSO_MAX_BUFF_SIZE : tmp_len;
4061 
4062 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4063 				0, 1,
4064 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4065 				0, 0);
4066 
4067 		tmp_len -= TSO_MAX_BUFF_SIZE;
4068 	}
4069 }
4070 
4071 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4072 {
4073 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4074 	int desc_size;
4075 
4076 	if (likely(priv->extend_desc))
4077 		desc_size = sizeof(struct dma_extended_desc);
4078 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4079 		desc_size = sizeof(struct dma_edesc);
4080 	else
4081 		desc_size = sizeof(struct dma_desc);
4082 
4083 	/* The own bit must be the last thing written when preparing the
4084 	 * descriptor, and a barrier is needed to make sure that all writes
4085 	 * are coherent before handing the descriptor to the DMA engine.
4086 	 */
4087 	wmb();
4088 
4089 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4090 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4091 }
4092 
4093 /**
4094  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4095  *  @skb : the socket buffer
4096  *  @dev : device pointer
4097  *  Description: this is the transmit function that is called on TSO frames
4098  *  (support available on GMAC4 and newer chips).
4099  *  The diagram below shows the ring programming in the case of TSO frames:
4100  *
4101  *  First Descriptor
4102  *   --------
4103  *   | DES0 |---> buffer1 = L2/L3/L4 header
4104  *   | DES1 |---> TCP Payload (can continue on next descr...)
4105  *   | DES2 |---> buffer 1 and 2 len
4106  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4107  *   --------
4108  *	|
4109  *     ...
4110  *	|
4111  *   --------
4112  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4113  *   | DES1 | --|
4114  *   | DES2 | --> buffer 1 and 2 len
4115  *   | DES3 |
4116  *   --------
4117  *
4118  * The MSS is fixed while TSO is enabled, so the TDES3 context field is only reprogrammed when it changes.
4119  */
4120 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4121 {
4122 	struct dma_desc *desc, *first, *mss_desc = NULL;
4123 	struct stmmac_priv *priv = netdev_priv(dev);
4124 	int nfrags = skb_shinfo(skb)->nr_frags;
4125 	u32 queue = skb_get_queue_mapping(skb);
4126 	unsigned int first_entry, tx_packets;
4127 	struct stmmac_txq_stats *txq_stats;
4128 	int tmp_pay_len = 0, first_tx;
4129 	struct stmmac_tx_queue *tx_q;
4130 	bool has_vlan, set_ic;
4131 	u8 proto_hdr_len, hdr;
4132 	unsigned long flags;
4133 	u32 pay_len, mss;
4134 	dma_addr_t des;
4135 	int i;
4136 
4137 	tx_q = &priv->dma_conf.tx_queue[queue];
4138 	txq_stats = &priv->xstats.txq_stats[queue];
4139 	first_tx = tx_q->cur_tx;
4140 
4141 	/* Compute header lengths */
4142 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4143 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4144 		hdr = sizeof(struct udphdr);
4145 	} else {
4146 		proto_hdr_len = skb_tcp_all_headers(skb);
4147 		hdr = tcp_hdrlen(skb);
4148 	}
4149 
4150 	/* Desc availability based on the threshold should be safe enough */
4151 	if (unlikely(stmmac_tx_avail(priv, queue) <
4152 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4153 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4154 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4155 								queue));
4156 			/* This is a hard error, log it. */
4157 			netdev_err(priv->dev,
4158 				   "%s: Tx Ring full when queue awake\n",
4159 				   __func__);
4160 		}
4161 		return NETDEV_TX_BUSY;
4162 	}
4163 
4164 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4165 
4166 	mss = skb_shinfo(skb)->gso_size;
4167 
4168 	/* set new MSS value if needed */
4169 	if (mss != tx_q->mss) {
4170 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4171 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4172 		else
4173 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4174 
4175 		stmmac_set_mss(priv, mss_desc, mss);
4176 		tx_q->mss = mss;
4177 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4178 						priv->dma_conf.dma_tx_size);
4179 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4180 	}
4181 
4182 	if (netif_msg_tx_queued(priv)) {
4183 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4184 			__func__, hdr, proto_hdr_len, pay_len, mss);
4185 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4186 			skb->data_len);
4187 	}
4188 
4189 	/* Check if VLAN can be inserted by HW */
4190 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4191 
4192 	first_entry = tx_q->cur_tx;
4193 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4194 
4195 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4196 		desc = &tx_q->dma_entx[first_entry].basic;
4197 	else
4198 		desc = &tx_q->dma_tx[first_entry];
4199 	first = desc;
4200 
4201 	if (has_vlan)
4202 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4203 
4204 	/* first descriptor: fill Headers on Buf1 */
4205 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4206 			     DMA_TO_DEVICE);
4207 	if (dma_mapping_error(priv->device, des))
4208 		goto dma_map_err;
4209 
4210 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4211 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4212 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4213 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4214 
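	/* With 32-bit DMA addressing, the first descriptor carries the header
	 * in buffer 1 and the start of the payload in buffer 2; with wider
	 * addressing the first descriptor carries only the header and the
	 * whole payload is mapped by stmmac_tso_allocator().
	 */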
4215 	if (priv->dma_cap.addr64 <= 32) {
4216 		first->des0 = cpu_to_le32(des);
4217 
4218 		/* Fill start of payload in buff2 of first descriptor */
4219 		if (pay_len)
4220 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4221 
4222 		/* If needed take extra descriptors to fill the remaining payload */
4223 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4224 	} else {
4225 		stmmac_set_desc_addr(priv, first, des);
4226 		tmp_pay_len = pay_len;
4227 		des += proto_hdr_len;
4228 		pay_len = 0;
4229 	}
4230 
4231 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4232 
4233 	/* Prepare fragments */
4234 	for (i = 0; i < nfrags; i++) {
4235 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4236 
4237 		des = skb_frag_dma_map(priv->device, frag, 0,
4238 				       skb_frag_size(frag),
4239 				       DMA_TO_DEVICE);
4240 		if (dma_mapping_error(priv->device, des))
4241 			goto dma_map_err;
4242 
4243 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4244 				     (i == nfrags - 1), queue);
4245 
4246 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4247 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4248 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4249 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4250 	}
4251 
4252 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4253 
4254 	/* Only the last descriptor gets to point to the skb. */
4255 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4256 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4257 
4258 	/* Manage tx mitigation */
4259 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4260 	tx_q->tx_count_frames += tx_packets;
4261 
4262 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4263 		set_ic = true;
4264 	else if (!priv->tx_coal_frames[queue])
4265 		set_ic = false;
4266 	else if (tx_packets > priv->tx_coal_frames[queue])
4267 		set_ic = true;
4268 	else if ((tx_q->tx_count_frames %
4269 		  priv->tx_coal_frames[queue]) < tx_packets)
4270 		set_ic = true;
4271 	else
4272 		set_ic = false;
4273 
4274 	if (set_ic) {
4275 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4276 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4277 		else
4278 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4279 
4280 		tx_q->tx_count_frames = 0;
4281 		stmmac_set_tx_ic(priv, desc);
4282 	}
4283 
4284 	/* We've used all descriptors we need for this skb, however,
4285 	 * advance cur_tx so that it references a fresh descriptor.
4286 	 * ndo_start_xmit will fill this descriptor the next time it's
4287 	 * called and stmmac_tx_clean may clean up to this descriptor.
4288 	 */
4289 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4290 
4291 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4292 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4293 			  __func__);
4294 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4295 	}
4296 
4297 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4298 	txq_stats->tx_bytes += skb->len;
4299 	txq_stats->tx_tso_frames++;
4300 	txq_stats->tx_tso_nfrags += nfrags;
4301 	if (set_ic)
4302 		txq_stats->tx_set_ic_bit++;
4303 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4304 
4305 	if (priv->sarc_type)
4306 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4307 
4308 	skb_tx_timestamp(skb);
4309 
4310 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4311 		     priv->hwts_tx_en)) {
4312 		/* declare that device is doing timestamping */
4313 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4314 		stmmac_enable_tx_timestamp(priv, first);
4315 	}
4316 
4317 	/* Complete the first descriptor before granting the DMA */
4318 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4319 			proto_hdr_len,
4320 			pay_len,
4321 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4322 			hdr / 4, (skb->len - proto_hdr_len));
4323 
4324 	/* If context desc is used to change MSS */
4325 	if (mss_desc) {
4326 		/* Make sure that the first descriptor has been completely
4327 		 * written, including its own bit. This is because the MSS
4328 		 * context descriptor actually comes before the first descriptor,
4329 		 * so we need to make sure its own bit is the last thing written.
4330 		 */
4331 		dma_wmb();
4332 		stmmac_set_tx_owner(priv, mss_desc);
4333 	}
4334 
4335 	if (netif_msg_pktdata(priv)) {
4336 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4337 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4338 			tx_q->cur_tx, first, nfrags);
4339 		pr_info(">>> frame to be transmitted: ");
4340 		print_pkt(skb->data, skb_headlen(skb));
4341 	}
4342 
4343 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4344 
4345 	stmmac_flush_tx_descriptors(priv, queue);
4346 	stmmac_tx_timer_arm(priv, queue);
4347 
4348 	return NETDEV_TX_OK;
4349 
4350 dma_map_err:
4351 	dev_err(priv->device, "Tx dma map failed\n");
4352 	dev_kfree_skb(skb);
4353 	priv->xstats.tx_dropped++;
4354 	return NETDEV_TX_OK;
4355 }
4356 
4357 /**
4358  *  stmmac_xmit - Tx entry point of the driver
4359  *  @skb : the socket buffer
4360  *  @dev : device pointer
4361  *  Description : this is the tx entry point of the driver.
4362  *  It programs the chain or the ring and supports oversized frames
4363  *  and SG feature.
4364  */
4365 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4366 {
4367 	unsigned int first_entry, tx_packets, enh_desc;
4368 	struct stmmac_priv *priv = netdev_priv(dev);
4369 	unsigned int nopaged_len = skb_headlen(skb);
4370 	int i, csum_insertion = 0, is_jumbo = 0;
4371 	u32 queue = skb_get_queue_mapping(skb);
4372 	int nfrags = skb_shinfo(skb)->nr_frags;
4373 	int gso = skb_shinfo(skb)->gso_type;
4374 	struct stmmac_txq_stats *txq_stats;
4375 	struct dma_edesc *tbs_desc = NULL;
4376 	struct dma_desc *desc, *first;
4377 	struct stmmac_tx_queue *tx_q;
4378 	bool has_vlan, set_ic;
4379 	int entry, first_tx;
4380 	unsigned long flags;
4381 	dma_addr_t des;
4382 
4383 	tx_q = &priv->dma_conf.tx_queue[queue];
4384 	txq_stats = &priv->xstats.txq_stats[queue];
4385 	first_tx = tx_q->cur_tx;
4386 
4387 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4388 		stmmac_disable_eee_mode(priv);
4389 
4390 	/* Manage oversized TCP frames for GMAC4 device */
4391 	if (skb_is_gso(skb) && priv->tso) {
4392 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4393 			return stmmac_tso_xmit(skb, dev);
4394 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4395 			return stmmac_tso_xmit(skb, dev);
4396 	}
4397 
4398 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4399 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4400 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4401 								queue));
4402 			/* This is a hard error, log it. */
4403 			netdev_err(priv->dev,
4404 				   "%s: Tx Ring full when queue awake\n",
4405 				   __func__);
4406 		}
4407 		return NETDEV_TX_BUSY;
4408 	}
4409 
4410 	/* Check if VLAN can be inserted by HW */
4411 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4412 
4413 	entry = tx_q->cur_tx;
4414 	first_entry = entry;
4415 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4416 
4417 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4418 
4419 	if (likely(priv->extend_desc))
4420 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4421 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4422 		desc = &tx_q->dma_entx[entry].basic;
4423 	else
4424 		desc = tx_q->dma_tx + entry;
4425 
4426 	first = desc;
4427 
4428 	if (has_vlan)
4429 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4430 
4431 	enh_desc = priv->plat->enh_desc;
4432 	/* To program the descriptors according to the size of the frame */
4433 	if (enh_desc)
4434 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4435 
4436 	if (unlikely(is_jumbo)) {
4437 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4438 		if (unlikely(entry < 0) && (entry != -EINVAL))
4439 			goto dma_map_err;
4440 	}
4441 
4442 	for (i = 0; i < nfrags; i++) {
4443 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4444 		int len = skb_frag_size(frag);
4445 		bool last_segment = (i == (nfrags - 1));
4446 
4447 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4448 		WARN_ON(tx_q->tx_skbuff[entry]);
4449 
4450 		if (likely(priv->extend_desc))
4451 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4452 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4453 			desc = &tx_q->dma_entx[entry].basic;
4454 		else
4455 			desc = tx_q->dma_tx + entry;
4456 
4457 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4458 				       DMA_TO_DEVICE);
4459 		if (dma_mapping_error(priv->device, des))
4460 			goto dma_map_err; /* should reuse desc w/o issues */
4461 
4462 		tx_q->tx_skbuff_dma[entry].buf = des;
4463 
4464 		stmmac_set_desc_addr(priv, desc, des);
4465 
4466 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4467 		tx_q->tx_skbuff_dma[entry].len = len;
4468 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4469 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4470 
4471 		/* Prepare the descriptor and set the own bit too */
4472 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4473 				priv->mode, 1, last_segment, skb->len);
4474 	}
4475 
4476 	/* Only the last descriptor gets to point to the skb. */
4477 	tx_q->tx_skbuff[entry] = skb;
4478 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4479 
4480 	/* According to the coalesce parameter, the IC bit for the last
4481 	 * segment is reset and the timer is restarted to clean the tx status.
4482 	 * This approach takes care of the fragments: desc is the first
4483 	 * element when there is no SG.
4484 	 */
4485 	tx_packets = (entry + 1) - first_tx;
4486 	tx_q->tx_count_frames += tx_packets;
4487 
4488 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4489 		set_ic = true;
4490 	else if (!priv->tx_coal_frames[queue])
4491 		set_ic = false;
4492 	else if (tx_packets > priv->tx_coal_frames[queue])
4493 		set_ic = true;
4494 	else if ((tx_q->tx_count_frames %
4495 		  priv->tx_coal_frames[queue]) < tx_packets)
4496 		set_ic = true;
4497 	else
4498 		set_ic = false;
4499 
4500 	if (set_ic) {
4501 		if (likely(priv->extend_desc))
4502 			desc = &tx_q->dma_etx[entry].basic;
4503 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4504 			desc = &tx_q->dma_entx[entry].basic;
4505 		else
4506 			desc = &tx_q->dma_tx[entry];
4507 
4508 		tx_q->tx_count_frames = 0;
4509 		stmmac_set_tx_ic(priv, desc);
4510 	}
4511 
4512 	/* We've used all descriptors we need for this skb, however,
4513 	 * advance cur_tx so that it references a fresh descriptor.
4514 	 * ndo_start_xmit will fill this descriptor the next time it's
4515 	 * called and stmmac_tx_clean may clean up to this descriptor.
4516 	 */
4517 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4518 	tx_q->cur_tx = entry;
4519 
4520 	if (netif_msg_pktdata(priv)) {
4521 		netdev_dbg(priv->dev,
4522 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4523 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4524 			   entry, first, nfrags);
4525 
4526 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4527 		print_pkt(skb->data, skb->len);
4528 	}
4529 
4530 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4531 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4532 			  __func__);
4533 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4534 	}
4535 
4536 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4537 	txq_stats->tx_bytes += skb->len;
4538 	if (set_ic)
4539 		txq_stats->tx_set_ic_bit++;
4540 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4541 
4542 	if (priv->sarc_type)
4543 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4544 
4545 	skb_tx_timestamp(skb);
4546 
4547 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4548 	 * problems because all the descriptors are actually ready to be
4549 	 * passed to the DMA engine.
4550 	 */
4551 	if (likely(!is_jumbo)) {
4552 		bool last_segment = (nfrags == 0);
4553 
4554 		des = dma_map_single(priv->device, skb->data,
4555 				     nopaged_len, DMA_TO_DEVICE);
4556 		if (dma_mapping_error(priv->device, des))
4557 			goto dma_map_err;
4558 
4559 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4560 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4561 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4562 
4563 		stmmac_set_desc_addr(priv, first, des);
4564 
4565 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4566 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4567 
4568 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4569 			     priv->hwts_tx_en)) {
4570 			/* declare that device is doing timestamping */
4571 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4572 			stmmac_enable_tx_timestamp(priv, first);
4573 		}
4574 
4575 		/* Prepare the first descriptor setting the OWN bit too */
4576 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4577 				csum_insertion, priv->mode, 0, last_segment,
4578 				skb->len);
4579 	}
4580 
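	/* When TBS is enabled for this queue, program the frame's launch time
	 * (skb->tstamp) into the enhanced descriptor.
	 */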
4581 	if (tx_q->tbs & STMMAC_TBS_EN) {
4582 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4583 
4584 		tbs_desc = &tx_q->dma_entx[first_entry];
4585 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4586 	}
4587 
4588 	stmmac_set_tx_owner(priv, first);
4589 
4590 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4591 
4592 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4593 
4594 	stmmac_flush_tx_descriptors(priv, queue);
4595 	stmmac_tx_timer_arm(priv, queue);
4596 
4597 	return NETDEV_TX_OK;
4598 
4599 dma_map_err:
4600 	netdev_err(priv->dev, "Tx DMA map failed\n");
4601 	dev_kfree_skb(skb);
4602 	priv->xstats.tx_dropped++;
4603 	return NETDEV_TX_OK;
4604 }
4605 
4606 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4607 {
4608 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4609 	__be16 vlan_proto = veth->h_vlan_proto;
4610 	u16 vlanid;
4611 
4612 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4613 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4614 	    (vlan_proto == htons(ETH_P_8021AD) &&
4615 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4616 		/* pop the vlan tag */
4617 		vlanid = ntohs(veth->h_vlan_TCI);
4618 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4619 		skb_pull(skb, VLAN_HLEN);
4620 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4621 	}
4622 }
4623 
4624 /**
4625  * stmmac_rx_refill - refill used skb preallocated buffers
4626  * @priv: driver private structure
4627  * @queue: RX queue index
4628  * Description: this is to reallocate the RX buffers for the reception process,
4629  * which is based on zero-copy.
4630  */
4631 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4632 {
4633 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4634 	int dirty = stmmac_rx_dirty(priv, queue);
4635 	unsigned int entry = rx_q->dirty_rx;
4636 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4637 
4638 	if (priv->dma_cap.host_dma_width <= 32)
4639 		gfp |= GFP_DMA32;
4640 
4641 	while (dirty-- > 0) {
4642 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4643 		struct dma_desc *p;
4644 		bool use_rx_wd;
4645 
4646 		if (priv->extend_desc)
4647 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4648 		else
4649 			p = rx_q->dma_rx + entry;
4650 
4651 		if (!buf->page) {
4652 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4653 			if (!buf->page)
4654 				break;
4655 		}
4656 
4657 		if (priv->sph && !buf->sec_page) {
4658 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4659 			if (!buf->sec_page)
4660 				break;
4661 
4662 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4663 		}
4664 
4665 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4666 
4667 		stmmac_set_desc_addr(priv, p, buf->addr);
4668 		if (priv->sph)
4669 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4670 		else
4671 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4672 		stmmac_refill_desc3(priv, rx_q, p);
4673 
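		/* Bump the frame coalescing counter and decide whether this
		 * descriptor should rely on the RX watchdog (RIWT) instead of
		 * an immediate completion interrupt.
		 */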
4674 		rx_q->rx_count_frames++;
4675 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4676 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4677 			rx_q->rx_count_frames = 0;
4678 
4679 		use_rx_wd = !priv->rx_coal_frames[queue];
4680 		use_rx_wd |= rx_q->rx_count_frames > 0;
4681 		if (!priv->use_riwt)
4682 			use_rx_wd = false;
4683 
4684 		dma_wmb();
4685 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4686 
4687 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4688 	}
4689 	rx_q->dirty_rx = entry;
4690 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4691 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4692 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4693 }
4694 
4695 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4696 				       struct dma_desc *p,
4697 				       int status, unsigned int len)
4698 {
4699 	unsigned int plen = 0, hlen = 0;
4700 	int coe = priv->hw->rx_csum;
4701 
4702 	/* Not first descriptor, buffer is always zero */
4703 	if (priv->sph && len)
4704 		return 0;
4705 
4706 	/* First descriptor, get split header length */
4707 	stmmac_get_rx_header_len(priv, p, &hlen);
4708 	if (priv->sph && hlen) {
4709 		priv->xstats.rx_split_hdr_pkt_n++;
4710 		return hlen;
4711 	}
4712 
4713 	/* First descriptor, not last descriptor and not split header */
4714 	if (status & rx_not_ls)
4715 		return priv->dma_conf.dma_buf_sz;
4716 
4717 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4718 
4719 	/* First descriptor and last descriptor and not split header */
4720 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4721 }
4722 
4723 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4724 				       struct dma_desc *p,
4725 				       int status, unsigned int len)
4726 {
4727 	int coe = priv->hw->rx_csum;
4728 	unsigned int plen = 0;
4729 
4730 	/* Not split header, buffer is not available */
4731 	if (!priv->sph)
4732 		return 0;
4733 
4734 	/* Not last descriptor */
4735 	if (status & rx_not_ls)
4736 		return priv->dma_conf.dma_buf_sz;
4737 
4738 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4739 
4740 	/* Last descriptor */
4741 	return plen - len;
4742 }
4743 
4744 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4745 				struct xdp_frame *xdpf, bool dma_map)
4746 {
4747 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4748 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4749 	unsigned int entry = tx_q->cur_tx;
4750 	struct dma_desc *tx_desc;
4751 	dma_addr_t dma_addr;
4752 	bool set_ic;
4753 
4754 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4755 		return STMMAC_XDP_CONSUMED;
4756 
4757 	if (likely(priv->extend_desc))
4758 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4759 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4760 		tx_desc = &tx_q->dma_entx[entry].basic;
4761 	else
4762 		tx_desc = tx_q->dma_tx + entry;
4763 
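	/* dma_map is true on the ndo_xdp_xmit path, where the frame must be
	 * freshly mapped; on the XDP_TX path the page comes from the
	 * page_pool and is already DMA mapped, so only a sync is needed.
	 */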
4764 	if (dma_map) {
4765 		dma_addr = dma_map_single(priv->device, xdpf->data,
4766 					  xdpf->len, DMA_TO_DEVICE);
4767 		if (dma_mapping_error(priv->device, dma_addr))
4768 			return STMMAC_XDP_CONSUMED;
4769 
4770 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4771 	} else {
4772 		struct page *page = virt_to_page(xdpf->data);
4773 
4774 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4775 			   xdpf->headroom;
4776 		dma_sync_single_for_device(priv->device, dma_addr,
4777 					   xdpf->len, DMA_BIDIRECTIONAL);
4778 
4779 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4780 	}
4781 
4782 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4783 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4784 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4785 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4786 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4787 
4788 	tx_q->xdpf[entry] = xdpf;
4789 
4790 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4791 
4792 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4793 			       true, priv->mode, true, true,
4794 			       xdpf->len);
4795 
4796 	tx_q->tx_count_frames++;
4797 
4798 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4799 		set_ic = true;
4800 	else
4801 		set_ic = false;
4802 
4803 	if (set_ic) {
4804 		unsigned long flags;
4805 		tx_q->tx_count_frames = 0;
4806 		stmmac_set_tx_ic(priv, tx_desc);
4807 		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4808 		txq_stats->tx_set_ic_bit++;
4809 		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4810 	}
4811 
4812 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4813 
4814 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4815 	tx_q->cur_tx = entry;
4816 
4817 	return STMMAC_XDP_TX;
4818 }
4819 
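/* Map the current CPU to a TX queue index, wrapping around when there are
 * fewer TX queues than CPUs.
 */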
4820 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4821 				   int cpu)
4822 {
4823 	int index = cpu;
4824 
4825 	if (unlikely(index < 0))
4826 		index = 0;
4827 
4828 	while (index >= priv->plat->tx_queues_to_use)
4829 		index -= priv->plat->tx_queues_to_use;
4830 
4831 	return index;
4832 }
4833 
4834 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4835 				struct xdp_buff *xdp)
4836 {
4837 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4838 	int cpu = smp_processor_id();
4839 	struct netdev_queue *nq;
4840 	int queue;
4841 	int res;
4842 
4843 	if (unlikely(!xdpf))
4844 		return STMMAC_XDP_CONSUMED;
4845 
4846 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4847 	nq = netdev_get_tx_queue(priv->dev, queue);
4848 
4849 	__netif_tx_lock(nq, cpu);
4850 	/* Avoids TX time-out as we are sharing with slow path */
4851 	txq_trans_cond_update(nq);
4852 
4853 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4854 	if (res == STMMAC_XDP_TX)
4855 		stmmac_flush_tx_descriptors(priv, queue);
4856 
4857 	__netif_tx_unlock(nq);
4858 
4859 	return res;
4860 }
4861 
4862 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4863 				 struct bpf_prog *prog,
4864 				 struct xdp_buff *xdp)
4865 {
4866 	u32 act;
4867 	int res;
4868 
4869 	act = bpf_prog_run_xdp(prog, xdp);
4870 	switch (act) {
4871 	case XDP_PASS:
4872 		res = STMMAC_XDP_PASS;
4873 		break;
4874 	case XDP_TX:
4875 		res = stmmac_xdp_xmit_back(priv, xdp);
4876 		break;
4877 	case XDP_REDIRECT:
4878 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4879 			res = STMMAC_XDP_CONSUMED;
4880 		else
4881 			res = STMMAC_XDP_REDIRECT;
4882 		break;
4883 	default:
4884 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4885 		fallthrough;
4886 	case XDP_ABORTED:
4887 		trace_xdp_exception(priv->dev, prog, act);
4888 		fallthrough;
4889 	case XDP_DROP:
4890 		res = STMMAC_XDP_CONSUMED;
4891 		break;
4892 	}
4893 
4894 	return res;
4895 }
4896 
4897 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4898 					   struct xdp_buff *xdp)
4899 {
4900 	struct bpf_prog *prog;
4901 	int res;
4902 
4903 	prog = READ_ONCE(priv->xdp_prog);
4904 	if (!prog) {
4905 		res = STMMAC_XDP_PASS;
4906 		goto out;
4907 	}
4908 
4909 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4910 out:
4911 	return ERR_PTR(-res);
4912 }
4913 
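/* Finalize an XDP RX pass: arm the TX coalescing timer if XDP_TX frames were
 * queued and flush any pending XDP_REDIRECT work.
 */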
4914 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4915 				   int xdp_status)
4916 {
4917 	int cpu = smp_processor_id();
4918 	int queue;
4919 
4920 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4921 
4922 	if (xdp_status & STMMAC_XDP_TX)
4923 		stmmac_tx_timer_arm(priv, queue);
4924 
4925 	if (xdp_status & STMMAC_XDP_REDIRECT)
4926 		xdp_do_flush();
4927 }
4928 
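/* Zero-copy RX path: copy the frame out of the XSK buffer into a freshly
 * allocated skb so that the XSK buffer can be returned to the pool.
 */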
4929 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4930 					       struct xdp_buff *xdp)
4931 {
4932 	unsigned int metasize = xdp->data - xdp->data_meta;
4933 	unsigned int datasize = xdp->data_end - xdp->data;
4934 	struct sk_buff *skb;
4935 
4936 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4937 			       xdp->data_end - xdp->data_hard_start,
4938 			       GFP_ATOMIC | __GFP_NOWARN);
4939 	if (unlikely(!skb))
4940 		return NULL;
4941 
4942 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4943 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4944 	if (metasize)
4945 		skb_metadata_set(skb, metasize);
4946 
4947 	return skb;
4948 }
4949 
4950 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4951 				   struct dma_desc *p, struct dma_desc *np,
4952 				   struct xdp_buff *xdp)
4953 {
4954 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4955 	struct stmmac_channel *ch = &priv->channel[queue];
4956 	unsigned int len = xdp->data_end - xdp->data;
4957 	enum pkt_hash_types hash_type;
4958 	int coe = priv->hw->rx_csum;
4959 	unsigned long flags;
4960 	struct sk_buff *skb;
4961 	u32 hash;
4962 
4963 	skb = stmmac_construct_skb_zc(ch, xdp);
4964 	if (!skb) {
4965 		priv->xstats.rx_dropped++;
4966 		return;
4967 	}
4968 
4969 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4970 	stmmac_rx_vlan(priv->dev, skb);
4971 	skb->protocol = eth_type_trans(skb, priv->dev);
4972 
4973 	if (unlikely(!coe))
4974 		skb_checksum_none_assert(skb);
4975 	else
4976 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4977 
4978 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4979 		skb_set_hash(skb, hash, hash_type);
4980 
4981 	skb_record_rx_queue(skb, queue);
4982 	napi_gro_receive(&ch->rxtx_napi, skb);
4983 
4984 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
4985 	rxq_stats->rx_pkt_n++;
4986 	rxq_stats->rx_bytes += len;
4987 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
4988 }
4989 
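/* Refill the RX ring from the XSK pool; returns false if the pool ran out of
 * buffers before the budget was consumed.
 */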
4990 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4991 {
4992 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4993 	unsigned int entry = rx_q->dirty_rx;
4994 	struct dma_desc *rx_desc = NULL;
4995 	bool ret = true;
4996 
4997 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4998 
4999 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5000 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5001 		dma_addr_t dma_addr;
5002 		bool use_rx_wd;
5003 
5004 		if (!buf->xdp) {
5005 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5006 			if (!buf->xdp) {
5007 				ret = false;
5008 				break;
5009 			}
5010 		}
5011 
5012 		if (priv->extend_desc)
5013 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5014 		else
5015 			rx_desc = rx_q->dma_rx + entry;
5016 
5017 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5018 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5019 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5020 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5021 
5022 		rx_q->rx_count_frames++;
5023 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5024 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5025 			rx_q->rx_count_frames = 0;
5026 
5027 		use_rx_wd = !priv->rx_coal_frames[queue];
5028 		use_rx_wd |= rx_q->rx_count_frames > 0;
5029 		if (!priv->use_riwt)
5030 			use_rx_wd = false;
5031 
5032 		dma_wmb();
5033 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5034 
5035 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5036 	}
5037 
5038 	if (rx_desc) {
5039 		rx_q->dirty_rx = entry;
5040 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5041 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5042 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5043 	}
5044 
5045 	return ret;
5046 }
5047 
5048 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5049 {
5050 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5051 	 * to represent incoming packet, whereas cb field in the same structure
5052 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5053 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5054 	 */
5055 	return (struct stmmac_xdp_buff *)xdp;
5056 }
5057 
5058 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5059 {
5060 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5061 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5062 	unsigned int count = 0, error = 0, len = 0;
5063 	int dirty = stmmac_rx_dirty(priv, queue);
5064 	unsigned int next_entry = rx_q->cur_rx;
5065 	u32 rx_errors = 0, rx_dropped = 0;
5066 	unsigned int desc_size;
5067 	struct bpf_prog *prog;
5068 	bool failure = false;
5069 	unsigned long flags;
5070 	int xdp_status = 0;
5071 	int status = 0;
5072 
5073 	if (netif_msg_rx_status(priv)) {
5074 		void *rx_head;
5075 
5076 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5077 		if (priv->extend_desc) {
5078 			rx_head = (void *)rx_q->dma_erx;
5079 			desc_size = sizeof(struct dma_extended_desc);
5080 		} else {
5081 			rx_head = (void *)rx_q->dma_rx;
5082 			desc_size = sizeof(struct dma_desc);
5083 		}
5084 
5085 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5086 				    rx_q->dma_rx_phy, desc_size);
5087 	}
5088 	while (count < limit) {
5089 		struct stmmac_rx_buffer *buf;
5090 		struct stmmac_xdp_buff *ctx;
5091 		unsigned int buf1_len = 0;
5092 		struct dma_desc *np, *p;
5093 		int entry;
5094 		int res;
5095 
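		/* Resume a frame whose descriptors spanned the previous NAPI
		 * poll (its state was saved when rx_not_ls was seen).
		 */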
5096 		if (!count && rx_q->state_saved) {
5097 			error = rx_q->state.error;
5098 			len = rx_q->state.len;
5099 		} else {
5100 			rx_q->state_saved = false;
5101 			error = 0;
5102 			len = 0;
5103 		}
5104 
5105 		if (count >= limit)
5106 			break;
5107 
5108 read_again:
5109 		buf1_len = 0;
5110 		entry = next_entry;
5111 		buf = &rx_q->buf_pool[entry];
5112 
5113 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5114 			failure = failure ||
5115 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5116 			dirty = 0;
5117 		}
5118 
5119 		if (priv->extend_desc)
5120 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5121 		else
5122 			p = rx_q->dma_rx + entry;
5123 
5124 		/* read the status of the incoming frame */
5125 		status = stmmac_rx_status(priv, &priv->xstats, p);
5126 		/* check if managed by the DMA otherwise go ahead */
5127 		if (unlikely(status & dma_own))
5128 			break;
5129 
5130 		/* Prefetch the next RX descriptor */
5131 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5132 						priv->dma_conf.dma_rx_size);
5133 		next_entry = rx_q->cur_rx;
5134 
5135 		if (priv->extend_desc)
5136 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5137 		else
5138 			np = rx_q->dma_rx + next_entry;
5139 
5140 		prefetch(np);
5141 
5142 		/* Ensure a valid XSK buffer before proceeding */
5143 		if (!buf->xdp)
5144 			break;
5145 
5146 		if (priv->extend_desc)
5147 			stmmac_rx_extended_status(priv, &priv->xstats,
5148 						  rx_q->dma_erx + entry);
5149 		if (unlikely(status == discard_frame)) {
5150 			xsk_buff_free(buf->xdp);
5151 			buf->xdp = NULL;
5152 			dirty++;
5153 			error = 1;
5154 			if (!priv->hwts_rx_en)
5155 				rx_errors++;
5156 		}
5157 
5158 		if (unlikely(error && (status & rx_not_ls)))
5159 			goto read_again;
5160 		if (unlikely(error)) {
5161 			count++;
5162 			continue;
5163 		}
5164 
5165 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5166 		if (likely(status & rx_not_ls)) {
5167 			xsk_buff_free(buf->xdp);
5168 			buf->xdp = NULL;
5169 			dirty++;
5170 			count++;
5171 			goto read_again;
5172 		}
5173 
5174 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5175 		ctx->priv = priv;
5176 		ctx->desc = p;
5177 		ctx->ndesc = np;
5178 
5179 		/* XDP ZC frames only support primary buffers for now */
5180 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5181 		len += buf1_len;
5182 
5183 		/* ACS is disabled; strip manually. */
5184 		if (likely(!(status & rx_not_ls))) {
5185 			buf1_len -= ETH_FCS_LEN;
5186 			len -= ETH_FCS_LEN;
5187 		}
5188 
5189 		/* RX buffer is good and fits into an XSK pool buffer */
5190 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5191 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5192 
5193 		prog = READ_ONCE(priv->xdp_prog);
5194 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5195 
5196 		switch (res) {
5197 		case STMMAC_XDP_PASS:
5198 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5199 			xsk_buff_free(buf->xdp);
5200 			break;
5201 		case STMMAC_XDP_CONSUMED:
5202 			xsk_buff_free(buf->xdp);
5203 			rx_dropped++;
5204 			break;
5205 		case STMMAC_XDP_TX:
5206 		case STMMAC_XDP_REDIRECT:
5207 			xdp_status |= res;
5208 			break;
5209 		}
5210 
5211 		buf->xdp = NULL;
5212 		dirty++;
5213 		count++;
5214 	}
5215 
5216 	if (status & rx_not_ls) {
5217 		rx_q->state_saved = true;
5218 		rx_q->state.error = error;
5219 		rx_q->state.len = len;
5220 	}
5221 
5222 	stmmac_finalize_xdp_rx(priv, xdp_status);
5223 
5224 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5225 	rxq_stats->rx_pkt_n += count;
5226 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5227 
5228 	priv->xstats.rx_dropped += rx_dropped;
5229 	priv->xstats.rx_errors += rx_errors;
5230 
5231 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5232 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5233 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5234 		else
5235 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5236 
5237 		return (int)count;
5238 	}
5239 
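	/* If the zero-copy refill failed, return the full budget so that NAPI
	 * keeps polling and the refill is retried on the next pass.
	 */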
5240 	return failure ? limit : (int)count;
5241 }
5242 
5243 /**
5244  * stmmac_rx - manage the receive process
5245  * @priv: driver private structure
5246  * @limit: napi budget
5247  * @queue: RX queue index.
5248  * Description: this is the function called by the napi poll method.
5249  * It gets all the frames inside the ring.
5250  */
5251 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5252 {
5253 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5254 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5255 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5256 	struct stmmac_channel *ch = &priv->channel[queue];
5257 	unsigned int count = 0, error = 0, len = 0;
5258 	int status = 0, coe = priv->hw->rx_csum;
5259 	unsigned int next_entry = rx_q->cur_rx;
5260 	enum dma_data_direction dma_dir;
5261 	unsigned int desc_size;
5262 	struct sk_buff *skb = NULL;
5263 	struct stmmac_xdp_buff ctx;
5264 	unsigned long flags;
5265 	int xdp_status = 0;
5266 	int buf_sz;
5267 
5268 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5269 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
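	/* Never walk more than one full ring (minus one entry) per poll */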
5270 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5271 
5272 	if (netif_msg_rx_status(priv)) {
5273 		void *rx_head;
5274 
5275 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5276 		if (priv->extend_desc) {
5277 			rx_head = (void *)rx_q->dma_erx;
5278 			desc_size = sizeof(struct dma_extended_desc);
5279 		} else {
5280 			rx_head = (void *)rx_q->dma_rx;
5281 			desc_size = sizeof(struct dma_desc);
5282 		}
5283 
5284 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5285 				    rx_q->dma_rx_phy, desc_size);
5286 	}
5287 	while (count < limit) {
5288 		unsigned int buf1_len = 0, buf2_len = 0;
5289 		enum pkt_hash_types hash_type;
5290 		struct stmmac_rx_buffer *buf;
5291 		struct dma_desc *np, *p;
5292 		int entry;
5293 		u32 hash;
5294 
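		/* A frame may span several descriptors and hence several NAPI
		 * budgets: restore the per-queue state saved when the previous
		 * poll stopped in the middle of a frame.
		 */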
5295 		if (!count && rx_q->state_saved) {
5296 			skb = rx_q->state.skb;
5297 			error = rx_q->state.error;
5298 			len = rx_q->state.len;
5299 		} else {
5300 			rx_q->state_saved = false;
5301 			skb = NULL;
5302 			error = 0;
5303 			len = 0;
5304 		}
5305 
5306 read_again:
5307 		if (count >= limit)
5308 			break;
5309 
5310 		buf1_len = 0;
5311 		buf2_len = 0;
5312 		entry = next_entry;
5313 		buf = &rx_q->buf_pool[entry];
5314 
5315 		if (priv->extend_desc)
5316 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5317 		else
5318 			p = rx_q->dma_rx + entry;
5319 
5320 		/* read the status of the incoming frame */
5321 		status = stmmac_rx_status(priv, &priv->xstats, p);
5322 		/* stop if the descriptor is still owned by the DMA, otherwise go ahead */
5323 		if (unlikely(status & dma_own))
5324 			break;
5325 
5326 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5327 						priv->dma_conf.dma_rx_size);
5328 		next_entry = rx_q->cur_rx;
5329 
5330 		if (priv->extend_desc)
5331 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5332 		else
5333 			np = rx_q->dma_rx + next_entry;
5334 
5335 		prefetch(np);
5336 
5337 		if (priv->extend_desc)
5338 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5339 		if (unlikely(status == discard_frame)) {
5340 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5341 			buf->page = NULL;
5342 			error = 1;
5343 			if (!priv->hwts_rx_en)
5344 				rx_errors++;
5345 		}
5346 
5347 		if (unlikely(error && (status & rx_not_ls)))
5348 			goto read_again;
5349 		if (unlikely(error)) {
5350 			dev_kfree_skb(skb);
5351 			skb = NULL;
5352 			count++;
5353 			continue;
5354 		}
5355 
5356 		/* Buffer is good. Go on. */
5357 
5358 		prefetch(page_address(buf->page) + buf->page_offset);
5359 		if (buf->sec_page)
5360 			prefetch(page_address(buf->sec_page));
5361 
5362 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5363 		len += buf1_len;
5364 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5365 		len += buf2_len;
5366 
5367 		/* ACS is disabled; strip manually. */
5368 		if (likely(!(status & rx_not_ls))) {
5369 			if (buf2_len) {
5370 				buf2_len -= ETH_FCS_LEN;
5371 				len -= ETH_FCS_LEN;
5372 			} else if (buf1_len) {
5373 				buf1_len -= ETH_FCS_LEN;
5374 				len -= ETH_FCS_LEN;
5375 			}
5376 		}
5377 
5378 		if (!skb) {
5379 			unsigned int pre_len, sync_len;
5380 
5381 			dma_sync_single_for_cpu(priv->device, buf->addr,
5382 						buf1_len, dma_dir);
5383 
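			/* Build an xdp_buff around the primary buffer before
			 * (optionally) running the attached XDP program.
			 */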
5384 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5385 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5386 					 buf->page_offset, buf1_len, true);
5387 
5388 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5389 				  buf->page_offset;
5390 
5391 			ctx.priv = priv;
5392 			ctx.desc = p;
5393 			ctx.ndesc = np;
5394 
5395 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5396 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5397 			 * cover the maximum length the CPU touched
5398 			 */
5399 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5400 				   buf->page_offset;
5401 			sync_len = max(sync_len, pre_len);
5402 
5403 			/* For verdicts other than XDP_PASS */
5404 			if (IS_ERR(skb)) {
5405 				unsigned int xdp_res = -PTR_ERR(skb);
5406 
5407 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5408 					page_pool_put_page(rx_q->page_pool,
5409 							   virt_to_head_page(ctx.xdp.data),
5410 							   sync_len, true);
5411 					buf->page = NULL;
5412 					rx_dropped++;
5413 
5414 					/* Clear skb as it was set to the
5415 					 * verdict status by the XDP program.
5416 					 */
5417 					skb = NULL;
5418 
5419 					if (unlikely((status & rx_not_ls)))
5420 						goto read_again;
5421 
5422 					count++;
5423 					continue;
5424 				} else if (xdp_res & (STMMAC_XDP_TX |
5425 						      STMMAC_XDP_REDIRECT)) {
5426 					xdp_status |= xdp_res;
5427 					buf->page = NULL;
5428 					skb = NULL;
5429 					count++;
5430 					continue;
5431 				}
5432 			}
5433 		}
5434 
5435 		if (!skb) {
5436 			/* XDP program may expand or reduce tail */
5437 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5438 
5439 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5440 			if (!skb) {
5441 				rx_dropped++;
5442 				count++;
5443 				goto drain_data;
5444 			}
5445 
5446 			/* XDP program may adjust header */
5447 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5448 			skb_put(skb, buf1_len);
5449 
5450 			/* Data payload copied into SKB, page ready for recycle */
5451 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5452 			buf->page = NULL;
5453 		} else if (buf1_len) {
5454 			dma_sync_single_for_cpu(priv->device, buf->addr,
5455 						buf1_len, dma_dir);
5456 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5457 					buf->page, buf->page_offset, buf1_len,
5458 					priv->dma_conf.dma_buf_sz);
5459 
5460 			/* Data payload appended into SKB */
5461 			skb_mark_for_recycle(skb);
5462 			buf->page = NULL;
5463 		}
5464 
5465 		if (buf2_len) {
5466 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5467 						buf2_len, dma_dir);
5468 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5469 					buf->sec_page, 0, buf2_len,
5470 					priv->dma_conf.dma_buf_sz);
5471 
5472 			/* Data payload appended into SKB */
5473 			skb_mark_for_recycle(skb);
5474 			buf->sec_page = NULL;
5475 		}
5476 
5477 drain_data:
5478 		if (likely(status & rx_not_ls))
5479 			goto read_again;
5480 		if (!skb)
5481 			continue;
5482 
5483 		/* Got entire packet into SKB. Finish it. */
5484 
5485 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5486 		stmmac_rx_vlan(priv->dev, skb);
5487 		skb->protocol = eth_type_trans(skb, priv->dev);
5488 
5489 		if (unlikely(!coe))
5490 			skb_checksum_none_assert(skb);
5491 		else
5492 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5493 
5494 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5495 			skb_set_hash(skb, hash, hash_type);
5496 
5497 		skb_record_rx_queue(skb, queue);
5498 		napi_gro_receive(&ch->rx_napi, skb);
5499 		skb = NULL;
5500 
5501 		rx_packets++;
5502 		rx_bytes += len;
5503 		count++;
5504 	}
5505 
5506 	if (status & rx_not_ls || skb) {
5507 		rx_q->state_saved = true;
5508 		rx_q->state.skb = skb;
5509 		rx_q->state.error = error;
5510 		rx_q->state.len = len;
5511 	}
5512 
5513 	stmmac_finalize_xdp_rx(priv, xdp_status);
5514 
5515 	stmmac_rx_refill(priv, queue);
5516 
5517 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5518 	rxq_stats->rx_packets += rx_packets;
5519 	rxq_stats->rx_bytes += rx_bytes;
5520 	rxq_stats->rx_pkt_n += count;
5521 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5522 
5523 	priv->xstats.rx_dropped += rx_dropped;
5524 	priv->xstats.rx_errors += rx_errors;
5525 
5526 	return count;
5527 }
5528 
5529 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5530 {
5531 	struct stmmac_channel *ch =
5532 		container_of(napi, struct stmmac_channel, rx_napi);
5533 	struct stmmac_priv *priv = ch->priv_data;
5534 	struct stmmac_rxq_stats *rxq_stats;
5535 	u32 chan = ch->index;
5536 	unsigned long flags;
5537 	int work_done;
5538 
5539 	rxq_stats = &priv->xstats.rxq_stats[chan];
5540 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5541 	rxq_stats->napi_poll++;
5542 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5543 
5544 	work_done = stmmac_rx(priv, budget, chan);
5545 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5546 		unsigned long flags;
5547 
5548 		spin_lock_irqsave(&ch->lock, flags);
5549 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5550 		spin_unlock_irqrestore(&ch->lock, flags);
5551 	}
5552 
5553 	return work_done;
5554 }
5555 
5556 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5557 {
5558 	struct stmmac_channel *ch =
5559 		container_of(napi, struct stmmac_channel, tx_napi);
5560 	struct stmmac_priv *priv = ch->priv_data;
5561 	struct stmmac_txq_stats *txq_stats;
5562 	u32 chan = ch->index;
5563 	unsigned long flags;
5564 	int work_done;
5565 
5566 	txq_stats = &priv->xstats.txq_stats[chan];
5567 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5568 	txq_stats->napi_poll++;
5569 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5570 
5571 	work_done = stmmac_tx_clean(priv, budget, chan);
5572 	work_done = min(work_done, budget);
5573 
5574 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5575 		unsigned long flags;
5576 
5577 		spin_lock_irqsave(&ch->lock, flags);
5578 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5579 		spin_unlock_irqrestore(&ch->lock, flags);
5580 	}
5581 
5582 	return work_done;
5583 }
5584 
5585 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5586 {
5587 	struct stmmac_channel *ch =
5588 		container_of(napi, struct stmmac_channel, rxtx_napi);
5589 	struct stmmac_priv *priv = ch->priv_data;
5590 	int rx_done, tx_done, rxtx_done;
5591 	struct stmmac_rxq_stats *rxq_stats;
5592 	struct stmmac_txq_stats *txq_stats;
5593 	u32 chan = ch->index;
5594 	unsigned long flags;
5595 
5596 	rxq_stats = &priv->xstats.rxq_stats[chan];
5597 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5598 	rxq_stats->napi_poll++;
5599 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5600 
5601 	txq_stats = &priv->xstats.txq_stats[chan];
5602 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5603 	txq_stats->napi_poll++;
5604 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5605 
5606 	tx_done = stmmac_tx_clean(priv, budget, chan);
5607 	tx_done = min(tx_done, budget);
5608 
5609 	rx_done = stmmac_rx_zc(priv, budget, chan);
5610 
5611 	rxtx_done = max(tx_done, rx_done);
5612 
5613 	/* If either TX or RX work is not complete, return budget
5614 	 * and keep polling
5615 	 */
5616 	if (rxtx_done >= budget)
5617 		return budget;
5618 
5619 	/* all work done, exit the polling mode */
5620 	if (napi_complete_done(napi, rxtx_done)) {
5621 		unsigned long flags;
5622 
5623 		spin_lock_irqsave(&ch->lock, flags);
5624 		/* Both RX and TX work are complete,
5625 		 * so enable both RX & TX IRQs.
5626 		 */
5627 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5628 		spin_unlock_irqrestore(&ch->lock, flags);
5629 	}
5630 
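	/* Report strictly less than the budget so the NAPI core treats this
	 * poll as complete.
	 */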
5631 	return min(rxtx_done, budget - 1);
5632 }
5633 
5634 /**
5635  *  stmmac_tx_timeout
5636  *  @dev : Pointer to net device structure
5637  *  @txqueue: the index of the hanging transmit queue
5638  *  Description: this function is called when a packet transmission fails to
5639  *   complete within a reasonable time. The driver will mark the error in the
5640  *   netdev structure and arrange for the device to be reset to a sane state
5641  *   in order to transmit a new packet.
5642  */
5643 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5644 {
5645 	struct stmmac_priv *priv = netdev_priv(dev);
5646 
5647 	stmmac_global_err(priv);
5648 }
5649 
5650 /**
5651  *  stmmac_set_rx_mode - entry point for multicast addressing
5652  *  @dev : pointer to the device structure
5653  *  Description:
5654  *  This function is a driver entry point which gets called by the kernel
5655  *  whenever multicast addresses must be enabled/disabled.
5656  *  Return value:
5657  *  void.
5658  */
5659 static void stmmac_set_rx_mode(struct net_device *dev)
5660 {
5661 	struct stmmac_priv *priv = netdev_priv(dev);
5662 
5663 	stmmac_set_filter(priv, priv->hw, dev);
5664 }
5665 
5666 /**
5667  *  stmmac_change_mtu - entry point to change MTU size for the device.
5668  *  @dev : device pointer.
5669  *  @new_mtu : the new MTU size for the device.
5670  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5671  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5672  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5673  *  Return value:
5674  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5675  *  file on failure.
5676  */
5677 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5678 {
5679 	struct stmmac_priv *priv = netdev_priv(dev);
5680 	int txfifosz = priv->plat->tx_fifo_size;
5681 	struct stmmac_dma_conf *dma_conf;
5682 	const int mtu = new_mtu;
5683 	int ret;
5684 
5685 	if (txfifosz == 0)
5686 		txfifosz = priv->dma_cap.tx_fifo_size;
5687 
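	/* The TX FIFO is shared equally among the enabled TX queues */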
5688 	txfifosz /= priv->plat->tx_queues_to_use;
5689 
5690 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5691 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5692 		return -EINVAL;
5693 	}
5694 
5695 	new_mtu = STMMAC_ALIGN(new_mtu);
5696 
5697 	/* Reject if the TX FIFO is too small for the new MTU or it exceeds 16KiB */
5698 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5699 		return -EINVAL;
5700 
5701 	if (netif_running(dev)) {
5702 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5703 		/* Try to allocate the new DMA conf with the new mtu */
5704 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5705 		if (IS_ERR(dma_conf)) {
5706 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5707 				   mtu);
5708 			return PTR_ERR(dma_conf);
5709 		}
5710 
5711 		stmmac_release(dev);
5712 
5713 		ret = __stmmac_open(dev, dma_conf);
5714 		if (ret) {
5715 			free_dma_desc_resources(priv, dma_conf);
5716 			kfree(dma_conf);
5717 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5718 			return ret;
5719 		}
5720 
5721 		kfree(dma_conf);
5722 
5723 		stmmac_set_rx_mode(dev);
5724 	}
5725 
5726 	dev->mtu = mtu;
5727 	netdev_update_features(dev);
5728 
5729 	return 0;
5730 }
5731 
5732 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5733 					     netdev_features_t features)
5734 {
5735 	struct stmmac_priv *priv = netdev_priv(dev);
5736 
5737 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5738 		features &= ~NETIF_F_RXCSUM;
5739 
5740 	if (!priv->plat->tx_coe)
5741 		features &= ~NETIF_F_CSUM_MASK;
5742 
5743 	/* Some GMAC devices have buggy Jumbo frame support that
5744 	 * needs to have the Tx COE disabled for oversized frames
5745 	 * (due to limited buffer sizes). In this case we disable
5746 	 * the TX csum insertion in the TDES and do not use SF.
5747 	 */
5748 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5749 		features &= ~NETIF_F_CSUM_MASK;
5750 
5751 	/* Disable tso if asked by ethtool */
5752 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5753 		if (features & NETIF_F_TSO)
5754 			priv->tso = true;
5755 		else
5756 			priv->tso = false;
5757 	}
5758 
5759 	return features;
5760 }
5761 
5762 static int stmmac_set_features(struct net_device *netdev,
5763 			       netdev_features_t features)
5764 {
5765 	struct stmmac_priv *priv = netdev_priv(netdev);
5766 
5767 	/* Keep the COE Type in case csum is supported */
5768 	if (features & NETIF_F_RXCSUM)
5769 		priv->hw->rx_csum = priv->plat->rx_coe;
5770 	else
5771 		priv->hw->rx_csum = 0;
5772 	/* No check needed because rx_coe has been set before and it will be
5773 	 * fixed in case of issue.
5774 	 */
5775 	stmmac_rx_ipc(priv, priv->hw);
5776 
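	/* Split Header requires RX checksum offload; re-program it on every
	 * RX channel using the updated rx_csum value.
	 */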
5777 	if (priv->sph_cap) {
5778 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5779 		u32 chan;
5780 
5781 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5782 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5783 	}
5784 
5785 	return 0;
5786 }
5787 
5788 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5789 {
5790 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5791 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5792 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5793 	bool *hs_enable = &fpe_cfg->hs_enable;
5794 
5795 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5796 		return;
5797 
5798 	/* If LP has sent verify mPacket, LP is FPE capable */
5799 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5800 		if (*lp_state < FPE_STATE_CAPABLE)
5801 			*lp_state = FPE_STATE_CAPABLE;
5802 
5803 		/* If the user has requested FPE enable, respond quickly */
5804 		if (*hs_enable)
5805 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5806 						MPACKET_RESPONSE);
5807 	}
5808 
5809 	/* If Local has sent verify mPacket, Local is FPE capable */
5810 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5811 		if (*lo_state < FPE_STATE_CAPABLE)
5812 			*lo_state = FPE_STATE_CAPABLE;
5813 	}
5814 
5815 	/* If LP has sent response mPacket, LP is entering FPE ON */
5816 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5817 		*lp_state = FPE_STATE_ENTERING_ON;
5818 
5819 	/* If Local has sent response mPacket, Local is entering FPE ON */
5820 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5821 		*lo_state = FPE_STATE_ENTERING_ON;
5822 
5823 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5824 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5825 	    priv->fpe_wq) {
5826 		queue_work(priv->fpe_wq, &priv->fpe_task);
5827 	}
5828 }
5829 
5830 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5831 {
5832 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5833 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5834 	u32 queues_count;
5835 	u32 queue;
5836 	bool xmac;
5837 
5838 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5839 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5840 
5841 	if (priv->irq_wake)
5842 		pm_wakeup_event(priv->device, 0);
5843 
5844 	if (priv->dma_cap.estsel)
5845 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5846 				      &priv->xstats, tx_cnt);
5847 
5848 	if (priv->dma_cap.fpesel) {
5849 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5850 						   priv->dev);
5851 
5852 		stmmac_fpe_event_status(priv, status);
5853 	}
5854 
5855 	/* To handle the GMAC's own interrupts */
5856 	if ((priv->plat->has_gmac) || xmac) {
5857 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5858 
5859 		if (unlikely(status)) {
5860 			/* For LPI we need to save the tx status */
5861 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5862 				priv->tx_path_in_lpi_mode = true;
5863 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5864 				priv->tx_path_in_lpi_mode = false;
5865 		}
5866 
5867 		for (queue = 0; queue < queues_count; queue++) {
5868 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5869 							    queue);
5870 		}
5871 
5872 		/* PCS link status */
5873 		if (priv->hw->pcs &&
5874 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5875 			if (priv->xstats.pcs_link)
5876 				netif_carrier_on(priv->dev);
5877 			else
5878 				netif_carrier_off(priv->dev);
5879 		}
5880 
5881 		stmmac_timestamp_interrupt(priv, priv);
5882 	}
5883 }
5884 
5885 /**
5886  *  stmmac_interrupt - main ISR
5887  *  @irq: interrupt number.
5888  *  @dev_id: to pass the net device pointer.
5889  *  Description: this is the main driver interrupt service routine.
5890  *  It can call:
5891  *  o DMA service routine (to manage incoming frame reception and transmission
5892  *    status)
5893  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5894  *    interrupts.
5895  */
5896 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5897 {
5898 	struct net_device *dev = (struct net_device *)dev_id;
5899 	struct stmmac_priv *priv = netdev_priv(dev);
5900 
5901 	/* Check if adapter is up */
5902 	if (test_bit(STMMAC_DOWN, &priv->state))
5903 		return IRQ_HANDLED;
5904 
5905 	/* Check if a fatal error happened */
5906 	if (stmmac_safety_feat_interrupt(priv))
5907 		return IRQ_HANDLED;
5908 
5909 	/* To handle Common interrupts */
5910 	stmmac_common_interrupt(priv);
5911 
5912 	/* To handle DMA interrupts */
5913 	stmmac_dma_interrupt(priv);
5914 
5915 	return IRQ_HANDLED;
5916 }
5917 
5918 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5919 {
5920 	struct net_device *dev = (struct net_device *)dev_id;
5921 	struct stmmac_priv *priv = netdev_priv(dev);
5922 
5923 	if (unlikely(!dev)) {
5924 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5925 		return IRQ_NONE;
5926 	}
5927 
5928 	/* Check if adapter is up */
5929 	if (test_bit(STMMAC_DOWN, &priv->state))
5930 		return IRQ_HANDLED;
5931 
5932 	/* To handle Common interrupts */
5933 	stmmac_common_interrupt(priv);
5934 
5935 	return IRQ_HANDLED;
5936 }
5937 
5938 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5939 {
5940 	struct net_device *dev = (struct net_device *)dev_id;
5941 	struct stmmac_priv *priv = netdev_priv(dev);
5942 
5943 	if (unlikely(!dev)) {
5944 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5945 		return IRQ_NONE;
5946 	}
5947 
5948 	/* Check if adapter is up */
5949 	if (test_bit(STMMAC_DOWN, &priv->state))
5950 		return IRQ_HANDLED;
5951 
5952 	/* Check if a fatal error happened */
5953 	stmmac_safety_feat_interrupt(priv);
5954 
5955 	return IRQ_HANDLED;
5956 }
5957 
5958 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5959 {
5960 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5961 	struct stmmac_dma_conf *dma_conf;
5962 	int chan = tx_q->queue_index;
5963 	struct stmmac_priv *priv;
5964 	int status;
5965 
5966 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5967 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5968 
5969 	if (unlikely(!data)) {
5970 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5971 		return IRQ_NONE;
5972 	}
5973 
5974 	/* Check if adapter is up */
5975 	if (test_bit(STMMAC_DOWN, &priv->state))
5976 		return IRQ_HANDLED;
5977 
5978 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5979 
5980 	if (unlikely(status & tx_hard_error_bump_tc)) {
5981 		/* Try to bump up the dma threshold on this failure */
5982 		stmmac_bump_dma_threshold(priv, chan);
5983 	} else if (unlikely(status == tx_hard_error)) {
5984 		stmmac_tx_err(priv, chan);
5985 	}
5986 
5987 	return IRQ_HANDLED;
5988 }
5989 
5990 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5991 {
5992 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5993 	struct stmmac_dma_conf *dma_conf;
5994 	int chan = rx_q->queue_index;
5995 	struct stmmac_priv *priv;
5996 
5997 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5998 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5999 
6000 	if (unlikely(!data)) {
6001 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6002 		return IRQ_NONE;
6003 	}
6004 
6005 	/* Check if adapter is up */
6006 	if (test_bit(STMMAC_DOWN, &priv->state))
6007 		return IRQ_HANDLED;
6008 
6009 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6010 
6011 	return IRQ_HANDLED;
6012 }
6013 
6014 /**
6015  *  stmmac_ioctl - Entry point for the Ioctl
6016  *  @dev: Device pointer.
6017  *  @rq: An IOCTL specific structure that can contain a pointer to
6018  *  a proprietary structure used to pass information to the driver.
6019  *  @cmd: IOCTL command
6020  *  Description:
6021  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6022  */
6023 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6024 {
6025 	struct stmmac_priv *priv = netdev_priv(dev);
6026 	int ret = -EOPNOTSUPP;
6027 
6028 	if (!netif_running(dev))
6029 		return -EINVAL;
6030 
6031 	switch (cmd) {
6032 	case SIOCGMIIPHY:
6033 	case SIOCGMIIREG:
6034 	case SIOCSMIIREG:
6035 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6036 		break;
6037 	case SIOCSHWTSTAMP:
6038 		ret = stmmac_hwtstamp_set(dev, rq);
6039 		break;
6040 	case SIOCGHWTSTAMP:
6041 		ret = stmmac_hwtstamp_get(dev, rq);
6042 		break;
6043 	default:
6044 		break;
6045 	}
6046 
6047 	return ret;
6048 }
6049 
6050 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6051 				    void *cb_priv)
6052 {
6053 	struct stmmac_priv *priv = cb_priv;
6054 	int ret = -EOPNOTSUPP;
6055 
6056 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6057 		return ret;
6058 
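	/* Quiesce NAPI on all queues while the classifier is updated; they are
	 * re-enabled once the setup call returns.
	 */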
6059 	__stmmac_disable_all_queues(priv);
6060 
6061 	switch (type) {
6062 	case TC_SETUP_CLSU32:
6063 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6064 		break;
6065 	case TC_SETUP_CLSFLOWER:
6066 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6067 		break;
6068 	default:
6069 		break;
6070 	}
6071 
6072 	stmmac_enable_all_queues(priv);
6073 	return ret;
6074 }
6075 
6076 static LIST_HEAD(stmmac_block_cb_list);
6077 
6078 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6079 			   void *type_data)
6080 {
6081 	struct stmmac_priv *priv = netdev_priv(ndev);
6082 
6083 	switch (type) {
6084 	case TC_QUERY_CAPS:
6085 		return stmmac_tc_query_caps(priv, priv, type_data);
6086 	case TC_SETUP_BLOCK:
6087 		return flow_block_cb_setup_simple(type_data,
6088 						  &stmmac_block_cb_list,
6089 						  stmmac_setup_tc_block_cb,
6090 						  priv, priv, true);
6091 	case TC_SETUP_QDISC_CBS:
6092 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6093 	case TC_SETUP_QDISC_TAPRIO:
6094 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6095 	case TC_SETUP_QDISC_ETF:
6096 		return stmmac_tc_setup_etf(priv, priv, type_data);
6097 	default:
6098 		return -EOPNOTSUPP;
6099 	}
6100 }
6101 
6102 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6103 			       struct net_device *sb_dev)
6104 {
6105 	int gso = skb_shinfo(skb)->gso_type;
6106 
6107 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6108 		/*
6109 		 * There is no way to determine the number of TSO/USO
6110 		 * capable Queues. Let's always use Queue 0
6111 		 * because if TSO/USO is supported then at least this
6112 		 * one will be capable.
6113 		 */
6114 		return 0;
6115 	}
6116 
6117 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6118 }
6119 
6120 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6121 {
6122 	struct stmmac_priv *priv = netdev_priv(ndev);
6123 	int ret = 0;
6124 
6125 	ret = pm_runtime_resume_and_get(priv->device);
6126 	if (ret < 0)
6127 		return ret;
6128 
6129 	ret = eth_mac_addr(ndev, addr);
6130 	if (ret)
6131 		goto set_mac_error;
6132 
6133 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6134 
6135 set_mac_error:
6136 	pm_runtime_put(priv->device);
6137 
6138 	return ret;
6139 }
6140 
6141 #ifdef CONFIG_DEBUG_FS
6142 static struct dentry *stmmac_fs_dir;
6143 
6144 static void sysfs_display_ring(void *head, int size, int extend_desc,
6145 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6146 {
6147 	int i;
6148 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6149 	struct dma_desc *p = (struct dma_desc *)head;
6150 	dma_addr_t dma_addr;
6151 
6152 	for (i = 0; i < size; i++) {
6153 		if (extend_desc) {
6154 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6155 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6156 				   i, &dma_addr,
6157 				   le32_to_cpu(ep->basic.des0),
6158 				   le32_to_cpu(ep->basic.des1),
6159 				   le32_to_cpu(ep->basic.des2),
6160 				   le32_to_cpu(ep->basic.des3));
6161 			ep++;
6162 		} else {
6163 			dma_addr = dma_phy_addr + i * sizeof(*p);
6164 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6165 				   i, &dma_addr,
6166 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6167 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6168 			p++;
6169 		}
6170 		seq_printf(seq, "\n");
6171 	}
6172 }
6173 
6174 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6175 {
6176 	struct net_device *dev = seq->private;
6177 	struct stmmac_priv *priv = netdev_priv(dev);
6178 	u32 rx_count = priv->plat->rx_queues_to_use;
6179 	u32 tx_count = priv->plat->tx_queues_to_use;
6180 	u32 queue;
6181 
6182 	if ((dev->flags & IFF_UP) == 0)
6183 		return 0;
6184 
6185 	for (queue = 0; queue < rx_count; queue++) {
6186 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6187 
6188 		seq_printf(seq, "RX Queue %d:\n", queue);
6189 
6190 		if (priv->extend_desc) {
6191 			seq_printf(seq, "Extended descriptor ring:\n");
6192 			sysfs_display_ring((void *)rx_q->dma_erx,
6193 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6194 		} else {
6195 			seq_printf(seq, "Descriptor ring:\n");
6196 			sysfs_display_ring((void *)rx_q->dma_rx,
6197 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6198 		}
6199 	}
6200 
6201 	for (queue = 0; queue < tx_count; queue++) {
6202 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6203 
6204 		seq_printf(seq, "TX Queue %d:\n", queue);
6205 
6206 		if (priv->extend_desc) {
6207 			seq_printf(seq, "Extended descriptor ring:\n");
6208 			sysfs_display_ring((void *)tx_q->dma_etx,
6209 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6210 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6211 			seq_printf(seq, "Descriptor ring:\n");
6212 			sysfs_display_ring((void *)tx_q->dma_tx,
6213 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6214 		}
6215 	}
6216 
6217 	return 0;
6218 }
6219 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6220 
6221 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6222 {
6223 	static const char * const dwxgmac_timestamp_source[] = {
6224 		"None",
6225 		"Internal",
6226 		"External",
6227 		"Both",
6228 	};
6229 	static const char * const dwxgmac_safety_feature_desc[] = {
6230 		"No",
6231 		"All Safety Features with ECC and Parity",
6232 		"All Safety Features without ECC or Parity",
6233 		"All Safety Features with Parity Only",
6234 		"ECC Only",
6235 		"UNDEFINED",
6236 		"UNDEFINED",
6237 		"UNDEFINED",
6238 	};
6239 	struct net_device *dev = seq->private;
6240 	struct stmmac_priv *priv = netdev_priv(dev);
6241 
6242 	if (!priv->hw_cap_support) {
6243 		seq_printf(seq, "DMA HW features not supported\n");
6244 		return 0;
6245 	}
6246 
6247 	seq_printf(seq, "==============================\n");
6248 	seq_printf(seq, "\tDMA HW features\n");
6249 	seq_printf(seq, "==============================\n");
6250 
6251 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6252 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6253 	seq_printf(seq, "\t1000 Mbps: %s\n",
6254 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6255 	seq_printf(seq, "\tHalf duplex: %s\n",
6256 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6257 	if (priv->plat->has_xgmac) {
6258 		seq_printf(seq,
6259 			   "\tNumber of Additional MAC address registers: %d\n",
6260 			   priv->dma_cap.multi_addr);
6261 	} else {
6262 		seq_printf(seq, "\tHash Filter: %s\n",
6263 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6264 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6265 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6266 	}
6267 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6268 		   (priv->dma_cap.pcs) ? "Y" : "N");
6269 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6270 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6271 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6272 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6273 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6274 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6275 	seq_printf(seq, "\tRMON module: %s\n",
6276 		   (priv->dma_cap.rmon) ? "Y" : "N");
6277 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6278 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6279 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6280 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6281 	if (priv->plat->has_xgmac)
6282 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6283 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6284 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6285 		   (priv->dma_cap.eee) ? "Y" : "N");
6286 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6287 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6288 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6289 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6290 	    priv->plat->has_xgmac) {
6291 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6292 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6293 	} else {
6294 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6295 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6296 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6297 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6298 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6299 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6300 	}
6301 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6302 		   priv->dma_cap.number_rx_channel);
6303 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6304 		   priv->dma_cap.number_tx_channel);
6305 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6306 		   priv->dma_cap.number_rx_queues);
6307 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6308 		   priv->dma_cap.number_tx_queues);
6309 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6310 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6311 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6312 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6313 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6314 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6315 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6316 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6317 		   priv->dma_cap.pps_out_num);
6318 	seq_printf(seq, "\tSafety Features: %s\n",
6319 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6320 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6321 		   priv->dma_cap.frpsel ? "Y" : "N");
6322 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6323 		   priv->dma_cap.host_dma_width);
6324 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6325 		   priv->dma_cap.rssen ? "Y" : "N");
6326 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6327 		   priv->dma_cap.vlhash ? "Y" : "N");
6328 	seq_printf(seq, "\tSplit Header: %s\n",
6329 		   priv->dma_cap.sphen ? "Y" : "N");
6330 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6331 		   priv->dma_cap.vlins ? "Y" : "N");
6332 	seq_printf(seq, "\tDouble VLAN: %s\n",
6333 		   priv->dma_cap.dvlan ? "Y" : "N");
6334 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6335 		   priv->dma_cap.l3l4fnum);
6336 	seq_printf(seq, "\tARP Offloading: %s\n",
6337 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6338 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6339 		   priv->dma_cap.estsel ? "Y" : "N");
6340 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6341 		   priv->dma_cap.fpesel ? "Y" : "N");
6342 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6343 		   priv->dma_cap.tbssel ? "Y" : "N");
6344 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6345 		   priv->dma_cap.tbs_ch_num);
6346 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6347 		   priv->dma_cap.sgfsel ? "Y" : "N");
6348 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6349 		   BIT(priv->dma_cap.ttsfd) >> 1);
6350 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6351 		   priv->dma_cap.numtc);
6352 	seq_printf(seq, "\tDCB Feature: %s\n",
6353 		   priv->dma_cap.dcben ? "Y" : "N");
6354 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6355 		   priv->dma_cap.advthword ? "Y" : "N");
6356 	seq_printf(seq, "\tPTP Offload: %s\n",
6357 		   priv->dma_cap.ptoen ? "Y" : "N");
6358 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6359 		   priv->dma_cap.osten ? "Y" : "N");
6360 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6361 		   priv->dma_cap.pfcen ? "Y" : "N");
6362 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6363 		   BIT(priv->dma_cap.frpes) << 6);
6364 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6365 		   BIT(priv->dma_cap.frpbs) << 6);
6366 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6367 		   priv->dma_cap.frppipe_num);
6368 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6369 		   priv->dma_cap.nrvf_num ?
6370 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6371 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6372 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6373 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6374 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6375 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6376 		   priv->dma_cap.cbtisel ? "Y" : "N");
6377 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6378 		   priv->dma_cap.aux_snapshot_n);
6379 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6380 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6381 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6382 		   priv->dma_cap.edma ? "Y" : "N");
6383 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6384 		   priv->dma_cap.ediffc ? "Y" : "N");
6385 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6386 		   priv->dma_cap.vxn ? "Y" : "N");
6387 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6388 		   priv->dma_cap.dbgmem ? "Y" : "N");
6389 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6390 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6391 	return 0;
6392 }
6393 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6394 
6395 /* Use network device events to rename debugfs file entries.
6396  */
6397 static int stmmac_device_event(struct notifier_block *unused,
6398 			       unsigned long event, void *ptr)
6399 {
6400 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6401 	struct stmmac_priv *priv = netdev_priv(dev);
6402 
6403 	if (dev->netdev_ops != &stmmac_netdev_ops)
6404 		goto done;
6405 
6406 	switch (event) {
6407 	case NETDEV_CHANGENAME:
6408 		if (priv->dbgfs_dir)
6409 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6410 							 priv->dbgfs_dir,
6411 							 stmmac_fs_dir,
6412 							 dev->name);
6413 		break;
6414 	}
6415 done:
6416 	return NOTIFY_DONE;
6417 }
6418 
6419 static struct notifier_block stmmac_notifier = {
6420 	.notifier_call = stmmac_device_event,
6421 };
6422 
6423 static void stmmac_init_fs(struct net_device *dev)
6424 {
6425 	struct stmmac_priv *priv = netdev_priv(dev);
6426 
6427 	rtnl_lock();
6428 
6429 	/* Create per netdev entries */
6430 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6431 
6432 	/* Entry to report DMA RX/TX rings */
6433 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6434 			    &stmmac_rings_status_fops);
6435 
6436 	/* Entry to report the DMA HW features */
6437 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6438 			    &stmmac_dma_cap_fops);
6439 
6440 	rtnl_unlock();
6441 }
6442 
6443 static void stmmac_exit_fs(struct net_device *dev)
6444 {
6445 	struct stmmac_priv *priv = netdev_priv(dev);
6446 
6447 	debugfs_remove_recursive(priv->dbgfs_dir);
6448 }
6449 #endif /* CONFIG_DEBUG_FS */
6450 
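/* Bit-wise little-endian CRC-32 (reflected Ethernet polynomial 0xedb88320)
 * over the 12 valid VID bits; the caller folds the result into a 4-bit VLAN
 * hash filter bin.
 */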
6451 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6452 {
6453 	unsigned char *data = (unsigned char *)&vid_le;
6454 	unsigned char data_byte = 0;
6455 	u32 crc = ~0x0;
6456 	u32 temp = 0;
6457 	int i, bits;
6458 
6459 	bits = get_bitmask_order(VLAN_VID_MASK);
6460 	for (i = 0; i < bits; i++) {
6461 		if ((i % 8) == 0)
6462 			data_byte = data[i / 8];
6463 
6464 		temp = ((crc & 1) ^ data_byte) & 1;
6465 		crc >>= 1;
6466 		data_byte >>= 1;
6467 
6468 		if (temp)
6469 			crc ^= 0xedb88320;
6470 	}
6471 
6472 	return crc;
6473 }
6474 
6475 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6476 {
6477 	u32 crc, hash = 0;
6478 	__le16 pmatch = 0;
6479 	int count = 0;
6480 	u16 vid = 0;
6481 
6482 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6483 		__le16 vid_le = cpu_to_le16(vid);
6484 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6485 		hash |= (1 << crc);
6486 		count++;
6487 	}
6488 
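	/* No VLAN hash filtering in hardware: fall back to a single perfect
	 * VID match (VID 0 always passes the filter).
	 */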
6489 	if (!priv->dma_cap.vlhash) {
6490 		if (count > 2) /* VID = 0 always passes filter */
6491 			return -EOPNOTSUPP;
6492 
6493 		pmatch = cpu_to_le16(vid);
6494 		hash = 0;
6495 	}
6496 
6497 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6498 }
6499 
6500 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6501 {
6502 	struct stmmac_priv *priv = netdev_priv(ndev);
6503 	bool is_double = false;
6504 	int ret;
6505 
6506 	ret = pm_runtime_resume_and_get(priv->device);
6507 	if (ret < 0)
6508 		return ret;
6509 
6510 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6511 		is_double = true;
6512 
6513 	set_bit(vid, priv->active_vlans);
6514 	ret = stmmac_vlan_update(priv, is_double);
6515 	if (ret) {
6516 		clear_bit(vid, priv->active_vlans);
6517 		goto err_pm_put;
6518 	}
6519 
6520 	if (priv->hw->num_vlan) {
6521 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6522 		if (ret)
6523 			goto err_pm_put;
6524 	}
6525 err_pm_put:
6526 	pm_runtime_put(priv->device);
6527 
6528 	return ret;
6529 }
6530 
6531 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6532 {
6533 	struct stmmac_priv *priv = netdev_priv(ndev);
6534 	bool is_double = false;
6535 	int ret;
6536 
6537 	ret = pm_runtime_resume_and_get(priv->device);
6538 	if (ret < 0)
6539 		return ret;
6540 
6541 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6542 		is_double = true;
6543 
6544 	clear_bit(vid, priv->active_vlans);
6545 
6546 	if (priv->hw->num_vlan) {
6547 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6548 		if (ret)
6549 			goto del_vlan_error;
6550 	}
6551 
6552 	ret = stmmac_vlan_update(priv, is_double);
6553 
6554 del_vlan_error:
6555 	pm_runtime_put(priv->device);
6556 
6557 	return ret;
6558 }
6559 
6560 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6561 {
6562 	struct stmmac_priv *priv = netdev_priv(dev);
6563 
6564 	switch (bpf->command) {
6565 	case XDP_SETUP_PROG:
6566 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6567 	case XDP_SETUP_XSK_POOL:
6568 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6569 					     bpf->xsk.queue_id);
6570 	default:
6571 		return -EOPNOTSUPP;
6572 	}
6573 }
6574 
6575 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6576 			   struct xdp_frame **frames, u32 flags)
6577 {
6578 	struct stmmac_priv *priv = netdev_priv(dev);
6579 	int cpu = smp_processor_id();
6580 	struct netdev_queue *nq;
6581 	int i, nxmit = 0;
6582 	int queue;
6583 
6584 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6585 		return -ENETDOWN;
6586 
6587 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6588 		return -EINVAL;
6589 
6590 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6591 	nq = netdev_get_tx_queue(priv->dev, queue);
6592 
6593 	__netif_tx_lock(nq, cpu);
6594 	/* Avoids TX time-out as we are sharing with slow path */
6595 	txq_trans_cond_update(nq);
6596 
6597 	for (i = 0; i < num_frames; i++) {
6598 		int res;
6599 
6600 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6601 		if (res == STMMAC_XDP_CONSUMED)
6602 			break;
6603 
6604 		nxmit++;
6605 	}
6606 
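	/* On XDP_XMIT_FLUSH, push the queued descriptors to the DMA and
	 * re-arm the TX coalescing timer.
	 */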
6607 	if (flags & XDP_XMIT_FLUSH) {
6608 		stmmac_flush_tx_descriptors(priv, queue);
6609 		stmmac_tx_timer_arm(priv, queue);
6610 	}
6611 
6612 	__netif_tx_unlock(nq);
6613 
6614 	return nxmit;
6615 }
6616 
6617 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6618 {
6619 	struct stmmac_channel *ch = &priv->channel[queue];
6620 	unsigned long flags;
6621 
6622 	spin_lock_irqsave(&ch->lock, flags);
6623 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6624 	spin_unlock_irqrestore(&ch->lock, flags);
6625 
6626 	stmmac_stop_rx_dma(priv, queue);
6627 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6628 }
6629 
6630 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6631 {
6632 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6633 	struct stmmac_channel *ch = &priv->channel[queue];
6634 	unsigned long flags;
6635 	u32 buf_size;
6636 	int ret;
6637 
6638 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6639 	if (ret) {
6640 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6641 		return;
6642 	}
6643 
6644 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6645 	if (ret) {
6646 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6647 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6648 		return;
6649 	}
6650 
6651 	stmmac_reset_rx_queue(priv, queue);
6652 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6653 
6654 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6655 			    rx_q->dma_rx_phy, rx_q->queue_index);
6656 
6657 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6658 			     sizeof(struct dma_desc));
6659 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6660 			       rx_q->rx_tail_addr, rx_q->queue_index);
6661 
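	/* Program the DMA buffer size: the XSK frame size when a zero-copy
	 * pool is attached, the default buffer size otherwise.
	 */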
6662 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6663 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6664 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6665 				      buf_size,
6666 				      rx_q->queue_index);
6667 	} else {
6668 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6669 				      priv->dma_conf.dma_buf_sz,
6670 				      rx_q->queue_index);
6671 	}
6672 
6673 	stmmac_start_rx_dma(priv, queue);
6674 
6675 	spin_lock_irqsave(&ch->lock, flags);
6676 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6677 	spin_unlock_irqrestore(&ch->lock, flags);
6678 }
6679 
6680 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6681 {
6682 	struct stmmac_channel *ch = &priv->channel[queue];
6683 	unsigned long flags;
6684 
6685 	spin_lock_irqsave(&ch->lock, flags);
6686 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6687 	spin_unlock_irqrestore(&ch->lock, flags);
6688 
6689 	stmmac_stop_tx_dma(priv, queue);
6690 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6691 }
6692 
6693 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6694 {
6695 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6696 	struct stmmac_channel *ch = &priv->channel[queue];
6697 	unsigned long flags;
6698 	int ret;
6699 
6700 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6701 	if (ret) {
6702 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6703 		return;
6704 	}
6705 
6706 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6707 	if (ret) {
6708 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6709 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6710 		return;
6711 	}
6712 
6713 	stmmac_reset_tx_queue(priv, queue);
6714 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6715 
6716 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6717 			    tx_q->dma_tx_phy, tx_q->queue_index);
6718 
6719 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6720 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6721 
6722 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6723 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6724 			       tx_q->tx_tail_addr, tx_q->queue_index);
6725 
6726 	stmmac_start_tx_dma(priv, queue);
6727 
6728 	spin_lock_irqsave(&ch->lock, flags);
6729 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6730 	spin_unlock_irqrestore(&ch->lock, flags);
6731 }
6732 
6733 void stmmac_xdp_release(struct net_device *dev)
6734 {
6735 	struct stmmac_priv *priv = netdev_priv(dev);
6736 	u32 chan;
6737 
6738 	/* Ensure tx function is not running */
6739 	netif_tx_disable(dev);
6740 
6741 	/* Disable NAPI process */
6742 	stmmac_disable_all_queues(priv);
6743 
6744 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6745 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6746 
6747 	/* Free the IRQ lines */
6748 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6749 
6750 	/* Stop TX/RX DMA channels */
6751 	stmmac_stop_all_dma(priv);
6752 
6753 	/* Release and free the Rx/Tx resources */
6754 	free_dma_desc_resources(priv, &priv->dma_conf);
6755 
6756 	/* Disable the MAC Rx/Tx */
6757 	stmmac_mac_set(priv, priv->ioaddr, false);
6758 
6759 	/* set trans_start so we don't get spurious
6760 	 * watchdogs during reset
6761 	 */
6762 	netif_trans_update(dev);
6763 	netif_carrier_off(dev);
6764 }
6765 
6766 int stmmac_xdp_open(struct net_device *dev)
6767 {
6768 	struct stmmac_priv *priv = netdev_priv(dev);
6769 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6770 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6771 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6772 	struct stmmac_rx_queue *rx_q;
6773 	struct stmmac_tx_queue *tx_q;
6774 	u32 buf_size;
6775 	bool sph_en;
6776 	u32 chan;
6777 	int ret;
6778 
6779 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6780 	if (ret < 0) {
6781 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6782 			   __func__);
6783 		goto dma_desc_error;
6784 	}
6785 
6786 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6787 	if (ret < 0) {
6788 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6789 			   __func__);
6790 		goto init_error;
6791 	}
6792 
6793 	stmmac_reset_queues_param(priv);
6794 
6795 	/* DMA CSR Channel configuration */
6796 	for (chan = 0; chan < dma_csr_ch; chan++) {
6797 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6798 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6799 	}
6800 
6801 	/* Adjust Split header */
6802 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6803 
6804 	/* DMA RX Channel Configuration */
6805 	for (chan = 0; chan < rx_cnt; chan++) {
6806 		rx_q = &priv->dma_conf.rx_queue[chan];
6807 
6808 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6809 				    rx_q->dma_rx_phy, chan);
6810 
6811 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6812 				     (rx_q->buf_alloc_num *
6813 				      sizeof(struct dma_desc));
6814 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6815 				       rx_q->rx_tail_addr, chan);
6816 
6817 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6818 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6819 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6820 					      buf_size,
6821 					      rx_q->queue_index);
6822 		} else {
6823 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6824 					      priv->dma_conf.dma_buf_sz,
6825 					      rx_q->queue_index);
6826 		}
6827 
6828 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6829 	}
6830 
6831 	/* DMA TX Channel Configuration */
6832 	for (chan = 0; chan < tx_cnt; chan++) {
6833 		tx_q = &priv->dma_conf.tx_queue[chan];
6834 
6835 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6836 				    tx_q->dma_tx_phy, chan);
6837 
6838 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6839 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6840 				       tx_q->tx_tail_addr, chan);
6841 
6842 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6843 		tx_q->txtimer.function = stmmac_tx_timer;
6844 	}
6845 
6846 	/* Enable the MAC Rx/Tx */
6847 	stmmac_mac_set(priv, priv->ioaddr, true);
6848 
6849 	/* Start Rx & Tx DMA Channels */
6850 	stmmac_start_all_dma(priv);
6851 
6852 	ret = stmmac_request_irq(dev);
6853 	if (ret)
6854 		goto irq_error;
6855 
6856 	/* Enable NAPI process */
6857 	stmmac_enable_all_queues(priv);
6858 	netif_carrier_on(dev);
6859 	netif_tx_start_all_queues(dev);
6860 	stmmac_enable_all_dma_irq(priv);
6861 
6862 	return 0;
6863 
6864 irq_error:
6865 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6866 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6867 
6868 	stmmac_hw_teardown(dev);
6869 init_error:
6870 	free_dma_desc_resources(priv, &priv->dma_conf);
6871 dma_desc_error:
6872 	return ret;
6873 }
6874 
6875 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6876 {
6877 	struct stmmac_priv *priv = netdev_priv(dev);
6878 	struct stmmac_rx_queue *rx_q;
6879 	struct stmmac_tx_queue *tx_q;
6880 	struct stmmac_channel *ch;
6881 
6882 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6883 	    !netif_carrier_ok(priv->dev))
6884 		return -ENETDOWN;
6885 
6886 	if (!stmmac_xdp_is_enabled(priv))
6887 		return -EINVAL;
6888 
6889 	if (queue >= priv->plat->rx_queues_to_use ||
6890 	    queue >= priv->plat->tx_queues_to_use)
6891 		return -EINVAL;
6892 
6893 	rx_q = &priv->dma_conf.rx_queue[queue];
6894 	tx_q = &priv->dma_conf.tx_queue[queue];
6895 	ch = &priv->channel[queue];
6896 
6897 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6898 		return -EINVAL;
6899 
6900 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6901 		/* EQoS does not have per-DMA channel SW interrupt,
6902 		 * so we schedule the rxtx NAPI straight away.
6903 		 */
6904 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6905 			__napi_schedule(&ch->rxtx_napi);
6906 	}
6907 
6908 	return 0;
6909 }
6910 
6911 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6912 {
6913 	struct stmmac_priv *priv = netdev_priv(dev);
6914 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6915 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6916 	unsigned int start;
6917 	int q;
6918 
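	/* Per-queue counters are written under a u64_stats seqcount; retry the
	 * snapshot if a writer raced with the read.
	 */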
6919 	for (q = 0; q < tx_cnt; q++) {
6920 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6921 		u64 tx_packets;
6922 		u64 tx_bytes;
6923 
6924 		do {
6925 			start = u64_stats_fetch_begin(&txq_stats->syncp);
6926 			tx_packets = txq_stats->tx_packets;
6927 			tx_bytes   = txq_stats->tx_bytes;
6928 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6929 
6930 		stats->tx_packets += tx_packets;
6931 		stats->tx_bytes += tx_bytes;
6932 	}
6933 
6934 	for (q = 0; q < rx_cnt; q++) {
6935 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6936 		u64 rx_packets;
6937 		u64 rx_bytes;
6938 
6939 		do {
6940 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
6941 			rx_packets = rxq_stats->rx_packets;
6942 			rx_bytes   = rxq_stats->rx_bytes;
6943 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6944 
6945 		stats->rx_packets += rx_packets;
6946 		stats->rx_bytes += rx_bytes;
6947 	}
6948 
6949 	stats->rx_dropped = priv->xstats.rx_dropped;
6950 	stats->rx_errors = priv->xstats.rx_errors;
6951 	stats->tx_dropped = priv->xstats.tx_dropped;
6952 	stats->tx_errors = priv->xstats.tx_errors;
6953 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6954 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6955 	stats->rx_length_errors = priv->xstats.rx_length;
6956 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6957 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6958 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6959 }
6960 
6961 static const struct net_device_ops stmmac_netdev_ops = {
6962 	.ndo_open = stmmac_open,
6963 	.ndo_start_xmit = stmmac_xmit,
6964 	.ndo_stop = stmmac_release,
6965 	.ndo_change_mtu = stmmac_change_mtu,
6966 	.ndo_fix_features = stmmac_fix_features,
6967 	.ndo_set_features = stmmac_set_features,
6968 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6969 	.ndo_tx_timeout = stmmac_tx_timeout,
6970 	.ndo_eth_ioctl = stmmac_ioctl,
6971 	.ndo_get_stats64 = stmmac_get_stats64,
6972 	.ndo_setup_tc = stmmac_setup_tc,
6973 	.ndo_select_queue = stmmac_select_queue,
6974 	.ndo_set_mac_address = stmmac_set_mac_address,
6975 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6976 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6977 	.ndo_bpf = stmmac_bpf,
6978 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6979 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6980 };
6981 
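/**
 * stmmac_reset_subtask - reset the adapter from the service task
 * @priv: driver private structure
 * Description: if a reset was requested and the interface is not already
 * going down, close and re-open the device under the rtnl lock to fully
 * re-initialize the hardware.
 */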
6982 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6983 {
6984 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6985 		return;
6986 	if (test_bit(STMMAC_DOWN, &priv->state))
6987 		return;
6988 
6989 	netdev_err(priv->dev, "Reset adapter.\n");
6990 
6991 	rtnl_lock();
6992 	netif_trans_update(priv->dev);
6993 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6994 		usleep_range(1000, 2000);
6995 
6996 	set_bit(STMMAC_DOWN, &priv->state);
6997 	dev_close(priv->dev);
6998 	dev_open(priv->dev, NULL);
6999 	clear_bit(STMMAC_DOWN, &priv->state);
7000 	clear_bit(STMMAC_RESETING, &priv->state);
7001 	rtnl_unlock();
7002 }
7003 
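/**
 * stmmac_service_task - deferred driver housekeeping
 * @work: work_struct embedded in the driver private structure
 * Description: currently this only runs the reset sub-task and clears the
 * service-scheduled flag.
 */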
7004 static void stmmac_service_task(struct work_struct *work)
7005 {
7006 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7007 			service_task);
7008 
7009 	stmmac_reset_subtask(priv);
7010 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7011 }
7012 
7013 /**
7014  *  stmmac_hw_init - Init the MAC device
7015  *  @priv: driver private structure
7016  *  Description: this function configures the MAC device according to the
7017  *  platform parameters and the HW capability register. It prepares the
7018  *  driver to use either ring or chain mode and either enhanced or
7019  *  normal descriptors.
7020  */
7021 static int stmmac_hw_init(struct stmmac_priv *priv)
7022 {
7023 	int ret;
7024 
7025 	/* dwmac-sun8i only works in chain mode */
7026 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7027 		chain_mode = 1;
7028 	priv->chain_mode = chain_mode;
7029 
7030 	/* Initialize HW Interface */
7031 	ret = stmmac_hwif_init(priv);
7032 	if (ret)
7033 		return ret;
7034 
7035 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7036 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7037 	if (priv->hw_cap_support) {
7038 		dev_info(priv->device, "DMA HW capability register supported\n");
7039 
7040 		/* Some GMAC/DMA configuration fields passed through the
7041 		 * platform data (e.g. enh_desc, tx_coe) can be overridden
7042 		 * with the values read from the HW capability register,
7043 		 * if supported.
7044 		 */
7045 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7046 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7047 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7048 		priv->hw->pmt = priv->plat->pmt;
7049 		if (priv->dma_cap.hash_tb_sz) {
7050 			priv->hw->multicast_filter_bins =
7051 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7052 			priv->hw->mcast_bits_log2 =
7053 					ilog2(priv->hw->multicast_filter_bins);
7054 		}
7055 
7056 		/* TXCOE doesn't work in thresh DMA mode */
7057 		if (priv->plat->force_thresh_dma_mode)
7058 			priv->plat->tx_coe = 0;
7059 		else
7060 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7061 
7062 		/* For GMAC4, rx_coe comes from the HW capability register. */
7063 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7064 
7065 		if (priv->dma_cap.rx_coe_type2)
7066 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7067 		else if (priv->dma_cap.rx_coe_type1)
7068 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7069 
7070 	} else {
7071 		dev_info(priv->device, "No HW DMA feature register supported\n");
7072 	}
7073 
7074 	if (priv->plat->rx_coe) {
7075 		priv->hw->rx_csum = priv->plat->rx_coe;
7076 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7077 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7078 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7079 	}
7080 	if (priv->plat->tx_coe)
7081 		dev_info(priv->device, "TX Checksum insertion supported\n");
7082 
7083 	if (priv->plat->pmt) {
7084 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7085 		device_set_wakeup_capable(priv->device, 1);
7086 	}
7087 
7088 	if (priv->dma_cap.tsoen)
7089 		dev_info(priv->device, "TSO supported\n");
7090 
7091 	priv->hw->vlan_fail_q_en =
7092 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7093 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7094 
7095 	/* Run HW quirks, if any */
7096 	if (priv->hwif_quirks) {
7097 		ret = priv->hwif_quirks(priv);
7098 		if (ret)
7099 			return ret;
7100 	}
7101 
7102 	/* The RX watchdog is available on cores newer than 3.40.
7103 	 * In some cases, for example on buggy HW, this feature
7104 	 * has to be disabled; this can be done by setting the
7105 	 * riwt_off field in the platform data.
7106 	 */
7107 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7108 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7109 		priv->use_riwt = 1;
7110 		dev_info(priv->device,
7111 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7112 	}
7113 
7114 	return 0;
7115 }
7116 
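/**
 * stmmac_napi_add - register the per-channel NAPI contexts
 * @dev: device pointer
 * Description: for each channel, register the RX and/or TX NAPI instances
 * and, when the channel has both an RX and a TX queue, the combined rxtx
 * NAPI used for XSK zero-copy processing.
 */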
7117 static void stmmac_napi_add(struct net_device *dev)
7118 {
7119 	struct stmmac_priv *priv = netdev_priv(dev);
7120 	u32 queue, maxq;
7121 
7122 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7123 
7124 	for (queue = 0; queue < maxq; queue++) {
7125 		struct stmmac_channel *ch = &priv->channel[queue];
7126 
7127 		ch->priv_data = priv;
7128 		ch->index = queue;
7129 		spin_lock_init(&ch->lock);
7130 
7131 		if (queue < priv->plat->rx_queues_to_use) {
7132 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7133 		}
7134 		if (queue < priv->plat->tx_queues_to_use) {
7135 			netif_napi_add_tx(dev, &ch->tx_napi,
7136 					  stmmac_napi_poll_tx);
7137 		}
7138 		if (queue < priv->plat->rx_queues_to_use &&
7139 		    queue < priv->plat->tx_queues_to_use) {
7140 			netif_napi_add(dev, &ch->rxtx_napi,
7141 				       stmmac_napi_poll_rxtx);
7142 		}
7143 	}
7144 }
7145 
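/**
 * stmmac_napi_del - unregister the per-channel NAPI contexts
 * @dev: device pointer
 * Description: counterpart of stmmac_napi_add(), deleting every NAPI
 * instance registered for the channels.
 */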
7146 static void stmmac_napi_del(struct net_device *dev)
7147 {
7148 	struct stmmac_priv *priv = netdev_priv(dev);
7149 	u32 queue, maxq;
7150 
7151 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7152 
7153 	for (queue = 0; queue < maxq; queue++) {
7154 		struct stmmac_channel *ch = &priv->channel[queue];
7155 
7156 		if (queue < priv->plat->rx_queues_to_use)
7157 			netif_napi_del(&ch->rx_napi);
7158 		if (queue < priv->plat->tx_queues_to_use)
7159 			netif_napi_del(&ch->tx_napi);
7160 		if (queue < priv->plat->rx_queues_to_use &&
7161 		    queue < priv->plat->tx_queues_to_use) {
7162 			netif_napi_del(&ch->rxtx_napi);
7163 		}
7164 	}
7165 }
7166 
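/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: stop the interface if it is running, re-create the NAPI
 * contexts and the default RSS table for the new queue counts, then
 * restart the interface.
 */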
7167 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7168 {
7169 	struct stmmac_priv *priv = netdev_priv(dev);
7170 	int ret = 0, i;
7171 
7172 	if (netif_running(dev))
7173 		stmmac_release(dev);
7174 
7175 	stmmac_napi_del(dev);
7176 
7177 	priv->plat->rx_queues_to_use = rx_cnt;
7178 	priv->plat->tx_queues_to_use = tx_cnt;
7179 	if (!netif_is_rxfh_configured(dev))
7180 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7181 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7182 									rx_cnt);
7183 
7184 	stmmac_set_half_duplex(priv);
7185 	stmmac_napi_add(dev);
7186 
7187 	if (netif_running(dev))
7188 		ret = stmmac_open(dev);
7189 
7190 	return ret;
7191 }
7192 
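/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: device pointer
 * @rx_size: new number of RX descriptors per ring
 * @tx_size: new number of TX descriptors per ring
 * Description: stop the interface if it is running, update the DMA
 * configuration and restart the interface so the new sizes take effect.
 */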
7193 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7194 {
7195 	struct stmmac_priv *priv = netdev_priv(dev);
7196 	int ret = 0;
7197 
7198 	if (netif_running(dev))
7199 		stmmac_release(dev);
7200 
7201 	priv->dma_conf.dma_rx_size = rx_size;
7202 	priv->dma_conf.dma_tx_size = tx_size;
7203 
7204 	if (netif_running(dev))
7205 		ret = stmmac_open(dev);
7206 
7207 	return ret;
7208 }
7209 
7210 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
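/**
 * stmmac_fpe_lp_task - FPE link-partner handshake work
 * @work: work_struct embedded in the driver private structure
 * Description: poll the local and link-partner FPE states and keep sending
 * verify mPackets until both sides report ON (or the handshake is switched
 * off), then program frame preemption in the hardware. Gives up after 20
 * attempts, sleeping 500 ms between retries.
 */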
7211 static void stmmac_fpe_lp_task(struct work_struct *work)
7212 {
7213 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7214 						fpe_task);
7215 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7216 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7217 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7218 	bool *hs_enable = &fpe_cfg->hs_enable;
7219 	bool *enable = &fpe_cfg->enable;
7220 	int retries = 20;
7221 
7222 	while (retries-- > 0) {
7223 		/* Bail out immediately if FPE handshake is OFF */
7224 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7225 			break;
7226 
7227 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7228 		    *lp_state == FPE_STATE_ENTERING_ON) {
7229 			stmmac_fpe_configure(priv, priv->ioaddr,
7230 					     priv->plat->tx_queues_to_use,
7231 					     priv->plat->rx_queues_to_use,
7232 					     *enable);
7233 
7234 			netdev_info(priv->dev, "configured FPE\n");
7235 
7236 			*lo_state = FPE_STATE_ON;
7237 			*lp_state = FPE_STATE_ON;
7238 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7239 			break;
7240 		}
7241 
7242 		if ((*lo_state == FPE_STATE_CAPABLE ||
7243 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7244 		     *lp_state != FPE_STATE_ON) {
7245 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7246 				    *lo_state, *lp_state);
7247 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7248 						MPACKET_VERIFY);
7249 		}
7250 		/* Sleep then retry */
7251 		msleep(500);
7252 	}
7253 
7254 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7255 }
7256 
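/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to stop it
 * Description: when enabling, send a verify mPacket to the link partner;
 * when disabling, reset both local and link-partner FPE states to OFF.
 */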
7257 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7258 {
7259 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7260 		if (enable) {
7261 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7262 						MPACKET_VERIFY);
7263 		} else {
7264 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7265 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7266 		}
7267 
7268 		priv->plat->fpe_cfg->hs_enable = enable;
7269 	}
7270 }
7271 
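/**
 * stmmac_xdp_rx_timestamp - XDP metadata hook for the RX hardware timestamp
 * @_ctx: XDP metadata context (cast to struct stmmac_xdp_buff)
 * @timestamp: filled with the hardware RX timestamp on success
 * Description: return the hardware timestamp of the received frame,
 * corrected by the CDC error adjustment, or -ENODATA if RX timestamping
 * is disabled or no timestamp is available for this descriptor.
 */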
7272 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7273 {
7274 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7275 	struct dma_desc *desc_contains_ts = ctx->desc;
7276 	struct stmmac_priv *priv = ctx->priv;
7277 	struct dma_desc *ndesc = ctx->ndesc;
7278 	struct dma_desc *desc = ctx->desc;
7279 	u64 ns = 0;
7280 
7281 	if (!priv->hwts_rx_en)
7282 		return -ENODATA;
7283 
7284 	/* For GMAC4/XGMAC, the valid timestamp is held in the context (next) descriptor. */
7285 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7286 		desc_contains_ts = ndesc;
7287 
7288 	/* Check if timestamp is available */
7289 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7290 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7291 		ns -= priv->plat->cdc_error_adj;
7292 		*timestamp = ns_to_ktime(ns);
7293 		return 0;
7294 	}
7295 
7296 	return -ENODATA;
7297 }
7298 
7299 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7300 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7301 };
7302 
7303 /**
7304  * stmmac_dvr_probe
7305  * @device: device pointer
7306  * @plat_dat: platform data pointer
7307  * @res: stmmac resource pointer
7308  * Description: this is the main probe function; it allocates the
7309  * net_device together with the private structure and initializes them.
7310  * Return:
7311  * returns 0 on success, otherwise a negative errno.
7312  */
7313 int stmmac_dvr_probe(struct device *device,
7314 		     struct plat_stmmacenet_data *plat_dat,
7315 		     struct stmmac_resources *res)
7316 {
7317 	struct net_device *ndev = NULL;
7318 	struct stmmac_priv *priv;
7319 	u32 rxq;
7320 	int i, ret = 0;
7321 
7322 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7323 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7324 	if (!ndev)
7325 		return -ENOMEM;
7326 
7327 	SET_NETDEV_DEV(ndev, device);
7328 
7329 	priv = netdev_priv(ndev);
7330 	priv->device = device;
7331 	priv->dev = ndev;
7332 
7333 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7334 		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
7335 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7336 		u64_stats_init(&priv->xstats.txq_stats[i].syncp);
7337 
7338 	stmmac_set_ethtool_ops(ndev);
7339 	priv->pause = pause;
7340 	priv->plat = plat_dat;
7341 	priv->ioaddr = res->addr;
7342 	priv->dev->base_addr = (unsigned long)res->addr;
7343 	priv->plat->dma_cfg->multi_msi_en =
7344 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7345 
7346 	priv->dev->irq = res->irq;
7347 	priv->wol_irq = res->wol_irq;
7348 	priv->lpi_irq = res->lpi_irq;
7349 	priv->sfty_ce_irq = res->sfty_ce_irq;
7350 	priv->sfty_ue_irq = res->sfty_ue_irq;
7351 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7352 		priv->rx_irq[i] = res->rx_irq[i];
7353 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7354 		priv->tx_irq[i] = res->tx_irq[i];
7355 
7356 	if (!is_zero_ether_addr(res->mac))
7357 		eth_hw_addr_set(priv->dev, res->mac);
7358 
7359 	dev_set_drvdata(device, priv->dev);
7360 
7361 	/* Verify driver arguments */
7362 	stmmac_verify_args();
7363 
7364 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7365 	if (!priv->af_xdp_zc_qps)
7366 		return -ENOMEM;
7367 
7368 	/* Allocate workqueue */
7369 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7370 	if (!priv->wq) {
7371 		dev_err(priv->device, "failed to create workqueue\n");
7372 		ret = -ENOMEM;
7373 		goto error_wq_init;
7374 	}
7375 
7376 	INIT_WORK(&priv->service_task, stmmac_service_task);
7377 
7378 	/* Initialize Link Partner FPE workqueue */
7379 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7380 
7381 	/* Override the PHY address with the kernel parameter, if supplied.
7382 	 * XXX: this needs to support multiple instances.
7383 	 */
7384 	if ((phyaddr >= 0) && (phyaddr <= 31))
7385 		priv->plat->phy_addr = phyaddr;
7386 
7387 	if (priv->plat->stmmac_rst) {
7388 		ret = reset_control_assert(priv->plat->stmmac_rst);
7389 		reset_control_deassert(priv->plat->stmmac_rst);
7390 		/* Some reset controllers provide only a reset callback
7391 		 * instead of the assert + deassert callback pair.
7392 		 */
7393 		if (ret == -ENOTSUPP)
7394 			reset_control_reset(priv->plat->stmmac_rst);
7395 	}
7396 
7397 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7398 	if (ret == -ENOTSUPP)
7399 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7400 			ERR_PTR(ret));
7401 
7402 	/* Init MAC and get the capabilities */
7403 	ret = stmmac_hw_init(priv);
7404 	if (ret)
7405 		goto error_hw_init;
7406 
7407 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7408 	 */
7409 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7410 		priv->plat->dma_cfg->dche = false;
7411 
7412 	stmmac_check_ether_addr(priv);
7413 
7414 	ndev->netdev_ops = &stmmac_netdev_ops;
7415 
7416 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7417 
7418 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7419 			    NETIF_F_RXCSUM;
7420 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7421 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7422 
7423 	ret = stmmac_tc_init(priv, priv);
7424 	if (!ret)
7425 		ndev->hw_features |= NETIF_F_HW_TC;
7427 
7428 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7429 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7430 		if (priv->plat->has_gmac4)
7431 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7432 		priv->tso = true;
7433 		dev_info(priv->device, "TSO feature enabled\n");
7434 	}
7435 
7436 	if (priv->dma_cap.sphen &&
7437 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7438 		ndev->hw_features |= NETIF_F_GRO;
7439 		priv->sph_cap = true;
7440 		priv->sph = priv->sph_cap;
7441 		dev_info(priv->device, "SPH feature enabled\n");
7442 	}
7443 
7444 	/* Ideally our host DMA address width is the same as for the
7445 	 * device. However, it may differ and then we have to use our
7446 	 * host DMA width for allocation and the device DMA width for
7447 	 * register handling.
7448 	 */
7449 	if (priv->plat->host_dma_width)
7450 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7451 	else
7452 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7453 
7454 	if (priv->dma_cap.host_dma_width) {
7455 		ret = dma_set_mask_and_coherent(device,
7456 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7457 		if (!ret) {
7458 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7459 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7460 
7461 			/*
7462 			 * If more than 32 bits can be addressed, make sure to
7463 			 * enable enhanced addressing mode.
7464 			 */
7465 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7466 				priv->plat->dma_cfg->eame = true;
7467 		} else {
7468 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7469 			if (ret) {
7470 				dev_err(priv->device, "Failed to set DMA Mask\n");
7471 				goto error_hw_init;
7472 			}
7473 
7474 			priv->dma_cap.host_dma_width = 32;
7475 		}
7476 	}
7477 
7478 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7479 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7480 #ifdef STMMAC_VLAN_TAG_USED
7481 	/* Both mac100 and gmac support receive VLAN tag detection */
7482 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7483 	if (priv->dma_cap.vlhash) {
7484 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7485 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7486 	}
7487 	if (priv->dma_cap.vlins) {
7488 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7489 		if (priv->dma_cap.dvlan)
7490 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7491 	}
7492 #endif
7493 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7494 
7495 	priv->xstats.threshold = tc;
7496 
7497 	/* Initialize RSS */
7498 	rxq = priv->plat->rx_queues_to_use;
7499 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7500 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7501 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7502 
7503 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7504 		ndev->features |= NETIF_F_RXHASH;
7505 
7506 	ndev->vlan_features |= ndev->features;
7507 	/* TSO doesn't work on VLANs yet */
7508 	ndev->vlan_features &= ~NETIF_F_TSO;
7509 
7510 	/* MTU range: 46 - hw-specific max */
7511 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7512 	if (priv->plat->has_xgmac)
7513 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7514 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7515 		ndev->max_mtu = JUMBO_LEN;
7516 	else
7517 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7518 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7519 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7520 	 */
7521 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7522 	    (priv->plat->maxmtu >= ndev->min_mtu))
7523 		ndev->max_mtu = priv->plat->maxmtu;
7524 	else if (priv->plat->maxmtu < ndev->min_mtu)
7525 		dev_warn(priv->device,
7526 			 "%s: warning: maxmtu having invalid value (%d)\n",
7527 			 __func__, priv->plat->maxmtu);
7528 
7529 	if (flow_ctrl)
7530 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7531 
7532 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7533 
7534 	/* Setup channels NAPI */
7535 	stmmac_napi_add(ndev);
7536 
7537 	mutex_init(&priv->lock);
7538 
7539 	/* If a specific clk_csr value is passed from the platform,
7540 	 * the CSR Clock Range selection is fixed and cannot be
7541 	 * changed at run-time. Otherwise, the driver will try to
7542 	 * set the MDC clock dynamically according to the actual
7543 	 * CSR clock input.
7544 	 */
7545 	if (priv->plat->clk_csr >= 0)
7546 		priv->clk_csr = priv->plat->clk_csr;
7547 	else
7548 		stmmac_clk_csr_set(priv);
7549 
7550 	stmmac_check_pcs_mode(priv);
7551 
7552 	pm_runtime_get_noresume(device);
7553 	pm_runtime_set_active(device);
7554 	if (!pm_runtime_enabled(device))
7555 		pm_runtime_enable(device);
7556 
7557 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7558 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7559 		/* MDIO bus Registration */
7560 		ret = stmmac_mdio_register(ndev);
7561 		if (ret < 0) {
7562 			dev_err_probe(priv->device, ret,
7563 				      "%s: MDIO bus (id: %d) registration failed\n",
7564 				      __func__, priv->plat->bus_id);
7565 			goto error_mdio_register;
7566 		}
7567 	}
7568 
7569 	if (priv->plat->speed_mode_2500)
7570 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7571 
7572 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7573 		ret = stmmac_xpcs_setup(priv->mii);
7574 		if (ret)
7575 			goto error_xpcs_setup;
7576 	}
7577 
7578 	ret = stmmac_phy_setup(priv);
7579 	if (ret) {
7580 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7581 		goto error_phy_setup;
7582 	}
7583 
7584 	ret = register_netdev(ndev);
7585 	if (ret) {
7586 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7587 			__func__, ret);
7588 		goto error_netdev_register;
7589 	}
7590 
7591 #ifdef CONFIG_DEBUG_FS
7592 	stmmac_init_fs(ndev);
7593 #endif
7594 
7595 	if (priv->plat->dump_debug_regs)
7596 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7597 
7598 	/* Let pm_runtime_put() disable the clocks.
7599 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7600 	 */
7601 	pm_runtime_put(device);
7602 
7603 	return ret;
7604 
7605 error_netdev_register:
7606 	phylink_destroy(priv->phylink);
7607 error_xpcs_setup:
7608 error_phy_setup:
7609 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7610 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7611 		stmmac_mdio_unregister(ndev);
7612 error_mdio_register:
7613 	stmmac_napi_del(ndev);
7614 error_hw_init:
7615 	destroy_workqueue(priv->wq);
7616 error_wq_init:
7617 	bitmap_free(priv->af_xdp_zc_qps);
7618 
7619 	return ret;
7620 }
7621 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7622 
7623 /**
7624  * stmmac_dvr_remove
7625  * @dev: device pointer
7626  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7627  * changes the link status and releases the DMA descriptor rings.
7628  */
7629 void stmmac_dvr_remove(struct device *dev)
7630 {
7631 	struct net_device *ndev = dev_get_drvdata(dev);
7632 	struct stmmac_priv *priv = netdev_priv(ndev);
7633 
7634 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7635 
7636 	pm_runtime_get_sync(dev);
7637 
7638 	stmmac_stop_all_dma(priv);
7639 	stmmac_mac_set(priv, priv->ioaddr, false);
7640 	netif_carrier_off(ndev);
7641 	unregister_netdev(ndev);
7642 
7643 #ifdef CONFIG_DEBUG_FS
7644 	stmmac_exit_fs(ndev);
7645 #endif
7646 	phylink_destroy(priv->phylink);
7647 	if (priv->plat->stmmac_rst)
7648 		reset_control_assert(priv->plat->stmmac_rst);
7649 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7650 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7651 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7652 		stmmac_mdio_unregister(ndev);
7653 	destroy_workqueue(priv->wq);
7654 	mutex_destroy(&priv->lock);
7655 	bitmap_free(priv->af_xdp_zc_qps);
7656 
7657 	pm_runtime_disable(dev);
7658 	pm_runtime_put_noidle(dev);
7659 }
7660 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7661 
7662 /**
7663  * stmmac_suspend - suspend callback
7664  * @dev: device pointer
7665  * Description: this function suspends the device; it is called by the
7666  * platform driver to stop the network queues, release the resources,
7667  * program the PMT register (for WoL) and clean up the driver resources.
7668  */
7669 int stmmac_suspend(struct device *dev)
7670 {
7671 	struct net_device *ndev = dev_get_drvdata(dev);
7672 	struct stmmac_priv *priv = netdev_priv(ndev);
7673 	u32 chan;
7674 
7675 	if (!ndev || !netif_running(ndev))
7676 		return 0;
7677 
7678 	mutex_lock(&priv->lock);
7679 
7680 	netif_device_detach(ndev);
7681 
7682 	stmmac_disable_all_queues(priv);
7683 
7684 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7685 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7686 
7687 	if (priv->eee_enabled) {
7688 		priv->tx_path_in_lpi_mode = false;
7689 		del_timer_sync(&priv->eee_ctrl_timer);
7690 	}
7691 
7692 	/* Stop TX/RX DMA */
7693 	stmmac_stop_all_dma(priv);
7694 
7695 	if (priv->plat->serdes_powerdown)
7696 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7697 
7698 	/* Enable Power down mode by programming the PMT regs */
7699 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7700 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7701 		priv->irq_wake = 1;
7702 	} else {
7703 		stmmac_mac_set(priv, priv->ioaddr, false);
7704 		pinctrl_pm_select_sleep_state(priv->device);
7705 	}
7706 
7707 	mutex_unlock(&priv->lock);
7708 
7709 	rtnl_lock();
7710 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7711 		phylink_suspend(priv->phylink, true);
7712 	} else {
7713 		if (device_may_wakeup(priv->device))
7714 			phylink_speed_down(priv->phylink, false);
7715 		phylink_suspend(priv->phylink, false);
7716 	}
7717 	rtnl_unlock();
7718 
7719 	if (priv->dma_cap.fpesel) {
7720 		/* Disable FPE */
7721 		stmmac_fpe_configure(priv, priv->ioaddr,
7722 				     priv->plat->tx_queues_to_use,
7723 				     priv->plat->rx_queues_to_use, false);
7724 
7725 		stmmac_fpe_handshake(priv, false);
7726 		stmmac_fpe_stop_wq(priv);
7727 	}
7728 
7729 	priv->speed = SPEED_UNKNOWN;
7730 	return 0;
7731 }
7732 EXPORT_SYMBOL_GPL(stmmac_suspend);
7733 
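/**
 * stmmac_reset_rx_queue - reset the software state of an RX queue
 * @priv: device pointer
 * @queue: RX queue index
 */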
7734 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7735 {
7736 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7737 
7738 	rx_q->cur_rx = 0;
7739 	rx_q->dirty_rx = 0;
7740 }
7741 
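/**
 * stmmac_reset_tx_queue - reset the software state of a TX queue
 * @priv: device pointer
 * @queue: TX queue index
 */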
7742 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7743 {
7744 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7745 
7746 	tx_q->cur_tx = 0;
7747 	tx_q->dirty_tx = 0;
7748 	tx_q->mss = 0;
7749 
7750 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7751 }
7752 
7753 /**
7754  * stmmac_reset_queues_param - reset queue parameters
7755  * @priv: device pointer
7756  */
7757 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7758 {
7759 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7760 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7761 	u32 queue;
7762 
7763 	for (queue = 0; queue < rx_cnt; queue++)
7764 		stmmac_reset_rx_queue(priv, queue);
7765 
7766 	for (queue = 0; queue < tx_cnt; queue++)
7767 		stmmac_reset_tx_queue(priv, queue);
7768 }
7769 
7770 /**
7771  * stmmac_resume - resume callback
7772  * @dev: device pointer
7773  * Description: on resume, this function is invoked to set up the DMA and the
7774  * MAC core in a usable state.
7775  */
7776 int stmmac_resume(struct device *dev)
7777 {
7778 	struct net_device *ndev = dev_get_drvdata(dev);
7779 	struct stmmac_priv *priv = netdev_priv(ndev);
7780 	int ret;
7781 
7782 	if (!netif_running(ndev))
7783 		return 0;
7784 
7785 	/* The Power Down bit in the PMT register is cleared
7786 	 * automatically as soon as a magic packet or a Wake-up frame
7787 	 * is received. Nevertheless, it's better to clear this bit
7788 	 * manually because it can cause problems when resuming
7789 	 * from another device (e.g. a serial console).
7790 	 */
7791 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7792 		mutex_lock(&priv->lock);
7793 		stmmac_pmt(priv, priv->hw, 0);
7794 		mutex_unlock(&priv->lock);
7795 		priv->irq_wake = 0;
7796 	} else {
7797 		pinctrl_pm_select_default_state(priv->device);
7798 		/* reset the PHY so that it's ready */
7799 		if (priv->mii)
7800 			stmmac_mdio_reset(priv->mii);
7801 	}
7802 
7803 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7804 	    priv->plat->serdes_powerup) {
7805 		ret = priv->plat->serdes_powerup(ndev,
7806 						 priv->plat->bsp_priv);
7807 
7808 		if (ret < 0)
7809 			return ret;
7810 	}
7811 
7812 	rtnl_lock();
7813 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7814 		phylink_resume(priv->phylink);
7815 	} else {
7816 		phylink_resume(priv->phylink);
7817 		if (device_may_wakeup(priv->device))
7818 			phylink_speed_up(priv->phylink);
7819 	}
7820 	rtnl_unlock();
7821 
7822 	rtnl_lock();
7823 	mutex_lock(&priv->lock);
7824 
7825 	stmmac_reset_queues_param(priv);
7826 
7827 	stmmac_free_tx_skbufs(priv);
7828 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7829 
7830 	stmmac_hw_setup(ndev, false);
7831 	stmmac_init_coalesce(priv);
7832 	stmmac_set_rx_mode(ndev);
7833 
7834 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7835 
7836 	stmmac_enable_all_queues(priv);
7837 	stmmac_enable_all_dma_irq(priv);
7838 
7839 	mutex_unlock(&priv->lock);
7840 	rtnl_unlock();
7841 
7842 	netif_device_attach(ndev);
7843 
7844 	return 0;
7845 }
7846 EXPORT_SYMBOL_GPL(stmmac_resume);
7847 
7848 #ifndef MODULE
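/**
 * stmmac_cmdline_opt - parse the "stmmaceth=" kernel command line
 * @str: comma separated list of key:value options
 * Description: built-in (non-module) equivalent of the module parameters,
 * e.g. "stmmaceth=debug:16,phyaddr:1,watchdog:4000". Unknown keys are
 * silently ignored; a malformed value only logs an error.
 */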
7849 static int __init stmmac_cmdline_opt(char *str)
7850 {
7851 	char *opt;
7852 
7853 	if (!str || !*str)
7854 		return 1;
7855 	while ((opt = strsep(&str, ",")) != NULL) {
7856 		if (!strncmp(opt, "debug:", 6)) {
7857 			if (kstrtoint(opt + 6, 0, &debug))
7858 				goto err;
7859 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7860 			if (kstrtoint(opt + 8, 0, &phyaddr))
7861 				goto err;
7862 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7863 			if (kstrtoint(opt + 7, 0, &buf_sz))
7864 				goto err;
7865 		} else if (!strncmp(opt, "tc:", 3)) {
7866 			if (kstrtoint(opt + 3, 0, &tc))
7867 				goto err;
7868 		} else if (!strncmp(opt, "watchdog:", 9)) {
7869 			if (kstrtoint(opt + 9, 0, &watchdog))
7870 				goto err;
7871 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7872 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7873 				goto err;
7874 		} else if (!strncmp(opt, "pause:", 6)) {
7875 			if (kstrtoint(opt + 6, 0, &pause))
7876 				goto err;
7877 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7878 			if (kstrtoint(opt + 10, 0, &eee_timer))
7879 				goto err;
7880 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7881 			if (kstrtoint(opt + 11, 0, &chain_mode))
7882 				goto err;
7883 		}
7884 	}
7885 	return 1;
7886 
7887 err:
7888 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7889 	return 1;
7890 }
7891 
7892 __setup("stmmaceth=", stmmac_cmdline_opt);
7893 #endif /* MODULE */
7894 
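/**
 * stmmac_init - driver module entry point
 * Description: create the debugfs root directory (when CONFIG_DEBUG_FS is
 * set) and register the netdev notifier used by the debugfs support.
 */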
7895 static int __init stmmac_init(void)
7896 {
7897 #ifdef CONFIG_DEBUG_FS
7898 	/* Create debugfs main directory if it doesn't exist yet */
7899 	if (!stmmac_fs_dir)
7900 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7901 	register_netdevice_notifier(&stmmac_notifier);
7902 #endif
7903 
7904 	return 0;
7905 }
7906 
7907 static void __exit stmmac_exit(void)
7908 {
7909 #ifdef CONFIG_DEBUG_FS
7910 	unregister_netdevice_notifier(&stmmac_notifier);
7911 	debugfs_remove_recursive(stmmac_fs_dir);
7912 #endif
7913 }
7914 
7915 module_init(stmmac_init)
7916 module_exit(stmmac_exit)
7917 
7918 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7919 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7920 MODULE_LICENSE("GPL");
7921