xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision e855afd715656a9f25cf62fa68d99c33213b83b7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoid non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and set a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Viceversa the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we can not estimate the proper divider as it is not known
311 	 * the frequency of clk_csr_i. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function is to exit and disable EEE in case of
433  * LPI state is true. This is called by the xmit.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then MAC Transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enable the LPI state and start related
468  *  timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using PCS we cannot dial with the phy registers at this stage
475 	 * so we do not support extra feature like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function will read timestamp from the descriptor & pass it to stack.
534  * and also perform some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read received packet's timestamp from the descriptor
577  * and pass it to stack. It also perform some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing(TX)
614  *  and incoming(RX) packets time stamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1 any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtain the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * Will be rerun after resuming from suspend, case in which the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate default added value:
858 	 * formula is :
859 	 * addend = (2^32)/freq_div_ratio;
860 	 * where, freq_div_ratio = 1e9ns/sec_inc
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
968 	} else {
969 		*lo_state = FPE_STATE_OFF;
970 		*lp_state = FPE_STATE_OFF;
971 	}
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	priv->eee_active = false;
981 	priv->tx_lpi_enabled = false;
982 	priv->eee_enabled = stmmac_eee_init(priv);
983 	stmmac_set_eee_pls(priv, priv->hw, false);
984 
985 	if (priv->dma_cap.fpesel)
986 		stmmac_fpe_link_state_handle(priv, false);
987 }
988 
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 			       struct phy_device *phy,
991 			       unsigned int mode, phy_interface_t interface,
992 			       int speed, int duplex,
993 			       bool tx_pause, bool rx_pause)
994 {
995 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 	u32 old_ctrl, ctrl;
997 
998 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 	    priv->plat->serdes_powerup)
1000 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001 
1002 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004 
1005 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 		switch (speed) {
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_5000:
1011 			ctrl |= priv->hw->link.xgmii.speed5000;
1012 			break;
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.xgmii.speed2500;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 		switch (speed) {
1021 		case SPEED_100000:
1022 			ctrl |= priv->hw->link.xlgmii.speed100000;
1023 			break;
1024 		case SPEED_50000:
1025 			ctrl |= priv->hw->link.xlgmii.speed50000;
1026 			break;
1027 		case SPEED_40000:
1028 			ctrl |= priv->hw->link.xlgmii.speed40000;
1029 			break;
1030 		case SPEED_25000:
1031 			ctrl |= priv->hw->link.xlgmii.speed25000;
1032 			break;
1033 		case SPEED_10000:
1034 			ctrl |= priv->hw->link.xgmii.speed10000;
1035 			break;
1036 		case SPEED_2500:
1037 			ctrl |= priv->hw->link.speed2500;
1038 			break;
1039 		case SPEED_1000:
1040 			ctrl |= priv->hw->link.speed1000;
1041 			break;
1042 		default:
1043 			return;
1044 		}
1045 	} else {
1046 		switch (speed) {
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		case SPEED_100:
1054 			ctrl |= priv->hw->link.speed100;
1055 			break;
1056 		case SPEED_10:
1057 			ctrl |= priv->hw->link.speed10;
1058 			break;
1059 		default:
1060 			return;
1061 		}
1062 	}
1063 
1064 	priv->speed = speed;
1065 
1066 	if (priv->plat->fix_mac_speed)
1067 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068 
1069 	if (!duplex)
1070 		ctrl &= ~priv->hw->link.duplex;
1071 	else
1072 		ctrl |= priv->hw->link.duplex;
1073 
1074 	/* Flow Control operation */
1075 	if (rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_AUTO;
1077 	else if (rx_pause && !tx_pause)
1078 		priv->flow_ctrl = FLOW_RX;
1079 	else if (!rx_pause && tx_pause)
1080 		priv->flow_ctrl = FLOW_TX;
1081 	else
1082 		priv->flow_ctrl = FLOW_OFF;
1083 
1084 	stmmac_mac_flow_ctrl(priv, duplex);
1085 
1086 	if (ctrl != old_ctrl)
1087 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088 
1089 	stmmac_mac_set(priv, priv->ioaddr, true);
1090 	if (phy && priv->dma_cap.eee) {
1091 		priv->eee_active =
1092 			phy_init_eee(phy, !(priv->plat->flags &
1093 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 		priv->eee_enabled = stmmac_eee_init(priv);
1095 		priv->tx_lpi_enabled = priv->eee_enabled;
1096 		stmmac_set_eee_pls(priv, priv->hw, true);
1097 	}
1098 
1099 	if (priv->dma_cap.fpesel)
1100 		stmmac_fpe_link_state_handle(priv, true);
1101 
1102 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 		stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105 
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 	.mac_select_pcs = stmmac_mac_select_pcs,
1108 	.mac_config = stmmac_mac_config,
1109 	.mac_link_down = stmmac_mac_link_down,
1110 	.mac_link_up = stmmac_mac_link_up,
1111 };
1112 
1113 /**
1114  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1115  * @priv: driver private structure
1116  * Description: this is to verify if the HW supports the PCS.
1117  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1118  * configured for the TBI, RTBI, or SGMII PHY interface.
1119  */
1120 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1121 {
1122 	int interface = priv->plat->interface;
1123 
1124 	if (priv->dma_cap.pcs) {
1125 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1129 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_RGMII;
1131 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1132 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1133 			priv->hw->pcs = STMMAC_PCS_SGMII;
1134 		}
1135 	}
1136 }
1137 
1138 /**
1139  * stmmac_init_phy - PHY initialization
1140  * @dev: net device structure
1141  * Description: it initializes the driver's PHY state, and attaches the PHY
1142  * to the mac driver.
1143  *  Return value:
1144  *  0 on success
1145  */
1146 static int stmmac_init_phy(struct net_device *dev)
1147 {
1148 	struct stmmac_priv *priv = netdev_priv(dev);
1149 	struct fwnode_handle *phy_fwnode;
1150 	struct fwnode_handle *fwnode;
1151 	int ret;
1152 
1153 	if (!phylink_expects_phy(priv->phylink))
1154 		return 0;
1155 
1156 	fwnode = of_fwnode_handle(priv->plat->phylink_node);
1157 	if (!fwnode)
1158 		fwnode = dev_fwnode(priv->device);
1159 
1160 	if (fwnode)
1161 		phy_fwnode = fwnode_get_phy_node(fwnode);
1162 	else
1163 		phy_fwnode = NULL;
1164 
1165 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1166 	 * manually parse it
1167 	 */
1168 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1169 		int addr = priv->plat->phy_addr;
1170 		struct phy_device *phydev;
1171 
1172 		if (addr < 0) {
1173 			netdev_err(priv->dev, "no phy found\n");
1174 			return -ENODEV;
1175 		}
1176 
1177 		phydev = mdiobus_get_phy(priv->mii, addr);
1178 		if (!phydev) {
1179 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1180 			return -ENODEV;
1181 		}
1182 
1183 		ret = phylink_connect_phy(priv->phylink, phydev);
1184 	} else {
1185 		fwnode_handle_put(phy_fwnode);
1186 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 	}
1188 
1189 	if (!priv->plat->pmt) {
1190 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191 
1192 		phylink_ethtool_get_wol(priv->phylink, &wol);
1193 		device_set_wakeup_capable(priv->device, !!wol.supported);
1194 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 static int stmmac_phy_setup(struct stmmac_priv *priv)
1201 {
1202 	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1203 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1204 	int max_speed = priv->plat->max_speed;
1205 	int mode = priv->plat->phy_interface;
1206 	struct phylink *phylink;
1207 
1208 	priv->phylink_config.dev = &priv->dev->dev;
1209 	priv->phylink_config.type = PHYLINK_NETDEV;
1210 	if (priv->plat->mdio_bus_data)
1211 		priv->phylink_config.ovr_an_inband =
1212 			mdio_bus_data->xpcs_an_inband;
1213 
1214 	if (!fwnode)
1215 		fwnode = dev_fwnode(priv->device);
1216 
1217 	/* Set the platform/firmware specified interface mode */
1218 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1219 
1220 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1221 	if (priv->hw->xpcs)
1222 		xpcs_get_interfaces(priv->hw->xpcs,
1223 				    priv->phylink_config.supported_interfaces);
1224 
1225 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1226 		MAC_10 | MAC_100;
1227 
1228 	if (!max_speed || max_speed >= 1000)
1229 		priv->phylink_config.mac_capabilities |= MAC_1000;
1230 
1231 	if (priv->plat->has_gmac4) {
1232 		if (!max_speed || max_speed >= 2500)
1233 			priv->phylink_config.mac_capabilities |= MAC_2500FD;
1234 	} else if (priv->plat->has_xgmac) {
1235 		if (!max_speed || max_speed >= 2500)
1236 			priv->phylink_config.mac_capabilities |= MAC_2500FD;
1237 		if (!max_speed || max_speed >= 5000)
1238 			priv->phylink_config.mac_capabilities |= MAC_5000FD;
1239 		if (!max_speed || max_speed >= 10000)
1240 			priv->phylink_config.mac_capabilities |= MAC_10000FD;
1241 		if (!max_speed || max_speed >= 25000)
1242 			priv->phylink_config.mac_capabilities |= MAC_25000FD;
1243 		if (!max_speed || max_speed >= 40000)
1244 			priv->phylink_config.mac_capabilities |= MAC_40000FD;
1245 		if (!max_speed || max_speed >= 50000)
1246 			priv->phylink_config.mac_capabilities |= MAC_50000FD;
1247 		if (!max_speed || max_speed >= 100000)
1248 			priv->phylink_config.mac_capabilities |= MAC_100000FD;
1249 	}
1250 
1251 	/* Half-Duplex can only work with single queue */
1252 	if (priv->plat->tx_queues_to_use > 1)
1253 		priv->phylink_config.mac_capabilities &=
1254 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1255 	priv->phylink_config.mac_managed_pm = true;
1256 
1257 	phylink = phylink_create(&priv->phylink_config, fwnode,
1258 				 mode, &stmmac_phylink_mac_ops);
1259 	if (IS_ERR(phylink))
1260 		return PTR_ERR(phylink);
1261 
1262 	priv->phylink = phylink;
1263 	return 0;
1264 }
1265 
1266 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1267 				    struct stmmac_dma_conf *dma_conf)
1268 {
1269 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1270 	unsigned int desc_size;
1271 	void *head_rx;
1272 	u32 queue;
1273 
1274 	/* Display RX rings */
1275 	for (queue = 0; queue < rx_cnt; queue++) {
1276 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1277 
1278 		pr_info("\tRX Queue %u rings\n", queue);
1279 
1280 		if (priv->extend_desc) {
1281 			head_rx = (void *)rx_q->dma_erx;
1282 			desc_size = sizeof(struct dma_extended_desc);
1283 		} else {
1284 			head_rx = (void *)rx_q->dma_rx;
1285 			desc_size = sizeof(struct dma_desc);
1286 		}
1287 
1288 		/* Display RX ring */
1289 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1290 				    rx_q->dma_rx_phy, desc_size);
1291 	}
1292 }
1293 
1294 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1295 				    struct stmmac_dma_conf *dma_conf)
1296 {
1297 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1298 	unsigned int desc_size;
1299 	void *head_tx;
1300 	u32 queue;
1301 
1302 	/* Display TX rings */
1303 	for (queue = 0; queue < tx_cnt; queue++) {
1304 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1305 
1306 		pr_info("\tTX Queue %d rings\n", queue);
1307 
1308 		if (priv->extend_desc) {
1309 			head_tx = (void *)tx_q->dma_etx;
1310 			desc_size = sizeof(struct dma_extended_desc);
1311 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1312 			head_tx = (void *)tx_q->dma_entx;
1313 			desc_size = sizeof(struct dma_edesc);
1314 		} else {
1315 			head_tx = (void *)tx_q->dma_tx;
1316 			desc_size = sizeof(struct dma_desc);
1317 		}
1318 
1319 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1320 				    tx_q->dma_tx_phy, desc_size);
1321 	}
1322 }
1323 
1324 static void stmmac_display_rings(struct stmmac_priv *priv,
1325 				 struct stmmac_dma_conf *dma_conf)
1326 {
1327 	/* Display RX ring */
1328 	stmmac_display_rx_rings(priv, dma_conf);
1329 
1330 	/* Display TX ring */
1331 	stmmac_display_tx_rings(priv, dma_conf);
1332 }
1333 
1334 static int stmmac_set_bfsize(int mtu, int bufsize)
1335 {
1336 	int ret = bufsize;
1337 
1338 	if (mtu >= BUF_SIZE_8KiB)
1339 		ret = BUF_SIZE_16KiB;
1340 	else if (mtu >= BUF_SIZE_4KiB)
1341 		ret = BUF_SIZE_8KiB;
1342 	else if (mtu >= BUF_SIZE_2KiB)
1343 		ret = BUF_SIZE_4KiB;
1344 	else if (mtu > DEFAULT_BUFSIZE)
1345 		ret = BUF_SIZE_2KiB;
1346 	else
1347 		ret = DEFAULT_BUFSIZE;
1348 
1349 	return ret;
1350 }
1351 
1352 /**
1353  * stmmac_clear_rx_descriptors - clear RX descriptors
1354  * @priv: driver private structure
1355  * @dma_conf: structure to take the dma data
1356  * @queue: RX queue index
1357  * Description: this function is called to clear the RX descriptors
1358  * in case of both basic and extended descriptors are used.
1359  */
1360 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1361 					struct stmmac_dma_conf *dma_conf,
1362 					u32 queue)
1363 {
1364 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1365 	int i;
1366 
1367 	/* Clear the RX descriptors */
1368 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1369 		if (priv->extend_desc)
1370 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1371 					priv->use_riwt, priv->mode,
1372 					(i == dma_conf->dma_rx_size - 1),
1373 					dma_conf->dma_buf_sz);
1374 		else
1375 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1376 					priv->use_riwt, priv->mode,
1377 					(i == dma_conf->dma_rx_size - 1),
1378 					dma_conf->dma_buf_sz);
1379 }
1380 
1381 /**
1382  * stmmac_clear_tx_descriptors - clear tx descriptors
1383  * @priv: driver private structure
1384  * @dma_conf: structure to take the dma data
1385  * @queue: TX queue index.
1386  * Description: this function is called to clear the TX descriptors
1387  * in case of both basic and extended descriptors are used.
1388  */
1389 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1390 					struct stmmac_dma_conf *dma_conf,
1391 					u32 queue)
1392 {
1393 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1394 	int i;
1395 
1396 	/* Clear the TX descriptors */
1397 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1398 		int last = (i == (dma_conf->dma_tx_size - 1));
1399 		struct dma_desc *p;
1400 
1401 		if (priv->extend_desc)
1402 			p = &tx_q->dma_etx[i].basic;
1403 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1404 			p = &tx_q->dma_entx[i].basic;
1405 		else
1406 			p = &tx_q->dma_tx[i];
1407 
1408 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1409 	}
1410 }
1411 
1412 /**
1413  * stmmac_clear_descriptors - clear descriptors
1414  * @priv: driver private structure
1415  * @dma_conf: structure to take the dma data
1416  * Description: this function is called to clear the TX and RX descriptors
1417  * in case of both basic and extended descriptors are used.
1418  */
1419 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1420 				     struct stmmac_dma_conf *dma_conf)
1421 {
1422 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1423 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1424 	u32 queue;
1425 
1426 	/* Clear the RX descriptors */
1427 	for (queue = 0; queue < rx_queue_cnt; queue++)
1428 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1429 
1430 	/* Clear the TX descriptors */
1431 	for (queue = 0; queue < tx_queue_cnt; queue++)
1432 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1433 }
1434 
1435 /**
1436  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1437  * @priv: driver private structure
1438  * @dma_conf: structure to take the dma data
1439  * @p: descriptor pointer
1440  * @i: descriptor index
1441  * @flags: gfp flag
1442  * @queue: RX queue index
1443  * Description: this function is called to allocate a receive buffer, perform
1444  * the DMA mapping and init the descriptor.
1445  */
1446 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1447 				  struct stmmac_dma_conf *dma_conf,
1448 				  struct dma_desc *p,
1449 				  int i, gfp_t flags, u32 queue)
1450 {
1451 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1452 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1453 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1454 
1455 	if (priv->dma_cap.host_dma_width <= 32)
1456 		gfp |= GFP_DMA32;
1457 
1458 	if (!buf->page) {
1459 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1460 		if (!buf->page)
1461 			return -ENOMEM;
1462 		buf->page_offset = stmmac_rx_offset(priv);
1463 	}
1464 
1465 	if (priv->sph && !buf->sec_page) {
1466 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1467 		if (!buf->sec_page)
1468 			return -ENOMEM;
1469 
1470 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1471 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1472 	} else {
1473 		buf->sec_page = NULL;
1474 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1475 	}
1476 
1477 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1478 
1479 	stmmac_set_desc_addr(priv, p, buf->addr);
1480 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1481 		stmmac_init_desc3(priv, p);
1482 
1483 	return 0;
1484 }
1485 
1486 /**
1487  * stmmac_free_rx_buffer - free RX dma buffers
1488  * @priv: private structure
1489  * @rx_q: RX queue
1490  * @i: buffer index.
1491  */
1492 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1493 				  struct stmmac_rx_queue *rx_q,
1494 				  int i)
1495 {
1496 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1497 
1498 	if (buf->page)
1499 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1500 	buf->page = NULL;
1501 
1502 	if (buf->sec_page)
1503 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1504 	buf->sec_page = NULL;
1505 }
1506 
1507 /**
1508  * stmmac_free_tx_buffer - free RX dma buffers
1509  * @priv: private structure
1510  * @dma_conf: structure to take the dma data
1511  * @queue: RX queue index
1512  * @i: buffer index.
1513  */
1514 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1515 				  struct stmmac_dma_conf *dma_conf,
1516 				  u32 queue, int i)
1517 {
1518 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1519 
1520 	if (tx_q->tx_skbuff_dma[i].buf &&
1521 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1522 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1523 			dma_unmap_page(priv->device,
1524 				       tx_q->tx_skbuff_dma[i].buf,
1525 				       tx_q->tx_skbuff_dma[i].len,
1526 				       DMA_TO_DEVICE);
1527 		else
1528 			dma_unmap_single(priv->device,
1529 					 tx_q->tx_skbuff_dma[i].buf,
1530 					 tx_q->tx_skbuff_dma[i].len,
1531 					 DMA_TO_DEVICE);
1532 	}
1533 
1534 	if (tx_q->xdpf[i] &&
1535 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1536 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1537 		xdp_return_frame(tx_q->xdpf[i]);
1538 		tx_q->xdpf[i] = NULL;
1539 	}
1540 
1541 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1542 		tx_q->xsk_frames_done++;
1543 
1544 	if (tx_q->tx_skbuff[i] &&
1545 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1546 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1547 		tx_q->tx_skbuff[i] = NULL;
1548 	}
1549 
1550 	tx_q->tx_skbuff_dma[i].buf = 0;
1551 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1552 }
1553 
1554 /**
1555  * dma_free_rx_skbufs - free RX dma buffers
1556  * @priv: private structure
1557  * @dma_conf: structure to take the dma data
1558  * @queue: RX queue index
1559  */
1560 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1561 			       struct stmmac_dma_conf *dma_conf,
1562 			       u32 queue)
1563 {
1564 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1565 	int i;
1566 
1567 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1568 		stmmac_free_rx_buffer(priv, rx_q, i);
1569 }
1570 
1571 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1572 				   struct stmmac_dma_conf *dma_conf,
1573 				   u32 queue, gfp_t flags)
1574 {
1575 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1576 	int i;
1577 
1578 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1579 		struct dma_desc *p;
1580 		int ret;
1581 
1582 		if (priv->extend_desc)
1583 			p = &((rx_q->dma_erx + i)->basic);
1584 		else
1585 			p = rx_q->dma_rx + i;
1586 
1587 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1588 					     queue);
1589 		if (ret)
1590 			return ret;
1591 
1592 		rx_q->buf_alloc_num++;
1593 	}
1594 
1595 	return 0;
1596 }
1597 
1598 /**
1599  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1600  * @priv: private structure
1601  * @dma_conf: structure to take the dma data
1602  * @queue: RX queue index
1603  */
1604 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1605 				struct stmmac_dma_conf *dma_conf,
1606 				u32 queue)
1607 {
1608 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1609 	int i;
1610 
1611 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1612 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1613 
1614 		if (!buf->xdp)
1615 			continue;
1616 
1617 		xsk_buff_free(buf->xdp);
1618 		buf->xdp = NULL;
1619 	}
1620 }
1621 
1622 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1623 				      struct stmmac_dma_conf *dma_conf,
1624 				      u32 queue)
1625 {
1626 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1627 	int i;
1628 
1629 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1630 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1631 	 * use this macro to make sure no size violations.
1632 	 */
1633 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1634 
1635 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1636 		struct stmmac_rx_buffer *buf;
1637 		dma_addr_t dma_addr;
1638 		struct dma_desc *p;
1639 
1640 		if (priv->extend_desc)
1641 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1642 		else
1643 			p = rx_q->dma_rx + i;
1644 
1645 		buf = &rx_q->buf_pool[i];
1646 
1647 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1648 		if (!buf->xdp)
1649 			return -ENOMEM;
1650 
1651 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1652 		stmmac_set_desc_addr(priv, p, dma_addr);
1653 		rx_q->buf_alloc_num++;
1654 	}
1655 
1656 	return 0;
1657 }
1658 
1659 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1660 {
1661 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1662 		return NULL;
1663 
1664 	return xsk_get_pool_from_qid(priv->dev, queue);
1665 }
1666 
1667 /**
1668  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1669  * @priv: driver private structure
1670  * @dma_conf: structure to take the dma data
1671  * @queue: RX queue index
1672  * @flags: gfp flag.
1673  * Description: this function initializes the DMA RX descriptors
1674  * and allocates the socket buffers. It supports the chained and ring
1675  * modes.
1676  */
1677 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1678 				    struct stmmac_dma_conf *dma_conf,
1679 				    u32 queue, gfp_t flags)
1680 {
1681 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1682 	int ret;
1683 
1684 	netif_dbg(priv, probe, priv->dev,
1685 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1686 		  (u32)rx_q->dma_rx_phy);
1687 
1688 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1689 
1690 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1691 
1692 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1693 
1694 	if (rx_q->xsk_pool) {
1695 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1696 						   MEM_TYPE_XSK_BUFF_POOL,
1697 						   NULL));
1698 		netdev_info(priv->dev,
1699 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1700 			    rx_q->queue_index);
1701 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1702 	} else {
1703 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1704 						   MEM_TYPE_PAGE_POOL,
1705 						   rx_q->page_pool));
1706 		netdev_info(priv->dev,
1707 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1708 			    rx_q->queue_index);
1709 	}
1710 
1711 	if (rx_q->xsk_pool) {
1712 		/* RX XDP ZC buffer pool may not be populated, e.g.
1713 		 * xdpsock TX-only.
1714 		 */
1715 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1716 	} else {
1717 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1718 		if (ret < 0)
1719 			return -ENOMEM;
1720 	}
1721 
1722 	/* Setup the chained descriptor addresses */
1723 	if (priv->mode == STMMAC_CHAIN_MODE) {
1724 		if (priv->extend_desc)
1725 			stmmac_mode_init(priv, rx_q->dma_erx,
1726 					 rx_q->dma_rx_phy,
1727 					 dma_conf->dma_rx_size, 1);
1728 		else
1729 			stmmac_mode_init(priv, rx_q->dma_rx,
1730 					 rx_q->dma_rx_phy,
1731 					 dma_conf->dma_rx_size, 0);
1732 	}
1733 
1734 	return 0;
1735 }
1736 
1737 static int init_dma_rx_desc_rings(struct net_device *dev,
1738 				  struct stmmac_dma_conf *dma_conf,
1739 				  gfp_t flags)
1740 {
1741 	struct stmmac_priv *priv = netdev_priv(dev);
1742 	u32 rx_count = priv->plat->rx_queues_to_use;
1743 	int queue;
1744 	int ret;
1745 
1746 	/* RX INITIALIZATION */
1747 	netif_dbg(priv, probe, priv->dev,
1748 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1749 
1750 	for (queue = 0; queue < rx_count; queue++) {
1751 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1752 		if (ret)
1753 			goto err_init_rx_buffers;
1754 	}
1755 
1756 	return 0;
1757 
1758 err_init_rx_buffers:
1759 	while (queue >= 0) {
1760 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1761 
1762 		if (rx_q->xsk_pool)
1763 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1764 		else
1765 			dma_free_rx_skbufs(priv, dma_conf, queue);
1766 
1767 		rx_q->buf_alloc_num = 0;
1768 		rx_q->xsk_pool = NULL;
1769 
1770 		queue--;
1771 	}
1772 
1773 	return ret;
1774 }
1775 
1776 /**
1777  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1778  * @priv: driver private structure
1779  * @dma_conf: structure to take the dma data
1780  * @queue: TX queue index
1781  * Description: this function initializes the DMA TX descriptors
1782  * and allocates the socket buffers. It supports the chained and ring
1783  * modes.
1784  */
1785 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1786 				    struct stmmac_dma_conf *dma_conf,
1787 				    u32 queue)
1788 {
1789 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1790 	int i;
1791 
1792 	netif_dbg(priv, probe, priv->dev,
1793 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1794 		  (u32)tx_q->dma_tx_phy);
1795 
1796 	/* Setup the chained descriptor addresses */
1797 	if (priv->mode == STMMAC_CHAIN_MODE) {
1798 		if (priv->extend_desc)
1799 			stmmac_mode_init(priv, tx_q->dma_etx,
1800 					 tx_q->dma_tx_phy,
1801 					 dma_conf->dma_tx_size, 1);
1802 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1803 			stmmac_mode_init(priv, tx_q->dma_tx,
1804 					 tx_q->dma_tx_phy,
1805 					 dma_conf->dma_tx_size, 0);
1806 	}
1807 
1808 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1809 
1810 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1811 		struct dma_desc *p;
1812 
1813 		if (priv->extend_desc)
1814 			p = &((tx_q->dma_etx + i)->basic);
1815 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1816 			p = &((tx_q->dma_entx + i)->basic);
1817 		else
1818 			p = tx_q->dma_tx + i;
1819 
1820 		stmmac_clear_desc(priv, p);
1821 
1822 		tx_q->tx_skbuff_dma[i].buf = 0;
1823 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1824 		tx_q->tx_skbuff_dma[i].len = 0;
1825 		tx_q->tx_skbuff_dma[i].last_segment = false;
1826 		tx_q->tx_skbuff[i] = NULL;
1827 	}
1828 
1829 	return 0;
1830 }
1831 
1832 static int init_dma_tx_desc_rings(struct net_device *dev,
1833 				  struct stmmac_dma_conf *dma_conf)
1834 {
1835 	struct stmmac_priv *priv = netdev_priv(dev);
1836 	u32 tx_queue_cnt;
1837 	u32 queue;
1838 
1839 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1840 
1841 	for (queue = 0; queue < tx_queue_cnt; queue++)
1842 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1843 
1844 	return 0;
1845 }
1846 
1847 /**
1848  * init_dma_desc_rings - init the RX/TX descriptor rings
1849  * @dev: net device structure
1850  * @dma_conf: structure to take the dma data
1851  * @flags: gfp flag.
1852  * Description: this function initializes the DMA RX/TX descriptors
1853  * and allocates the socket buffers. It supports the chained and ring
1854  * modes.
1855  */
1856 static int init_dma_desc_rings(struct net_device *dev,
1857 			       struct stmmac_dma_conf *dma_conf,
1858 			       gfp_t flags)
1859 {
1860 	struct stmmac_priv *priv = netdev_priv(dev);
1861 	int ret;
1862 
1863 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1864 	if (ret)
1865 		return ret;
1866 
1867 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1868 
1869 	stmmac_clear_descriptors(priv, dma_conf);
1870 
1871 	if (netif_msg_hw(priv))
1872 		stmmac_display_rings(priv, dma_conf);
1873 
1874 	return ret;
1875 }
1876 
1877 /**
1878  * dma_free_tx_skbufs - free TX dma buffers
1879  * @priv: private structure
1880  * @dma_conf: structure to take the dma data
1881  * @queue: TX queue index
1882  */
1883 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1884 			       struct stmmac_dma_conf *dma_conf,
1885 			       u32 queue)
1886 {
1887 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1888 	int i;
1889 
1890 	tx_q->xsk_frames_done = 0;
1891 
1892 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1893 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1894 
1895 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1896 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1897 		tx_q->xsk_frames_done = 0;
1898 		tx_q->xsk_pool = NULL;
1899 	}
1900 }
1901 
1902 /**
1903  * stmmac_free_tx_skbufs - free TX skb buffers
1904  * @priv: private structure
1905  */
1906 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1907 {
1908 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1909 	u32 queue;
1910 
1911 	for (queue = 0; queue < tx_queue_cnt; queue++)
1912 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1913 }
1914 
1915 /**
1916  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1917  * @priv: private structure
1918  * @dma_conf: structure to take the dma data
1919  * @queue: RX queue index
1920  */
1921 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1922 					 struct stmmac_dma_conf *dma_conf,
1923 					 u32 queue)
1924 {
1925 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1926 
1927 	/* Release the DMA RX socket buffers */
1928 	if (rx_q->xsk_pool)
1929 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1930 	else
1931 		dma_free_rx_skbufs(priv, dma_conf, queue);
1932 
1933 	rx_q->buf_alloc_num = 0;
1934 	rx_q->xsk_pool = NULL;
1935 
1936 	/* Free DMA regions of consistent memory previously allocated */
1937 	if (!priv->extend_desc)
1938 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1939 				  sizeof(struct dma_desc),
1940 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1941 	else
1942 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1943 				  sizeof(struct dma_extended_desc),
1944 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1945 
1946 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1947 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1948 
1949 	kfree(rx_q->buf_pool);
1950 	if (rx_q->page_pool)
1951 		page_pool_destroy(rx_q->page_pool);
1952 }
1953 
1954 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1955 				       struct stmmac_dma_conf *dma_conf)
1956 {
1957 	u32 rx_count = priv->plat->rx_queues_to_use;
1958 	u32 queue;
1959 
1960 	/* Free RX queue resources */
1961 	for (queue = 0; queue < rx_count; queue++)
1962 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1963 }
1964 
1965 /**
1966  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1967  * @priv: private structure
1968  * @dma_conf: structure to take the dma data
1969  * @queue: TX queue index
1970  */
1971 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1972 					 struct stmmac_dma_conf *dma_conf,
1973 					 u32 queue)
1974 {
1975 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1976 	size_t size;
1977 	void *addr;
1978 
1979 	/* Release the DMA TX socket buffers */
1980 	dma_free_tx_skbufs(priv, dma_conf, queue);
1981 
1982 	if (priv->extend_desc) {
1983 		size = sizeof(struct dma_extended_desc);
1984 		addr = tx_q->dma_etx;
1985 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1986 		size = sizeof(struct dma_edesc);
1987 		addr = tx_q->dma_entx;
1988 	} else {
1989 		size = sizeof(struct dma_desc);
1990 		addr = tx_q->dma_tx;
1991 	}
1992 
1993 	size *= dma_conf->dma_tx_size;
1994 
1995 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1996 
1997 	kfree(tx_q->tx_skbuff_dma);
1998 	kfree(tx_q->tx_skbuff);
1999 }
2000 
2001 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2002 				       struct stmmac_dma_conf *dma_conf)
2003 {
2004 	u32 tx_count = priv->plat->tx_queues_to_use;
2005 	u32 queue;
2006 
2007 	/* Free TX queue resources */
2008 	for (queue = 0; queue < tx_count; queue++)
2009 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2010 }
2011 
2012 /**
2013  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2014  * @priv: private structure
2015  * @dma_conf: structure to take the dma data
2016  * @queue: RX queue index
2017  * Description: according to which descriptor can be used (extend or basic)
2018  * this function allocates the resources for TX and RX paths. In case of
2019  * reception, for example, it pre-allocated the RX socket buffer in order to
2020  * allow zero-copy mechanism.
2021  */
2022 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2023 					 struct stmmac_dma_conf *dma_conf,
2024 					 u32 queue)
2025 {
2026 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2027 	struct stmmac_channel *ch = &priv->channel[queue];
2028 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2029 	struct page_pool_params pp_params = { 0 };
2030 	unsigned int num_pages;
2031 	unsigned int napi_id;
2032 	int ret;
2033 
2034 	rx_q->queue_index = queue;
2035 	rx_q->priv_data = priv;
2036 
2037 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2038 	pp_params.pool_size = dma_conf->dma_rx_size;
2039 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2040 	pp_params.order = ilog2(num_pages);
2041 	pp_params.nid = dev_to_node(priv->device);
2042 	pp_params.dev = priv->device;
2043 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2044 	pp_params.offset = stmmac_rx_offset(priv);
2045 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2046 
2047 	rx_q->page_pool = page_pool_create(&pp_params);
2048 	if (IS_ERR(rx_q->page_pool)) {
2049 		ret = PTR_ERR(rx_q->page_pool);
2050 		rx_q->page_pool = NULL;
2051 		return ret;
2052 	}
2053 
2054 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2055 				 sizeof(*rx_q->buf_pool),
2056 				 GFP_KERNEL);
2057 	if (!rx_q->buf_pool)
2058 		return -ENOMEM;
2059 
2060 	if (priv->extend_desc) {
2061 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2062 						   dma_conf->dma_rx_size *
2063 						   sizeof(struct dma_extended_desc),
2064 						   &rx_q->dma_rx_phy,
2065 						   GFP_KERNEL);
2066 		if (!rx_q->dma_erx)
2067 			return -ENOMEM;
2068 
2069 	} else {
2070 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2071 						  dma_conf->dma_rx_size *
2072 						  sizeof(struct dma_desc),
2073 						  &rx_q->dma_rx_phy,
2074 						  GFP_KERNEL);
2075 		if (!rx_q->dma_rx)
2076 			return -ENOMEM;
2077 	}
2078 
2079 	if (stmmac_xdp_is_enabled(priv) &&
2080 	    test_bit(queue, priv->af_xdp_zc_qps))
2081 		napi_id = ch->rxtx_napi.napi_id;
2082 	else
2083 		napi_id = ch->rx_napi.napi_id;
2084 
2085 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2086 			       rx_q->queue_index,
2087 			       napi_id);
2088 	if (ret) {
2089 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2090 		return -EINVAL;
2091 	}
2092 
2093 	return 0;
2094 }
2095 
2096 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2097 				       struct stmmac_dma_conf *dma_conf)
2098 {
2099 	u32 rx_count = priv->plat->rx_queues_to_use;
2100 	u32 queue;
2101 	int ret;
2102 
2103 	/* RX queues buffers and DMA */
2104 	for (queue = 0; queue < rx_count; queue++) {
2105 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2106 		if (ret)
2107 			goto err_dma;
2108 	}
2109 
2110 	return 0;
2111 
2112 err_dma:
2113 	free_dma_rx_desc_resources(priv, dma_conf);
2114 
2115 	return ret;
2116 }
2117 
2118 /**
2119  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2120  * @priv: private structure
2121  * @dma_conf: structure to take the dma data
2122  * @queue: TX queue index
2123  * Description: according to which descriptor can be used (extend or basic)
2124  * this function allocates the resources for TX and RX paths. In case of
2125  * reception, for example, it pre-allocated the RX socket buffer in order to
2126  * allow zero-copy mechanism.
2127  */
2128 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2129 					 struct stmmac_dma_conf *dma_conf,
2130 					 u32 queue)
2131 {
2132 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2133 	size_t size;
2134 	void *addr;
2135 
2136 	tx_q->queue_index = queue;
2137 	tx_q->priv_data = priv;
2138 
2139 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2140 				      sizeof(*tx_q->tx_skbuff_dma),
2141 				      GFP_KERNEL);
2142 	if (!tx_q->tx_skbuff_dma)
2143 		return -ENOMEM;
2144 
2145 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2146 				  sizeof(struct sk_buff *),
2147 				  GFP_KERNEL);
2148 	if (!tx_q->tx_skbuff)
2149 		return -ENOMEM;
2150 
2151 	if (priv->extend_desc)
2152 		size = sizeof(struct dma_extended_desc);
2153 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2154 		size = sizeof(struct dma_edesc);
2155 	else
2156 		size = sizeof(struct dma_desc);
2157 
2158 	size *= dma_conf->dma_tx_size;
2159 
2160 	addr = dma_alloc_coherent(priv->device, size,
2161 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2162 	if (!addr)
2163 		return -ENOMEM;
2164 
2165 	if (priv->extend_desc)
2166 		tx_q->dma_etx = addr;
2167 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2168 		tx_q->dma_entx = addr;
2169 	else
2170 		tx_q->dma_tx = addr;
2171 
2172 	return 0;
2173 }
2174 
2175 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2176 				       struct stmmac_dma_conf *dma_conf)
2177 {
2178 	u32 tx_count = priv->plat->tx_queues_to_use;
2179 	u32 queue;
2180 	int ret;
2181 
2182 	/* TX queues buffers and DMA */
2183 	for (queue = 0; queue < tx_count; queue++) {
2184 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2185 		if (ret)
2186 			goto err_dma;
2187 	}
2188 
2189 	return 0;
2190 
2191 err_dma:
2192 	free_dma_tx_desc_resources(priv, dma_conf);
2193 	return ret;
2194 }
2195 
2196 /**
2197  * alloc_dma_desc_resources - alloc TX/RX resources.
2198  * @priv: private structure
2199  * @dma_conf: structure to take the dma data
2200  * Description: according to which descriptor can be used (extend or basic)
2201  * this function allocates the resources for TX and RX paths. In case of
2202  * reception, for example, it pre-allocated the RX socket buffer in order to
2203  * allow zero-copy mechanism.
2204  */
2205 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2206 				    struct stmmac_dma_conf *dma_conf)
2207 {
2208 	/* RX Allocation */
2209 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2210 
2211 	if (ret)
2212 		return ret;
2213 
2214 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2215 
2216 	return ret;
2217 }
2218 
2219 /**
2220  * free_dma_desc_resources - free dma desc resources
2221  * @priv: private structure
2222  * @dma_conf: structure to take the dma data
2223  */
2224 static void free_dma_desc_resources(struct stmmac_priv *priv,
2225 				    struct stmmac_dma_conf *dma_conf)
2226 {
2227 	/* Release the DMA TX socket buffers */
2228 	free_dma_tx_desc_resources(priv, dma_conf);
2229 
2230 	/* Release the DMA RX socket buffers later
2231 	 * to ensure all pending XDP_TX buffers are returned.
2232 	 */
2233 	free_dma_rx_desc_resources(priv, dma_conf);
2234 }
2235 
2236 /**
2237  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2238  *  @priv: driver private structure
2239  *  Description: It is used for enabling the rx queues in the MAC
2240  */
2241 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2242 {
2243 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2244 	int queue;
2245 	u8 mode;
2246 
2247 	for (queue = 0; queue < rx_queues_count; queue++) {
2248 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2249 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2250 	}
2251 }
2252 
2253 /**
2254  * stmmac_start_rx_dma - start RX DMA channel
2255  * @priv: driver private structure
2256  * @chan: RX channel index
2257  * Description:
2258  * This starts a RX DMA channel
2259  */
2260 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2261 {
2262 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2263 	stmmac_start_rx(priv, priv->ioaddr, chan);
2264 }
2265 
2266 /**
2267  * stmmac_start_tx_dma - start TX DMA channel
2268  * @priv: driver private structure
2269  * @chan: TX channel index
2270  * Description:
2271  * This starts a TX DMA channel
2272  */
2273 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2274 {
2275 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2276 	stmmac_start_tx(priv, priv->ioaddr, chan);
2277 }
2278 
2279 /**
2280  * stmmac_stop_rx_dma - stop RX DMA channel
2281  * @priv: driver private structure
2282  * @chan: RX channel index
2283  * Description:
2284  * This stops a RX DMA channel
2285  */
2286 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2287 {
2288 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2289 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2290 }
2291 
2292 /**
2293  * stmmac_stop_tx_dma - stop TX DMA channel
2294  * @priv: driver private structure
2295  * @chan: TX channel index
2296  * Description:
2297  * This stops a TX DMA channel
2298  */
2299 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2300 {
2301 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2302 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2303 }
2304 
2305 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2306 {
2307 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2308 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2309 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2310 	u32 chan;
2311 
2312 	for (chan = 0; chan < dma_csr_ch; chan++) {
2313 		struct stmmac_channel *ch = &priv->channel[chan];
2314 		unsigned long flags;
2315 
2316 		spin_lock_irqsave(&ch->lock, flags);
2317 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2318 		spin_unlock_irqrestore(&ch->lock, flags);
2319 	}
2320 }
2321 
2322 /**
2323  * stmmac_start_all_dma - start all RX and TX DMA channels
2324  * @priv: driver private structure
2325  * Description:
2326  * This starts all the RX and TX DMA channels
2327  */
2328 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2329 {
2330 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2331 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2332 	u32 chan = 0;
2333 
2334 	for (chan = 0; chan < rx_channels_count; chan++)
2335 		stmmac_start_rx_dma(priv, chan);
2336 
2337 	for (chan = 0; chan < tx_channels_count; chan++)
2338 		stmmac_start_tx_dma(priv, chan);
2339 }
2340 
2341 /**
2342  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2343  * @priv: driver private structure
2344  * Description:
2345  * This stops the RX and TX DMA channels
2346  */
2347 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2348 {
2349 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2350 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2351 	u32 chan = 0;
2352 
2353 	for (chan = 0; chan < rx_channels_count; chan++)
2354 		stmmac_stop_rx_dma(priv, chan);
2355 
2356 	for (chan = 0; chan < tx_channels_count; chan++)
2357 		stmmac_stop_tx_dma(priv, chan);
2358 }
2359 
2360 /**
2361  *  stmmac_dma_operation_mode - HW DMA operation mode
2362  *  @priv: driver private structure
2363  *  Description: it is used for configuring the DMA operation mode register in
2364  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2365  */
2366 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2367 {
2368 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2369 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2370 	int rxfifosz = priv->plat->rx_fifo_size;
2371 	int txfifosz = priv->plat->tx_fifo_size;
2372 	u32 txmode = 0;
2373 	u32 rxmode = 0;
2374 	u32 chan = 0;
2375 	u8 qmode = 0;
2376 
2377 	if (rxfifosz == 0)
2378 		rxfifosz = priv->dma_cap.rx_fifo_size;
2379 	if (txfifosz == 0)
2380 		txfifosz = priv->dma_cap.tx_fifo_size;
2381 
2382 	/* Adjust for real per queue fifo size */
2383 	rxfifosz /= rx_channels_count;
2384 	txfifosz /= tx_channels_count;
2385 
2386 	if (priv->plat->force_thresh_dma_mode) {
2387 		txmode = tc;
2388 		rxmode = tc;
2389 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2390 		/*
2391 		 * In case of GMAC, SF mode can be enabled
2392 		 * to perform the TX COE in HW. This depends on:
2393 		 * 1) TX COE if actually supported
2394 		 * 2) There is no bugged Jumbo frame support
2395 		 *    that needs to not insert csum in the TDES.
2396 		 */
2397 		txmode = SF_DMA_MODE;
2398 		rxmode = SF_DMA_MODE;
2399 		priv->xstats.threshold = SF_DMA_MODE;
2400 	} else {
2401 		txmode = tc;
2402 		rxmode = SF_DMA_MODE;
2403 	}
2404 
2405 	/* configure all channels */
2406 	for (chan = 0; chan < rx_channels_count; chan++) {
2407 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2408 		u32 buf_size;
2409 
2410 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2411 
2412 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2413 				rxfifosz, qmode);
2414 
2415 		if (rx_q->xsk_pool) {
2416 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2417 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2418 					      buf_size,
2419 					      chan);
2420 		} else {
2421 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2422 					      priv->dma_conf.dma_buf_sz,
2423 					      chan);
2424 		}
2425 	}
2426 
2427 	for (chan = 0; chan < tx_channels_count; chan++) {
2428 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2429 
2430 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2431 				txfifosz, qmode);
2432 	}
2433 }
2434 
2435 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2436 {
2437 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2438 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2439 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2440 	unsigned int entry = tx_q->cur_tx;
2441 	struct dma_desc *tx_desc = NULL;
2442 	struct xdp_desc xdp_desc;
2443 	bool work_done = true;
2444 	u32 tx_set_ic_bit = 0;
2445 	unsigned long flags;
2446 
2447 	/* Avoids TX time-out as we are sharing with slow path */
2448 	txq_trans_cond_update(nq);
2449 
2450 	budget = min(budget, stmmac_tx_avail(priv, queue));
2451 
2452 	while (budget-- > 0) {
2453 		dma_addr_t dma_addr;
2454 		bool set_ic;
2455 
2456 		/* We are sharing with slow path and stop XSK TX desc submission when
2457 		 * available TX ring is less than threshold.
2458 		 */
2459 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2460 		    !netif_carrier_ok(priv->dev)) {
2461 			work_done = false;
2462 			break;
2463 		}
2464 
2465 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2466 			break;
2467 
2468 		if (likely(priv->extend_desc))
2469 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2470 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2471 			tx_desc = &tx_q->dma_entx[entry].basic;
2472 		else
2473 			tx_desc = tx_q->dma_tx + entry;
2474 
2475 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2476 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2477 
2478 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2479 
2480 		/* To return XDP buffer to XSK pool, we simple call
2481 		 * xsk_tx_completed(), so we don't need to fill up
2482 		 * 'buf' and 'xdpf'.
2483 		 */
2484 		tx_q->tx_skbuff_dma[entry].buf = 0;
2485 		tx_q->xdpf[entry] = NULL;
2486 
2487 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2488 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2489 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2490 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2491 
2492 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2493 
2494 		tx_q->tx_count_frames++;
2495 
2496 		if (!priv->tx_coal_frames[queue])
2497 			set_ic = false;
2498 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2499 			set_ic = true;
2500 		else
2501 			set_ic = false;
2502 
2503 		if (set_ic) {
2504 			tx_q->tx_count_frames = 0;
2505 			stmmac_set_tx_ic(priv, tx_desc);
2506 			tx_set_ic_bit++;
2507 		}
2508 
2509 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2510 				       true, priv->mode, true, true,
2511 				       xdp_desc.len);
2512 
2513 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2514 
2515 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2516 		entry = tx_q->cur_tx;
2517 	}
2518 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
2519 	tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
2520 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
2521 
2522 	if (tx_desc) {
2523 		stmmac_flush_tx_descriptors(priv, queue);
2524 		xsk_tx_release(pool);
2525 	}
2526 
2527 	/* Return true if all of the 3 conditions are met
2528 	 *  a) TX Budget is still available
2529 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2530 	 *     pending XSK TX for transmission)
2531 	 */
2532 	return !!budget && work_done;
2533 }
2534 
2535 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2536 {
2537 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2538 		tc += 64;
2539 
2540 		if (priv->plat->force_thresh_dma_mode)
2541 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2542 		else
2543 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2544 						      chan);
2545 
2546 		priv->xstats.threshold = tc;
2547 	}
2548 }
2549 
2550 /**
2551  * stmmac_tx_clean - to manage the transmission completion
2552  * @priv: driver private structure
2553  * @budget: napi budget limiting this functions packet handling
2554  * @queue: TX queue index
2555  * Description: it reclaims the transmit resources after transmission completes.
2556  */
2557 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2558 {
2559 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2560 	unsigned int bytes_compl = 0, pkts_compl = 0;
2561 	unsigned int entry, xmits = 0, count = 0;
2562 	u32 tx_packets = 0, tx_errors = 0;
2563 	unsigned long flags;
2564 
2565 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2566 
2567 	tx_q->xsk_frames_done = 0;
2568 
2569 	entry = tx_q->dirty_tx;
2570 
2571 	/* Try to clean all TX complete frame in 1 shot */
2572 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2573 		struct xdp_frame *xdpf;
2574 		struct sk_buff *skb;
2575 		struct dma_desc *p;
2576 		int status;
2577 
2578 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2579 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2580 			xdpf = tx_q->xdpf[entry];
2581 			skb = NULL;
2582 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2583 			xdpf = NULL;
2584 			skb = tx_q->tx_skbuff[entry];
2585 		} else {
2586 			xdpf = NULL;
2587 			skb = NULL;
2588 		}
2589 
2590 		if (priv->extend_desc)
2591 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2592 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2593 			p = &tx_q->dma_entx[entry].basic;
2594 		else
2595 			p = tx_q->dma_tx + entry;
2596 
2597 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2598 		/* Check if the descriptor is owned by the DMA */
2599 		if (unlikely(status & tx_dma_own))
2600 			break;
2601 
2602 		count++;
2603 
2604 		/* Make sure descriptor fields are read after reading
2605 		 * the own bit.
2606 		 */
2607 		dma_rmb();
2608 
2609 		/* Just consider the last segment and ...*/
2610 		if (likely(!(status & tx_not_ls))) {
2611 			/* ... verify the status error condition */
2612 			if (unlikely(status & tx_err)) {
2613 				tx_errors++;
2614 				if (unlikely(status & tx_err_bump_tc))
2615 					stmmac_bump_dma_threshold(priv, queue);
2616 			} else {
2617 				tx_packets++;
2618 			}
2619 			if (skb)
2620 				stmmac_get_tx_hwtstamp(priv, p, skb);
2621 		}
2622 
2623 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2624 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2625 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2626 				dma_unmap_page(priv->device,
2627 					       tx_q->tx_skbuff_dma[entry].buf,
2628 					       tx_q->tx_skbuff_dma[entry].len,
2629 					       DMA_TO_DEVICE);
2630 			else
2631 				dma_unmap_single(priv->device,
2632 						 tx_q->tx_skbuff_dma[entry].buf,
2633 						 tx_q->tx_skbuff_dma[entry].len,
2634 						 DMA_TO_DEVICE);
2635 			tx_q->tx_skbuff_dma[entry].buf = 0;
2636 			tx_q->tx_skbuff_dma[entry].len = 0;
2637 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2638 		}
2639 
2640 		stmmac_clean_desc3(priv, tx_q, p);
2641 
2642 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2643 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2644 
2645 		if (xdpf &&
2646 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2647 			xdp_return_frame_rx_napi(xdpf);
2648 			tx_q->xdpf[entry] = NULL;
2649 		}
2650 
2651 		if (xdpf &&
2652 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2653 			xdp_return_frame(xdpf);
2654 			tx_q->xdpf[entry] = NULL;
2655 		}
2656 
2657 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2658 			tx_q->xsk_frames_done++;
2659 
2660 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2661 			if (likely(skb)) {
2662 				pkts_compl++;
2663 				bytes_compl += skb->len;
2664 				dev_consume_skb_any(skb);
2665 				tx_q->tx_skbuff[entry] = NULL;
2666 			}
2667 		}
2668 
2669 		stmmac_release_tx_desc(priv, p, priv->mode);
2670 
2671 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2672 	}
2673 	tx_q->dirty_tx = entry;
2674 
2675 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2676 				  pkts_compl, bytes_compl);
2677 
2678 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2679 								queue))) &&
2680 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2681 
2682 		netif_dbg(priv, tx_done, priv->dev,
2683 			  "%s: restart transmit\n", __func__);
2684 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2685 	}
2686 
2687 	if (tx_q->xsk_pool) {
2688 		bool work_done;
2689 
2690 		if (tx_q->xsk_frames_done)
2691 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2692 
2693 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2694 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2695 
2696 		/* For XSK TX, we try to send as many as possible.
2697 		 * If XSK work done (XSK TX desc empty and budget still
2698 		 * available), return "budget - 1" to reenable TX IRQ.
2699 		 * Else, return "budget" to make NAPI continue polling.
2700 		 */
2701 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2702 					       STMMAC_XSK_TX_BUDGET_MAX);
2703 		if (work_done)
2704 			xmits = budget - 1;
2705 		else
2706 			xmits = budget;
2707 	}
2708 
2709 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2710 	    priv->eee_sw_timer_en) {
2711 		if (stmmac_enable_eee_mode(priv))
2712 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2713 	}
2714 
2715 	/* We still have pending packets, let's call for a new scheduling */
2716 	if (tx_q->dirty_tx != tx_q->cur_tx)
2717 		hrtimer_start(&tx_q->txtimer,
2718 			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2719 			      HRTIMER_MODE_REL);
2720 
2721 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
2722 	tx_q->txq_stats.tx_packets += tx_packets;
2723 	tx_q->txq_stats.tx_pkt_n += tx_packets;
2724 	tx_q->txq_stats.tx_clean++;
2725 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
2726 
2727 	priv->xstats.tx_errors += tx_errors;
2728 
2729 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2730 
2731 	/* Combine decisions from TX clean and XSK TX */
2732 	return max(count, xmits);
2733 }
2734 
2735 /**
2736  * stmmac_tx_err - to manage the tx error
2737  * @priv: driver private structure
2738  * @chan: channel index
2739  * Description: it cleans the descriptors and restarts the transmission
2740  * in case of transmission errors.
2741  */
2742 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2743 {
2744 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2745 
2746 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2747 
2748 	stmmac_stop_tx_dma(priv, chan);
2749 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2750 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2751 	stmmac_reset_tx_queue(priv, chan);
2752 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2753 			    tx_q->dma_tx_phy, chan);
2754 	stmmac_start_tx_dma(priv, chan);
2755 
2756 	priv->xstats.tx_errors++;
2757 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2758 }
2759 
2760 /**
2761  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2762  *  @priv: driver private structure
2763  *  @txmode: TX operating mode
2764  *  @rxmode: RX operating mode
2765  *  @chan: channel index
2766  *  Description: it is used for configuring of the DMA operation mode in
2767  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2768  *  mode.
2769  */
2770 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2771 					  u32 rxmode, u32 chan)
2772 {
2773 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2774 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2775 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2776 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2777 	int rxfifosz = priv->plat->rx_fifo_size;
2778 	int txfifosz = priv->plat->tx_fifo_size;
2779 
2780 	if (rxfifosz == 0)
2781 		rxfifosz = priv->dma_cap.rx_fifo_size;
2782 	if (txfifosz == 0)
2783 		txfifosz = priv->dma_cap.tx_fifo_size;
2784 
2785 	/* Adjust for real per queue fifo size */
2786 	rxfifosz /= rx_channels_count;
2787 	txfifosz /= tx_channels_count;
2788 
2789 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2790 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2791 }
2792 
2793 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2794 {
2795 	int ret;
2796 
2797 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2798 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2799 	if (ret && (ret != -EINVAL)) {
2800 		stmmac_global_err(priv);
2801 		return true;
2802 	}
2803 
2804 	return false;
2805 }
2806 
2807 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2808 {
2809 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2810 						 &priv->xstats, chan, dir);
2811 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2812 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2813 	struct stmmac_channel *ch = &priv->channel[chan];
2814 	struct napi_struct *rx_napi;
2815 	struct napi_struct *tx_napi;
2816 	unsigned long flags;
2817 
2818 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2819 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2820 
2821 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2822 		if (napi_schedule_prep(rx_napi)) {
2823 			spin_lock_irqsave(&ch->lock, flags);
2824 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2825 			spin_unlock_irqrestore(&ch->lock, flags);
2826 			__napi_schedule(rx_napi);
2827 		}
2828 	}
2829 
2830 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2831 		if (napi_schedule_prep(tx_napi)) {
2832 			spin_lock_irqsave(&ch->lock, flags);
2833 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2834 			spin_unlock_irqrestore(&ch->lock, flags);
2835 			__napi_schedule(tx_napi);
2836 		}
2837 	}
2838 
2839 	return status;
2840 }
2841 
2842 /**
2843  * stmmac_dma_interrupt - DMA ISR
2844  * @priv: driver private structure
2845  * Description: this is the DMA ISR. It is called by the main ISR.
2846  * It calls the dwmac dma routine and schedule poll method in case of some
2847  * work can be done.
2848  */
2849 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2850 {
2851 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2852 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2853 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2854 				tx_channel_count : rx_channel_count;
2855 	u32 chan;
2856 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2857 
2858 	/* Make sure we never check beyond our status buffer. */
2859 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2860 		channels_to_check = ARRAY_SIZE(status);
2861 
2862 	for (chan = 0; chan < channels_to_check; chan++)
2863 		status[chan] = stmmac_napi_check(priv, chan,
2864 						 DMA_DIR_RXTX);
2865 
2866 	for (chan = 0; chan < tx_channel_count; chan++) {
2867 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2868 			/* Try to bump up the dma threshold on this failure */
2869 			stmmac_bump_dma_threshold(priv, chan);
2870 		} else if (unlikely(status[chan] == tx_hard_error)) {
2871 			stmmac_tx_err(priv, chan);
2872 		}
2873 	}
2874 }
2875 
2876 /**
2877  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2878  * @priv: driver private structure
2879  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2880  */
2881 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2882 {
2883 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2884 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2885 
2886 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2887 
2888 	if (priv->dma_cap.rmon) {
2889 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2890 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2891 	} else
2892 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2893 }
2894 
2895 /**
2896  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2897  * @priv: driver private structure
2898  * Description:
2899  *  new GMAC chip generations have a new register to indicate the
2900  *  presence of the optional feature/functions.
2901  *  This can be also used to override the value passed through the
2902  *  platform and necessary for old MAC10/100 and GMAC chips.
2903  */
2904 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2905 {
2906 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2907 }
2908 
2909 /**
2910  * stmmac_check_ether_addr - check if the MAC addr is valid
2911  * @priv: driver private structure
2912  * Description:
2913  * it is to verify if the MAC address is valid, in case of failures it
2914  * generates a random MAC address
2915  */
2916 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2917 {
2918 	u8 addr[ETH_ALEN];
2919 
2920 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2921 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2922 		if (is_valid_ether_addr(addr))
2923 			eth_hw_addr_set(priv->dev, addr);
2924 		else
2925 			eth_hw_addr_random(priv->dev);
2926 		dev_info(priv->device, "device MAC address %pM\n",
2927 			 priv->dev->dev_addr);
2928 	}
2929 }
2930 
2931 /**
2932  * stmmac_init_dma_engine - DMA init.
2933  * @priv: driver private structure
2934  * Description:
2935  * It inits the DMA invoking the specific MAC/GMAC callback.
2936  * Some DMA parameters can be passed from the platform;
2937  * in case of these are not passed a default is kept for the MAC or GMAC.
2938  */
2939 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2940 {
2941 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2942 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2943 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2944 	struct stmmac_rx_queue *rx_q;
2945 	struct stmmac_tx_queue *tx_q;
2946 	u32 chan = 0;
2947 	int atds = 0;
2948 	int ret = 0;
2949 
2950 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2951 		dev_err(priv->device, "Invalid DMA configuration\n");
2952 		return -EINVAL;
2953 	}
2954 
2955 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2956 		atds = 1;
2957 
2958 	ret = stmmac_reset(priv, priv->ioaddr);
2959 	if (ret) {
2960 		dev_err(priv->device, "Failed to reset the dma\n");
2961 		return ret;
2962 	}
2963 
2964 	/* DMA Configuration */
2965 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2966 
2967 	if (priv->plat->axi)
2968 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2969 
2970 	/* DMA CSR Channel configuration */
2971 	for (chan = 0; chan < dma_csr_ch; chan++) {
2972 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2973 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2974 	}
2975 
2976 	/* DMA RX Channel Configuration */
2977 	for (chan = 0; chan < rx_channels_count; chan++) {
2978 		rx_q = &priv->dma_conf.rx_queue[chan];
2979 
2980 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2981 				    rx_q->dma_rx_phy, chan);
2982 
2983 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2984 				     (rx_q->buf_alloc_num *
2985 				      sizeof(struct dma_desc));
2986 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2987 				       rx_q->rx_tail_addr, chan);
2988 	}
2989 
2990 	/* DMA TX Channel Configuration */
2991 	for (chan = 0; chan < tx_channels_count; chan++) {
2992 		tx_q = &priv->dma_conf.tx_queue[chan];
2993 
2994 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2995 				    tx_q->dma_tx_phy, chan);
2996 
2997 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2998 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2999 				       tx_q->tx_tail_addr, chan);
3000 	}
3001 
3002 	return ret;
3003 }
3004 
3005 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3006 {
3007 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3008 
3009 	hrtimer_start(&tx_q->txtimer,
3010 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
3011 		      HRTIMER_MODE_REL);
3012 }
3013 
3014 /**
3015  * stmmac_tx_timer - mitigation sw timer for tx.
3016  * @t: data pointer
3017  * Description:
3018  * This is the timer handler to directly invoke the stmmac_tx_clean.
3019  */
3020 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3021 {
3022 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3023 	struct stmmac_priv *priv = tx_q->priv_data;
3024 	struct stmmac_channel *ch;
3025 	struct napi_struct *napi;
3026 
3027 	ch = &priv->channel[tx_q->queue_index];
3028 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3029 
3030 	if (likely(napi_schedule_prep(napi))) {
3031 		unsigned long flags;
3032 
3033 		spin_lock_irqsave(&ch->lock, flags);
3034 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3035 		spin_unlock_irqrestore(&ch->lock, flags);
3036 		__napi_schedule(napi);
3037 	}
3038 
3039 	return HRTIMER_NORESTART;
3040 }
3041 
3042 /**
3043  * stmmac_init_coalesce - init mitigation options.
3044  * @priv: driver private structure
3045  * Description:
3046  * This inits the coalesce parameters: i.e. timer rate,
3047  * timer handler and default threshold used for enabling the
3048  * interrupt on completion bit.
3049  */
3050 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3051 {
3052 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3053 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3054 	u32 chan;
3055 
3056 	for (chan = 0; chan < tx_channel_count; chan++) {
3057 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3058 
3059 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3060 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3061 
3062 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3063 		tx_q->txtimer.function = stmmac_tx_timer;
3064 	}
3065 
3066 	for (chan = 0; chan < rx_channel_count; chan++)
3067 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3068 }
3069 
3070 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3071 {
3072 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3073 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3074 	u32 chan;
3075 
3076 	/* set TX ring length */
3077 	for (chan = 0; chan < tx_channels_count; chan++)
3078 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3079 				       (priv->dma_conf.dma_tx_size - 1), chan);
3080 
3081 	/* set RX ring length */
3082 	for (chan = 0; chan < rx_channels_count; chan++)
3083 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3084 				       (priv->dma_conf.dma_rx_size - 1), chan);
3085 }
3086 
3087 /**
3088  *  stmmac_set_tx_queue_weight - Set TX queue weight
3089  *  @priv: driver private structure
3090  *  Description: It is used for setting TX queues weight
3091  */
3092 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3093 {
3094 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3095 	u32 weight;
3096 	u32 queue;
3097 
3098 	for (queue = 0; queue < tx_queues_count; queue++) {
3099 		weight = priv->plat->tx_queues_cfg[queue].weight;
3100 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3101 	}
3102 }
3103 
3104 /**
3105  *  stmmac_configure_cbs - Configure CBS in TX queue
3106  *  @priv: driver private structure
3107  *  Description: It is used for configuring CBS in AVB TX queues
3108  */
3109 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3110 {
3111 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3112 	u32 mode_to_use;
3113 	u32 queue;
3114 
3115 	/* queue 0 is reserved for legacy traffic */
3116 	for (queue = 1; queue < tx_queues_count; queue++) {
3117 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3118 		if (mode_to_use == MTL_QUEUE_DCB)
3119 			continue;
3120 
3121 		stmmac_config_cbs(priv, priv->hw,
3122 				priv->plat->tx_queues_cfg[queue].send_slope,
3123 				priv->plat->tx_queues_cfg[queue].idle_slope,
3124 				priv->plat->tx_queues_cfg[queue].high_credit,
3125 				priv->plat->tx_queues_cfg[queue].low_credit,
3126 				queue);
3127 	}
3128 }
3129 
3130 /**
3131  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3132  *  @priv: driver private structure
3133  *  Description: It is used for mapping RX queues to RX dma channels
3134  */
3135 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3136 {
3137 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3138 	u32 queue;
3139 	u32 chan;
3140 
3141 	for (queue = 0; queue < rx_queues_count; queue++) {
3142 		chan = priv->plat->rx_queues_cfg[queue].chan;
3143 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3144 	}
3145 }
3146 
3147 /**
3148  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3149  *  @priv: driver private structure
3150  *  Description: It is used for configuring the RX Queue Priority
3151  */
3152 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3153 {
3154 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3155 	u32 queue;
3156 	u32 prio;
3157 
3158 	for (queue = 0; queue < rx_queues_count; queue++) {
3159 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3160 			continue;
3161 
3162 		prio = priv->plat->rx_queues_cfg[queue].prio;
3163 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3164 	}
3165 }
3166 
3167 /**
3168  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3169  *  @priv: driver private structure
3170  *  Description: It is used for configuring the TX Queue Priority
3171  */
3172 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3173 {
3174 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3175 	u32 queue;
3176 	u32 prio;
3177 
3178 	for (queue = 0; queue < tx_queues_count; queue++) {
3179 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3180 			continue;
3181 
3182 		prio = priv->plat->tx_queues_cfg[queue].prio;
3183 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3184 	}
3185 }
3186 
3187 /**
3188  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3189  *  @priv: driver private structure
3190  *  Description: It is used for configuring the RX queue routing
3191  */
3192 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3193 {
3194 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3195 	u32 queue;
3196 	u8 packet;
3197 
3198 	for (queue = 0; queue < rx_queues_count; queue++) {
3199 		/* no specific packet type routing specified for the queue */
3200 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3201 			continue;
3202 
3203 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3204 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3205 	}
3206 }
3207 
3208 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3209 {
3210 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3211 		priv->rss.enable = false;
3212 		return;
3213 	}
3214 
3215 	if (priv->dev->features & NETIF_F_RXHASH)
3216 		priv->rss.enable = true;
3217 	else
3218 		priv->rss.enable = false;
3219 
3220 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3221 			     priv->plat->rx_queues_to_use);
3222 }
3223 
3224 /**
3225  *  stmmac_mtl_configuration - Configure MTL
3226  *  @priv: driver private structure
3227  *  Description: It is used for configurring MTL
3228  */
3229 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3230 {
3231 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3232 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3233 
3234 	if (tx_queues_count > 1)
3235 		stmmac_set_tx_queue_weight(priv);
3236 
3237 	/* Configure MTL RX algorithms */
3238 	if (rx_queues_count > 1)
3239 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3240 				priv->plat->rx_sched_algorithm);
3241 
3242 	/* Configure MTL TX algorithms */
3243 	if (tx_queues_count > 1)
3244 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3245 				priv->plat->tx_sched_algorithm);
3246 
3247 	/* Configure CBS in AVB TX queues */
3248 	if (tx_queues_count > 1)
3249 		stmmac_configure_cbs(priv);
3250 
3251 	/* Map RX MTL to DMA channels */
3252 	stmmac_rx_queue_dma_chan_map(priv);
3253 
3254 	/* Enable MAC RX Queues */
3255 	stmmac_mac_enable_rx_queues(priv);
3256 
3257 	/* Set RX priorities */
3258 	if (rx_queues_count > 1)
3259 		stmmac_mac_config_rx_queues_prio(priv);
3260 
3261 	/* Set TX priorities */
3262 	if (tx_queues_count > 1)
3263 		stmmac_mac_config_tx_queues_prio(priv);
3264 
3265 	/* Set RX routing */
3266 	if (rx_queues_count > 1)
3267 		stmmac_mac_config_rx_queues_routing(priv);
3268 
3269 	/* Receive Side Scaling */
3270 	if (rx_queues_count > 1)
3271 		stmmac_mac_config_rss(priv);
3272 }
3273 
3274 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3275 {
3276 	if (priv->dma_cap.asp) {
3277 		netdev_info(priv->dev, "Enabling Safety Features\n");
3278 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3279 					  priv->plat->safety_feat_cfg);
3280 	} else {
3281 		netdev_info(priv->dev, "No Safety Features support found\n");
3282 	}
3283 }
3284 
3285 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3286 {
3287 	char *name;
3288 
3289 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3290 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3291 
3292 	name = priv->wq_name;
3293 	sprintf(name, "%s-fpe", priv->dev->name);
3294 
3295 	priv->fpe_wq = create_singlethread_workqueue(name);
3296 	if (!priv->fpe_wq) {
3297 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3298 
3299 		return -ENOMEM;
3300 	}
3301 	netdev_info(priv->dev, "FPE workqueue start");
3302 
3303 	return 0;
3304 }
3305 
3306 /**
3307  * stmmac_hw_setup - setup mac in a usable state.
3308  *  @dev : pointer to the device structure.
3309  *  @ptp_register: register PTP if set
3310  *  Description:
3311  *  this is the main function to setup the HW in a usable state because the
3312  *  dma engine is reset, the core registers are configured (e.g. AXI,
3313  *  Checksum features, timers). The DMA is ready to start receiving and
3314  *  transmitting.
3315  *  Return value:
3316  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3317  *  file on failure.
3318  */
3319 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3320 {
3321 	struct stmmac_priv *priv = netdev_priv(dev);
3322 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3323 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3324 	bool sph_en;
3325 	u32 chan;
3326 	int ret;
3327 
3328 	/* DMA initialization and SW reset */
3329 	ret = stmmac_init_dma_engine(priv);
3330 	if (ret < 0) {
3331 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3332 			   __func__);
3333 		return ret;
3334 	}
3335 
3336 	/* Copy the MAC addr into the HW  */
3337 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3338 
3339 	/* PS and related bits will be programmed according to the speed */
3340 	if (priv->hw->pcs) {
3341 		int speed = priv->plat->mac_port_sel_speed;
3342 
3343 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3344 		    (speed == SPEED_1000)) {
3345 			priv->hw->ps = speed;
3346 		} else {
3347 			dev_warn(priv->device, "invalid port speed\n");
3348 			priv->hw->ps = 0;
3349 		}
3350 	}
3351 
3352 	/* Initialize the MAC Core */
3353 	stmmac_core_init(priv, priv->hw, dev);
3354 
3355 	/* Initialize MTL*/
3356 	stmmac_mtl_configuration(priv);
3357 
3358 	/* Initialize Safety Features */
3359 	stmmac_safety_feat_configuration(priv);
3360 
3361 	ret = stmmac_rx_ipc(priv, priv->hw);
3362 	if (!ret) {
3363 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3364 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3365 		priv->hw->rx_csum = 0;
3366 	}
3367 
3368 	/* Enable the MAC Rx/Tx */
3369 	stmmac_mac_set(priv, priv->ioaddr, true);
3370 
3371 	/* Set the HW DMA mode and the COE */
3372 	stmmac_dma_operation_mode(priv);
3373 
3374 	stmmac_mmc_setup(priv);
3375 
3376 	if (ptp_register) {
3377 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3378 		if (ret < 0)
3379 			netdev_warn(priv->dev,
3380 				    "failed to enable PTP reference clock: %pe\n",
3381 				    ERR_PTR(ret));
3382 	}
3383 
3384 	ret = stmmac_init_ptp(priv);
3385 	if (ret == -EOPNOTSUPP)
3386 		netdev_info(priv->dev, "PTP not supported by HW\n");
3387 	else if (ret)
3388 		netdev_warn(priv->dev, "PTP init failed\n");
3389 	else if (ptp_register)
3390 		stmmac_ptp_register(priv);
3391 
3392 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3393 
3394 	/* Convert the timer from msec to usec */
3395 	if (!priv->tx_lpi_timer)
3396 		priv->tx_lpi_timer = eee_timer * 1000;
3397 
3398 	if (priv->use_riwt) {
3399 		u32 queue;
3400 
3401 		for (queue = 0; queue < rx_cnt; queue++) {
3402 			if (!priv->rx_riwt[queue])
3403 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3404 
3405 			stmmac_rx_watchdog(priv, priv->ioaddr,
3406 					   priv->rx_riwt[queue], queue);
3407 		}
3408 	}
3409 
3410 	if (priv->hw->pcs)
3411 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3412 
3413 	/* set TX and RX rings length */
3414 	stmmac_set_rings_length(priv);
3415 
3416 	/* Enable TSO */
3417 	if (priv->tso) {
3418 		for (chan = 0; chan < tx_cnt; chan++) {
3419 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3420 
3421 			/* TSO and TBS cannot co-exist */
3422 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3423 				continue;
3424 
3425 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3426 		}
3427 	}
3428 
3429 	/* Enable Split Header */
3430 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3431 	for (chan = 0; chan < rx_cnt; chan++)
3432 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3433 
3434 
3435 	/* VLAN Tag Insertion */
3436 	if (priv->dma_cap.vlins)
3437 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3438 
3439 	/* TBS */
3440 	for (chan = 0; chan < tx_cnt; chan++) {
3441 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3442 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3443 
3444 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3445 	}
3446 
3447 	/* Configure real RX and TX queues */
3448 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3449 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3450 
3451 	/* Start the ball rolling... */
3452 	stmmac_start_all_dma(priv);
3453 
3454 	if (priv->dma_cap.fpesel) {
3455 		stmmac_fpe_start_wq(priv);
3456 
3457 		if (priv->plat->fpe_cfg->enable)
3458 			stmmac_fpe_handshake(priv, true);
3459 	}
3460 
3461 	return 0;
3462 }
3463 
3464 static void stmmac_hw_teardown(struct net_device *dev)
3465 {
3466 	struct stmmac_priv *priv = netdev_priv(dev);
3467 
3468 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3469 }
3470 
3471 static void stmmac_free_irq(struct net_device *dev,
3472 			    enum request_irq_err irq_err, int irq_idx)
3473 {
3474 	struct stmmac_priv *priv = netdev_priv(dev);
3475 	int j;
3476 
3477 	switch (irq_err) {
3478 	case REQ_IRQ_ERR_ALL:
3479 		irq_idx = priv->plat->tx_queues_to_use;
3480 		fallthrough;
3481 	case REQ_IRQ_ERR_TX:
3482 		for (j = irq_idx - 1; j >= 0; j--) {
3483 			if (priv->tx_irq[j] > 0) {
3484 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3485 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3486 			}
3487 		}
3488 		irq_idx = priv->plat->rx_queues_to_use;
3489 		fallthrough;
3490 	case REQ_IRQ_ERR_RX:
3491 		for (j = irq_idx - 1; j >= 0; j--) {
3492 			if (priv->rx_irq[j] > 0) {
3493 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3494 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3495 			}
3496 		}
3497 
3498 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3499 			free_irq(priv->sfty_ue_irq, dev);
3500 		fallthrough;
3501 	case REQ_IRQ_ERR_SFTY_UE:
3502 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3503 			free_irq(priv->sfty_ce_irq, dev);
3504 		fallthrough;
3505 	case REQ_IRQ_ERR_SFTY_CE:
3506 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3507 			free_irq(priv->lpi_irq, dev);
3508 		fallthrough;
3509 	case REQ_IRQ_ERR_LPI:
3510 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3511 			free_irq(priv->wol_irq, dev);
3512 		fallthrough;
3513 	case REQ_IRQ_ERR_WOL:
3514 		free_irq(dev->irq, dev);
3515 		fallthrough;
3516 	case REQ_IRQ_ERR_MAC:
3517 	case REQ_IRQ_ERR_NO:
3518 		/* If MAC IRQ request error, no more IRQ to free */
3519 		break;
3520 	}
3521 }
3522 
3523 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3524 {
3525 	struct stmmac_priv *priv = netdev_priv(dev);
3526 	enum request_irq_err irq_err;
3527 	cpumask_t cpu_mask;
3528 	int irq_idx = 0;
3529 	char *int_name;
3530 	int ret;
3531 	int i;
3532 
3533 	/* For common interrupt */
3534 	int_name = priv->int_name_mac;
3535 	sprintf(int_name, "%s:%s", dev->name, "mac");
3536 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3537 			  0, int_name, dev);
3538 	if (unlikely(ret < 0)) {
3539 		netdev_err(priv->dev,
3540 			   "%s: alloc mac MSI %d (error: %d)\n",
3541 			   __func__, dev->irq, ret);
3542 		irq_err = REQ_IRQ_ERR_MAC;
3543 		goto irq_error;
3544 	}
3545 
3546 	/* Request the Wake IRQ in case of another line
3547 	 * is used for WoL
3548 	 */
3549 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3550 		int_name = priv->int_name_wol;
3551 		sprintf(int_name, "%s:%s", dev->name, "wol");
3552 		ret = request_irq(priv->wol_irq,
3553 				  stmmac_mac_interrupt,
3554 				  0, int_name, dev);
3555 		if (unlikely(ret < 0)) {
3556 			netdev_err(priv->dev,
3557 				   "%s: alloc wol MSI %d (error: %d)\n",
3558 				   __func__, priv->wol_irq, ret);
3559 			irq_err = REQ_IRQ_ERR_WOL;
3560 			goto irq_error;
3561 		}
3562 	}
3563 
3564 	/* Request the LPI IRQ in case of another line
3565 	 * is used for LPI
3566 	 */
3567 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3568 		int_name = priv->int_name_lpi;
3569 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3570 		ret = request_irq(priv->lpi_irq,
3571 				  stmmac_mac_interrupt,
3572 				  0, int_name, dev);
3573 		if (unlikely(ret < 0)) {
3574 			netdev_err(priv->dev,
3575 				   "%s: alloc lpi MSI %d (error: %d)\n",
3576 				   __func__, priv->lpi_irq, ret);
3577 			irq_err = REQ_IRQ_ERR_LPI;
3578 			goto irq_error;
3579 		}
3580 	}
3581 
3582 	/* Request the Safety Feature Correctible Error line in
3583 	 * case of another line is used
3584 	 */
3585 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3586 		int_name = priv->int_name_sfty_ce;
3587 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3588 		ret = request_irq(priv->sfty_ce_irq,
3589 				  stmmac_safety_interrupt,
3590 				  0, int_name, dev);
3591 		if (unlikely(ret < 0)) {
3592 			netdev_err(priv->dev,
3593 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3594 				   __func__, priv->sfty_ce_irq, ret);
3595 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3596 			goto irq_error;
3597 		}
3598 	}
3599 
3600 	/* Request the Safety Feature Uncorrectible Error line in
3601 	 * case of another line is used
3602 	 */
3603 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3604 		int_name = priv->int_name_sfty_ue;
3605 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3606 		ret = request_irq(priv->sfty_ue_irq,
3607 				  stmmac_safety_interrupt,
3608 				  0, int_name, dev);
3609 		if (unlikely(ret < 0)) {
3610 			netdev_err(priv->dev,
3611 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3612 				   __func__, priv->sfty_ue_irq, ret);
3613 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3614 			goto irq_error;
3615 		}
3616 	}
3617 
3618 	/* Request Rx MSI irq */
3619 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3620 		if (i >= MTL_MAX_RX_QUEUES)
3621 			break;
3622 		if (priv->rx_irq[i] == 0)
3623 			continue;
3624 
3625 		int_name = priv->int_name_rx_irq[i];
3626 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3627 		ret = request_irq(priv->rx_irq[i],
3628 				  stmmac_msi_intr_rx,
3629 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3630 		if (unlikely(ret < 0)) {
3631 			netdev_err(priv->dev,
3632 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3633 				   __func__, i, priv->rx_irq[i], ret);
3634 			irq_err = REQ_IRQ_ERR_RX;
3635 			irq_idx = i;
3636 			goto irq_error;
3637 		}
3638 		cpumask_clear(&cpu_mask);
3639 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3640 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3641 	}
3642 
3643 	/* Request Tx MSI irq */
3644 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3645 		if (i >= MTL_MAX_TX_QUEUES)
3646 			break;
3647 		if (priv->tx_irq[i] == 0)
3648 			continue;
3649 
3650 		int_name = priv->int_name_tx_irq[i];
3651 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3652 		ret = request_irq(priv->tx_irq[i],
3653 				  stmmac_msi_intr_tx,
3654 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3655 		if (unlikely(ret < 0)) {
3656 			netdev_err(priv->dev,
3657 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3658 				   __func__, i, priv->tx_irq[i], ret);
3659 			irq_err = REQ_IRQ_ERR_TX;
3660 			irq_idx = i;
3661 			goto irq_error;
3662 		}
3663 		cpumask_clear(&cpu_mask);
3664 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3665 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3666 	}
3667 
3668 	return 0;
3669 
3670 irq_error:
3671 	stmmac_free_irq(dev, irq_err, irq_idx);
3672 	return ret;
3673 }
3674 
3675 static int stmmac_request_irq_single(struct net_device *dev)
3676 {
3677 	struct stmmac_priv *priv = netdev_priv(dev);
3678 	enum request_irq_err irq_err;
3679 	int ret;
3680 
3681 	ret = request_irq(dev->irq, stmmac_interrupt,
3682 			  IRQF_SHARED, dev->name, dev);
3683 	if (unlikely(ret < 0)) {
3684 		netdev_err(priv->dev,
3685 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3686 			   __func__, dev->irq, ret);
3687 		irq_err = REQ_IRQ_ERR_MAC;
3688 		goto irq_error;
3689 	}
3690 
3691 	/* Request the Wake IRQ in case of another line
3692 	 * is used for WoL
3693 	 */
3694 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3695 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3696 				  IRQF_SHARED, dev->name, dev);
3697 		if (unlikely(ret < 0)) {
3698 			netdev_err(priv->dev,
3699 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3700 				   __func__, priv->wol_irq, ret);
3701 			irq_err = REQ_IRQ_ERR_WOL;
3702 			goto irq_error;
3703 		}
3704 	}
3705 
3706 	/* Request the IRQ lines */
3707 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3708 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3709 				  IRQF_SHARED, dev->name, dev);
3710 		if (unlikely(ret < 0)) {
3711 			netdev_err(priv->dev,
3712 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3713 				   __func__, priv->lpi_irq, ret);
3714 			irq_err = REQ_IRQ_ERR_LPI;
3715 			goto irq_error;
3716 		}
3717 	}
3718 
3719 	return 0;
3720 
3721 irq_error:
3722 	stmmac_free_irq(dev, irq_err, 0);
3723 	return ret;
3724 }
3725 
3726 static int stmmac_request_irq(struct net_device *dev)
3727 {
3728 	struct stmmac_priv *priv = netdev_priv(dev);
3729 	int ret;
3730 
3731 	/* Request the IRQ lines */
3732 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3733 		ret = stmmac_request_irq_multi_msi(dev);
3734 	else
3735 		ret = stmmac_request_irq_single(dev);
3736 
3737 	return ret;
3738 }
3739 
3740 /**
3741  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3742  *  @priv: driver private structure
3743  *  @mtu: MTU to setup the dma queue and buf with
3744  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3745  *  Allocate the Tx/Rx DMA queue and init them.
3746  *  Return value:
3747  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3748  */
3749 static struct stmmac_dma_conf *
3750 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3751 {
3752 	struct stmmac_dma_conf *dma_conf;
3753 	int chan, bfsize, ret;
3754 
3755 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3756 	if (!dma_conf) {
3757 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3758 			   __func__);
3759 		return ERR_PTR(-ENOMEM);
3760 	}
3761 
3762 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3763 	if (bfsize < 0)
3764 		bfsize = 0;
3765 
3766 	if (bfsize < BUF_SIZE_16KiB)
3767 		bfsize = stmmac_set_bfsize(mtu, 0);
3768 
3769 	dma_conf->dma_buf_sz = bfsize;
3770 	/* Chose the tx/rx size from the already defined one in the
3771 	 * priv struct. (if defined)
3772 	 */
3773 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3774 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3775 
3776 	if (!dma_conf->dma_tx_size)
3777 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3778 	if (!dma_conf->dma_rx_size)
3779 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3780 
3781 	/* Earlier check for TBS */
3782 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3783 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3784 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3785 
3786 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3787 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3788 	}
3789 
3790 	ret = alloc_dma_desc_resources(priv, dma_conf);
3791 	if (ret < 0) {
3792 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3793 			   __func__);
3794 		goto alloc_error;
3795 	}
3796 
3797 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3798 	if (ret < 0) {
3799 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3800 			   __func__);
3801 		goto init_error;
3802 	}
3803 
3804 	return dma_conf;
3805 
3806 init_error:
3807 	free_dma_desc_resources(priv, dma_conf);
3808 alloc_error:
3809 	kfree(dma_conf);
3810 	return ERR_PTR(ret);
3811 }
3812 
3813 /**
3814  *  __stmmac_open - open entry point of the driver
3815  *  @dev : pointer to the device structure.
3816  *  @dma_conf :  structure to take the dma data
3817  *  Description:
3818  *  This function is the open entry point of the driver.
3819  *  Return value:
3820  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3821  *  file on failure.
3822  */
3823 static int __stmmac_open(struct net_device *dev,
3824 			 struct stmmac_dma_conf *dma_conf)
3825 {
3826 	struct stmmac_priv *priv = netdev_priv(dev);
3827 	int mode = priv->plat->phy_interface;
3828 	u32 chan;
3829 	int ret;
3830 
3831 	ret = pm_runtime_resume_and_get(priv->device);
3832 	if (ret < 0)
3833 		return ret;
3834 
3835 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3836 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3837 	    (!priv->hw->xpcs ||
3838 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3839 	    !priv->hw->lynx_pcs) {
3840 		ret = stmmac_init_phy(dev);
3841 		if (ret) {
3842 			netdev_err(priv->dev,
3843 				   "%s: Cannot attach to PHY (error: %d)\n",
3844 				   __func__, ret);
3845 			goto init_phy_error;
3846 		}
3847 	}
3848 
3849 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3850 
3851 	buf_sz = dma_conf->dma_buf_sz;
3852 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3853 
3854 	stmmac_reset_queues_param(priv);
3855 
3856 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3857 	    priv->plat->serdes_powerup) {
3858 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3859 		if (ret < 0) {
3860 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3861 				   __func__);
3862 			goto init_error;
3863 		}
3864 	}
3865 
3866 	ret = stmmac_hw_setup(dev, true);
3867 	if (ret < 0) {
3868 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3869 		goto init_error;
3870 	}
3871 
3872 	stmmac_init_coalesce(priv);
3873 
3874 	phylink_start(priv->phylink);
3875 	/* We may have called phylink_speed_down before */
3876 	phylink_speed_up(priv->phylink);
3877 
3878 	ret = stmmac_request_irq(dev);
3879 	if (ret)
3880 		goto irq_error;
3881 
3882 	stmmac_enable_all_queues(priv);
3883 	netif_tx_start_all_queues(priv->dev);
3884 	stmmac_enable_all_dma_irq(priv);
3885 
3886 	return 0;
3887 
3888 irq_error:
3889 	phylink_stop(priv->phylink);
3890 
3891 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3892 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3893 
3894 	stmmac_hw_teardown(dev);
3895 init_error:
3896 	phylink_disconnect_phy(priv->phylink);
3897 init_phy_error:
3898 	pm_runtime_put(priv->device);
3899 	return ret;
3900 }
3901 
3902 static int stmmac_open(struct net_device *dev)
3903 {
3904 	struct stmmac_priv *priv = netdev_priv(dev);
3905 	struct stmmac_dma_conf *dma_conf;
3906 	int ret;
3907 
3908 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3909 	if (IS_ERR(dma_conf))
3910 		return PTR_ERR(dma_conf);
3911 
3912 	ret = __stmmac_open(dev, dma_conf);
3913 	if (ret)
3914 		free_dma_desc_resources(priv, dma_conf);
3915 
3916 	kfree(dma_conf);
3917 	return ret;
3918 }
3919 
3920 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3921 {
3922 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3923 
3924 	if (priv->fpe_wq)
3925 		destroy_workqueue(priv->fpe_wq);
3926 
3927 	netdev_info(priv->dev, "FPE workqueue stop");
3928 }
3929 
3930 /**
3931  *  stmmac_release - close entry point of the driver
3932  *  @dev : device pointer.
3933  *  Description:
3934  *  This is the stop entry point of the driver.
3935  */
3936 static int stmmac_release(struct net_device *dev)
3937 {
3938 	struct stmmac_priv *priv = netdev_priv(dev);
3939 	u32 chan;
3940 
3941 	if (device_may_wakeup(priv->device))
3942 		phylink_speed_down(priv->phylink, false);
3943 	/* Stop and disconnect the PHY */
3944 	phylink_stop(priv->phylink);
3945 	phylink_disconnect_phy(priv->phylink);
3946 
3947 	stmmac_disable_all_queues(priv);
3948 
3949 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3950 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3951 
3952 	netif_tx_disable(dev);
3953 
3954 	/* Free the IRQ lines */
3955 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3956 
3957 	if (priv->eee_enabled) {
3958 		priv->tx_path_in_lpi_mode = false;
3959 		del_timer_sync(&priv->eee_ctrl_timer);
3960 	}
3961 
3962 	/* Stop TX/RX DMA and clear the descriptors */
3963 	stmmac_stop_all_dma(priv);
3964 
3965 	/* Release and free the Rx/Tx resources */
3966 	free_dma_desc_resources(priv, &priv->dma_conf);
3967 
3968 	/* Disable the MAC Rx/Tx */
3969 	stmmac_mac_set(priv, priv->ioaddr, false);
3970 
3971 	/* Powerdown Serdes if there is */
3972 	if (priv->plat->serdes_powerdown)
3973 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3974 
3975 	netif_carrier_off(dev);
3976 
3977 	stmmac_release_ptp(priv);
3978 
3979 	pm_runtime_put(priv->device);
3980 
3981 	if (priv->dma_cap.fpesel)
3982 		stmmac_fpe_stop_wq(priv);
3983 
3984 	return 0;
3985 }
3986 
3987 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3988 			       struct stmmac_tx_queue *tx_q)
3989 {
3990 	u16 tag = 0x0, inner_tag = 0x0;
3991 	u32 inner_type = 0x0;
3992 	struct dma_desc *p;
3993 
3994 	if (!priv->dma_cap.vlins)
3995 		return false;
3996 	if (!skb_vlan_tag_present(skb))
3997 		return false;
3998 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3999 		inner_tag = skb_vlan_tag_get(skb);
4000 		inner_type = STMMAC_VLAN_INSERT;
4001 	}
4002 
4003 	tag = skb_vlan_tag_get(skb);
4004 
4005 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4006 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4007 	else
4008 		p = &tx_q->dma_tx[tx_q->cur_tx];
4009 
4010 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4011 		return false;
4012 
4013 	stmmac_set_tx_owner(priv, p);
4014 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4015 	return true;
4016 }
4017 
4018 /**
4019  *  stmmac_tso_allocator - close entry point of the driver
4020  *  @priv: driver private structure
4021  *  @des: buffer start address
4022  *  @total_len: total length to fill in descriptors
4023  *  @last_segment: condition for the last descriptor
4024  *  @queue: TX queue index
4025  *  Description:
4026  *  This function fills descriptor and request new descriptors according to
4027  *  buffer length to fill
4028  */
4029 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4030 				 int total_len, bool last_segment, u32 queue)
4031 {
4032 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4033 	struct dma_desc *desc;
4034 	u32 buff_size;
4035 	int tmp_len;
4036 
4037 	tmp_len = total_len;
4038 
4039 	while (tmp_len > 0) {
4040 		dma_addr_t curr_addr;
4041 
4042 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4043 						priv->dma_conf.dma_tx_size);
4044 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4045 
4046 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4047 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4048 		else
4049 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4050 
4051 		curr_addr = des + (total_len - tmp_len);
4052 		if (priv->dma_cap.addr64 <= 32)
4053 			desc->des0 = cpu_to_le32(curr_addr);
4054 		else
4055 			stmmac_set_desc_addr(priv, desc, curr_addr);
4056 
4057 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4058 			    TSO_MAX_BUFF_SIZE : tmp_len;
4059 
4060 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4061 				0, 1,
4062 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4063 				0, 0);
4064 
4065 		tmp_len -= TSO_MAX_BUFF_SIZE;
4066 	}
4067 }
4068 
4069 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4070 {
4071 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4072 	int desc_size;
4073 
4074 	if (likely(priv->extend_desc))
4075 		desc_size = sizeof(struct dma_extended_desc);
4076 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4077 		desc_size = sizeof(struct dma_edesc);
4078 	else
4079 		desc_size = sizeof(struct dma_desc);
4080 
4081 	/* The own bit must be the latest setting done when prepare the
4082 	 * descriptor and then barrier is needed to make sure that
4083 	 * all is coherent before granting the DMA engine.
4084 	 */
4085 	wmb();
4086 
4087 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4088 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4089 }
4090 
4091 /**
4092  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4093  *  @skb : the socket buffer
4094  *  @dev : device pointer
4095  *  Description: this is the transmit function that is called on TSO frames
4096  *  (support available on GMAC4 and newer chips).
4097  *  Diagram below show the ring programming in case of TSO frames:
4098  *
4099  *  First Descriptor
4100  *   --------
4101  *   | DES0 |---> buffer1 = L2/L3/L4 header
4102  *   | DES1 |---> TCP Payload (can continue on next descr...)
4103  *   | DES2 |---> buffer 1 and 2 len
4104  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4105  *   --------
4106  *	|
4107  *     ...
4108  *	|
4109  *   --------
4110  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4111  *   | DES1 | --|
4112  *   | DES2 | --> buffer 1 and 2 len
4113  *   | DES3 |
4114  *   --------
4115  *
4116  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4117  */
4118 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4119 {
4120 	struct dma_desc *desc, *first, *mss_desc = NULL;
4121 	struct stmmac_priv *priv = netdev_priv(dev);
4122 	int nfrags = skb_shinfo(skb)->nr_frags;
4123 	u32 queue = skb_get_queue_mapping(skb);
4124 	unsigned int first_entry, tx_packets;
4125 	int tmp_pay_len = 0, first_tx;
4126 	struct stmmac_tx_queue *tx_q;
4127 	bool has_vlan, set_ic;
4128 	u8 proto_hdr_len, hdr;
4129 	unsigned long flags;
4130 	u32 pay_len, mss;
4131 	dma_addr_t des;
4132 	int i;
4133 
4134 	tx_q = &priv->dma_conf.tx_queue[queue];
4135 	first_tx = tx_q->cur_tx;
4136 
4137 	/* Compute header lengths */
4138 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4139 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4140 		hdr = sizeof(struct udphdr);
4141 	} else {
4142 		proto_hdr_len = skb_tcp_all_headers(skb);
4143 		hdr = tcp_hdrlen(skb);
4144 	}
4145 
4146 	/* Desc availability based on threshold should be enough safe */
4147 	if (unlikely(stmmac_tx_avail(priv, queue) <
4148 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4149 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4150 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4151 								queue));
4152 			/* This is a hard error, log it. */
4153 			netdev_err(priv->dev,
4154 				   "%s: Tx Ring full when queue awake\n",
4155 				   __func__);
4156 		}
4157 		return NETDEV_TX_BUSY;
4158 	}
4159 
4160 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4161 
4162 	mss = skb_shinfo(skb)->gso_size;
4163 
4164 	/* set new MSS value if needed */
4165 	if (mss != tx_q->mss) {
4166 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4167 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4168 		else
4169 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4170 
4171 		stmmac_set_mss(priv, mss_desc, mss);
4172 		tx_q->mss = mss;
4173 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4174 						priv->dma_conf.dma_tx_size);
4175 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4176 	}
4177 
4178 	if (netif_msg_tx_queued(priv)) {
4179 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4180 			__func__, hdr, proto_hdr_len, pay_len, mss);
4181 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4182 			skb->data_len);
4183 	}
4184 
4185 	/* Check if VLAN can be inserted by HW */
4186 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4187 
4188 	first_entry = tx_q->cur_tx;
4189 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4190 
4191 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4192 		desc = &tx_q->dma_entx[first_entry].basic;
4193 	else
4194 		desc = &tx_q->dma_tx[first_entry];
4195 	first = desc;
4196 
4197 	if (has_vlan)
4198 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4199 
4200 	/* first descriptor: fill Headers on Buf1 */
4201 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4202 			     DMA_TO_DEVICE);
4203 	if (dma_mapping_error(priv->device, des))
4204 		goto dma_map_err;
4205 
4206 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4207 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4208 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4209 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4210 
4211 	if (priv->dma_cap.addr64 <= 32) {
4212 		first->des0 = cpu_to_le32(des);
4213 
4214 		/* Fill start of payload in buff2 of first descriptor */
4215 		if (pay_len)
4216 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4217 
4218 		/* If needed take extra descriptors to fill the remaining payload */
4219 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4220 	} else {
4221 		stmmac_set_desc_addr(priv, first, des);
4222 		tmp_pay_len = pay_len;
4223 		des += proto_hdr_len;
4224 		pay_len = 0;
4225 	}
4226 
4227 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4228 
4229 	/* Prepare fragments */
4230 	for (i = 0; i < nfrags; i++) {
4231 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4232 
4233 		des = skb_frag_dma_map(priv->device, frag, 0,
4234 				       skb_frag_size(frag),
4235 				       DMA_TO_DEVICE);
4236 		if (dma_mapping_error(priv->device, des))
4237 			goto dma_map_err;
4238 
4239 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4240 				     (i == nfrags - 1), queue);
4241 
4242 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4243 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4244 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4245 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4246 	}
4247 
4248 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4249 
4250 	/* Only the last descriptor gets to point to the skb. */
4251 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4252 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4253 
4254 	/* Manage tx mitigation */
4255 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4256 	tx_q->tx_count_frames += tx_packets;
4257 
4258 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4259 		set_ic = true;
4260 	else if (!priv->tx_coal_frames[queue])
4261 		set_ic = false;
4262 	else if (tx_packets > priv->tx_coal_frames[queue])
4263 		set_ic = true;
4264 	else if ((tx_q->tx_count_frames %
4265 		  priv->tx_coal_frames[queue]) < tx_packets)
4266 		set_ic = true;
4267 	else
4268 		set_ic = false;
4269 
4270 	if (set_ic) {
4271 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4272 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4273 		else
4274 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4275 
4276 		tx_q->tx_count_frames = 0;
4277 		stmmac_set_tx_ic(priv, desc);
4278 	}
4279 
4280 	/* We've used all descriptors we need for this skb, however,
4281 	 * advance cur_tx so that it references a fresh descriptor.
4282 	 * ndo_start_xmit will fill this descriptor the next time it's
4283 	 * called and stmmac_tx_clean may clean up to this descriptor.
4284 	 */
4285 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4286 
4287 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4288 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4289 			  __func__);
4290 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4291 	}
4292 
4293 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4294 	tx_q->txq_stats.tx_bytes += skb->len;
4295 	tx_q->txq_stats.tx_tso_frames++;
4296 	tx_q->txq_stats.tx_tso_nfrags += nfrags;
4297 	if (set_ic)
4298 		tx_q->txq_stats.tx_set_ic_bit++;
4299 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4300 
4301 	if (priv->sarc_type)
4302 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4303 
4304 	skb_tx_timestamp(skb);
4305 
4306 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4307 		     priv->hwts_tx_en)) {
4308 		/* declare that device is doing timestamping */
4309 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4310 		stmmac_enable_tx_timestamp(priv, first);
4311 	}
4312 
4313 	/* Complete the first descriptor before granting the DMA */
4314 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4315 			proto_hdr_len,
4316 			pay_len,
4317 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4318 			hdr / 4, (skb->len - proto_hdr_len));
4319 
4320 	/* If context desc is used to change MSS */
4321 	if (mss_desc) {
4322 		/* Make sure that first descriptor has been completely
4323 		 * written, including its own bit. This is because MSS is
4324 		 * actually before first descriptor, so we need to make
4325 		 * sure that MSS's own bit is the last thing written.
4326 		 */
4327 		dma_wmb();
4328 		stmmac_set_tx_owner(priv, mss_desc);
4329 	}
4330 
4331 	if (netif_msg_pktdata(priv)) {
4332 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4333 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4334 			tx_q->cur_tx, first, nfrags);
4335 		pr_info(">>> frame to be transmitted: ");
4336 		print_pkt(skb->data, skb_headlen(skb));
4337 	}
4338 
4339 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4340 
4341 	stmmac_flush_tx_descriptors(priv, queue);
4342 	stmmac_tx_timer_arm(priv, queue);
4343 
4344 	return NETDEV_TX_OK;
4345 
4346 dma_map_err:
4347 	dev_err(priv->device, "Tx dma map failed\n");
4348 	dev_kfree_skb(skb);
4349 	priv->xstats.tx_dropped++;
4350 	return NETDEV_TX_OK;
4351 }
4352 
4353 /**
4354  *  stmmac_xmit - Tx entry point of the driver
4355  *  @skb : the socket buffer
4356  *  @dev : device pointer
4357  *  Description : this is the tx entry point of the driver.
4358  *  It programs the chain or the ring and supports oversized frames
4359  *  and SG feature.
4360  */
4361 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4362 {
4363 	unsigned int first_entry, tx_packets, enh_desc;
4364 	struct stmmac_priv *priv = netdev_priv(dev);
4365 	unsigned int nopaged_len = skb_headlen(skb);
4366 	int i, csum_insertion = 0, is_jumbo = 0;
4367 	u32 queue = skb_get_queue_mapping(skb);
4368 	int nfrags = skb_shinfo(skb)->nr_frags;
4369 	int gso = skb_shinfo(skb)->gso_type;
4370 	struct dma_edesc *tbs_desc = NULL;
4371 	struct dma_desc *desc, *first;
4372 	struct stmmac_tx_queue *tx_q;
4373 	bool has_vlan, set_ic;
4374 	int entry, first_tx;
4375 	unsigned long flags;
4376 	dma_addr_t des;
4377 
4378 	tx_q = &priv->dma_conf.tx_queue[queue];
4379 	first_tx = tx_q->cur_tx;
4380 
4381 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4382 		stmmac_disable_eee_mode(priv);
4383 
4384 	/* Manage oversized TCP frames for GMAC4 device */
4385 	if (skb_is_gso(skb) && priv->tso) {
4386 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4387 			return stmmac_tso_xmit(skb, dev);
4388 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4389 			return stmmac_tso_xmit(skb, dev);
4390 	}
4391 
4392 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4393 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4394 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4395 								queue));
4396 			/* This is a hard error, log it. */
4397 			netdev_err(priv->dev,
4398 				   "%s: Tx Ring full when queue awake\n",
4399 				   __func__);
4400 		}
4401 		return NETDEV_TX_BUSY;
4402 	}
4403 
4404 	/* Check if VLAN can be inserted by HW */
4405 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4406 
4407 	entry = tx_q->cur_tx;
4408 	first_entry = entry;
4409 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4410 
4411 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4412 
4413 	if (likely(priv->extend_desc))
4414 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4415 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4416 		desc = &tx_q->dma_entx[entry].basic;
4417 	else
4418 		desc = tx_q->dma_tx + entry;
4419 
4420 	first = desc;
4421 
4422 	if (has_vlan)
4423 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4424 
4425 	enh_desc = priv->plat->enh_desc;
4426 	/* To program the descriptors according to the size of the frame */
4427 	if (enh_desc)
4428 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4429 
4430 	if (unlikely(is_jumbo)) {
4431 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4432 		if (unlikely(entry < 0) && (entry != -EINVAL))
4433 			goto dma_map_err;
4434 	}
4435 
4436 	for (i = 0; i < nfrags; i++) {
4437 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4438 		int len = skb_frag_size(frag);
4439 		bool last_segment = (i == (nfrags - 1));
4440 
4441 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4442 		WARN_ON(tx_q->tx_skbuff[entry]);
4443 
4444 		if (likely(priv->extend_desc))
4445 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4446 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4447 			desc = &tx_q->dma_entx[entry].basic;
4448 		else
4449 			desc = tx_q->dma_tx + entry;
4450 
4451 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4452 				       DMA_TO_DEVICE);
4453 		if (dma_mapping_error(priv->device, des))
4454 			goto dma_map_err; /* should reuse desc w/o issues */
4455 
4456 		tx_q->tx_skbuff_dma[entry].buf = des;
4457 
4458 		stmmac_set_desc_addr(priv, desc, des);
4459 
4460 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4461 		tx_q->tx_skbuff_dma[entry].len = len;
4462 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4463 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4464 
4465 		/* Prepare the descriptor and set the own bit too */
4466 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4467 				priv->mode, 1, last_segment, skb->len);
4468 	}
4469 
4470 	/* Only the last descriptor gets to point to the skb. */
4471 	tx_q->tx_skbuff[entry] = skb;
4472 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4473 
4474 	/* According to the coalesce parameter the IC bit for the latest
4475 	 * segment is reset and the timer re-started to clean the tx status.
4476 	 * This approach takes care about the fragments: desc is the first
4477 	 * element in case of no SG.
4478 	 */
4479 	tx_packets = (entry + 1) - first_tx;
4480 	tx_q->tx_count_frames += tx_packets;
4481 
4482 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4483 		set_ic = true;
4484 	else if (!priv->tx_coal_frames[queue])
4485 		set_ic = false;
4486 	else if (tx_packets > priv->tx_coal_frames[queue])
4487 		set_ic = true;
4488 	else if ((tx_q->tx_count_frames %
4489 		  priv->tx_coal_frames[queue]) < tx_packets)
4490 		set_ic = true;
4491 	else
4492 		set_ic = false;
4493 
4494 	if (set_ic) {
4495 		if (likely(priv->extend_desc))
4496 			desc = &tx_q->dma_etx[entry].basic;
4497 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4498 			desc = &tx_q->dma_entx[entry].basic;
4499 		else
4500 			desc = &tx_q->dma_tx[entry];
4501 
4502 		tx_q->tx_count_frames = 0;
4503 		stmmac_set_tx_ic(priv, desc);
4504 	}
4505 
4506 	/* We've used all descriptors we need for this skb, however,
4507 	 * advance cur_tx so that it references a fresh descriptor.
4508 	 * ndo_start_xmit will fill this descriptor the next time it's
4509 	 * called and stmmac_tx_clean may clean up to this descriptor.
4510 	 */
4511 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4512 	tx_q->cur_tx = entry;
4513 
4514 	if (netif_msg_pktdata(priv)) {
4515 		netdev_dbg(priv->dev,
4516 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4517 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4518 			   entry, first, nfrags);
4519 
4520 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4521 		print_pkt(skb->data, skb->len);
4522 	}
4523 
4524 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4525 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4526 			  __func__);
4527 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4528 	}
4529 
4530 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4531 	tx_q->txq_stats.tx_bytes += skb->len;
4532 	if (set_ic)
4533 		tx_q->txq_stats.tx_set_ic_bit++;
4534 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4535 
4536 	if (priv->sarc_type)
4537 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4538 
4539 	skb_tx_timestamp(skb);
4540 
4541 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4542 	 * problems because all the descriptors are actually ready to be
4543 	 * passed to the DMA engine.
4544 	 */
4545 	if (likely(!is_jumbo)) {
4546 		bool last_segment = (nfrags == 0);
4547 
4548 		des = dma_map_single(priv->device, skb->data,
4549 				     nopaged_len, DMA_TO_DEVICE);
4550 		if (dma_mapping_error(priv->device, des))
4551 			goto dma_map_err;
4552 
4553 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4554 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4555 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4556 
4557 		stmmac_set_desc_addr(priv, first, des);
4558 
4559 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4560 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4561 
4562 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4563 			     priv->hwts_tx_en)) {
4564 			/* declare that device is doing timestamping */
4565 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4566 			stmmac_enable_tx_timestamp(priv, first);
4567 		}
4568 
4569 		/* Prepare the first descriptor setting the OWN bit too */
4570 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4571 				csum_insertion, priv->mode, 0, last_segment,
4572 				skb->len);
4573 	}
4574 
4575 	if (tx_q->tbs & STMMAC_TBS_EN) {
4576 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4577 
4578 		tbs_desc = &tx_q->dma_entx[first_entry];
4579 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4580 	}
4581 
4582 	stmmac_set_tx_owner(priv, first);
4583 
4584 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4585 
4586 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4587 
4588 	stmmac_flush_tx_descriptors(priv, queue);
4589 	stmmac_tx_timer_arm(priv, queue);
4590 
4591 	return NETDEV_TX_OK;
4592 
4593 dma_map_err:
4594 	netdev_err(priv->dev, "Tx DMA map failed\n");
4595 	dev_kfree_skb(skb);
4596 	priv->xstats.tx_dropped++;
4597 	return NETDEV_TX_OK;
4598 }
4599 
4600 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4601 {
4602 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4603 	__be16 vlan_proto = veth->h_vlan_proto;
4604 	u16 vlanid;
4605 
4606 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4607 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4608 	    (vlan_proto == htons(ETH_P_8021AD) &&
4609 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4610 		/* pop the vlan tag */
4611 		vlanid = ntohs(veth->h_vlan_TCI);
4612 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4613 		skb_pull(skb, VLAN_HLEN);
4614 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4615 	}
4616 }
4617 
4618 /**
4619  * stmmac_rx_refill - refill used skb preallocated buffers
4620  * @priv: driver private structure
4621  * @queue: RX queue index
4622  * Description : this is to reallocate the skb for the reception process
4623  * that is based on zero-copy.
4624  */
4625 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4626 {
4627 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4628 	int dirty = stmmac_rx_dirty(priv, queue);
4629 	unsigned int entry = rx_q->dirty_rx;
4630 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4631 
4632 	if (priv->dma_cap.host_dma_width <= 32)
4633 		gfp |= GFP_DMA32;
4634 
4635 	while (dirty-- > 0) {
4636 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4637 		struct dma_desc *p;
4638 		bool use_rx_wd;
4639 
4640 		if (priv->extend_desc)
4641 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4642 		else
4643 			p = rx_q->dma_rx + entry;
4644 
4645 		if (!buf->page) {
4646 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4647 			if (!buf->page)
4648 				break;
4649 		}
4650 
4651 		if (priv->sph && !buf->sec_page) {
4652 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4653 			if (!buf->sec_page)
4654 				break;
4655 
4656 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4657 		}
4658 
4659 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4660 
4661 		stmmac_set_desc_addr(priv, p, buf->addr);
4662 		if (priv->sph)
4663 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4664 		else
4665 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4666 		stmmac_refill_desc3(priv, rx_q, p);
4667 
4668 		rx_q->rx_count_frames++;
4669 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4670 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4671 			rx_q->rx_count_frames = 0;
4672 
4673 		use_rx_wd = !priv->rx_coal_frames[queue];
4674 		use_rx_wd |= rx_q->rx_count_frames > 0;
4675 		if (!priv->use_riwt)
4676 			use_rx_wd = false;
4677 
4678 		dma_wmb();
4679 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4680 
4681 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4682 	}
4683 	rx_q->dirty_rx = entry;
4684 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4685 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4686 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4687 }
4688 
4689 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4690 				       struct dma_desc *p,
4691 				       int status, unsigned int len)
4692 {
4693 	unsigned int plen = 0, hlen = 0;
4694 	int coe = priv->hw->rx_csum;
4695 
4696 	/* Not first descriptor, buffer is always zero */
4697 	if (priv->sph && len)
4698 		return 0;
4699 
4700 	/* First descriptor, get split header length */
4701 	stmmac_get_rx_header_len(priv, p, &hlen);
4702 	if (priv->sph && hlen) {
4703 		priv->xstats.rx_split_hdr_pkt_n++;
4704 		return hlen;
4705 	}
4706 
4707 	/* First descriptor, not last descriptor and not split header */
4708 	if (status & rx_not_ls)
4709 		return priv->dma_conf.dma_buf_sz;
4710 
4711 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4712 
4713 	/* First descriptor and last descriptor and not split header */
4714 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4715 }
4716 
4717 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4718 				       struct dma_desc *p,
4719 				       int status, unsigned int len)
4720 {
4721 	int coe = priv->hw->rx_csum;
4722 	unsigned int plen = 0;
4723 
4724 	/* Not split header, buffer is not available */
4725 	if (!priv->sph)
4726 		return 0;
4727 
4728 	/* Not last descriptor */
4729 	if (status & rx_not_ls)
4730 		return priv->dma_conf.dma_buf_sz;
4731 
4732 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4733 
4734 	/* Last descriptor */
4735 	return plen - len;
4736 }
4737 
4738 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4739 				struct xdp_frame *xdpf, bool dma_map)
4740 {
4741 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4742 	unsigned int entry = tx_q->cur_tx;
4743 	struct dma_desc *tx_desc;
4744 	dma_addr_t dma_addr;
4745 	bool set_ic;
4746 
4747 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4748 		return STMMAC_XDP_CONSUMED;
4749 
4750 	if (likely(priv->extend_desc))
4751 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4752 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4753 		tx_desc = &tx_q->dma_entx[entry].basic;
4754 	else
4755 		tx_desc = tx_q->dma_tx + entry;
4756 
4757 	if (dma_map) {
4758 		dma_addr = dma_map_single(priv->device, xdpf->data,
4759 					  xdpf->len, DMA_TO_DEVICE);
4760 		if (dma_mapping_error(priv->device, dma_addr))
4761 			return STMMAC_XDP_CONSUMED;
4762 
4763 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4764 	} else {
4765 		struct page *page = virt_to_page(xdpf->data);
4766 
4767 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4768 			   xdpf->headroom;
4769 		dma_sync_single_for_device(priv->device, dma_addr,
4770 					   xdpf->len, DMA_BIDIRECTIONAL);
4771 
4772 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4773 	}
4774 
4775 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4776 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4777 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4778 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4779 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4780 
4781 	tx_q->xdpf[entry] = xdpf;
4782 
4783 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4784 
4785 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4786 			       true, priv->mode, true, true,
4787 			       xdpf->len);
4788 
4789 	tx_q->tx_count_frames++;
4790 
4791 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4792 		set_ic = true;
4793 	else
4794 		set_ic = false;
4795 
4796 	if (set_ic) {
4797 		unsigned long flags;
4798 		tx_q->tx_count_frames = 0;
4799 		stmmac_set_tx_ic(priv, tx_desc);
4800 		flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4801 		tx_q->txq_stats.tx_set_ic_bit++;
4802 		u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4803 	}
4804 
4805 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4806 
4807 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4808 	tx_q->cur_tx = entry;
4809 
4810 	return STMMAC_XDP_TX;
4811 }
4812 
4813 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4814 				   int cpu)
4815 {
4816 	int index = cpu;
4817 
4818 	if (unlikely(index < 0))
4819 		index = 0;
4820 
4821 	while (index >= priv->plat->tx_queues_to_use)
4822 		index -= priv->plat->tx_queues_to_use;
4823 
4824 	return index;
4825 }
4826 
4827 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4828 				struct xdp_buff *xdp)
4829 {
4830 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4831 	int cpu = smp_processor_id();
4832 	struct netdev_queue *nq;
4833 	int queue;
4834 	int res;
4835 
4836 	if (unlikely(!xdpf))
4837 		return STMMAC_XDP_CONSUMED;
4838 
4839 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4840 	nq = netdev_get_tx_queue(priv->dev, queue);
4841 
4842 	__netif_tx_lock(nq, cpu);
4843 	/* Avoids TX time-out as we are sharing with slow path */
4844 	txq_trans_cond_update(nq);
4845 
4846 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4847 	if (res == STMMAC_XDP_TX)
4848 		stmmac_flush_tx_descriptors(priv, queue);
4849 
4850 	__netif_tx_unlock(nq);
4851 
4852 	return res;
4853 }
4854 
4855 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4856 				 struct bpf_prog *prog,
4857 				 struct xdp_buff *xdp)
4858 {
4859 	u32 act;
4860 	int res;
4861 
4862 	act = bpf_prog_run_xdp(prog, xdp);
4863 	switch (act) {
4864 	case XDP_PASS:
4865 		res = STMMAC_XDP_PASS;
4866 		break;
4867 	case XDP_TX:
4868 		res = stmmac_xdp_xmit_back(priv, xdp);
4869 		break;
4870 	case XDP_REDIRECT:
4871 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4872 			res = STMMAC_XDP_CONSUMED;
4873 		else
4874 			res = STMMAC_XDP_REDIRECT;
4875 		break;
4876 	default:
4877 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4878 		fallthrough;
4879 	case XDP_ABORTED:
4880 		trace_xdp_exception(priv->dev, prog, act);
4881 		fallthrough;
4882 	case XDP_DROP:
4883 		res = STMMAC_XDP_CONSUMED;
4884 		break;
4885 	}
4886 
4887 	return res;
4888 }
4889 
4890 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4891 					   struct xdp_buff *xdp)
4892 {
4893 	struct bpf_prog *prog;
4894 	int res;
4895 
4896 	prog = READ_ONCE(priv->xdp_prog);
4897 	if (!prog) {
4898 		res = STMMAC_XDP_PASS;
4899 		goto out;
4900 	}
4901 
4902 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4903 out:
4904 	return ERR_PTR(-res);
4905 }
4906 
4907 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4908 				   int xdp_status)
4909 {
4910 	int cpu = smp_processor_id();
4911 	int queue;
4912 
4913 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4914 
4915 	if (xdp_status & STMMAC_XDP_TX)
4916 		stmmac_tx_timer_arm(priv, queue);
4917 
4918 	if (xdp_status & STMMAC_XDP_REDIRECT)
4919 		xdp_do_flush();
4920 }
4921 
4922 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4923 					       struct xdp_buff *xdp)
4924 {
4925 	unsigned int metasize = xdp->data - xdp->data_meta;
4926 	unsigned int datasize = xdp->data_end - xdp->data;
4927 	struct sk_buff *skb;
4928 
4929 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4930 			       xdp->data_end - xdp->data_hard_start,
4931 			       GFP_ATOMIC | __GFP_NOWARN);
4932 	if (unlikely(!skb))
4933 		return NULL;
4934 
4935 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4936 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4937 	if (metasize)
4938 		skb_metadata_set(skb, metasize);
4939 
4940 	return skb;
4941 }
4942 
4943 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4944 				   struct dma_desc *p, struct dma_desc *np,
4945 				   struct xdp_buff *xdp)
4946 {
4947 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4948 	struct stmmac_channel *ch = &priv->channel[queue];
4949 	unsigned int len = xdp->data_end - xdp->data;
4950 	enum pkt_hash_types hash_type;
4951 	int coe = priv->hw->rx_csum;
4952 	unsigned long flags;
4953 	struct sk_buff *skb;
4954 	u32 hash;
4955 
4956 	skb = stmmac_construct_skb_zc(ch, xdp);
4957 	if (!skb) {
4958 		priv->xstats.rx_dropped++;
4959 		return;
4960 	}
4961 
4962 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4963 	stmmac_rx_vlan(priv->dev, skb);
4964 	skb->protocol = eth_type_trans(skb, priv->dev);
4965 
4966 	if (unlikely(!coe))
4967 		skb_checksum_none_assert(skb);
4968 	else
4969 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4970 
4971 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4972 		skb_set_hash(skb, hash, hash_type);
4973 
4974 	skb_record_rx_queue(skb, queue);
4975 	napi_gro_receive(&ch->rxtx_napi, skb);
4976 
4977 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
4978 	rx_q->rxq_stats.rx_pkt_n++;
4979 	rx_q->rxq_stats.rx_bytes += len;
4980 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
4981 }
4982 
4983 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4984 {
4985 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4986 	unsigned int entry = rx_q->dirty_rx;
4987 	struct dma_desc *rx_desc = NULL;
4988 	bool ret = true;
4989 
4990 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4991 
4992 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4993 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4994 		dma_addr_t dma_addr;
4995 		bool use_rx_wd;
4996 
4997 		if (!buf->xdp) {
4998 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4999 			if (!buf->xdp) {
5000 				ret = false;
5001 				break;
5002 			}
5003 		}
5004 
5005 		if (priv->extend_desc)
5006 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5007 		else
5008 			rx_desc = rx_q->dma_rx + entry;
5009 
5010 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5011 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5012 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5013 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5014 
5015 		rx_q->rx_count_frames++;
5016 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5017 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5018 			rx_q->rx_count_frames = 0;
5019 
5020 		use_rx_wd = !priv->rx_coal_frames[queue];
5021 		use_rx_wd |= rx_q->rx_count_frames > 0;
5022 		if (!priv->use_riwt)
5023 			use_rx_wd = false;
5024 
5025 		dma_wmb();
5026 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5027 
5028 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5029 	}
5030 
5031 	if (rx_desc) {
5032 		rx_q->dirty_rx = entry;
5033 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5034 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5035 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5036 	}
5037 
5038 	return ret;
5039 }
5040 
5041 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5042 {
5043 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5044 	 * to represent incoming packet, whereas cb field in the same structure
5045 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5046 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5047 	 */
5048 	return (struct stmmac_xdp_buff *)xdp;
5049 }
5050 
5051 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5052 {
5053 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5054 	unsigned int count = 0, error = 0, len = 0;
5055 	int dirty = stmmac_rx_dirty(priv, queue);
5056 	unsigned int next_entry = rx_q->cur_rx;
5057 	u32 rx_errors = 0, rx_dropped = 0;
5058 	unsigned int desc_size;
5059 	struct bpf_prog *prog;
5060 	bool failure = false;
5061 	unsigned long flags;
5062 	int xdp_status = 0;
5063 	int status = 0;
5064 
5065 	if (netif_msg_rx_status(priv)) {
5066 		void *rx_head;
5067 
5068 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5069 		if (priv->extend_desc) {
5070 			rx_head = (void *)rx_q->dma_erx;
5071 			desc_size = sizeof(struct dma_extended_desc);
5072 		} else {
5073 			rx_head = (void *)rx_q->dma_rx;
5074 			desc_size = sizeof(struct dma_desc);
5075 		}
5076 
5077 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5078 				    rx_q->dma_rx_phy, desc_size);
5079 	}
5080 	while (count < limit) {
5081 		struct stmmac_rx_buffer *buf;
5082 		struct stmmac_xdp_buff *ctx;
5083 		unsigned int buf1_len = 0;
5084 		struct dma_desc *np, *p;
5085 		int entry;
5086 		int res;
5087 
5088 		if (!count && rx_q->state_saved) {
5089 			error = rx_q->state.error;
5090 			len = rx_q->state.len;
5091 		} else {
5092 			rx_q->state_saved = false;
5093 			error = 0;
5094 			len = 0;
5095 		}
5096 
5097 		if (count >= limit)
5098 			break;
5099 
5100 read_again:
5101 		buf1_len = 0;
5102 		entry = next_entry;
5103 		buf = &rx_q->buf_pool[entry];
5104 
5105 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5106 			failure = failure ||
5107 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5108 			dirty = 0;
5109 		}
5110 
5111 		if (priv->extend_desc)
5112 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5113 		else
5114 			p = rx_q->dma_rx + entry;
5115 
5116 		/* read the status of the incoming frame */
5117 		status = stmmac_rx_status(priv, &priv->xstats, p);
5118 		/* check if managed by the DMA otherwise go ahead */
5119 		if (unlikely(status & dma_own))
5120 			break;
5121 
5122 		/* Prefetch the next RX descriptor */
5123 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5124 						priv->dma_conf.dma_rx_size);
5125 		next_entry = rx_q->cur_rx;
5126 
5127 		if (priv->extend_desc)
5128 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5129 		else
5130 			np = rx_q->dma_rx + next_entry;
5131 
5132 		prefetch(np);
5133 
5134 		/* Ensure a valid XSK buffer before proceed */
5135 		if (!buf->xdp)
5136 			break;
5137 
5138 		if (priv->extend_desc)
5139 			stmmac_rx_extended_status(priv, &priv->xstats,
5140 						  rx_q->dma_erx + entry);
5141 		if (unlikely(status == discard_frame)) {
5142 			xsk_buff_free(buf->xdp);
5143 			buf->xdp = NULL;
5144 			dirty++;
5145 			error = 1;
5146 			if (!priv->hwts_rx_en)
5147 				rx_errors++;
5148 		}
5149 
5150 		if (unlikely(error && (status & rx_not_ls)))
5151 			goto read_again;
5152 		if (unlikely(error)) {
5153 			count++;
5154 			continue;
5155 		}
5156 
5157 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5158 		if (likely(status & rx_not_ls)) {
5159 			xsk_buff_free(buf->xdp);
5160 			buf->xdp = NULL;
5161 			dirty++;
5162 			count++;
5163 			goto read_again;
5164 		}
5165 
5166 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5167 		ctx->priv = priv;
5168 		ctx->desc = p;
5169 		ctx->ndesc = np;
5170 
5171 		/* XDP ZC Frame only support primary buffers for now */
5172 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5173 		len += buf1_len;
5174 
5175 		/* ACS is disabled; strip manually. */
5176 		if (likely(!(status & rx_not_ls))) {
5177 			buf1_len -= ETH_FCS_LEN;
5178 			len -= ETH_FCS_LEN;
5179 		}
5180 
5181 		/* RX buffer is good and fit into a XSK pool buffer */
5182 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5183 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5184 
5185 		prog = READ_ONCE(priv->xdp_prog);
5186 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5187 
5188 		switch (res) {
5189 		case STMMAC_XDP_PASS:
5190 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5191 			xsk_buff_free(buf->xdp);
5192 			break;
5193 		case STMMAC_XDP_CONSUMED:
5194 			xsk_buff_free(buf->xdp);
5195 			rx_dropped++;
5196 			break;
5197 		case STMMAC_XDP_TX:
5198 		case STMMAC_XDP_REDIRECT:
5199 			xdp_status |= res;
5200 			break;
5201 		}
5202 
5203 		buf->xdp = NULL;
5204 		dirty++;
5205 		count++;
5206 	}
5207 
5208 	if (status & rx_not_ls) {
5209 		rx_q->state_saved = true;
5210 		rx_q->state.error = error;
5211 		rx_q->state.len = len;
5212 	}
5213 
5214 	stmmac_finalize_xdp_rx(priv, xdp_status);
5215 
5216 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5217 	rx_q->rxq_stats.rx_pkt_n += count;
5218 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5219 
5220 	priv->xstats.rx_dropped += rx_dropped;
5221 	priv->xstats.rx_errors += rx_errors;
5222 
5223 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5224 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5225 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5226 		else
5227 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5228 
5229 		return (int)count;
5230 	}
5231 
5232 	return failure ? limit : (int)count;
5233 }
5234 
5235 /**
5236  * stmmac_rx - manage the receive process
5237  * @priv: driver private structure
5238  * @limit: napi bugget
5239  * @queue: RX queue index.
5240  * Description :  this the function called by the napi poll method.
5241  * It gets all the frames inside the ring.
5242  */
5243 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5244 {
5245 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5246 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5247 	struct stmmac_channel *ch = &priv->channel[queue];
5248 	unsigned int count = 0, error = 0, len = 0;
5249 	int status = 0, coe = priv->hw->rx_csum;
5250 	unsigned int next_entry = rx_q->cur_rx;
5251 	enum dma_data_direction dma_dir;
5252 	unsigned int desc_size;
5253 	struct sk_buff *skb = NULL;
5254 	struct stmmac_xdp_buff ctx;
5255 	unsigned long flags;
5256 	int xdp_status = 0;
5257 	int buf_sz;
5258 
5259 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5260 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5261 
5262 	if (netif_msg_rx_status(priv)) {
5263 		void *rx_head;
5264 
5265 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5266 		if (priv->extend_desc) {
5267 			rx_head = (void *)rx_q->dma_erx;
5268 			desc_size = sizeof(struct dma_extended_desc);
5269 		} else {
5270 			rx_head = (void *)rx_q->dma_rx;
5271 			desc_size = sizeof(struct dma_desc);
5272 		}
5273 
5274 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5275 				    rx_q->dma_rx_phy, desc_size);
5276 	}
5277 	while (count < limit) {
5278 		unsigned int buf1_len = 0, buf2_len = 0;
5279 		enum pkt_hash_types hash_type;
5280 		struct stmmac_rx_buffer *buf;
5281 		struct dma_desc *np, *p;
5282 		int entry;
5283 		u32 hash;
5284 
5285 		if (!count && rx_q->state_saved) {
5286 			skb = rx_q->state.skb;
5287 			error = rx_q->state.error;
5288 			len = rx_q->state.len;
5289 		} else {
5290 			rx_q->state_saved = false;
5291 			skb = NULL;
5292 			error = 0;
5293 			len = 0;
5294 		}
5295 
5296 		if (count >= limit)
5297 			break;
5298 
5299 read_again:
5300 		buf1_len = 0;
5301 		buf2_len = 0;
5302 		entry = next_entry;
5303 		buf = &rx_q->buf_pool[entry];
5304 
5305 		if (priv->extend_desc)
5306 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5307 		else
5308 			p = rx_q->dma_rx + entry;
5309 
5310 		/* read the status of the incoming frame */
5311 		status = stmmac_rx_status(priv, &priv->xstats, p);
5312 		/* check if managed by the DMA otherwise go ahead */
5313 		if (unlikely(status & dma_own))
5314 			break;
5315 
5316 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5317 						priv->dma_conf.dma_rx_size);
5318 		next_entry = rx_q->cur_rx;
5319 
5320 		if (priv->extend_desc)
5321 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5322 		else
5323 			np = rx_q->dma_rx + next_entry;
5324 
5325 		prefetch(np);
5326 
5327 		if (priv->extend_desc)
5328 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5329 		if (unlikely(status == discard_frame)) {
5330 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5331 			buf->page = NULL;
5332 			error = 1;
5333 			if (!priv->hwts_rx_en)
5334 				rx_errors++;
5335 		}
5336 
5337 		if (unlikely(error && (status & rx_not_ls)))
5338 			goto read_again;
5339 		if (unlikely(error)) {
5340 			dev_kfree_skb(skb);
5341 			skb = NULL;
5342 			count++;
5343 			continue;
5344 		}
5345 
5346 		/* Buffer is good. Go on. */
5347 
5348 		prefetch(page_address(buf->page) + buf->page_offset);
5349 		if (buf->sec_page)
5350 			prefetch(page_address(buf->sec_page));
5351 
5352 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5353 		len += buf1_len;
5354 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5355 		len += buf2_len;
5356 
5357 		/* ACS is disabled; strip manually. */
5358 		if (likely(!(status & rx_not_ls))) {
5359 			if (buf2_len) {
5360 				buf2_len -= ETH_FCS_LEN;
5361 				len -= ETH_FCS_LEN;
5362 			} else if (buf1_len) {
5363 				buf1_len -= ETH_FCS_LEN;
5364 				len -= ETH_FCS_LEN;
5365 			}
5366 		}
5367 
5368 		if (!skb) {
5369 			unsigned int pre_len, sync_len;
5370 
5371 			dma_sync_single_for_cpu(priv->device, buf->addr,
5372 						buf1_len, dma_dir);
5373 
5374 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5375 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5376 					 buf->page_offset, buf1_len, true);
5377 
5378 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5379 				  buf->page_offset;
5380 
5381 			ctx.priv = priv;
5382 			ctx.desc = p;
5383 			ctx.ndesc = np;
5384 
5385 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5386 			/* Due xdp_adjust_tail: DMA sync for_device
5387 			 * cover max len CPU touch
5388 			 */
5389 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5390 				   buf->page_offset;
5391 			sync_len = max(sync_len, pre_len);
5392 
5393 			/* For Not XDP_PASS verdict */
5394 			if (IS_ERR(skb)) {
5395 				unsigned int xdp_res = -PTR_ERR(skb);
5396 
5397 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5398 					page_pool_put_page(rx_q->page_pool,
5399 							   virt_to_head_page(ctx.xdp.data),
5400 							   sync_len, true);
5401 					buf->page = NULL;
5402 					rx_dropped++;
5403 
5404 					/* Clear skb as it was set as
5405 					 * status by XDP program.
5406 					 */
5407 					skb = NULL;
5408 
5409 					if (unlikely((status & rx_not_ls)))
5410 						goto read_again;
5411 
5412 					count++;
5413 					continue;
5414 				} else if (xdp_res & (STMMAC_XDP_TX |
5415 						      STMMAC_XDP_REDIRECT)) {
5416 					xdp_status |= xdp_res;
5417 					buf->page = NULL;
5418 					skb = NULL;
5419 					count++;
5420 					continue;
5421 				}
5422 			}
5423 		}
5424 
5425 		if (!skb) {
5426 			/* XDP program may expand or reduce tail */
5427 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5428 
5429 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5430 			if (!skb) {
5431 				rx_dropped++;
5432 				count++;
5433 				goto drain_data;
5434 			}
5435 
5436 			/* XDP program may adjust header */
5437 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5438 			skb_put(skb, buf1_len);
5439 
5440 			/* Data payload copied into SKB, page ready for recycle */
5441 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5442 			buf->page = NULL;
5443 		} else if (buf1_len) {
5444 			dma_sync_single_for_cpu(priv->device, buf->addr,
5445 						buf1_len, dma_dir);
5446 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5447 					buf->page, buf->page_offset, buf1_len,
5448 					priv->dma_conf.dma_buf_sz);
5449 
5450 			/* Data payload appended into SKB */
5451 			skb_mark_for_recycle(skb);
5452 			buf->page = NULL;
5453 		}
5454 
5455 		if (buf2_len) {
5456 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5457 						buf2_len, dma_dir);
5458 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5459 					buf->sec_page, 0, buf2_len,
5460 					priv->dma_conf.dma_buf_sz);
5461 
5462 			/* Data payload appended into SKB */
5463 			skb_mark_for_recycle(skb);
5464 			buf->sec_page = NULL;
5465 		}
5466 
5467 drain_data:
5468 		if (likely(status & rx_not_ls))
5469 			goto read_again;
5470 		if (!skb)
5471 			continue;
5472 
5473 		/* Got entire packet into SKB. Finish it. */
5474 
5475 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5476 		stmmac_rx_vlan(priv->dev, skb);
5477 		skb->protocol = eth_type_trans(skb, priv->dev);
5478 
5479 		if (unlikely(!coe))
5480 			skb_checksum_none_assert(skb);
5481 		else
5482 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5483 
5484 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5485 			skb_set_hash(skb, hash, hash_type);
5486 
5487 		skb_record_rx_queue(skb, queue);
5488 		napi_gro_receive(&ch->rx_napi, skb);
5489 		skb = NULL;
5490 
5491 		rx_packets++;
5492 		rx_bytes += len;
5493 		count++;
5494 	}
5495 
5496 	if (status & rx_not_ls || skb) {
5497 		rx_q->state_saved = true;
5498 		rx_q->state.skb = skb;
5499 		rx_q->state.error = error;
5500 		rx_q->state.len = len;
5501 	}
5502 
5503 	stmmac_finalize_xdp_rx(priv, xdp_status);
5504 
5505 	stmmac_rx_refill(priv, queue);
5506 
5507 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5508 	rx_q->rxq_stats.rx_packets += rx_packets;
5509 	rx_q->rxq_stats.rx_bytes += rx_bytes;
5510 	rx_q->rxq_stats.rx_pkt_n += count;
5511 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5512 
5513 	priv->xstats.rx_dropped += rx_dropped;
5514 	priv->xstats.rx_errors += rx_errors;
5515 
5516 	return count;
5517 }
5518 
5519 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5520 {
5521 	struct stmmac_channel *ch =
5522 		container_of(napi, struct stmmac_channel, rx_napi);
5523 	struct stmmac_priv *priv = ch->priv_data;
5524 	struct stmmac_rx_queue *rx_q;
5525 	u32 chan = ch->index;
5526 	unsigned long flags;
5527 	int work_done;
5528 
5529 	rx_q = &priv->dma_conf.rx_queue[chan];
5530 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5531 	rx_q->rxq_stats.napi_poll++;
5532 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5533 
5534 	work_done = stmmac_rx(priv, budget, chan);
5535 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5536 		unsigned long flags;
5537 
5538 		spin_lock_irqsave(&ch->lock, flags);
5539 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5540 		spin_unlock_irqrestore(&ch->lock, flags);
5541 	}
5542 
5543 	return work_done;
5544 }
5545 
5546 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5547 {
5548 	struct stmmac_channel *ch =
5549 		container_of(napi, struct stmmac_channel, tx_napi);
5550 	struct stmmac_priv *priv = ch->priv_data;
5551 	struct stmmac_tx_queue *tx_q;
5552 	u32 chan = ch->index;
5553 	unsigned long flags;
5554 	int work_done;
5555 
5556 	tx_q = &priv->dma_conf.tx_queue[chan];
5557 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
5558 	tx_q->txq_stats.napi_poll++;
5559 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
5560 
5561 	work_done = stmmac_tx_clean(priv, budget, chan);
5562 	work_done = min(work_done, budget);
5563 
5564 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5565 		unsigned long flags;
5566 
5567 		spin_lock_irqsave(&ch->lock, flags);
5568 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5569 		spin_unlock_irqrestore(&ch->lock, flags);
5570 	}
5571 
5572 	return work_done;
5573 }
5574 
5575 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5576 {
5577 	struct stmmac_channel *ch =
5578 		container_of(napi, struct stmmac_channel, rxtx_napi);
5579 	struct stmmac_priv *priv = ch->priv_data;
5580 	int rx_done, tx_done, rxtx_done;
5581 	struct stmmac_rx_queue *rx_q;
5582 	struct stmmac_tx_queue *tx_q;
5583 	u32 chan = ch->index;
5584 	unsigned long flags;
5585 
5586 	rx_q = &priv->dma_conf.rx_queue[chan];
5587 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5588 	rx_q->rxq_stats.napi_poll++;
5589 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5590 
5591 	tx_q = &priv->dma_conf.tx_queue[chan];
5592 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
5593 	tx_q->txq_stats.napi_poll++;
5594 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
5595 
5596 	tx_done = stmmac_tx_clean(priv, budget, chan);
5597 	tx_done = min(tx_done, budget);
5598 
5599 	rx_done = stmmac_rx_zc(priv, budget, chan);
5600 
5601 	rxtx_done = max(tx_done, rx_done);
5602 
5603 	/* If either TX or RX work is not complete, return budget
5604 	 * and keep pooling
5605 	 */
5606 	if (rxtx_done >= budget)
5607 		return budget;
5608 
5609 	/* all work done, exit the polling mode */
5610 	if (napi_complete_done(napi, rxtx_done)) {
5611 		unsigned long flags;
5612 
5613 		spin_lock_irqsave(&ch->lock, flags);
5614 		/* Both RX and TX work done are compelte,
5615 		 * so enable both RX & TX IRQs.
5616 		 */
5617 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5618 		spin_unlock_irqrestore(&ch->lock, flags);
5619 	}
5620 
5621 	return min(rxtx_done, budget - 1);
5622 }
5623 
5624 /**
5625  *  stmmac_tx_timeout
5626  *  @dev : Pointer to net device structure
5627  *  @txqueue: the index of the hanging transmit queue
5628  *  Description: this function is called when a packet transmission fails to
5629  *   complete within a reasonable time. The driver will mark the error in the
5630  *   netdev structure and arrange for the device to be reset to a sane state
5631  *   in order to transmit a new packet.
5632  */
5633 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5634 {
5635 	struct stmmac_priv *priv = netdev_priv(dev);
5636 
5637 	stmmac_global_err(priv);
5638 }
5639 
5640 /**
5641  *  stmmac_set_rx_mode - entry point for multicast addressing
5642  *  @dev : pointer to the device structure
5643  *  Description:
5644  *  This function is a driver entry point which gets called by the kernel
5645  *  whenever multicast addresses must be enabled/disabled.
5646  *  Return value:
5647  *  void.
5648  */
5649 static void stmmac_set_rx_mode(struct net_device *dev)
5650 {
5651 	struct stmmac_priv *priv = netdev_priv(dev);
5652 
5653 	stmmac_set_filter(priv, priv->hw, dev);
5654 }
5655 
5656 /**
5657  *  stmmac_change_mtu - entry point to change MTU size for the device.
5658  *  @dev : device pointer.
5659  *  @new_mtu : the new MTU size for the device.
5660  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5661  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5662  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5663  *  Return value:
5664  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5665  *  file on failure.
5666  */
5667 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5668 {
5669 	struct stmmac_priv *priv = netdev_priv(dev);
5670 	int txfifosz = priv->plat->tx_fifo_size;
5671 	struct stmmac_dma_conf *dma_conf;
5672 	const int mtu = new_mtu;
5673 	int ret;
5674 
5675 	if (txfifosz == 0)
5676 		txfifosz = priv->dma_cap.tx_fifo_size;
5677 
5678 	txfifosz /= priv->plat->tx_queues_to_use;
5679 
5680 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5681 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5682 		return -EINVAL;
5683 	}
5684 
5685 	new_mtu = STMMAC_ALIGN(new_mtu);
5686 
5687 	/* If condition true, FIFO is too small or MTU too large */
5688 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5689 		return -EINVAL;
5690 
5691 	if (netif_running(dev)) {
5692 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5693 		/* Try to allocate the new DMA conf with the new mtu */
5694 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5695 		if (IS_ERR(dma_conf)) {
5696 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5697 				   mtu);
5698 			return PTR_ERR(dma_conf);
5699 		}
5700 
5701 		stmmac_release(dev);
5702 
5703 		ret = __stmmac_open(dev, dma_conf);
5704 		if (ret) {
5705 			free_dma_desc_resources(priv, dma_conf);
5706 			kfree(dma_conf);
5707 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5708 			return ret;
5709 		}
5710 
5711 		kfree(dma_conf);
5712 
5713 		stmmac_set_rx_mode(dev);
5714 	}
5715 
5716 	dev->mtu = mtu;
5717 	netdev_update_features(dev);
5718 
5719 	return 0;
5720 }
5721 
5722 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5723 					     netdev_features_t features)
5724 {
5725 	struct stmmac_priv *priv = netdev_priv(dev);
5726 
5727 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5728 		features &= ~NETIF_F_RXCSUM;
5729 
5730 	if (!priv->plat->tx_coe)
5731 		features &= ~NETIF_F_CSUM_MASK;
5732 
5733 	/* Some GMAC devices have a bugged Jumbo frame support that
5734 	 * needs to have the Tx COE disabled for oversized frames
5735 	 * (due to limited buffer sizes). In this case we disable
5736 	 * the TX csum insertion in the TDES and not use SF.
5737 	 */
5738 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5739 		features &= ~NETIF_F_CSUM_MASK;
5740 
5741 	/* Disable tso if asked by ethtool */
5742 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5743 		if (features & NETIF_F_TSO)
5744 			priv->tso = true;
5745 		else
5746 			priv->tso = false;
5747 	}
5748 
5749 	return features;
5750 }
5751 
5752 static int stmmac_set_features(struct net_device *netdev,
5753 			       netdev_features_t features)
5754 {
5755 	struct stmmac_priv *priv = netdev_priv(netdev);
5756 
5757 	/* Keep the COE Type in case of csum is supporting */
5758 	if (features & NETIF_F_RXCSUM)
5759 		priv->hw->rx_csum = priv->plat->rx_coe;
5760 	else
5761 		priv->hw->rx_csum = 0;
5762 	/* No check needed because rx_coe has been set before and it will be
5763 	 * fixed in case of issue.
5764 	 */
5765 	stmmac_rx_ipc(priv, priv->hw);
5766 
5767 	if (priv->sph_cap) {
5768 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5769 		u32 chan;
5770 
5771 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5772 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5773 	}
5774 
5775 	return 0;
5776 }
5777 
5778 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5779 {
5780 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5781 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5782 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5783 	bool *hs_enable = &fpe_cfg->hs_enable;
5784 
5785 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5786 		return;
5787 
5788 	/* If LP has sent verify mPacket, LP is FPE capable */
5789 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5790 		if (*lp_state < FPE_STATE_CAPABLE)
5791 			*lp_state = FPE_STATE_CAPABLE;
5792 
5793 		/* If user has requested FPE enable, quickly response */
5794 		if (*hs_enable)
5795 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5796 						MPACKET_RESPONSE);
5797 	}
5798 
5799 	/* If Local has sent verify mPacket, Local is FPE capable */
5800 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5801 		if (*lo_state < FPE_STATE_CAPABLE)
5802 			*lo_state = FPE_STATE_CAPABLE;
5803 	}
5804 
5805 	/* If LP has sent response mPacket, LP is entering FPE ON */
5806 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5807 		*lp_state = FPE_STATE_ENTERING_ON;
5808 
5809 	/* If Local has sent response mPacket, Local is entering FPE ON */
5810 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5811 		*lo_state = FPE_STATE_ENTERING_ON;
5812 
5813 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5814 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5815 	    priv->fpe_wq) {
5816 		queue_work(priv->fpe_wq, &priv->fpe_task);
5817 	}
5818 }
5819 
5820 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5821 {
5822 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5823 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5824 	u32 queues_count;
5825 	u32 queue;
5826 	bool xmac;
5827 
5828 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5829 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5830 
5831 	if (priv->irq_wake)
5832 		pm_wakeup_event(priv->device, 0);
5833 
5834 	if (priv->dma_cap.estsel)
5835 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5836 				      &priv->xstats, tx_cnt);
5837 
5838 	if (priv->dma_cap.fpesel) {
5839 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5840 						   priv->dev);
5841 
5842 		stmmac_fpe_event_status(priv, status);
5843 	}
5844 
5845 	/* To handle GMAC own interrupts */
5846 	if ((priv->plat->has_gmac) || xmac) {
5847 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5848 
5849 		if (unlikely(status)) {
5850 			/* For LPI we need to save the tx status */
5851 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5852 				priv->tx_path_in_lpi_mode = true;
5853 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5854 				priv->tx_path_in_lpi_mode = false;
5855 		}
5856 
5857 		for (queue = 0; queue < queues_count; queue++) {
5858 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5859 							    queue);
5860 		}
5861 
5862 		/* PCS link status */
5863 		if (priv->hw->pcs &&
5864 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5865 			if (priv->xstats.pcs_link)
5866 				netif_carrier_on(priv->dev);
5867 			else
5868 				netif_carrier_off(priv->dev);
5869 		}
5870 
5871 		stmmac_timestamp_interrupt(priv, priv);
5872 	}
5873 }
5874 
5875 /**
5876  *  stmmac_interrupt - main ISR
5877  *  @irq: interrupt number.
5878  *  @dev_id: to pass the net device pointer.
5879  *  Description: this is the main driver interrupt service routine.
5880  *  It can call:
5881  *  o DMA service routine (to manage incoming frame reception and transmission
5882  *    status)
5883  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5884  *    interrupts.
5885  */
5886 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5887 {
5888 	struct net_device *dev = (struct net_device *)dev_id;
5889 	struct stmmac_priv *priv = netdev_priv(dev);
5890 
5891 	/* Check if adapter is up */
5892 	if (test_bit(STMMAC_DOWN, &priv->state))
5893 		return IRQ_HANDLED;
5894 
5895 	/* Check if a fatal error happened */
5896 	if (stmmac_safety_feat_interrupt(priv))
5897 		return IRQ_HANDLED;
5898 
5899 	/* To handle Common interrupts */
5900 	stmmac_common_interrupt(priv);
5901 
5902 	/* To handle DMA interrupts */
5903 	stmmac_dma_interrupt(priv);
5904 
5905 	return IRQ_HANDLED;
5906 }
5907 
5908 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5909 {
5910 	struct net_device *dev = (struct net_device *)dev_id;
5911 	struct stmmac_priv *priv = netdev_priv(dev);
5912 
5913 	if (unlikely(!dev)) {
5914 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5915 		return IRQ_NONE;
5916 	}
5917 
5918 	/* Check if adapter is up */
5919 	if (test_bit(STMMAC_DOWN, &priv->state))
5920 		return IRQ_HANDLED;
5921 
5922 	/* To handle Common interrupts */
5923 	stmmac_common_interrupt(priv);
5924 
5925 	return IRQ_HANDLED;
5926 }
5927 
5928 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5929 {
5930 	struct net_device *dev = (struct net_device *)dev_id;
5931 	struct stmmac_priv *priv = netdev_priv(dev);
5932 
5933 	if (unlikely(!dev)) {
5934 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5935 		return IRQ_NONE;
5936 	}
5937 
5938 	/* Check if adapter is up */
5939 	if (test_bit(STMMAC_DOWN, &priv->state))
5940 		return IRQ_HANDLED;
5941 
5942 	/* Check if a fatal error happened */
5943 	stmmac_safety_feat_interrupt(priv);
5944 
5945 	return IRQ_HANDLED;
5946 }
5947 
5948 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5949 {
5950 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5951 	struct stmmac_dma_conf *dma_conf;
5952 	int chan = tx_q->queue_index;
5953 	struct stmmac_priv *priv;
5954 	int status;
5955 
5956 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5957 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5958 
5959 	if (unlikely(!data)) {
5960 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5961 		return IRQ_NONE;
5962 	}
5963 
5964 	/* Check if adapter is up */
5965 	if (test_bit(STMMAC_DOWN, &priv->state))
5966 		return IRQ_HANDLED;
5967 
5968 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5969 
5970 	if (unlikely(status & tx_hard_error_bump_tc)) {
5971 		/* Try to bump up the dma threshold on this failure */
5972 		stmmac_bump_dma_threshold(priv, chan);
5973 	} else if (unlikely(status == tx_hard_error)) {
5974 		stmmac_tx_err(priv, chan);
5975 	}
5976 
5977 	return IRQ_HANDLED;
5978 }
5979 
5980 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5981 {
5982 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5983 	struct stmmac_dma_conf *dma_conf;
5984 	int chan = rx_q->queue_index;
5985 	struct stmmac_priv *priv;
5986 
5987 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5988 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5989 
5990 	if (unlikely(!data)) {
5991 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5992 		return IRQ_NONE;
5993 	}
5994 
5995 	/* Check if adapter is up */
5996 	if (test_bit(STMMAC_DOWN, &priv->state))
5997 		return IRQ_HANDLED;
5998 
5999 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6000 
6001 	return IRQ_HANDLED;
6002 }
6003 
6004 #ifdef CONFIG_NET_POLL_CONTROLLER
6005 /* Polling receive - used by NETCONSOLE and other diagnostic tools
6006  * to allow network I/O with interrupts disabled.
6007  */
6008 static void stmmac_poll_controller(struct net_device *dev)
6009 {
6010 	struct stmmac_priv *priv = netdev_priv(dev);
6011 	int i;
6012 
6013 	/* If adapter is down, do nothing */
6014 	if (test_bit(STMMAC_DOWN, &priv->state))
6015 		return;
6016 
6017 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
6018 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
6019 			stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
6020 
6021 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
6022 			stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
6023 	} else {
6024 		disable_irq(dev->irq);
6025 		stmmac_interrupt(dev->irq, dev);
6026 		enable_irq(dev->irq);
6027 	}
6028 }
6029 #endif
6030 
6031 /**
6032  *  stmmac_ioctl - Entry point for the Ioctl
6033  *  @dev: Device pointer.
6034  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6035  *  a proprietary structure used to pass information to the driver.
6036  *  @cmd: IOCTL command
6037  *  Description:
6038  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6039  */
6040 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6041 {
6042 	struct stmmac_priv *priv = netdev_priv (dev);
6043 	int ret = -EOPNOTSUPP;
6044 
6045 	if (!netif_running(dev))
6046 		return -EINVAL;
6047 
6048 	switch (cmd) {
6049 	case SIOCGMIIPHY:
6050 	case SIOCGMIIREG:
6051 	case SIOCSMIIREG:
6052 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6053 		break;
6054 	case SIOCSHWTSTAMP:
6055 		ret = stmmac_hwtstamp_set(dev, rq);
6056 		break;
6057 	case SIOCGHWTSTAMP:
6058 		ret = stmmac_hwtstamp_get(dev, rq);
6059 		break;
6060 	default:
6061 		break;
6062 	}
6063 
6064 	return ret;
6065 }
6066 
6067 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6068 				    void *cb_priv)
6069 {
6070 	struct stmmac_priv *priv = cb_priv;
6071 	int ret = -EOPNOTSUPP;
6072 
6073 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6074 		return ret;
6075 
6076 	__stmmac_disable_all_queues(priv);
6077 
6078 	switch (type) {
6079 	case TC_SETUP_CLSU32:
6080 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6081 		break;
6082 	case TC_SETUP_CLSFLOWER:
6083 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6084 		break;
6085 	default:
6086 		break;
6087 	}
6088 
6089 	stmmac_enable_all_queues(priv);
6090 	return ret;
6091 }
6092 
6093 static LIST_HEAD(stmmac_block_cb_list);
6094 
6095 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6096 			   void *type_data)
6097 {
6098 	struct stmmac_priv *priv = netdev_priv(ndev);
6099 
6100 	switch (type) {
6101 	case TC_QUERY_CAPS:
6102 		return stmmac_tc_query_caps(priv, priv, type_data);
6103 	case TC_SETUP_BLOCK:
6104 		return flow_block_cb_setup_simple(type_data,
6105 						  &stmmac_block_cb_list,
6106 						  stmmac_setup_tc_block_cb,
6107 						  priv, priv, true);
6108 	case TC_SETUP_QDISC_CBS:
6109 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6110 	case TC_SETUP_QDISC_TAPRIO:
6111 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6112 	case TC_SETUP_QDISC_ETF:
6113 		return stmmac_tc_setup_etf(priv, priv, type_data);
6114 	default:
6115 		return -EOPNOTSUPP;
6116 	}
6117 }
6118 
6119 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6120 			       struct net_device *sb_dev)
6121 {
6122 	int gso = skb_shinfo(skb)->gso_type;
6123 
6124 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6125 		/*
6126 		 * There is no way to determine the number of TSO/USO
6127 		 * capable Queues. Let's use always the Queue 0
6128 		 * because if TSO/USO is supported then at least this
6129 		 * one will be capable.
6130 		 */
6131 		return 0;
6132 	}
6133 
6134 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6135 }
6136 
6137 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6138 {
6139 	struct stmmac_priv *priv = netdev_priv(ndev);
6140 	int ret = 0;
6141 
6142 	ret = pm_runtime_resume_and_get(priv->device);
6143 	if (ret < 0)
6144 		return ret;
6145 
6146 	ret = eth_mac_addr(ndev, addr);
6147 	if (ret)
6148 		goto set_mac_error;
6149 
6150 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6151 
6152 set_mac_error:
6153 	pm_runtime_put(priv->device);
6154 
6155 	return ret;
6156 }
6157 
6158 #ifdef CONFIG_DEBUG_FS
6159 static struct dentry *stmmac_fs_dir;
6160 
6161 static void sysfs_display_ring(void *head, int size, int extend_desc,
6162 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6163 {
6164 	int i;
6165 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6166 	struct dma_desc *p = (struct dma_desc *)head;
6167 	dma_addr_t dma_addr;
6168 
6169 	for (i = 0; i < size; i++) {
6170 		if (extend_desc) {
6171 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6172 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6173 				   i, &dma_addr,
6174 				   le32_to_cpu(ep->basic.des0),
6175 				   le32_to_cpu(ep->basic.des1),
6176 				   le32_to_cpu(ep->basic.des2),
6177 				   le32_to_cpu(ep->basic.des3));
6178 			ep++;
6179 		} else {
6180 			dma_addr = dma_phy_addr + i * sizeof(*p);
6181 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6182 				   i, &dma_addr,
6183 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6184 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6185 			p++;
6186 		}
6187 		seq_printf(seq, "\n");
6188 	}
6189 }
6190 
6191 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6192 {
6193 	struct net_device *dev = seq->private;
6194 	struct stmmac_priv *priv = netdev_priv(dev);
6195 	u32 rx_count = priv->plat->rx_queues_to_use;
6196 	u32 tx_count = priv->plat->tx_queues_to_use;
6197 	u32 queue;
6198 
6199 	if ((dev->flags & IFF_UP) == 0)
6200 		return 0;
6201 
6202 	for (queue = 0; queue < rx_count; queue++) {
6203 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6204 
6205 		seq_printf(seq, "RX Queue %d:\n", queue);
6206 
6207 		if (priv->extend_desc) {
6208 			seq_printf(seq, "Extended descriptor ring:\n");
6209 			sysfs_display_ring((void *)rx_q->dma_erx,
6210 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6211 		} else {
6212 			seq_printf(seq, "Descriptor ring:\n");
6213 			sysfs_display_ring((void *)rx_q->dma_rx,
6214 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6215 		}
6216 	}
6217 
6218 	for (queue = 0; queue < tx_count; queue++) {
6219 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6220 
6221 		seq_printf(seq, "TX Queue %d:\n", queue);
6222 
6223 		if (priv->extend_desc) {
6224 			seq_printf(seq, "Extended descriptor ring:\n");
6225 			sysfs_display_ring((void *)tx_q->dma_etx,
6226 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6227 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6228 			seq_printf(seq, "Descriptor ring:\n");
6229 			sysfs_display_ring((void *)tx_q->dma_tx,
6230 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6231 		}
6232 	}
6233 
6234 	return 0;
6235 }
6236 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6237 
6238 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6239 {
6240 	static const char * const dwxgmac_timestamp_source[] = {
6241 		"None",
6242 		"Internal",
6243 		"External",
6244 		"Both",
6245 	};
6246 	static const char * const dwxgmac_safety_feature_desc[] = {
6247 		"No",
6248 		"All Safety Features with ECC and Parity",
6249 		"All Safety Features without ECC or Parity",
6250 		"All Safety Features with Parity Only",
6251 		"ECC Only",
6252 		"UNDEFINED",
6253 		"UNDEFINED",
6254 		"UNDEFINED",
6255 	};
6256 	struct net_device *dev = seq->private;
6257 	struct stmmac_priv *priv = netdev_priv(dev);
6258 
6259 	if (!priv->hw_cap_support) {
6260 		seq_printf(seq, "DMA HW features not supported\n");
6261 		return 0;
6262 	}
6263 
6264 	seq_printf(seq, "==============================\n");
6265 	seq_printf(seq, "\tDMA HW features\n");
6266 	seq_printf(seq, "==============================\n");
6267 
6268 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6269 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6270 	seq_printf(seq, "\t1000 Mbps: %s\n",
6271 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6272 	seq_printf(seq, "\tHalf duplex: %s\n",
6273 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6274 	if (priv->plat->has_xgmac) {
6275 		seq_printf(seq,
6276 			   "\tNumber of Additional MAC address registers: %d\n",
6277 			   priv->dma_cap.multi_addr);
6278 	} else {
6279 		seq_printf(seq, "\tHash Filter: %s\n",
6280 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6281 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6282 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6283 	}
6284 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6285 		   (priv->dma_cap.pcs) ? "Y" : "N");
6286 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6287 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6288 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6289 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6290 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6291 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6292 	seq_printf(seq, "\tRMON module: %s\n",
6293 		   (priv->dma_cap.rmon) ? "Y" : "N");
6294 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6295 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6296 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6297 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6298 	if (priv->plat->has_xgmac)
6299 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6300 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6301 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6302 		   (priv->dma_cap.eee) ? "Y" : "N");
6303 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6304 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6305 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6306 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6307 	    priv->plat->has_xgmac) {
6308 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6309 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6310 	} else {
6311 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6312 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6313 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6314 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6315 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6316 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6317 	}
6318 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6319 		   priv->dma_cap.number_rx_channel);
6320 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6321 		   priv->dma_cap.number_tx_channel);
6322 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6323 		   priv->dma_cap.number_rx_queues);
6324 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6325 		   priv->dma_cap.number_tx_queues);
6326 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6327 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6328 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6329 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6330 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6331 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6332 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6333 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6334 		   priv->dma_cap.pps_out_num);
6335 	seq_printf(seq, "\tSafety Features: %s\n",
6336 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6337 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6338 		   priv->dma_cap.frpsel ? "Y" : "N");
6339 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6340 		   priv->dma_cap.host_dma_width);
6341 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6342 		   priv->dma_cap.rssen ? "Y" : "N");
6343 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6344 		   priv->dma_cap.vlhash ? "Y" : "N");
6345 	seq_printf(seq, "\tSplit Header: %s\n",
6346 		   priv->dma_cap.sphen ? "Y" : "N");
6347 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6348 		   priv->dma_cap.vlins ? "Y" : "N");
6349 	seq_printf(seq, "\tDouble VLAN: %s\n",
6350 		   priv->dma_cap.dvlan ? "Y" : "N");
6351 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6352 		   priv->dma_cap.l3l4fnum);
6353 	seq_printf(seq, "\tARP Offloading: %s\n",
6354 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6355 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6356 		   priv->dma_cap.estsel ? "Y" : "N");
6357 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6358 		   priv->dma_cap.fpesel ? "Y" : "N");
6359 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6360 		   priv->dma_cap.tbssel ? "Y" : "N");
6361 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6362 		   priv->dma_cap.tbs_ch_num);
6363 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6364 		   priv->dma_cap.sgfsel ? "Y" : "N");
6365 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6366 		   BIT(priv->dma_cap.ttsfd) >> 1);
6367 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6368 		   priv->dma_cap.numtc);
6369 	seq_printf(seq, "\tDCB Feature: %s\n",
6370 		   priv->dma_cap.dcben ? "Y" : "N");
6371 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6372 		   priv->dma_cap.advthword ? "Y" : "N");
6373 	seq_printf(seq, "\tPTP Offload: %s\n",
6374 		   priv->dma_cap.ptoen ? "Y" : "N");
6375 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6376 		   priv->dma_cap.osten ? "Y" : "N");
6377 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6378 		   priv->dma_cap.pfcen ? "Y" : "N");
6379 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6380 		   BIT(priv->dma_cap.frpes) << 6);
6381 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6382 		   BIT(priv->dma_cap.frpbs) << 6);
6383 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6384 		   priv->dma_cap.frppipe_num);
6385 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6386 		   priv->dma_cap.nrvf_num ?
6387 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6388 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6389 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6390 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6391 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6392 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6393 		   priv->dma_cap.cbtisel ? "Y" : "N");
6394 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6395 		   priv->dma_cap.aux_snapshot_n);
6396 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6397 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6398 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6399 		   priv->dma_cap.edma ? "Y" : "N");
6400 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6401 		   priv->dma_cap.ediffc ? "Y" : "N");
6402 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6403 		   priv->dma_cap.vxn ? "Y" : "N");
6404 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6405 		   priv->dma_cap.dbgmem ? "Y" : "N");
6406 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6407 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6408 	return 0;
6409 }
6410 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6411 
6412 /* Use network device events to rename debugfs file entries.
6413  */
6414 static int stmmac_device_event(struct notifier_block *unused,
6415 			       unsigned long event, void *ptr)
6416 {
6417 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6418 	struct stmmac_priv *priv = netdev_priv(dev);
6419 
6420 	if (dev->netdev_ops != &stmmac_netdev_ops)
6421 		goto done;
6422 
6423 	switch (event) {
6424 	case NETDEV_CHANGENAME:
6425 		if (priv->dbgfs_dir)
6426 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6427 							 priv->dbgfs_dir,
6428 							 stmmac_fs_dir,
6429 							 dev->name);
6430 		break;
6431 	}
6432 done:
6433 	return NOTIFY_DONE;
6434 }
6435 
6436 static struct notifier_block stmmac_notifier = {
6437 	.notifier_call = stmmac_device_event,
6438 };
6439 
6440 static void stmmac_init_fs(struct net_device *dev)
6441 {
6442 	struct stmmac_priv *priv = netdev_priv(dev);
6443 
6444 	rtnl_lock();
6445 
6446 	/* Create per netdev entries */
6447 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6448 
6449 	/* Entry to report DMA RX/TX rings */
6450 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6451 			    &stmmac_rings_status_fops);
6452 
6453 	/* Entry to report the DMA HW features */
6454 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6455 			    &stmmac_dma_cap_fops);
6456 
6457 	rtnl_unlock();
6458 }
6459 
6460 static void stmmac_exit_fs(struct net_device *dev)
6461 {
6462 	struct stmmac_priv *priv = netdev_priv(dev);
6463 
6464 	debugfs_remove_recursive(priv->dbgfs_dir);
6465 }
6466 #endif /* CONFIG_DEBUG_FS */
6467 
6468 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6469 {
6470 	unsigned char *data = (unsigned char *)&vid_le;
6471 	unsigned char data_byte = 0;
6472 	u32 crc = ~0x0;
6473 	u32 temp = 0;
6474 	int i, bits;
6475 
6476 	bits = get_bitmask_order(VLAN_VID_MASK);
6477 	for (i = 0; i < bits; i++) {
6478 		if ((i % 8) == 0)
6479 			data_byte = data[i / 8];
6480 
6481 		temp = ((crc & 1) ^ data_byte) & 1;
6482 		crc >>= 1;
6483 		data_byte >>= 1;
6484 
6485 		if (temp)
6486 			crc ^= 0xedb88320;
6487 	}
6488 
6489 	return crc;
6490 }
6491 
6492 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6493 {
6494 	u32 crc, hash = 0;
6495 	__le16 pmatch = 0;
6496 	int count = 0;
6497 	u16 vid = 0;
6498 
6499 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6500 		__le16 vid_le = cpu_to_le16(vid);
6501 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6502 		hash |= (1 << crc);
6503 		count++;
6504 	}
6505 
6506 	if (!priv->dma_cap.vlhash) {
6507 		if (count > 2) /* VID = 0 always passes filter */
6508 			return -EOPNOTSUPP;
6509 
6510 		pmatch = cpu_to_le16(vid);
6511 		hash = 0;
6512 	}
6513 
6514 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6515 }
6516 
6517 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6518 {
6519 	struct stmmac_priv *priv = netdev_priv(ndev);
6520 	bool is_double = false;
6521 	int ret;
6522 
6523 	ret = pm_runtime_resume_and_get(priv->device);
6524 	if (ret < 0)
6525 		return ret;
6526 
6527 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6528 		is_double = true;
6529 
6530 	set_bit(vid, priv->active_vlans);
6531 	ret = stmmac_vlan_update(priv, is_double);
6532 	if (ret) {
6533 		clear_bit(vid, priv->active_vlans);
6534 		goto err_pm_put;
6535 	}
6536 
6537 	if (priv->hw->num_vlan) {
6538 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6539 		if (ret)
6540 			goto err_pm_put;
6541 	}
6542 err_pm_put:
6543 	pm_runtime_put(priv->device);
6544 
6545 	return ret;
6546 }
6547 
6548 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6549 {
6550 	struct stmmac_priv *priv = netdev_priv(ndev);
6551 	bool is_double = false;
6552 	int ret;
6553 
6554 	ret = pm_runtime_resume_and_get(priv->device);
6555 	if (ret < 0)
6556 		return ret;
6557 
6558 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6559 		is_double = true;
6560 
6561 	clear_bit(vid, priv->active_vlans);
6562 
6563 	if (priv->hw->num_vlan) {
6564 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6565 		if (ret)
6566 			goto del_vlan_error;
6567 	}
6568 
6569 	ret = stmmac_vlan_update(priv, is_double);
6570 
6571 del_vlan_error:
6572 	pm_runtime_put(priv->device);
6573 
6574 	return ret;
6575 }
6576 
6577 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6578 {
6579 	struct stmmac_priv *priv = netdev_priv(dev);
6580 
6581 	switch (bpf->command) {
6582 	case XDP_SETUP_PROG:
6583 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6584 	case XDP_SETUP_XSK_POOL:
6585 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6586 					     bpf->xsk.queue_id);
6587 	default:
6588 		return -EOPNOTSUPP;
6589 	}
6590 }
6591 
6592 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6593 			   struct xdp_frame **frames, u32 flags)
6594 {
6595 	struct stmmac_priv *priv = netdev_priv(dev);
6596 	int cpu = smp_processor_id();
6597 	struct netdev_queue *nq;
6598 	int i, nxmit = 0;
6599 	int queue;
6600 
6601 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6602 		return -ENETDOWN;
6603 
6604 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6605 		return -EINVAL;
6606 
6607 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6608 	nq = netdev_get_tx_queue(priv->dev, queue);
6609 
6610 	__netif_tx_lock(nq, cpu);
6611 	/* Avoids TX time-out as we are sharing with slow path */
6612 	txq_trans_cond_update(nq);
6613 
6614 	for (i = 0; i < num_frames; i++) {
6615 		int res;
6616 
6617 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6618 		if (res == STMMAC_XDP_CONSUMED)
6619 			break;
6620 
6621 		nxmit++;
6622 	}
6623 
6624 	if (flags & XDP_XMIT_FLUSH) {
6625 		stmmac_flush_tx_descriptors(priv, queue);
6626 		stmmac_tx_timer_arm(priv, queue);
6627 	}
6628 
6629 	__netif_tx_unlock(nq);
6630 
6631 	return nxmit;
6632 }
6633 
6634 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6635 {
6636 	struct stmmac_channel *ch = &priv->channel[queue];
6637 	unsigned long flags;
6638 
6639 	spin_lock_irqsave(&ch->lock, flags);
6640 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6641 	spin_unlock_irqrestore(&ch->lock, flags);
6642 
6643 	stmmac_stop_rx_dma(priv, queue);
6644 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6645 }
6646 
6647 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6648 {
6649 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6650 	struct stmmac_channel *ch = &priv->channel[queue];
6651 	unsigned long flags;
6652 	u32 buf_size;
6653 	int ret;
6654 
6655 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6656 	if (ret) {
6657 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6658 		return;
6659 	}
6660 
6661 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6662 	if (ret) {
6663 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6664 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6665 		return;
6666 	}
6667 
6668 	stmmac_reset_rx_queue(priv, queue);
6669 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6670 
6671 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6672 			    rx_q->dma_rx_phy, rx_q->queue_index);
6673 
6674 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6675 			     sizeof(struct dma_desc));
6676 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6677 			       rx_q->rx_tail_addr, rx_q->queue_index);
6678 
6679 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6680 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6681 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6682 				      buf_size,
6683 				      rx_q->queue_index);
6684 	} else {
6685 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6686 				      priv->dma_conf.dma_buf_sz,
6687 				      rx_q->queue_index);
6688 	}
6689 
6690 	stmmac_start_rx_dma(priv, queue);
6691 
6692 	spin_lock_irqsave(&ch->lock, flags);
6693 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6694 	spin_unlock_irqrestore(&ch->lock, flags);
6695 }
6696 
6697 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6698 {
6699 	struct stmmac_channel *ch = &priv->channel[queue];
6700 	unsigned long flags;
6701 
6702 	spin_lock_irqsave(&ch->lock, flags);
6703 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6704 	spin_unlock_irqrestore(&ch->lock, flags);
6705 
6706 	stmmac_stop_tx_dma(priv, queue);
6707 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6708 }
6709 
6710 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6711 {
6712 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6713 	struct stmmac_channel *ch = &priv->channel[queue];
6714 	unsigned long flags;
6715 	int ret;
6716 
6717 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6718 	if (ret) {
6719 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6720 		return;
6721 	}
6722 
6723 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6724 	if (ret) {
6725 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6726 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6727 		return;
6728 	}
6729 
6730 	stmmac_reset_tx_queue(priv, queue);
6731 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6732 
6733 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6734 			    tx_q->dma_tx_phy, tx_q->queue_index);
6735 
6736 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6737 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6738 
6739 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6740 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6741 			       tx_q->tx_tail_addr, tx_q->queue_index);
6742 
6743 	stmmac_start_tx_dma(priv, queue);
6744 
6745 	spin_lock_irqsave(&ch->lock, flags);
6746 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6747 	spin_unlock_irqrestore(&ch->lock, flags);
6748 }
6749 
6750 void stmmac_xdp_release(struct net_device *dev)
6751 {
6752 	struct stmmac_priv *priv = netdev_priv(dev);
6753 	u32 chan;
6754 
6755 	/* Ensure tx function is not running */
6756 	netif_tx_disable(dev);
6757 
6758 	/* Disable NAPI process */
6759 	stmmac_disable_all_queues(priv);
6760 
6761 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6762 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6763 
6764 	/* Free the IRQ lines */
6765 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6766 
6767 	/* Stop TX/RX DMA channels */
6768 	stmmac_stop_all_dma(priv);
6769 
6770 	/* Release and free the Rx/Tx resources */
6771 	free_dma_desc_resources(priv, &priv->dma_conf);
6772 
6773 	/* Disable the MAC Rx/Tx */
6774 	stmmac_mac_set(priv, priv->ioaddr, false);
6775 
6776 	/* set trans_start so we don't get spurious
6777 	 * watchdogs during reset
6778 	 */
6779 	netif_trans_update(dev);
6780 	netif_carrier_off(dev);
6781 }
6782 
6783 int stmmac_xdp_open(struct net_device *dev)
6784 {
6785 	struct stmmac_priv *priv = netdev_priv(dev);
6786 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6787 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6788 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6789 	struct stmmac_rx_queue *rx_q;
6790 	struct stmmac_tx_queue *tx_q;
6791 	u32 buf_size;
6792 	bool sph_en;
6793 	u32 chan;
6794 	int ret;
6795 
6796 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6797 	if (ret < 0) {
6798 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6799 			   __func__);
6800 		goto dma_desc_error;
6801 	}
6802 
6803 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6804 	if (ret < 0) {
6805 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6806 			   __func__);
6807 		goto init_error;
6808 	}
6809 
6810 	stmmac_reset_queues_param(priv);
6811 
6812 	/* DMA CSR Channel configuration */
6813 	for (chan = 0; chan < dma_csr_ch; chan++) {
6814 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6815 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6816 	}
6817 
6818 	/* Adjust Split header */
6819 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6820 
6821 	/* DMA RX Channel Configuration */
6822 	for (chan = 0; chan < rx_cnt; chan++) {
6823 		rx_q = &priv->dma_conf.rx_queue[chan];
6824 
6825 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6826 				    rx_q->dma_rx_phy, chan);
6827 
6828 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6829 				     (rx_q->buf_alloc_num *
6830 				      sizeof(struct dma_desc));
6831 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6832 				       rx_q->rx_tail_addr, chan);
6833 
6834 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6835 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6836 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6837 					      buf_size,
6838 					      rx_q->queue_index);
6839 		} else {
6840 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6841 					      priv->dma_conf.dma_buf_sz,
6842 					      rx_q->queue_index);
6843 		}
6844 
6845 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6846 	}
6847 
6848 	/* DMA TX Channel Configuration */
6849 	for (chan = 0; chan < tx_cnt; chan++) {
6850 		tx_q = &priv->dma_conf.tx_queue[chan];
6851 
6852 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6853 				    tx_q->dma_tx_phy, chan);
6854 
6855 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6856 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6857 				       tx_q->tx_tail_addr, chan);
6858 
6859 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6860 		tx_q->txtimer.function = stmmac_tx_timer;
6861 	}
6862 
6863 	/* Enable the MAC Rx/Tx */
6864 	stmmac_mac_set(priv, priv->ioaddr, true);
6865 
6866 	/* Start Rx & Tx DMA Channels */
6867 	stmmac_start_all_dma(priv);
6868 
6869 	ret = stmmac_request_irq(dev);
6870 	if (ret)
6871 		goto irq_error;
6872 
6873 	/* Enable NAPI process*/
6874 	stmmac_enable_all_queues(priv);
6875 	netif_carrier_on(dev);
6876 	netif_tx_start_all_queues(dev);
6877 	stmmac_enable_all_dma_irq(priv);
6878 
6879 	return 0;
6880 
6881 irq_error:
6882 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6883 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6884 
6885 	stmmac_hw_teardown(dev);
6886 init_error:
6887 	free_dma_desc_resources(priv, &priv->dma_conf);
6888 dma_desc_error:
6889 	return ret;
6890 }
6891 
6892 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6893 {
6894 	struct stmmac_priv *priv = netdev_priv(dev);
6895 	struct stmmac_rx_queue *rx_q;
6896 	struct stmmac_tx_queue *tx_q;
6897 	struct stmmac_channel *ch;
6898 
6899 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6900 	    !netif_carrier_ok(priv->dev))
6901 		return -ENETDOWN;
6902 
6903 	if (!stmmac_xdp_is_enabled(priv))
6904 		return -EINVAL;
6905 
6906 	if (queue >= priv->plat->rx_queues_to_use ||
6907 	    queue >= priv->plat->tx_queues_to_use)
6908 		return -EINVAL;
6909 
6910 	rx_q = &priv->dma_conf.rx_queue[queue];
6911 	tx_q = &priv->dma_conf.tx_queue[queue];
6912 	ch = &priv->channel[queue];
6913 
6914 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6915 		return -EINVAL;
6916 
6917 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6918 		/* EQoS does not have per-DMA channel SW interrupt,
6919 		 * so we schedule RX Napi straight-away.
6920 		 */
6921 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6922 			__napi_schedule(&ch->rxtx_napi);
6923 	}
6924 
6925 	return 0;
6926 }
6927 
6928 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6929 {
6930 	struct stmmac_priv *priv = netdev_priv(dev);
6931 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6932 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6933 	unsigned int start;
6934 	int q;
6935 
6936 	for (q = 0; q < tx_cnt; q++) {
6937 		struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
6938 		u64 tx_packets;
6939 		u64 tx_bytes;
6940 
6941 		do {
6942 			start = u64_stats_fetch_begin(&txq_stats->syncp);
6943 			tx_packets = txq_stats->tx_packets;
6944 			tx_bytes   = txq_stats->tx_bytes;
6945 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6946 
6947 		stats->tx_packets += tx_packets;
6948 		stats->tx_bytes += tx_bytes;
6949 	}
6950 
6951 	for (q = 0; q < rx_cnt; q++) {
6952 		struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
6953 		u64 rx_packets;
6954 		u64 rx_bytes;
6955 
6956 		do {
6957 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
6958 			rx_packets = rxq_stats->rx_packets;
6959 			rx_bytes   = rxq_stats->rx_bytes;
6960 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6961 
6962 		stats->rx_packets += rx_packets;
6963 		stats->rx_bytes += rx_bytes;
6964 	}
6965 
6966 	stats->rx_dropped = priv->xstats.rx_dropped;
6967 	stats->rx_errors = priv->xstats.rx_errors;
6968 	stats->tx_dropped = priv->xstats.tx_dropped;
6969 	stats->tx_errors = priv->xstats.tx_errors;
6970 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6971 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6972 	stats->rx_length_errors = priv->xstats.rx_length;
6973 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6974 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6975 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6976 }
6977 
6978 static const struct net_device_ops stmmac_netdev_ops = {
6979 	.ndo_open = stmmac_open,
6980 	.ndo_start_xmit = stmmac_xmit,
6981 	.ndo_stop = stmmac_release,
6982 	.ndo_change_mtu = stmmac_change_mtu,
6983 	.ndo_fix_features = stmmac_fix_features,
6984 	.ndo_set_features = stmmac_set_features,
6985 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6986 	.ndo_tx_timeout = stmmac_tx_timeout,
6987 	.ndo_eth_ioctl = stmmac_ioctl,
6988 	.ndo_get_stats64 = stmmac_get_stats64,
6989 	.ndo_setup_tc = stmmac_setup_tc,
6990 	.ndo_select_queue = stmmac_select_queue,
6991 #ifdef CONFIG_NET_POLL_CONTROLLER
6992 	.ndo_poll_controller = stmmac_poll_controller,
6993 #endif
6994 	.ndo_set_mac_address = stmmac_set_mac_address,
6995 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6996 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6997 	.ndo_bpf = stmmac_bpf,
6998 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6999 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7000 };
7001 
7002 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7003 {
7004 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7005 		return;
7006 	if (test_bit(STMMAC_DOWN, &priv->state))
7007 		return;
7008 
7009 	netdev_err(priv->dev, "Reset adapter.\n");
7010 
7011 	rtnl_lock();
7012 	netif_trans_update(priv->dev);
7013 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7014 		usleep_range(1000, 2000);
7015 
7016 	set_bit(STMMAC_DOWN, &priv->state);
7017 	dev_close(priv->dev);
7018 	dev_open(priv->dev, NULL);
7019 	clear_bit(STMMAC_DOWN, &priv->state);
7020 	clear_bit(STMMAC_RESETING, &priv->state);
7021 	rtnl_unlock();
7022 }
7023 
7024 static void stmmac_service_task(struct work_struct *work)
7025 {
7026 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7027 			service_task);
7028 
7029 	stmmac_reset_subtask(priv);
7030 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7031 }
7032 
7033 /**
7034  *  stmmac_hw_init - Init the MAC device
7035  *  @priv: driver private structure
7036  *  Description: this function is to configure the MAC device according to
7037  *  some platform parameters or the HW capability register. It prepares the
7038  *  driver to use either ring or chain modes and to setup either enhanced or
7039  *  normal descriptors.
7040  */
7041 static int stmmac_hw_init(struct stmmac_priv *priv)
7042 {
7043 	int ret;
7044 
7045 	/* dwmac-sun8i only work in chain mode */
7046 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7047 		chain_mode = 1;
7048 	priv->chain_mode = chain_mode;
7049 
7050 	/* Initialize HW Interface */
7051 	ret = stmmac_hwif_init(priv);
7052 	if (ret)
7053 		return ret;
7054 
7055 	/* Get the HW capability (new GMAC newer than 3.50a) */
7056 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7057 	if (priv->hw_cap_support) {
7058 		dev_info(priv->device, "DMA HW capability register supported\n");
7059 
7060 		/* We can override some gmac/dma configuration fields: e.g.
7061 		 * enh_desc, tx_coe (e.g. that are passed through the
7062 		 * platform) with the values from the HW capability
7063 		 * register (if supported).
7064 		 */
7065 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7066 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7067 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7068 		priv->hw->pmt = priv->plat->pmt;
7069 		if (priv->dma_cap.hash_tb_sz) {
7070 			priv->hw->multicast_filter_bins =
7071 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7072 			priv->hw->mcast_bits_log2 =
7073 					ilog2(priv->hw->multicast_filter_bins);
7074 		}
7075 
7076 		/* TXCOE doesn't work in thresh DMA mode */
7077 		if (priv->plat->force_thresh_dma_mode)
7078 			priv->plat->tx_coe = 0;
7079 		else
7080 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7081 
7082 		/* In case of GMAC4 rx_coe is from HW cap register. */
7083 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7084 
7085 		if (priv->dma_cap.rx_coe_type2)
7086 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7087 		else if (priv->dma_cap.rx_coe_type1)
7088 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7089 
7090 	} else {
7091 		dev_info(priv->device, "No HW DMA feature register supported\n");
7092 	}
7093 
7094 	if (priv->plat->rx_coe) {
7095 		priv->hw->rx_csum = priv->plat->rx_coe;
7096 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7097 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7098 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7099 	}
7100 	if (priv->plat->tx_coe)
7101 		dev_info(priv->device, "TX Checksum insertion supported\n");
7102 
7103 	if (priv->plat->pmt) {
7104 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7105 		device_set_wakeup_capable(priv->device, 1);
7106 	}
7107 
7108 	if (priv->dma_cap.tsoen)
7109 		dev_info(priv->device, "TSO supported\n");
7110 
7111 	priv->hw->vlan_fail_q_en =
7112 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7113 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7114 
7115 	/* Run HW quirks, if any */
7116 	if (priv->hwif_quirks) {
7117 		ret = priv->hwif_quirks(priv);
7118 		if (ret)
7119 			return ret;
7120 	}
7121 
7122 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7123 	 * In some case, for example on bugged HW this feature
7124 	 * has to be disable and this can be done by passing the
7125 	 * riwt_off field from the platform.
7126 	 */
7127 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7128 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7129 		priv->use_riwt = 1;
7130 		dev_info(priv->device,
7131 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7132 	}
7133 
7134 	return 0;
7135 }
7136 
7137 static void stmmac_napi_add(struct net_device *dev)
7138 {
7139 	struct stmmac_priv *priv = netdev_priv(dev);
7140 	u32 queue, maxq;
7141 
7142 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7143 
7144 	for (queue = 0; queue < maxq; queue++) {
7145 		struct stmmac_channel *ch = &priv->channel[queue];
7146 
7147 		ch->priv_data = priv;
7148 		ch->index = queue;
7149 		spin_lock_init(&ch->lock);
7150 
7151 		if (queue < priv->plat->rx_queues_to_use) {
7152 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7153 		}
7154 		if (queue < priv->plat->tx_queues_to_use) {
7155 			netif_napi_add_tx(dev, &ch->tx_napi,
7156 					  stmmac_napi_poll_tx);
7157 		}
7158 		if (queue < priv->plat->rx_queues_to_use &&
7159 		    queue < priv->plat->tx_queues_to_use) {
7160 			netif_napi_add(dev, &ch->rxtx_napi,
7161 				       stmmac_napi_poll_rxtx);
7162 		}
7163 	}
7164 }
7165 
7166 static void stmmac_napi_del(struct net_device *dev)
7167 {
7168 	struct stmmac_priv *priv = netdev_priv(dev);
7169 	u32 queue, maxq;
7170 
7171 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7172 
7173 	for (queue = 0; queue < maxq; queue++) {
7174 		struct stmmac_channel *ch = &priv->channel[queue];
7175 
7176 		if (queue < priv->plat->rx_queues_to_use)
7177 			netif_napi_del(&ch->rx_napi);
7178 		if (queue < priv->plat->tx_queues_to_use)
7179 			netif_napi_del(&ch->tx_napi);
7180 		if (queue < priv->plat->rx_queues_to_use &&
7181 		    queue < priv->plat->tx_queues_to_use) {
7182 			netif_napi_del(&ch->rxtx_napi);
7183 		}
7184 	}
7185 }
7186 
7187 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7188 {
7189 	struct stmmac_priv *priv = netdev_priv(dev);
7190 	int ret = 0, i;
7191 
7192 	if (netif_running(dev))
7193 		stmmac_release(dev);
7194 
7195 	stmmac_napi_del(dev);
7196 
7197 	priv->plat->rx_queues_to_use = rx_cnt;
7198 	priv->plat->tx_queues_to_use = tx_cnt;
7199 	if (!netif_is_rxfh_configured(dev))
7200 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7201 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7202 									rx_cnt);
7203 
7204 	stmmac_napi_add(dev);
7205 
7206 	if (netif_running(dev))
7207 		ret = stmmac_open(dev);
7208 
7209 	return ret;
7210 }
7211 
7212 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7213 {
7214 	struct stmmac_priv *priv = netdev_priv(dev);
7215 	int ret = 0;
7216 
7217 	if (netif_running(dev))
7218 		stmmac_release(dev);
7219 
7220 	priv->dma_conf.dma_rx_size = rx_size;
7221 	priv->dma_conf.dma_tx_size = tx_size;
7222 
7223 	if (netif_running(dev))
7224 		ret = stmmac_open(dev);
7225 
7226 	return ret;
7227 }
7228 
7229 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7230 static void stmmac_fpe_lp_task(struct work_struct *work)
7231 {
7232 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7233 						fpe_task);
7234 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7235 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7236 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7237 	bool *hs_enable = &fpe_cfg->hs_enable;
7238 	bool *enable = &fpe_cfg->enable;
7239 	int retries = 20;
7240 
7241 	while (retries-- > 0) {
7242 		/* Bail out immediately if FPE handshake is OFF */
7243 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7244 			break;
7245 
7246 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7247 		    *lp_state == FPE_STATE_ENTERING_ON) {
7248 			stmmac_fpe_configure(priv, priv->ioaddr,
7249 					     priv->plat->tx_queues_to_use,
7250 					     priv->plat->rx_queues_to_use,
7251 					     *enable);
7252 
7253 			netdev_info(priv->dev, "configured FPE\n");
7254 
7255 			*lo_state = FPE_STATE_ON;
7256 			*lp_state = FPE_STATE_ON;
7257 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7258 			break;
7259 		}
7260 
7261 		if ((*lo_state == FPE_STATE_CAPABLE ||
7262 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7263 		     *lp_state != FPE_STATE_ON) {
7264 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
7265 				    *lo_state, *lp_state);
7266 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7267 						MPACKET_VERIFY);
7268 		}
7269 		/* Sleep then retry */
7270 		msleep(500);
7271 	}
7272 
7273 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7274 }
7275 
7276 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7277 {
7278 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7279 		if (enable) {
7280 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7281 						MPACKET_VERIFY);
7282 		} else {
7283 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7284 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7285 		}
7286 
7287 		priv->plat->fpe_cfg->hs_enable = enable;
7288 	}
7289 }
7290 
7291 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7292 {
7293 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7294 	struct dma_desc *desc_contains_ts = ctx->desc;
7295 	struct stmmac_priv *priv = ctx->priv;
7296 	struct dma_desc *ndesc = ctx->ndesc;
7297 	struct dma_desc *desc = ctx->desc;
7298 	u64 ns = 0;
7299 
7300 	if (!priv->hwts_rx_en)
7301 		return -ENODATA;
7302 
7303 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7304 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7305 		desc_contains_ts = ndesc;
7306 
7307 	/* Check if timestamp is available */
7308 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7309 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7310 		ns -= priv->plat->cdc_error_adj;
7311 		*timestamp = ns_to_ktime(ns);
7312 		return 0;
7313 	}
7314 
7315 	return -ENODATA;
7316 }
7317 
7318 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7319 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7320 };
7321 
7322 /**
7323  * stmmac_dvr_probe
7324  * @device: device pointer
7325  * @plat_dat: platform data pointer
7326  * @res: stmmac resource pointer
7327  * Description: this is the main probe function used to
7328  * call the alloc_etherdev, allocate the priv structure.
7329  * Return:
7330  * returns 0 on success, otherwise errno.
7331  */
7332 int stmmac_dvr_probe(struct device *device,
7333 		     struct plat_stmmacenet_data *plat_dat,
7334 		     struct stmmac_resources *res)
7335 {
7336 	struct net_device *ndev = NULL;
7337 	struct stmmac_priv *priv;
7338 	u32 rxq;
7339 	int i, ret = 0;
7340 
7341 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7342 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7343 	if (!ndev)
7344 		return -ENOMEM;
7345 
7346 	SET_NETDEV_DEV(ndev, device);
7347 
7348 	priv = netdev_priv(ndev);
7349 	priv->device = device;
7350 	priv->dev = ndev;
7351 
7352 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7353 		u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
7354 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7355 		u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
7356 
7357 	stmmac_set_ethtool_ops(ndev);
7358 	priv->pause = pause;
7359 	priv->plat = plat_dat;
7360 	priv->ioaddr = res->addr;
7361 	priv->dev->base_addr = (unsigned long)res->addr;
7362 	priv->plat->dma_cfg->multi_msi_en =
7363 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7364 
7365 	priv->dev->irq = res->irq;
7366 	priv->wol_irq = res->wol_irq;
7367 	priv->lpi_irq = res->lpi_irq;
7368 	priv->sfty_ce_irq = res->sfty_ce_irq;
7369 	priv->sfty_ue_irq = res->sfty_ue_irq;
7370 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7371 		priv->rx_irq[i] = res->rx_irq[i];
7372 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7373 		priv->tx_irq[i] = res->tx_irq[i];
7374 
7375 	if (!is_zero_ether_addr(res->mac))
7376 		eth_hw_addr_set(priv->dev, res->mac);
7377 
7378 	dev_set_drvdata(device, priv->dev);
7379 
7380 	/* Verify driver arguments */
7381 	stmmac_verify_args();
7382 
7383 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7384 	if (!priv->af_xdp_zc_qps)
7385 		return -ENOMEM;
7386 
7387 	/* Allocate workqueue */
7388 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7389 	if (!priv->wq) {
7390 		dev_err(priv->device, "failed to create workqueue\n");
7391 		ret = -ENOMEM;
7392 		goto error_wq_init;
7393 	}
7394 
7395 	INIT_WORK(&priv->service_task, stmmac_service_task);
7396 
7397 	/* Initialize Link Partner FPE workqueue */
7398 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7399 
7400 	/* Override with kernel parameters if supplied XXX CRS XXX
7401 	 * this needs to have multiple instances
7402 	 */
7403 	if ((phyaddr >= 0) && (phyaddr <= 31))
7404 		priv->plat->phy_addr = phyaddr;
7405 
7406 	if (priv->plat->stmmac_rst) {
7407 		ret = reset_control_assert(priv->plat->stmmac_rst);
7408 		reset_control_deassert(priv->plat->stmmac_rst);
7409 		/* Some reset controllers have only reset callback instead of
7410 		 * assert + deassert callbacks pair.
7411 		 */
7412 		if (ret == -ENOTSUPP)
7413 			reset_control_reset(priv->plat->stmmac_rst);
7414 	}
7415 
7416 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7417 	if (ret == -ENOTSUPP)
7418 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7419 			ERR_PTR(ret));
7420 
7421 	/* Init MAC and get the capabilities */
7422 	ret = stmmac_hw_init(priv);
7423 	if (ret)
7424 		goto error_hw_init;
7425 
7426 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7427 	 */
7428 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7429 		priv->plat->dma_cfg->dche = false;
7430 
7431 	stmmac_check_ether_addr(priv);
7432 
7433 	ndev->netdev_ops = &stmmac_netdev_ops;
7434 
7435 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7436 
7437 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7438 			    NETIF_F_RXCSUM;
7439 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7440 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7441 
7442 	ret = stmmac_tc_init(priv, priv);
7443 	if (!ret) {
7444 		ndev->hw_features |= NETIF_F_HW_TC;
7445 	}
7446 
7447 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7448 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7449 		if (priv->plat->has_gmac4)
7450 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7451 		priv->tso = true;
7452 		dev_info(priv->device, "TSO feature enabled\n");
7453 	}
7454 
7455 	if (priv->dma_cap.sphen &&
7456 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7457 		ndev->hw_features |= NETIF_F_GRO;
7458 		priv->sph_cap = true;
7459 		priv->sph = priv->sph_cap;
7460 		dev_info(priv->device, "SPH feature enabled\n");
7461 	}
7462 
7463 	/* Ideally our host DMA address width is the same as for the
7464 	 * device. However, it may differ and then we have to use our
7465 	 * host DMA width for allocation and the device DMA width for
7466 	 * register handling.
7467 	 */
7468 	if (priv->plat->host_dma_width)
7469 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7470 	else
7471 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7472 
7473 	if (priv->dma_cap.host_dma_width) {
7474 		ret = dma_set_mask_and_coherent(device,
7475 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7476 		if (!ret) {
7477 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7478 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7479 
7480 			/*
7481 			 * If more than 32 bits can be addressed, make sure to
7482 			 * enable enhanced addressing mode.
7483 			 */
7484 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7485 				priv->plat->dma_cfg->eame = true;
7486 		} else {
7487 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7488 			if (ret) {
7489 				dev_err(priv->device, "Failed to set DMA Mask\n");
7490 				goto error_hw_init;
7491 			}
7492 
7493 			priv->dma_cap.host_dma_width = 32;
7494 		}
7495 	}
7496 
7497 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7498 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7499 #ifdef STMMAC_VLAN_TAG_USED
7500 	/* Both mac100 and gmac support receive VLAN tag detection */
7501 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7502 	if (priv->dma_cap.vlhash) {
7503 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7504 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7505 	}
7506 	if (priv->dma_cap.vlins) {
7507 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7508 		if (priv->dma_cap.dvlan)
7509 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7510 	}
7511 #endif
7512 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7513 
7514 	priv->xstats.threshold = tc;
7515 
7516 	/* Initialize RSS */
7517 	rxq = priv->plat->rx_queues_to_use;
7518 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7519 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7520 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7521 
7522 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7523 		ndev->features |= NETIF_F_RXHASH;
7524 
7525 	ndev->vlan_features |= ndev->features;
7526 	/* TSO doesn't work on VLANs yet */
7527 	ndev->vlan_features &= ~NETIF_F_TSO;
7528 
7529 	/* MTU range: 46 - hw-specific max */
7530 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7531 	if (priv->plat->has_xgmac)
7532 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7533 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7534 		ndev->max_mtu = JUMBO_LEN;
7535 	else
7536 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7537 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7538 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7539 	 */
7540 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7541 	    (priv->plat->maxmtu >= ndev->min_mtu))
7542 		ndev->max_mtu = priv->plat->maxmtu;
7543 	else if (priv->plat->maxmtu < ndev->min_mtu)
7544 		dev_warn(priv->device,
7545 			 "%s: warning: maxmtu having invalid value (%d)\n",
7546 			 __func__, priv->plat->maxmtu);
7547 
7548 	if (flow_ctrl)
7549 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7550 
7551 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7552 
7553 	/* Setup channels NAPI */
7554 	stmmac_napi_add(ndev);
7555 
7556 	mutex_init(&priv->lock);
7557 
7558 	/* If a specific clk_csr value is passed from the platform
7559 	 * this means that the CSR Clock Range selection cannot be
7560 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7561 	 * set the MDC clock dynamically according to the csr actual
7562 	 * clock input.
7563 	 */
7564 	if (priv->plat->clk_csr >= 0)
7565 		priv->clk_csr = priv->plat->clk_csr;
7566 	else
7567 		stmmac_clk_csr_set(priv);
7568 
7569 	stmmac_check_pcs_mode(priv);
7570 
7571 	pm_runtime_get_noresume(device);
7572 	pm_runtime_set_active(device);
7573 	if (!pm_runtime_enabled(device))
7574 		pm_runtime_enable(device);
7575 
7576 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7577 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7578 		/* MDIO bus Registration */
7579 		ret = stmmac_mdio_register(ndev);
7580 		if (ret < 0) {
7581 			dev_err_probe(priv->device, ret,
7582 				      "%s: MDIO bus (id: %d) registration failed\n",
7583 				      __func__, priv->plat->bus_id);
7584 			goto error_mdio_register;
7585 		}
7586 	}
7587 
7588 	if (priv->plat->speed_mode_2500)
7589 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7590 
7591 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7592 		ret = stmmac_xpcs_setup(priv->mii);
7593 		if (ret)
7594 			goto error_xpcs_setup;
7595 	}
7596 
7597 	ret = stmmac_phy_setup(priv);
7598 	if (ret) {
7599 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7600 		goto error_phy_setup;
7601 	}
7602 
7603 	ret = register_netdev(ndev);
7604 	if (ret) {
7605 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7606 			__func__, ret);
7607 		goto error_netdev_register;
7608 	}
7609 
7610 #ifdef CONFIG_DEBUG_FS
7611 	stmmac_init_fs(ndev);
7612 #endif
7613 
7614 	if (priv->plat->dump_debug_regs)
7615 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7616 
7617 	/* Let pm_runtime_put() disable the clocks.
7618 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7619 	 */
7620 	pm_runtime_put(device);
7621 
7622 	return ret;
7623 
7624 error_netdev_register:
7625 	phylink_destroy(priv->phylink);
7626 error_xpcs_setup:
7627 error_phy_setup:
7628 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7629 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7630 		stmmac_mdio_unregister(ndev);
7631 error_mdio_register:
7632 	stmmac_napi_del(ndev);
7633 error_hw_init:
7634 	destroy_workqueue(priv->wq);
7635 error_wq_init:
7636 	bitmap_free(priv->af_xdp_zc_qps);
7637 
7638 	return ret;
7639 }
7640 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7641 
7642 /**
7643  * stmmac_dvr_remove
7644  * @dev: device pointer
7645  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7646  * changes the link status, releases the DMA descriptor rings.
7647  */
7648 void stmmac_dvr_remove(struct device *dev)
7649 {
7650 	struct net_device *ndev = dev_get_drvdata(dev);
7651 	struct stmmac_priv *priv = netdev_priv(ndev);
7652 
7653 	netdev_info(priv->dev, "%s: removing driver", __func__);
7654 
7655 	pm_runtime_get_sync(dev);
7656 
7657 	stmmac_stop_all_dma(priv);
7658 	stmmac_mac_set(priv, priv->ioaddr, false);
7659 	netif_carrier_off(ndev);
7660 	unregister_netdev(ndev);
7661 
7662 #ifdef CONFIG_DEBUG_FS
7663 	stmmac_exit_fs(ndev);
7664 #endif
7665 	phylink_destroy(priv->phylink);
7666 	if (priv->plat->stmmac_rst)
7667 		reset_control_assert(priv->plat->stmmac_rst);
7668 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7669 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7670 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7671 		stmmac_mdio_unregister(ndev);
7672 	destroy_workqueue(priv->wq);
7673 	mutex_destroy(&priv->lock);
7674 	bitmap_free(priv->af_xdp_zc_qps);
7675 
7676 	pm_runtime_disable(dev);
7677 	pm_runtime_put_noidle(dev);
7678 }
7679 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7680 
7681 /**
7682  * stmmac_suspend - suspend callback
7683  * @dev: device pointer
7684  * Description: this is the function to suspend the device and it is called
7685  * by the platform driver to stop the network queue, release the resources,
7686  * program the PMT register (for WoL), clean and release driver resources.
7687  */
7688 int stmmac_suspend(struct device *dev)
7689 {
7690 	struct net_device *ndev = dev_get_drvdata(dev);
7691 	struct stmmac_priv *priv = netdev_priv(ndev);
7692 	u32 chan;
7693 
7694 	if (!ndev || !netif_running(ndev))
7695 		return 0;
7696 
7697 	mutex_lock(&priv->lock);
7698 
7699 	netif_device_detach(ndev);
7700 
7701 	stmmac_disable_all_queues(priv);
7702 
7703 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7704 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7705 
7706 	if (priv->eee_enabled) {
7707 		priv->tx_path_in_lpi_mode = false;
7708 		del_timer_sync(&priv->eee_ctrl_timer);
7709 	}
7710 
7711 	/* Stop TX/RX DMA */
7712 	stmmac_stop_all_dma(priv);
7713 
7714 	if (priv->plat->serdes_powerdown)
7715 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7716 
7717 	/* Enable Power down mode by programming the PMT regs */
7718 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7719 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7720 		priv->irq_wake = 1;
7721 	} else {
7722 		stmmac_mac_set(priv, priv->ioaddr, false);
7723 		pinctrl_pm_select_sleep_state(priv->device);
7724 	}
7725 
7726 	mutex_unlock(&priv->lock);
7727 
7728 	rtnl_lock();
7729 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7730 		phylink_suspend(priv->phylink, true);
7731 	} else {
7732 		if (device_may_wakeup(priv->device))
7733 			phylink_speed_down(priv->phylink, false);
7734 		phylink_suspend(priv->phylink, false);
7735 	}
7736 	rtnl_unlock();
7737 
7738 	if (priv->dma_cap.fpesel) {
7739 		/* Disable FPE */
7740 		stmmac_fpe_configure(priv, priv->ioaddr,
7741 				     priv->plat->tx_queues_to_use,
7742 				     priv->plat->rx_queues_to_use, false);
7743 
7744 		stmmac_fpe_handshake(priv, false);
7745 		stmmac_fpe_stop_wq(priv);
7746 	}
7747 
7748 	priv->speed = SPEED_UNKNOWN;
7749 	return 0;
7750 }
7751 EXPORT_SYMBOL_GPL(stmmac_suspend);
7752 
7753 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7754 {
7755 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7756 
7757 	rx_q->cur_rx = 0;
7758 	rx_q->dirty_rx = 0;
7759 }
7760 
7761 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7762 {
7763 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7764 
7765 	tx_q->cur_tx = 0;
7766 	tx_q->dirty_tx = 0;
7767 	tx_q->mss = 0;
7768 
7769 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7770 }
7771 
7772 /**
7773  * stmmac_reset_queues_param - reset queue parameters
7774  * @priv: device pointer
7775  */
7776 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7777 {
7778 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7779 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7780 	u32 queue;
7781 
7782 	for (queue = 0; queue < rx_cnt; queue++)
7783 		stmmac_reset_rx_queue(priv, queue);
7784 
7785 	for (queue = 0; queue < tx_cnt; queue++)
7786 		stmmac_reset_tx_queue(priv, queue);
7787 }
7788 
7789 /**
7790  * stmmac_resume - resume callback
7791  * @dev: device pointer
7792  * Description: when resume this function is invoked to setup the DMA and CORE
7793  * in a usable state.
7794  */
7795 int stmmac_resume(struct device *dev)
7796 {
7797 	struct net_device *ndev = dev_get_drvdata(dev);
7798 	struct stmmac_priv *priv = netdev_priv(ndev);
7799 	int ret;
7800 
7801 	if (!netif_running(ndev))
7802 		return 0;
7803 
7804 	/* Power Down bit, into the PM register, is cleared
7805 	 * automatically as soon as a magic packet or a Wake-up frame
7806 	 * is received. Anyway, it's better to manually clear
7807 	 * this bit because it can generate problems while resuming
7808 	 * from another devices (e.g. serial console).
7809 	 */
7810 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7811 		mutex_lock(&priv->lock);
7812 		stmmac_pmt(priv, priv->hw, 0);
7813 		mutex_unlock(&priv->lock);
7814 		priv->irq_wake = 0;
7815 	} else {
7816 		pinctrl_pm_select_default_state(priv->device);
7817 		/* reset the phy so that it's ready */
7818 		if (priv->mii)
7819 			stmmac_mdio_reset(priv->mii);
7820 	}
7821 
7822 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7823 	    priv->plat->serdes_powerup) {
7824 		ret = priv->plat->serdes_powerup(ndev,
7825 						 priv->plat->bsp_priv);
7826 
7827 		if (ret < 0)
7828 			return ret;
7829 	}
7830 
7831 	rtnl_lock();
7832 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7833 		phylink_resume(priv->phylink);
7834 	} else {
7835 		phylink_resume(priv->phylink);
7836 		if (device_may_wakeup(priv->device))
7837 			phylink_speed_up(priv->phylink);
7838 	}
7839 	rtnl_unlock();
7840 
7841 	rtnl_lock();
7842 	mutex_lock(&priv->lock);
7843 
7844 	stmmac_reset_queues_param(priv);
7845 
7846 	stmmac_free_tx_skbufs(priv);
7847 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7848 
7849 	stmmac_hw_setup(ndev, false);
7850 	stmmac_init_coalesce(priv);
7851 	stmmac_set_rx_mode(ndev);
7852 
7853 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7854 
7855 	stmmac_enable_all_queues(priv);
7856 	stmmac_enable_all_dma_irq(priv);
7857 
7858 	mutex_unlock(&priv->lock);
7859 	rtnl_unlock();
7860 
7861 	netif_device_attach(ndev);
7862 
7863 	return 0;
7864 }
7865 EXPORT_SYMBOL_GPL(stmmac_resume);
7866 
7867 #ifndef MODULE
7868 static int __init stmmac_cmdline_opt(char *str)
7869 {
7870 	char *opt;
7871 
7872 	if (!str || !*str)
7873 		return 1;
7874 	while ((opt = strsep(&str, ",")) != NULL) {
7875 		if (!strncmp(opt, "debug:", 6)) {
7876 			if (kstrtoint(opt + 6, 0, &debug))
7877 				goto err;
7878 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7879 			if (kstrtoint(opt + 8, 0, &phyaddr))
7880 				goto err;
7881 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7882 			if (kstrtoint(opt + 7, 0, &buf_sz))
7883 				goto err;
7884 		} else if (!strncmp(opt, "tc:", 3)) {
7885 			if (kstrtoint(opt + 3, 0, &tc))
7886 				goto err;
7887 		} else if (!strncmp(opt, "watchdog:", 9)) {
7888 			if (kstrtoint(opt + 9, 0, &watchdog))
7889 				goto err;
7890 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7891 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7892 				goto err;
7893 		} else if (!strncmp(opt, "pause:", 6)) {
7894 			if (kstrtoint(opt + 6, 0, &pause))
7895 				goto err;
7896 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7897 			if (kstrtoint(opt + 10, 0, &eee_timer))
7898 				goto err;
7899 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7900 			if (kstrtoint(opt + 11, 0, &chain_mode))
7901 				goto err;
7902 		}
7903 	}
7904 	return 1;
7905 
7906 err:
7907 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7908 	return 1;
7909 }
7910 
7911 __setup("stmmaceth=", stmmac_cmdline_opt);
7912 #endif /* MODULE */
7913 
7914 static int __init stmmac_init(void)
7915 {
7916 #ifdef CONFIG_DEBUG_FS
7917 	/* Create debugfs main directory if it doesn't exist yet */
7918 	if (!stmmac_fs_dir)
7919 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7920 	register_netdevice_notifier(&stmmac_notifier);
7921 #endif
7922 
7923 	return 0;
7924 }
7925 
7926 static void __exit stmmac_exit(void)
7927 {
7928 #ifdef CONFIG_DEBUG_FS
7929 	unregister_netdevice_notifier(&stmmac_notifier);
7930 	debugfs_remove_recursive(stmmac_fs_dir);
7931 #endif
7932 }
7933 
7934 module_init(stmmac_init)
7935 module_exit(stmmac_exit)
7936 
7937 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7938 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7939 MODULE_LICENSE("GPL");
7940