1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
/* By default the driver uses ring mode to manage tx and rx descriptors,
 * but allows the user to force the use of chain mode instead of ring mode.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
 *	If a specific clk_csr value is passed from the platform,
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and is fixed (as reported in the driver
 *	documentation). Otherwise, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
	/* The platform-provided default clk_csr is assumed valid
	 * in all cases except the ones handled below.
	 * For values higher than the IEEE 802.3 specified frequency
	 * range we cannot estimate the proper divider because the
	 * frequency of clk_csr_i is not known, so the default
	 * divider is left unchanged.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
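/**
 * stmmac_tx_avail - get the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: computes the free space in the TX ring as the ring-order
 * distance from cur_tx back to dirty_tx, minus one reserved slot. As an
 * illustrative example, with dma_tx_size = 512, cur_tx = 10 and
 * dirty_tx = 5, the result is 512 - 10 + 5 - 1 = 506 free descriptors.
 */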
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all TX queues have finished their
 * work and, if so, enters LPI mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
	/* check whether all TX queues have finished their work */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
 * Description: this function exits and disables EEE when the TX path is
 * in LPI state. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
 *  if there is no data transfer and we are not already in LPI state,
 *  then the MAC transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
	/* When using the PCS we cannot deal with the phy registers at this
	 * stage, so extra features such as EEE are not supported.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1 any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
 *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
 * It is rerun after resuming from suspend, in which case the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
	/* calculate the default addend value:
	 * formula is:
	 * addend = 2^32 / freq_div_ratio
	 * where freq_div_ratio = clk_ptp_rate / (1e9 ns / sec_inc)
861 	 */
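	/* Illustrative example (hypothetical clock rates): with sec_inc = 20 ns
	 * (a 50 MHz target counter rate) and clk_ptp_rate = 62.5 MHz,
	 * addend = 2^32 * 50e6 / 62.5e6 = 0xCCCCCCCC, i.e. the accumulator
	 * feeding the sub-second counter overflows on 4 of every 5 PTP clock
	 * cycles.
	 */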
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
 * Description: this verifies whether the HW supports PTPv1 or PTPv2
 * by looking at the HW capability register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
968 	} else {
969 		*lo_state = FPE_STATE_OFF;
970 		*lp_state = FPE_STATE_OFF;
971 	}
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	priv->eee_active = false;
981 	priv->tx_lpi_enabled = false;
982 	priv->eee_enabled = stmmac_eee_init(priv);
983 	stmmac_set_eee_pls(priv, priv->hw, false);
984 
985 	if (priv->dma_cap.fpesel)
986 		stmmac_fpe_link_state_handle(priv, false);
987 }
988 
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 			       struct phy_device *phy,
991 			       unsigned int mode, phy_interface_t interface,
992 			       int speed, int duplex,
993 			       bool tx_pause, bool rx_pause)
994 {
995 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 	u32 old_ctrl, ctrl;
997 
998 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 	    priv->plat->serdes_powerup)
1000 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001 
1002 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004 
1005 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 		switch (speed) {
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_5000:
1011 			ctrl |= priv->hw->link.xgmii.speed5000;
1012 			break;
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.xgmii.speed2500;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 		switch (speed) {
1021 		case SPEED_100000:
1022 			ctrl |= priv->hw->link.xlgmii.speed100000;
1023 			break;
1024 		case SPEED_50000:
1025 			ctrl |= priv->hw->link.xlgmii.speed50000;
1026 			break;
1027 		case SPEED_40000:
1028 			ctrl |= priv->hw->link.xlgmii.speed40000;
1029 			break;
1030 		case SPEED_25000:
1031 			ctrl |= priv->hw->link.xlgmii.speed25000;
1032 			break;
1033 		case SPEED_10000:
1034 			ctrl |= priv->hw->link.xgmii.speed10000;
1035 			break;
1036 		case SPEED_2500:
1037 			ctrl |= priv->hw->link.speed2500;
1038 			break;
1039 		case SPEED_1000:
1040 			ctrl |= priv->hw->link.speed1000;
1041 			break;
1042 		default:
1043 			return;
1044 		}
1045 	} else {
1046 		switch (speed) {
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		case SPEED_100:
1054 			ctrl |= priv->hw->link.speed100;
1055 			break;
1056 		case SPEED_10:
1057 			ctrl |= priv->hw->link.speed10;
1058 			break;
1059 		default:
1060 			return;
1061 		}
1062 	}
1063 
1064 	priv->speed = speed;
1065 
1066 	if (priv->plat->fix_mac_speed)
1067 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068 
1069 	if (!duplex)
1070 		ctrl &= ~priv->hw->link.duplex;
1071 	else
1072 		ctrl |= priv->hw->link.duplex;
1073 
1074 	/* Flow Control operation */
1075 	if (rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_AUTO;
1077 	else if (rx_pause && !tx_pause)
1078 		priv->flow_ctrl = FLOW_RX;
1079 	else if (!rx_pause && tx_pause)
1080 		priv->flow_ctrl = FLOW_TX;
1081 	else
1082 		priv->flow_ctrl = FLOW_OFF;
1083 
1084 	stmmac_mac_flow_ctrl(priv, duplex);
1085 
1086 	if (ctrl != old_ctrl)
1087 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088 
1089 	stmmac_mac_set(priv, priv->ioaddr, true);
1090 	if (phy && priv->dma_cap.eee) {
1091 		priv->eee_active =
1092 			phy_init_eee(phy, !(priv->plat->flags &
1093 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 		priv->eee_enabled = stmmac_eee_init(priv);
1095 		priv->tx_lpi_enabled = priv->eee_enabled;
1096 		stmmac_set_eee_pls(priv, priv->hw, true);
1097 	}
1098 
1099 	if (priv->dma_cap.fpesel)
1100 		stmmac_fpe_link_state_handle(priv, true);
1101 
1102 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 		stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105 
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 	.mac_select_pcs = stmmac_mac_select_pcs,
1108 	.mac_config = stmmac_mac_config,
1109 	.mac_link_down = stmmac_mac_link_down,
1110 	.mac_link_up = stmmac_mac_link_up,
1111 };
1112 
1113 /**
1114  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1115  * @priv: driver private structure
 * Description: this verifies whether the HW supports the Physical Coding
 * Sublayer (PCS) interface, which can be used when the MAC is configured
 * for the TBI, RTBI, or SGMII PHY interface.
1119  */
1120 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1121 {
1122 	int interface = priv->plat->mac_interface;
1123 
1124 	if (priv->dma_cap.pcs) {
1125 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1129 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_RGMII;
1131 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1132 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1133 			priv->hw->pcs = STMMAC_PCS_SGMII;
1134 		}
1135 	}
1136 }
1137 
1138 /**
1139  * stmmac_init_phy - PHY initialization
1140  * @dev: net device structure
 * Description: it initializes the driver's PHY state and attaches the PHY
 * to the MAC driver.
1143  *  Return value:
1144  *  0 on success
1145  */
1146 static int stmmac_init_phy(struct net_device *dev)
1147 {
1148 	struct stmmac_priv *priv = netdev_priv(dev);
1149 	struct fwnode_handle *phy_fwnode;
1150 	struct fwnode_handle *fwnode;
1151 	int ret;
1152 
1153 	if (!phylink_expects_phy(priv->phylink))
1154 		return 0;
1155 
1156 	fwnode = priv->plat->port_node;
1157 	if (!fwnode)
1158 		fwnode = dev_fwnode(priv->device);
1159 
1160 	if (fwnode)
1161 		phy_fwnode = fwnode_get_phy_node(fwnode);
1162 	else
1163 		phy_fwnode = NULL;
1164 
	/* Some DT bindings do not set up the PHY handle. Let's try to
	 * parse it manually.
1167 	 */
1168 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1169 		int addr = priv->plat->phy_addr;
1170 		struct phy_device *phydev;
1171 
1172 		if (addr < 0) {
1173 			netdev_err(priv->dev, "no phy found\n");
1174 			return -ENODEV;
1175 		}
1176 
1177 		phydev = mdiobus_get_phy(priv->mii, addr);
1178 		if (!phydev) {
1179 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1180 			return -ENODEV;
1181 		}
1182 
1183 		ret = phylink_connect_phy(priv->phylink, phydev);
1184 	} else {
1185 		fwnode_handle_put(phy_fwnode);
1186 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 	}
1188 
1189 	if (!priv->plat->pmt) {
1190 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191 
1192 		phylink_ethtool_get_wol(priv->phylink, &wol);
1193 		device_set_wakeup_capable(priv->device, !!wol.supported);
1194 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 static int stmmac_phy_setup(struct stmmac_priv *priv)
1201 {
1202 	struct stmmac_mdio_bus_data *mdio_bus_data;
1203 	int mode = priv->plat->phy_interface;
1204 	struct fwnode_handle *fwnode;
1205 	struct phylink *phylink;
1206 	int max_speed;
1207 
1208 	priv->phylink_config.dev = &priv->dev->dev;
1209 	priv->phylink_config.type = PHYLINK_NETDEV;
1210 	priv->phylink_config.mac_managed_pm = true;
1211 
1212 	mdio_bus_data = priv->plat->mdio_bus_data;
1213 	if (mdio_bus_data)
1214 		priv->phylink_config.ovr_an_inband =
1215 			mdio_bus_data->xpcs_an_inband;
1216 
1217 	/* Set the platform/firmware specified interface mode. Note, phylink
1218 	 * deals with the PHY interface mode, not the MAC interface mode.
1219 	 */
1220 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1221 
1222 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1223 	if (priv->hw->xpcs)
1224 		xpcs_get_interfaces(priv->hw->xpcs,
1225 				    priv->phylink_config.supported_interfaces);
1226 
1227 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1228 						MAC_10FD | MAC_100FD |
1229 						MAC_1000FD;
1230 
1231 	/* Half-Duplex can only work with single queue */
1232 	if (priv->plat->tx_queues_to_use <= 1)
1233 		priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
1234 							 MAC_1000HD;
1235 
1236 	/* Get the MAC specific capabilities */
1237 	stmmac_mac_phylink_get_caps(priv);
1238 
1239 	max_speed = priv->plat->max_speed;
1240 	if (max_speed)
1241 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1242 
1243 	fwnode = priv->plat->port_node;
1244 	if (!fwnode)
1245 		fwnode = dev_fwnode(priv->device);
1246 
1247 	phylink = phylink_create(&priv->phylink_config, fwnode,
1248 				 mode, &stmmac_phylink_mac_ops);
1249 	if (IS_ERR(phylink))
1250 		return PTR_ERR(phylink);
1251 
1252 	priv->phylink = phylink;
1253 	return 0;
1254 }
1255 
1256 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1257 				    struct stmmac_dma_conf *dma_conf)
1258 {
1259 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1260 	unsigned int desc_size;
1261 	void *head_rx;
1262 	u32 queue;
1263 
1264 	/* Display RX rings */
1265 	for (queue = 0; queue < rx_cnt; queue++) {
1266 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1267 
1268 		pr_info("\tRX Queue %u rings\n", queue);
1269 
1270 		if (priv->extend_desc) {
1271 			head_rx = (void *)rx_q->dma_erx;
1272 			desc_size = sizeof(struct dma_extended_desc);
1273 		} else {
1274 			head_rx = (void *)rx_q->dma_rx;
1275 			desc_size = sizeof(struct dma_desc);
1276 		}
1277 
1278 		/* Display RX ring */
1279 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1280 				    rx_q->dma_rx_phy, desc_size);
1281 	}
1282 }
1283 
1284 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1285 				    struct stmmac_dma_conf *dma_conf)
1286 {
1287 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1288 	unsigned int desc_size;
1289 	void *head_tx;
1290 	u32 queue;
1291 
1292 	/* Display TX rings */
1293 	for (queue = 0; queue < tx_cnt; queue++) {
1294 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1295 
1296 		pr_info("\tTX Queue %d rings\n", queue);
1297 
1298 		if (priv->extend_desc) {
1299 			head_tx = (void *)tx_q->dma_etx;
1300 			desc_size = sizeof(struct dma_extended_desc);
1301 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1302 			head_tx = (void *)tx_q->dma_entx;
1303 			desc_size = sizeof(struct dma_edesc);
1304 		} else {
1305 			head_tx = (void *)tx_q->dma_tx;
1306 			desc_size = sizeof(struct dma_desc);
1307 		}
1308 
1309 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1310 				    tx_q->dma_tx_phy, desc_size);
1311 	}
1312 }
1313 
1314 static void stmmac_display_rings(struct stmmac_priv *priv,
1315 				 struct stmmac_dma_conf *dma_conf)
1316 {
1317 	/* Display RX ring */
1318 	stmmac_display_rx_rings(priv, dma_conf);
1319 
1320 	/* Display TX ring */
1321 	stmmac_display_tx_rings(priv, dma_conf);
1322 }
1323 
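/* stmmac_set_bfsize - select the DMA buffer size for a given MTU.
 * A sketch of the mapping: MTUs up to DEFAULT_BUFSIZE keep the default
 * buffer, larger MTUs step up through 2 KiB, 4 KiB, 8 KiB and 16 KiB.
 * As an illustrative example, an MTU of 3000 selects BUF_SIZE_4KiB.
 */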
1324 static int stmmac_set_bfsize(int mtu, int bufsize)
1325 {
1326 	int ret = bufsize;
1327 
1328 	if (mtu >= BUF_SIZE_8KiB)
1329 		ret = BUF_SIZE_16KiB;
1330 	else if (mtu >= BUF_SIZE_4KiB)
1331 		ret = BUF_SIZE_8KiB;
1332 	else if (mtu >= BUF_SIZE_2KiB)
1333 		ret = BUF_SIZE_4KiB;
1334 	else if (mtu > DEFAULT_BUFSIZE)
1335 		ret = BUF_SIZE_2KiB;
1336 	else
1337 		ret = DEFAULT_BUFSIZE;
1338 
1339 	return ret;
1340 }
1341 
1342 /**
1343  * stmmac_clear_rx_descriptors - clear RX descriptors
1344  * @priv: driver private structure
1345  * @dma_conf: structure to take the dma data
1346  * @queue: RX queue index
1347  * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are in use.
1349  */
1350 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1351 					struct stmmac_dma_conf *dma_conf,
1352 					u32 queue)
1353 {
1354 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1355 	int i;
1356 
1357 	/* Clear the RX descriptors */
1358 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1359 		if (priv->extend_desc)
1360 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1361 					priv->use_riwt, priv->mode,
1362 					(i == dma_conf->dma_rx_size - 1),
1363 					dma_conf->dma_buf_sz);
1364 		else
1365 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1366 					priv->use_riwt, priv->mode,
1367 					(i == dma_conf->dma_rx_size - 1),
1368 					dma_conf->dma_buf_sz);
1369 }
1370 
1371 /**
1372  * stmmac_clear_tx_descriptors - clear tx descriptors
1373  * @priv: driver private structure
1374  * @dma_conf: structure to take the dma data
1375  * @queue: TX queue index.
1376  * Description: this function is called to clear the TX descriptors
 * whether basic or extended descriptors are in use.
1378  */
1379 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1380 					struct stmmac_dma_conf *dma_conf,
1381 					u32 queue)
1382 {
1383 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1384 	int i;
1385 
1386 	/* Clear the TX descriptors */
1387 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1388 		int last = (i == (dma_conf->dma_tx_size - 1));
1389 		struct dma_desc *p;
1390 
1391 		if (priv->extend_desc)
1392 			p = &tx_q->dma_etx[i].basic;
1393 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1394 			p = &tx_q->dma_entx[i].basic;
1395 		else
1396 			p = &tx_q->dma_tx[i];
1397 
1398 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1399 	}
1400 }
1401 
1402 /**
1403  * stmmac_clear_descriptors - clear descriptors
1404  * @priv: driver private structure
1405  * @dma_conf: structure to take the dma data
1406  * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are in use.
1408  */
1409 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1410 				     struct stmmac_dma_conf *dma_conf)
1411 {
1412 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1413 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1414 	u32 queue;
1415 
1416 	/* Clear the RX descriptors */
1417 	for (queue = 0; queue < rx_queue_cnt; queue++)
1418 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1419 
1420 	/* Clear the TX descriptors */
1421 	for (queue = 0; queue < tx_queue_cnt; queue++)
1422 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1423 }
1424 
1425 /**
1426  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1427  * @priv: driver private structure
1428  * @dma_conf: structure to take the dma data
1429  * @p: descriptor pointer
1430  * @i: descriptor index
1431  * @flags: gfp flag
1432  * @queue: RX queue index
1433  * Description: this function is called to allocate a receive buffer, perform
1434  * the DMA mapping and init the descriptor.
1435  */
1436 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1437 				  struct stmmac_dma_conf *dma_conf,
1438 				  struct dma_desc *p,
1439 				  int i, gfp_t flags, u32 queue)
1440 {
1441 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1442 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1443 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1444 
1445 	if (priv->dma_cap.host_dma_width <= 32)
1446 		gfp |= GFP_DMA32;
1447 
1448 	if (!buf->page) {
1449 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1450 		if (!buf->page)
1451 			return -ENOMEM;
1452 		buf->page_offset = stmmac_rx_offset(priv);
1453 	}
1454 
1455 	if (priv->sph && !buf->sec_page) {
1456 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1457 		if (!buf->sec_page)
1458 			return -ENOMEM;
1459 
1460 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1461 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1462 	} else {
1463 		buf->sec_page = NULL;
1464 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1465 	}
1466 
1467 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1468 
1469 	stmmac_set_desc_addr(priv, p, buf->addr);
1470 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1471 		stmmac_init_desc3(priv, p);
1472 
1473 	return 0;
1474 }
1475 
1476 /**
1477  * stmmac_free_rx_buffer - free RX dma buffers
1478  * @priv: private structure
1479  * @rx_q: RX queue
1480  * @i: buffer index.
1481  */
1482 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1483 				  struct stmmac_rx_queue *rx_q,
1484 				  int i)
1485 {
1486 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1487 
1488 	if (buf->page)
1489 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1490 	buf->page = NULL;
1491 
1492 	if (buf->sec_page)
1493 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1494 	buf->sec_page = NULL;
1495 }
1496 
1497 /**
 * stmmac_free_tx_buffer - free TX dma buffers
1499  * @priv: private structure
1500  * @dma_conf: structure to take the dma data
 * @queue: TX queue index
1502  * @i: buffer index.
1503  */
1504 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1505 				  struct stmmac_dma_conf *dma_conf,
1506 				  u32 queue, int i)
1507 {
1508 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1509 
1510 	if (tx_q->tx_skbuff_dma[i].buf &&
1511 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1512 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1513 			dma_unmap_page(priv->device,
1514 				       tx_q->tx_skbuff_dma[i].buf,
1515 				       tx_q->tx_skbuff_dma[i].len,
1516 				       DMA_TO_DEVICE);
1517 		else
1518 			dma_unmap_single(priv->device,
1519 					 tx_q->tx_skbuff_dma[i].buf,
1520 					 tx_q->tx_skbuff_dma[i].len,
1521 					 DMA_TO_DEVICE);
1522 	}
1523 
1524 	if (tx_q->xdpf[i] &&
1525 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1526 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1527 		xdp_return_frame(tx_q->xdpf[i]);
1528 		tx_q->xdpf[i] = NULL;
1529 	}
1530 
1531 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1532 		tx_q->xsk_frames_done++;
1533 
1534 	if (tx_q->tx_skbuff[i] &&
1535 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1536 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1537 		tx_q->tx_skbuff[i] = NULL;
1538 	}
1539 
1540 	tx_q->tx_skbuff_dma[i].buf = 0;
1541 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1542 }
1543 
1544 /**
1545  * dma_free_rx_skbufs - free RX dma buffers
1546  * @priv: private structure
1547  * @dma_conf: structure to take the dma data
1548  * @queue: RX queue index
1549  */
1550 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1551 			       struct stmmac_dma_conf *dma_conf,
1552 			       u32 queue)
1553 {
1554 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1555 	int i;
1556 
1557 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1558 		stmmac_free_rx_buffer(priv, rx_q, i);
1559 }
1560 
1561 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1562 				   struct stmmac_dma_conf *dma_conf,
1563 				   u32 queue, gfp_t flags)
1564 {
1565 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1566 	int i;
1567 
1568 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1569 		struct dma_desc *p;
1570 		int ret;
1571 
1572 		if (priv->extend_desc)
1573 			p = &((rx_q->dma_erx + i)->basic);
1574 		else
1575 			p = rx_q->dma_rx + i;
1576 
1577 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1578 					     queue);
1579 		if (ret)
1580 			return ret;
1581 
1582 		rx_q->buf_alloc_num++;
1583 	}
1584 
1585 	return 0;
1586 }
1587 
1588 /**
1589  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1590  * @priv: private structure
1591  * @dma_conf: structure to take the dma data
1592  * @queue: RX queue index
1593  */
1594 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1595 				struct stmmac_dma_conf *dma_conf,
1596 				u32 queue)
1597 {
1598 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1599 	int i;
1600 
1601 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1602 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1603 
1604 		if (!buf->xdp)
1605 			continue;
1606 
1607 		xsk_buff_free(buf->xdp);
1608 		buf->xdp = NULL;
1609 	}
1610 }
1611 
1612 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1613 				      struct stmmac_dma_conf *dma_conf,
1614 				      u32 queue)
1615 {
1616 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1617 	int i;
1618 
1619 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1620 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure there are no size violations.
1622 	 */
1623 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1624 
1625 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1626 		struct stmmac_rx_buffer *buf;
1627 		dma_addr_t dma_addr;
1628 		struct dma_desc *p;
1629 
1630 		if (priv->extend_desc)
1631 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1632 		else
1633 			p = rx_q->dma_rx + i;
1634 
1635 		buf = &rx_q->buf_pool[i];
1636 
1637 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1638 		if (!buf->xdp)
1639 			return -ENOMEM;
1640 
1641 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1642 		stmmac_set_desc_addr(priv, p, dma_addr);
1643 		rx_q->buf_alloc_num++;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1650 {
1651 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1652 		return NULL;
1653 
1654 	return xsk_get_pool_from_qid(priv->dev, queue);
1655 }
1656 
1657 /**
1658  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1659  * @priv: driver private structure
1660  * @dma_conf: structure to take the dma data
1661  * @queue: RX queue index
1662  * @flags: gfp flag.
1663  * Description: this function initializes the DMA RX descriptors
1664  * and allocates the socket buffers. It supports the chained and ring
1665  * modes.
1666  */
1667 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1668 				    struct stmmac_dma_conf *dma_conf,
1669 				    u32 queue, gfp_t flags)
1670 {
1671 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1672 	int ret;
1673 
1674 	netif_dbg(priv, probe, priv->dev,
1675 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1676 		  (u32)rx_q->dma_rx_phy);
1677 
1678 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1679 
1680 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1681 
1682 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1683 
1684 	if (rx_q->xsk_pool) {
1685 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1686 						   MEM_TYPE_XSK_BUFF_POOL,
1687 						   NULL));
1688 		netdev_info(priv->dev,
1689 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1690 			    rx_q->queue_index);
1691 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1692 	} else {
1693 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1694 						   MEM_TYPE_PAGE_POOL,
1695 						   rx_q->page_pool));
1696 		netdev_info(priv->dev,
1697 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1698 			    rx_q->queue_index);
1699 	}
1700 
1701 	if (rx_q->xsk_pool) {
1702 		/* RX XDP ZC buffer pool may not be populated, e.g.
1703 		 * xdpsock TX-only.
1704 		 */
1705 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1706 	} else {
1707 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1708 		if (ret < 0)
1709 			return -ENOMEM;
1710 	}
1711 
1712 	/* Setup the chained descriptor addresses */
1713 	if (priv->mode == STMMAC_CHAIN_MODE) {
1714 		if (priv->extend_desc)
1715 			stmmac_mode_init(priv, rx_q->dma_erx,
1716 					 rx_q->dma_rx_phy,
1717 					 dma_conf->dma_rx_size, 1);
1718 		else
1719 			stmmac_mode_init(priv, rx_q->dma_rx,
1720 					 rx_q->dma_rx_phy,
1721 					 dma_conf->dma_rx_size, 0);
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 static int init_dma_rx_desc_rings(struct net_device *dev,
1728 				  struct stmmac_dma_conf *dma_conf,
1729 				  gfp_t flags)
1730 {
1731 	struct stmmac_priv *priv = netdev_priv(dev);
1732 	u32 rx_count = priv->plat->rx_queues_to_use;
1733 	int queue;
1734 	int ret;
1735 
1736 	/* RX INITIALIZATION */
1737 	netif_dbg(priv, probe, priv->dev,
1738 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1739 
1740 	for (queue = 0; queue < rx_count; queue++) {
1741 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1742 		if (ret)
1743 			goto err_init_rx_buffers;
1744 	}
1745 
1746 	return 0;
1747 
1748 err_init_rx_buffers:
1749 	while (queue >= 0) {
1750 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1751 
1752 		if (rx_q->xsk_pool)
1753 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1754 		else
1755 			dma_free_rx_skbufs(priv, dma_conf, queue);
1756 
1757 		rx_q->buf_alloc_num = 0;
1758 		rx_q->xsk_pool = NULL;
1759 
1760 		queue--;
1761 	}
1762 
1763 	return ret;
1764 }
1765 
1766 /**
1767  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1768  * @priv: driver private structure
1769  * @dma_conf: structure to take the dma data
1770  * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and clears the TX buffer bookkeeping arrays. It supports the chained and
 * ring modes.
1774  */
1775 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1776 				    struct stmmac_dma_conf *dma_conf,
1777 				    u32 queue)
1778 {
1779 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1780 	int i;
1781 
1782 	netif_dbg(priv, probe, priv->dev,
1783 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1784 		  (u32)tx_q->dma_tx_phy);
1785 
1786 	/* Setup the chained descriptor addresses */
1787 	if (priv->mode == STMMAC_CHAIN_MODE) {
1788 		if (priv->extend_desc)
1789 			stmmac_mode_init(priv, tx_q->dma_etx,
1790 					 tx_q->dma_tx_phy,
1791 					 dma_conf->dma_tx_size, 1);
1792 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1793 			stmmac_mode_init(priv, tx_q->dma_tx,
1794 					 tx_q->dma_tx_phy,
1795 					 dma_conf->dma_tx_size, 0);
1796 	}
1797 
1798 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1799 
1800 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1801 		struct dma_desc *p;
1802 
1803 		if (priv->extend_desc)
1804 			p = &((tx_q->dma_etx + i)->basic);
1805 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1806 			p = &((tx_q->dma_entx + i)->basic);
1807 		else
1808 			p = tx_q->dma_tx + i;
1809 
1810 		stmmac_clear_desc(priv, p);
1811 
1812 		tx_q->tx_skbuff_dma[i].buf = 0;
1813 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1814 		tx_q->tx_skbuff_dma[i].len = 0;
1815 		tx_q->tx_skbuff_dma[i].last_segment = false;
1816 		tx_q->tx_skbuff[i] = NULL;
1817 	}
1818 
1819 	return 0;
1820 }
1821 
1822 static int init_dma_tx_desc_rings(struct net_device *dev,
1823 				  struct stmmac_dma_conf *dma_conf)
1824 {
1825 	struct stmmac_priv *priv = netdev_priv(dev);
1826 	u32 tx_queue_cnt;
1827 	u32 queue;
1828 
1829 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1830 
1831 	for (queue = 0; queue < tx_queue_cnt; queue++)
1832 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1833 
1834 	return 0;
1835 }
1836 
1837 /**
1838  * init_dma_desc_rings - init the RX/TX descriptor rings
1839  * @dev: net device structure
1840  * @dma_conf: structure to take the dma data
1841  * @flags: gfp flag.
1842  * Description: this function initializes the DMA RX/TX descriptors
1843  * and allocates the socket buffers. It supports the chained and ring
1844  * modes.
1845  */
1846 static int init_dma_desc_rings(struct net_device *dev,
1847 			       struct stmmac_dma_conf *dma_conf,
1848 			       gfp_t flags)
1849 {
1850 	struct stmmac_priv *priv = netdev_priv(dev);
1851 	int ret;
1852 
1853 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1854 	if (ret)
1855 		return ret;
1856 
1857 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1858 
1859 	stmmac_clear_descriptors(priv, dma_conf);
1860 
1861 	if (netif_msg_hw(priv))
1862 		stmmac_display_rings(priv, dma_conf);
1863 
1864 	return ret;
1865 }
1866 
1867 /**
1868  * dma_free_tx_skbufs - free TX dma buffers
1869  * @priv: private structure
1870  * @dma_conf: structure to take the dma data
1871  * @queue: TX queue index
1872  */
1873 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1874 			       struct stmmac_dma_conf *dma_conf,
1875 			       u32 queue)
1876 {
1877 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1878 	int i;
1879 
1880 	tx_q->xsk_frames_done = 0;
1881 
1882 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1883 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1884 
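	/* Report any XSK frames completed while freeing the buffers back to
	 * the pool before dropping the reference to it.
	 */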
1885 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1886 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1887 		tx_q->xsk_frames_done = 0;
1888 		tx_q->xsk_pool = NULL;
1889 	}
1890 }
1891 
1892 /**
1893  * stmmac_free_tx_skbufs - free TX skb buffers
1894  * @priv: private structure
1895  */
1896 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1897 {
1898 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1899 	u32 queue;
1900 
1901 	for (queue = 0; queue < tx_queue_cnt; queue++)
1902 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1903 }
1904 
1905 /**
1906  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1907  * @priv: private structure
1908  * @dma_conf: structure to take the dma data
1909  * @queue: RX queue index
1910  */
1911 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1912 					 struct stmmac_dma_conf *dma_conf,
1913 					 u32 queue)
1914 {
1915 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1916 
1917 	/* Release the DMA RX socket buffers */
1918 	if (rx_q->xsk_pool)
1919 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1920 	else
1921 		dma_free_rx_skbufs(priv, dma_conf, queue);
1922 
1923 	rx_q->buf_alloc_num = 0;
1924 	rx_q->xsk_pool = NULL;
1925 
1926 	/* Free DMA regions of consistent memory previously allocated */
1927 	if (!priv->extend_desc)
1928 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1929 				  sizeof(struct dma_desc),
1930 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1931 	else
1932 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1933 				  sizeof(struct dma_extended_desc),
1934 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1935 
1936 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1937 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1938 
1939 	kfree(rx_q->buf_pool);
1940 	if (rx_q->page_pool)
1941 		page_pool_destroy(rx_q->page_pool);
1942 }
1943 
1944 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1945 				       struct stmmac_dma_conf *dma_conf)
1946 {
1947 	u32 rx_count = priv->plat->rx_queues_to_use;
1948 	u32 queue;
1949 
1950 	/* Free RX queue resources */
1951 	for (queue = 0; queue < rx_count; queue++)
1952 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1953 }
1954 
1955 /**
1956  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1957  * @priv: private structure
1958  * @dma_conf: structure to take the dma data
1959  * @queue: TX queue index
1960  */
1961 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1962 					 struct stmmac_dma_conf *dma_conf,
1963 					 u32 queue)
1964 {
1965 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1966 	size_t size;
1967 	void *addr;
1968 
1969 	/* Release the DMA TX socket buffers */
1970 	dma_free_tx_skbufs(priv, dma_conf, queue);
1971 
1972 	if (priv->extend_desc) {
1973 		size = sizeof(struct dma_extended_desc);
1974 		addr = tx_q->dma_etx;
1975 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1976 		size = sizeof(struct dma_edesc);
1977 		addr = tx_q->dma_entx;
1978 	} else {
1979 		size = sizeof(struct dma_desc);
1980 		addr = tx_q->dma_tx;
1981 	}
1982 
1983 	size *= dma_conf->dma_tx_size;
1984 
1985 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1986 
1987 	kfree(tx_q->tx_skbuff_dma);
1988 	kfree(tx_q->tx_skbuff);
1989 }
1990 
1991 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1992 				       struct stmmac_dma_conf *dma_conf)
1993 {
1994 	u32 tx_count = priv->plat->tx_queues_to_use;
1995 	u32 queue;
1996 
1997 	/* Free TX queue resources */
1998 	for (queue = 0; queue < tx_count; queue++)
1999 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2000 }
2001 
2002 /**
2003  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2004  * @priv: private structure
2005  * @dma_conf: structure to take the dma data
2006  * @queue: RX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the RX path of the given queue:
 * the page pool, the buffer bookkeeping and the descriptor ring, so that the
 * RX buffers can later be pre-allocated for the zero-copy mechanism.
2011  */
2012 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2013 					 struct stmmac_dma_conf *dma_conf,
2014 					 u32 queue)
2015 {
2016 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2017 	struct stmmac_channel *ch = &priv->channel[queue];
2018 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2019 	struct page_pool_params pp_params = { 0 };
2020 	unsigned int num_pages;
2021 	unsigned int napi_id;
2022 	int ret;
2023 
2024 	rx_q->queue_index = queue;
2025 	rx_q->priv_data = priv;
2026 
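	/* One page-pool buffer backs each RX descriptor. DMA mapping and
	 * device sync are delegated to the pool; the mapping is made
	 * bidirectional when an XDP program is attached so that XDP_TX can
	 * reuse it.
	 */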
2027 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2028 	pp_params.pool_size = dma_conf->dma_rx_size;
2029 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2030 	pp_params.order = ilog2(num_pages);
2031 	pp_params.nid = dev_to_node(priv->device);
2032 	pp_params.dev = priv->device;
2033 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2034 	pp_params.offset = stmmac_rx_offset(priv);
2035 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2036 
2037 	rx_q->page_pool = page_pool_create(&pp_params);
2038 	if (IS_ERR(rx_q->page_pool)) {
2039 		ret = PTR_ERR(rx_q->page_pool);
2040 		rx_q->page_pool = NULL;
2041 		return ret;
2042 	}
2043 
2044 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2045 				 sizeof(*rx_q->buf_pool),
2046 				 GFP_KERNEL);
2047 	if (!rx_q->buf_pool)
2048 		return -ENOMEM;
2049 
2050 	if (priv->extend_desc) {
2051 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2052 						   dma_conf->dma_rx_size *
2053 						   sizeof(struct dma_extended_desc),
2054 						   &rx_q->dma_rx_phy,
2055 						   GFP_KERNEL);
2056 		if (!rx_q->dma_erx)
2057 			return -ENOMEM;
2058 
2059 	} else {
2060 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2061 						  dma_conf->dma_rx_size *
2062 						  sizeof(struct dma_desc),
2063 						  &rx_q->dma_rx_phy,
2064 						  GFP_KERNEL);
2065 		if (!rx_q->dma_rx)
2066 			return -ENOMEM;
2067 	}
2068 
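	/* Register the XDP RxQ info against the NAPI instance that will
	 * actually poll this queue: the combined rx-tx NAPI for AF_XDP
	 * zero-copy, the plain RX NAPI otherwise.
	 */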
2069 	if (stmmac_xdp_is_enabled(priv) &&
2070 	    test_bit(queue, priv->af_xdp_zc_qps))
2071 		napi_id = ch->rxtx_napi.napi_id;
2072 	else
2073 		napi_id = ch->rx_napi.napi_id;
2074 
2075 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2076 			       rx_q->queue_index,
2077 			       napi_id);
2078 	if (ret) {
2079 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2080 		return -EINVAL;
2081 	}
2082 
2083 	return 0;
2084 }
2085 
2086 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2087 				       struct stmmac_dma_conf *dma_conf)
2088 {
2089 	u32 rx_count = priv->plat->rx_queues_to_use;
2090 	u32 queue;
2091 	int ret;
2092 
2093 	/* RX queues buffers and DMA */
2094 	for (queue = 0; queue < rx_count; queue++) {
2095 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2096 		if (ret)
2097 			goto err_dma;
2098 	}
2099 
2100 	return 0;
2101 
2102 err_dma:
2103 	free_dma_rx_desc_resources(priv, dma_conf);
2104 
2105 	return ret;
2106 }
2107 
2108 /**
2109  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2110  * @priv: private structure
2111  * @dma_conf: structure to take the dma data
2112  * @queue: TX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX path of the given queue:
 * the descriptor ring and the per-entry buffer bookkeeping arrays.
2117  */
2118 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2119 					 struct stmmac_dma_conf *dma_conf,
2120 					 u32 queue)
2121 {
2122 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2123 	size_t size;
2124 	void *addr;
2125 
2126 	tx_q->queue_index = queue;
2127 	tx_q->priv_data = priv;
2128 
2129 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2130 				      sizeof(*tx_q->tx_skbuff_dma),
2131 				      GFP_KERNEL);
2132 	if (!tx_q->tx_skbuff_dma)
2133 		return -ENOMEM;
2134 
2135 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2136 				  sizeof(struct sk_buff *),
2137 				  GFP_KERNEL);
2138 	if (!tx_q->tx_skbuff)
2139 		return -ENOMEM;
2140 
2141 	if (priv->extend_desc)
2142 		size = sizeof(struct dma_extended_desc);
2143 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2144 		size = sizeof(struct dma_edesc);
2145 	else
2146 		size = sizeof(struct dma_desc);
2147 
2148 	size *= dma_conf->dma_tx_size;
2149 
2150 	addr = dma_alloc_coherent(priv->device, size,
2151 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2152 	if (!addr)
2153 		return -ENOMEM;
2154 
2155 	if (priv->extend_desc)
2156 		tx_q->dma_etx = addr;
2157 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2158 		tx_q->dma_entx = addr;
2159 	else
2160 		tx_q->dma_tx = addr;
2161 
2162 	return 0;
2163 }
2164 
2165 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2166 				       struct stmmac_dma_conf *dma_conf)
2167 {
2168 	u32 tx_count = priv->plat->tx_queues_to_use;
2169 	u32 queue;
2170 	int ret;
2171 
2172 	/* TX queues buffers and DMA */
2173 	for (queue = 0; queue < tx_count; queue++) {
2174 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2175 		if (ret)
2176 			goto err_dma;
2177 	}
2178 
2179 	return 0;
2180 
2181 err_dma:
2182 	free_dma_tx_desc_resources(priv, dma_conf);
2183 	return ret;
2184 }
2185 
2186 /**
2187  * alloc_dma_desc_resources - alloc TX/RX resources.
2188  * @priv: private structure
2189  * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
2194  */
2195 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2196 				    struct stmmac_dma_conf *dma_conf)
2197 {
2198 	/* RX Allocation */
2199 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2200 
2201 	if (ret)
2202 		return ret;
2203 
2204 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2205 
2206 	return ret;
2207 }
2208 
2209 /**
2210  * free_dma_desc_resources - free dma desc resources
2211  * @priv: private structure
2212  * @dma_conf: structure to take the dma data
2213  */
2214 static void free_dma_desc_resources(struct stmmac_priv *priv,
2215 				    struct stmmac_dma_conf *dma_conf)
2216 {
2217 	/* Release the DMA TX socket buffers */
2218 	free_dma_tx_desc_resources(priv, dma_conf);
2219 
2220 	/* Release the DMA RX socket buffers later
2221 	 * to ensure all pending XDP_TX buffers are returned.
2222 	 */
2223 	free_dma_rx_desc_resources(priv, dma_conf);
2224 }
2225 
2226 /**
2227  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2228  *  @priv: driver private structure
2229  *  Description: It is used for enabling the rx queues in the MAC
2230  */
2231 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2232 {
2233 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2234 	int queue;
2235 	u8 mode;
2236 
2237 	for (queue = 0; queue < rx_queues_count; queue++) {
2238 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2239 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2240 	}
2241 }
2242 
2243 /**
2244  * stmmac_start_rx_dma - start RX DMA channel
2245  * @priv: driver private structure
2246  * @chan: RX channel index
2247  * Description:
2248  * This starts a RX DMA channel
2249  */
2250 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2251 {
2252 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2253 	stmmac_start_rx(priv, priv->ioaddr, chan);
2254 }
2255 
2256 /**
2257  * stmmac_start_tx_dma - start TX DMA channel
2258  * @priv: driver private structure
2259  * @chan: TX channel index
2260  * Description:
2261  * This starts a TX DMA channel
2262  */
2263 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2264 {
2265 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2266 	stmmac_start_tx(priv, priv->ioaddr, chan);
2267 }
2268 
2269 /**
2270  * stmmac_stop_rx_dma - stop RX DMA channel
2271  * @priv: driver private structure
2272  * @chan: RX channel index
2273  * Description:
2274  * This stops a RX DMA channel
2275  */
2276 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2277 {
2278 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2279 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2280 }
2281 
2282 /**
2283  * stmmac_stop_tx_dma - stop TX DMA channel
2284  * @priv: driver private structure
2285  * @chan: TX channel index
2286  * Description:
2287  * This stops a TX DMA channel
2288  */
2289 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2290 {
2291 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2292 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2293 }
2294 
2295 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2296 {
2297 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2298 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2299 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2300 	u32 chan;
2301 
2302 	for (chan = 0; chan < dma_csr_ch; chan++) {
2303 		struct stmmac_channel *ch = &priv->channel[chan];
2304 		unsigned long flags;
2305 
2306 		spin_lock_irqsave(&ch->lock, flags);
2307 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2308 		spin_unlock_irqrestore(&ch->lock, flags);
2309 	}
2310 }
2311 
2312 /**
2313  * stmmac_start_all_dma - start all RX and TX DMA channels
2314  * @priv: driver private structure
2315  * Description:
2316  * This starts all the RX and TX DMA channels
2317  */
2318 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2319 {
2320 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2321 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2322 	u32 chan = 0;
2323 
2324 	for (chan = 0; chan < rx_channels_count; chan++)
2325 		stmmac_start_rx_dma(priv, chan);
2326 
2327 	for (chan = 0; chan < tx_channels_count; chan++)
2328 		stmmac_start_tx_dma(priv, chan);
2329 }
2330 
2331 /**
2332  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2333  * @priv: driver private structure
2334  * Description:
2335  * This stops the RX and TX DMA channels
2336  */
2337 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2338 {
2339 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2340 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2341 	u32 chan = 0;
2342 
2343 	for (chan = 0; chan < rx_channels_count; chan++)
2344 		stmmac_stop_rx_dma(priv, chan);
2345 
2346 	for (chan = 0; chan < tx_channels_count; chan++)
2347 		stmmac_stop_tx_dma(priv, chan);
2348 }
2349 
2350 /**
2351  *  stmmac_dma_operation_mode - HW DMA operation mode
2352  *  @priv: driver private structure
2353  *  Description: it is used for configuring the DMA operation mode register in
2354  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2355  */
2356 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2357 {
2358 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2359 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2360 	int rxfifosz = priv->plat->rx_fifo_size;
2361 	int txfifosz = priv->plat->tx_fifo_size;
2362 	u32 txmode = 0;
2363 	u32 rxmode = 0;
2364 	u32 chan = 0;
2365 	u8 qmode = 0;
2366 
2367 	if (rxfifosz == 0)
2368 		rxfifosz = priv->dma_cap.rx_fifo_size;
2369 	if (txfifosz == 0)
2370 		txfifosz = priv->dma_cap.tx_fifo_size;
2371 
2372 	/* Adjust for real per queue fifo size */
2373 	rxfifosz /= rx_channels_count;
2374 	txfifosz /= tx_channels_count;
2375 
2376 	if (priv->plat->force_thresh_dma_mode) {
2377 		txmode = tc;
2378 		rxmode = tc;
2379 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported;
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
		 */
2387 		txmode = SF_DMA_MODE;
2388 		rxmode = SF_DMA_MODE;
2389 		priv->xstats.threshold = SF_DMA_MODE;
2390 	} else {
2391 		txmode = tc;
2392 		rxmode = SF_DMA_MODE;
2393 	}
2394 
2395 	/* configure all channels */
2396 	for (chan = 0; chan < rx_channels_count; chan++) {
2397 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2398 		u32 buf_size;
2399 
2400 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2401 
2402 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2403 				rxfifosz, qmode);
2404 
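		/* With an attached XSK pool the DMA buffer size must match
		 * the pool RX frame size instead of the default buffer size.
		 */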
2405 		if (rx_q->xsk_pool) {
2406 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2407 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2408 					      buf_size,
2409 					      chan);
2410 		} else {
2411 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2412 					      priv->dma_conf.dma_buf_sz,
2413 					      chan);
2414 		}
2415 	}
2416 
2417 	for (chan = 0; chan < tx_channels_count; chan++) {
2418 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2419 
2420 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2421 				txfifosz, qmode);
2422 	}
2423 }
2424 
2425 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2426 {
2427 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2428 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2429 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2430 	unsigned int entry = tx_q->cur_tx;
2431 	struct dma_desc *tx_desc = NULL;
2432 	struct xdp_desc xdp_desc;
2433 	bool work_done = true;
2434 	u32 tx_set_ic_bit = 0;
2435 	unsigned long flags;
2436 
2437 	/* Avoids TX time-out as we are sharing with slow path */
2438 	txq_trans_cond_update(nq);
2439 
2440 	budget = min(budget, stmmac_tx_avail(priv, queue));
2441 
2442 	while (budget-- > 0) {
2443 		dma_addr_t dma_addr;
2444 		bool set_ic;
2445 
		/* We are sharing the ring with the slow path, so stop XSK TX
		 * desc submission when the available TX ring space drops below
		 * the threshold.
		 */
2449 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2450 		    !netif_carrier_ok(priv->dev)) {
2451 			work_done = false;
2452 			break;
2453 		}
2454 
2455 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2456 			break;
2457 
2458 		if (likely(priv->extend_desc))
2459 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2460 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2461 			tx_desc = &tx_q->dma_entx[entry].basic;
2462 		else
2463 			tx_desc = tx_q->dma_tx + entry;
2464 
2465 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2466 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2467 
2468 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2469 
		/* To return an XDP buffer to the XSK pool, we simply call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
2474 		tx_q->tx_skbuff_dma[entry].buf = 0;
2475 		tx_q->xdpf[entry] = NULL;
2476 
2477 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2478 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2479 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2480 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2481 
2482 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2483 
2484 		tx_q->tx_count_frames++;
2485 
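		/* Set the Interrupt-on-Completion bit only once every
		 * tx_coal_frames descriptors to limit the TX completion
		 * IRQ rate.
		 */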
2486 		if (!priv->tx_coal_frames[queue])
2487 			set_ic = false;
2488 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2489 			set_ic = true;
2490 		else
2491 			set_ic = false;
2492 
2493 		if (set_ic) {
2494 			tx_q->tx_count_frames = 0;
2495 			stmmac_set_tx_ic(priv, tx_desc);
2496 			tx_set_ic_bit++;
2497 		}
2498 
2499 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2500 				       true, priv->mode, true, true,
2501 				       xdp_desc.len);
2502 
2503 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2504 
2505 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2506 		entry = tx_q->cur_tx;
2507 	}
2508 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
2509 	tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
2510 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
2511 
2512 	if (tx_desc) {
2513 		stmmac_flush_tx_descriptors(priv, queue);
2514 		xsk_tx_release(pool);
2515 	}
2516 
	/* Return true only if both of the following conditions are met:
	 *  a) TX budget is still available;
	 *  b) work_done is true, i.e. the XSK TX desc peek came back empty
	 *     (no more pending XSK TX frames to transmit).
	 */
2522 	return !!budget && work_done;
2523 }
2524 
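/* Raise the threshold-mode watermark in steps of 64, as long as it is still
 * at or below 256 and the channel is not already in Store-and-Forward mode.
 */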
2525 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2526 {
2527 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2528 		tc += 64;
2529 
2530 		if (priv->plat->force_thresh_dma_mode)
2531 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2532 		else
2533 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2534 						      chan);
2535 
2536 		priv->xstats.threshold = tc;
2537 	}
2538 }
2539 
2540 /**
2541  * stmmac_tx_clean - to manage the transmission completion
2542  * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
2544  * @queue: TX queue index
2545  * Description: it reclaims the transmit resources after transmission completes.
2546  */
2547 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2548 {
2549 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2550 	unsigned int bytes_compl = 0, pkts_compl = 0;
2551 	unsigned int entry, xmits = 0, count = 0;
2552 	u32 tx_packets = 0, tx_errors = 0;
2553 	unsigned long flags;
2554 
2555 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2556 
2557 	tx_q->xsk_frames_done = 0;
2558 
2559 	entry = tx_q->dirty_tx;
2560 
	/* Try to clean all completed TX frames in one shot */
2562 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2563 		struct xdp_frame *xdpf;
2564 		struct sk_buff *skb;
2565 		struct dma_desc *p;
2566 		int status;
2567 
2568 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2569 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2570 			xdpf = tx_q->xdpf[entry];
2571 			skb = NULL;
2572 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2573 			xdpf = NULL;
2574 			skb = tx_q->tx_skbuff[entry];
2575 		} else {
2576 			xdpf = NULL;
2577 			skb = NULL;
2578 		}
2579 
2580 		if (priv->extend_desc)
2581 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2582 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2583 			p = &tx_q->dma_entx[entry].basic;
2584 		else
2585 			p = tx_q->dma_tx + entry;
2586 
		status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2588 		/* Check if the descriptor is owned by the DMA */
2589 		if (unlikely(status & tx_dma_own))
2590 			break;
2591 
2592 		count++;
2593 
2594 		/* Make sure descriptor fields are read after reading
2595 		 * the own bit.
2596 		 */
2597 		dma_rmb();
2598 
2599 		/* Just consider the last segment and ...*/
2600 		if (likely(!(status & tx_not_ls))) {
2601 			/* ... verify the status error condition */
2602 			if (unlikely(status & tx_err)) {
2603 				tx_errors++;
2604 				if (unlikely(status & tx_err_bump_tc))
2605 					stmmac_bump_dma_threshold(priv, queue);
2606 			} else {
2607 				tx_packets++;
2608 			}
2609 			if (skb)
2610 				stmmac_get_tx_hwtstamp(priv, p, skb);
2611 		}
2612 
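		/* Unmap only the buffers this driver mapped itself: XDP_TX
		 * frames reuse the RX page-pool mapping and are returned to
		 * the pool further below instead.
		 */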
2613 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2614 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2615 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2616 				dma_unmap_page(priv->device,
2617 					       tx_q->tx_skbuff_dma[entry].buf,
2618 					       tx_q->tx_skbuff_dma[entry].len,
2619 					       DMA_TO_DEVICE);
2620 			else
2621 				dma_unmap_single(priv->device,
2622 						 tx_q->tx_skbuff_dma[entry].buf,
2623 						 tx_q->tx_skbuff_dma[entry].len,
2624 						 DMA_TO_DEVICE);
2625 			tx_q->tx_skbuff_dma[entry].buf = 0;
2626 			tx_q->tx_skbuff_dma[entry].len = 0;
2627 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2628 		}
2629 
2630 		stmmac_clean_desc3(priv, tx_q, p);
2631 
2632 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2633 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2634 
2635 		if (xdpf &&
2636 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2637 			xdp_return_frame_rx_napi(xdpf);
2638 			tx_q->xdpf[entry] = NULL;
2639 		}
2640 
2641 		if (xdpf &&
2642 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2643 			xdp_return_frame(xdpf);
2644 			tx_q->xdpf[entry] = NULL;
2645 		}
2646 
2647 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2648 			tx_q->xsk_frames_done++;
2649 
2650 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2651 			if (likely(skb)) {
2652 				pkts_compl++;
2653 				bytes_compl += skb->len;
2654 				dev_consume_skb_any(skb);
2655 				tx_q->tx_skbuff[entry] = NULL;
2656 			}
2657 		}
2658 
2659 		stmmac_release_tx_desc(priv, p, priv->mode);
2660 
2661 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2662 	}
2663 	tx_q->dirty_tx = entry;
2664 
2665 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2666 				  pkts_compl, bytes_compl);
2667 
2668 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2669 								queue))) &&
2670 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2671 
2672 		netif_dbg(priv, tx_done, priv->dev,
2673 			  "%s: restart transmit\n", __func__);
2674 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2675 	}
2676 
2677 	if (tx_q->xsk_pool) {
2678 		bool work_done;
2679 
2680 		if (tx_q->xsk_frames_done)
2681 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2682 
2683 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2684 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2685 
2686 		/* For XSK TX, we try to send as many as possible.
		/* For XSK TX, we try to send as many as possible.
		 * If XSK work is done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to re-enable the TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
		 */
2691 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2692 					       STMMAC_XSK_TX_BUDGET_MAX);
2693 		if (work_done)
2694 			xmits = budget - 1;
2695 		else
2696 			xmits = budget;
2697 	}
2698 
2699 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2700 	    priv->eee_sw_timer_en) {
2701 		if (stmmac_enable_eee_mode(priv))
2702 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2703 	}
2704 
2705 	/* We still have pending packets, let's call for a new scheduling */
2706 	if (tx_q->dirty_tx != tx_q->cur_tx)
2707 		stmmac_tx_timer_arm(priv, queue);
2708 
2709 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
2710 	tx_q->txq_stats.tx_packets += tx_packets;
2711 	tx_q->txq_stats.tx_pkt_n += tx_packets;
2712 	tx_q->txq_stats.tx_clean++;
2713 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
2714 
2715 	priv->xstats.tx_errors += tx_errors;
2716 
2717 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2718 
2719 	/* Combine decisions from TX clean and XSK TX */
2720 	return max(count, xmits);
2721 }
2722 
2723 /**
2724  * stmmac_tx_err - to manage the tx error
2725  * @priv: driver private structure
2726  * @chan: channel index
2727  * Description: it cleans the descriptors and restarts the transmission
2728  * in case of transmission errors.
2729  */
2730 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2731 {
2732 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2733 
2734 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2735 
2736 	stmmac_stop_tx_dma(priv, chan);
2737 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2738 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2739 	stmmac_reset_tx_queue(priv, chan);
2740 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2741 			    tx_q->dma_tx_phy, chan);
2742 	stmmac_start_tx_dma(priv, chan);
2743 
2744 	priv->xstats.tx_errors++;
2745 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2746 }
2747 
2748 /**
2749  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2750  *  @priv: driver private structure
2751  *  @txmode: TX operating mode
2752  *  @rxmode: RX operating mode
2753  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at runtime
 *  in order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2757  */
2758 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2759 					  u32 rxmode, u32 chan)
2760 {
2761 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2762 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2763 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2764 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2765 	int rxfifosz = priv->plat->rx_fifo_size;
2766 	int txfifosz = priv->plat->tx_fifo_size;
2767 
2768 	if (rxfifosz == 0)
2769 		rxfifosz = priv->dma_cap.rx_fifo_size;
2770 	if (txfifosz == 0)
2771 		txfifosz = priv->dma_cap.tx_fifo_size;
2772 
2773 	/* Adjust for real per queue fifo size */
2774 	rxfifosz /= rx_channels_count;
2775 	txfifosz /= tx_channels_count;
2776 
2777 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2778 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2779 }
2780 
2781 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2782 {
2783 	int ret;
2784 
2785 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2786 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2787 	if (ret && (ret != -EINVAL)) {
2788 		stmmac_global_err(priv);
2789 		return true;
2790 	}
2791 
2792 	return false;
2793 }
2794 
2795 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2796 {
2797 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2798 						 &priv->xstats, chan, dir);
2799 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2800 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2801 	struct stmmac_channel *ch = &priv->channel[chan];
2802 	struct napi_struct *rx_napi;
2803 	struct napi_struct *tx_napi;
2804 	unsigned long flags;
2805 
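	/* AF_XDP zero-copy queues are serviced by the combined rx-tx NAPI. */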
2806 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2807 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2808 
2809 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2810 		if (napi_schedule_prep(rx_napi)) {
2811 			spin_lock_irqsave(&ch->lock, flags);
2812 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2813 			spin_unlock_irqrestore(&ch->lock, flags);
2814 			__napi_schedule(rx_napi);
2815 		}
2816 	}
2817 
2818 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2819 		if (napi_schedule_prep(tx_napi)) {
2820 			spin_lock_irqsave(&ch->lock, flags);
2821 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2822 			spin_unlock_irqrestore(&ch->lock, flags);
2823 			__napi_schedule(tx_napi);
2824 		}
2825 	}
2826 
2827 	return status;
2828 }
2829 
2830 /**
2831  * stmmac_dma_interrupt - DMA ISR
2832  * @priv: driver private structure
2833  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
2836  */
2837 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2838 {
2839 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2840 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2841 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2842 				tx_channel_count : rx_channel_count;
2843 	u32 chan;
2844 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2845 
2846 	/* Make sure we never check beyond our status buffer. */
2847 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2848 		channels_to_check = ARRAY_SIZE(status);
2849 
2850 	for (chan = 0; chan < channels_to_check; chan++)
2851 		status[chan] = stmmac_napi_check(priv, chan,
2852 						 DMA_DIR_RXTX);
2853 
2854 	for (chan = 0; chan < tx_channel_count; chan++) {
2855 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2856 			/* Try to bump up the dma threshold on this failure */
2857 			stmmac_bump_dma_threshold(priv, chan);
2858 		} else if (unlikely(status[chan] == tx_hard_error)) {
2859 			stmmac_tx_err(priv, chan);
2860 		}
2861 	}
2862 }
2863 
2864 /**
 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2868  */
2869 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2870 {
2871 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2872 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2873 
2874 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2875 
2876 	if (priv->dma_cap.rmon) {
2877 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2878 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else {
		netdev_info(priv->dev, "No MAC Management Counters available\n");
	}
2881 }
2882 
2883 /**
2884  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2885  * @priv: driver private structure
2886  * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional features/functions.
 *  This can also be used to override the value passed through the
 *  platform, which is still necessary for old MAC10/100 and GMAC chips.
2891  */
2892 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2893 {
2894 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2895 }
2896 
2897 /**
2898  * stmmac_check_ether_addr - check if the MAC addr is valid
2899  * @priv: driver private structure
2900  * Description:
 * it verifies whether the MAC address is valid; in case of failure it
 * generates a random MAC address
2903  */
2904 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2905 {
2906 	u8 addr[ETH_ALEN];
2907 
2908 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2909 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2910 		if (is_valid_ether_addr(addr))
2911 			eth_hw_addr_set(priv->dev, addr);
2912 		else
2913 			eth_hw_addr_random(priv->dev);
2914 		dev_info(priv->device, "device MAC address %pM\n",
2915 			 priv->dev->dev_addr);
2916 	}
2917 }
2918 
2919 /**
2920  * stmmac_init_dma_engine - DMA init.
2921  * @priv: driver private structure
2922  * Description:
2923  * It inits the DMA invoking the specific MAC/GMAC callback.
2924  * Some DMA parameters can be passed from the platform;
 * if these are not passed, a default is kept for the MAC or GMAC.
2926  */
2927 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2928 {
2929 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2930 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2931 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2932 	struct stmmac_rx_queue *rx_q;
2933 	struct stmmac_tx_queue *tx_q;
2934 	u32 chan = 0;
2935 	int atds = 0;
2936 	int ret = 0;
2937 
2938 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2939 		dev_err(priv->device, "Invalid DMA configuration\n");
2940 		return -EINVAL;
2941 	}
2942 
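	/* ATDS tells the DMA to use the alternate (larger) descriptor size
	 * when ring mode is used together with extended descriptors.
	 */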
2943 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2944 		atds = 1;
2945 
2946 	ret = stmmac_reset(priv, priv->ioaddr);
2947 	if (ret) {
2948 		dev_err(priv->device, "Failed to reset the dma\n");
2949 		return ret;
2950 	}
2951 
2952 	/* DMA Configuration */
2953 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2954 
2955 	if (priv->plat->axi)
2956 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2957 
2958 	/* DMA CSR Channel configuration */
2959 	for (chan = 0; chan < dma_csr_ch; chan++) {
2960 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2961 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2962 	}
2963 
2964 	/* DMA RX Channel Configuration */
2965 	for (chan = 0; chan < rx_channels_count; chan++) {
2966 		rx_q = &priv->dma_conf.rx_queue[chan];
2967 
2968 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2969 				    rx_q->dma_rx_phy, chan);
2970 
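		/* Point the RX tail just past the descriptors that actually
		 * own a buffer; with an XSK pool the ring may be only
		 * partially filled.
		 */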
2971 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2972 				     (rx_q->buf_alloc_num *
2973 				      sizeof(struct dma_desc));
2974 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2975 				       rx_q->rx_tail_addr, chan);
2976 	}
2977 
2978 	/* DMA TX Channel Configuration */
2979 	for (chan = 0; chan < tx_channels_count; chan++) {
2980 		tx_q = &priv->dma_conf.tx_queue[chan];
2981 
2982 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2983 				    tx_q->dma_tx_phy, chan);
2984 
2985 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2986 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2987 				       tx_q->tx_tail_addr, chan);
2988 	}
2989 
2990 	return ret;
2991 }
2992 
2993 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2994 {
2995 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2996 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
2997 
2998 	if (!tx_coal_timer)
2999 		return;
3000 
3001 	hrtimer_start(&tx_q->txtimer,
3002 		      STMMAC_COAL_TIMER(tx_coal_timer),
3003 		      HRTIMER_MODE_REL);
3004 }
3005 
3006 /**
3007  * stmmac_tx_timer - mitigation sw timer for tx.
3008  * @t: data pointer
3009  * Description:
3010  * This is the timer handler to directly invoke the stmmac_tx_clean.
3011  */
3012 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3013 {
3014 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3015 	struct stmmac_priv *priv = tx_q->priv_data;
3016 	struct stmmac_channel *ch;
3017 	struct napi_struct *napi;
3018 
3019 	ch = &priv->channel[tx_q->queue_index];
3020 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3021 
3022 	if (likely(napi_schedule_prep(napi))) {
3023 		unsigned long flags;
3024 
3025 		spin_lock_irqsave(&ch->lock, flags);
3026 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3027 		spin_unlock_irqrestore(&ch->lock, flags);
3028 		__napi_schedule(napi);
3029 	}
3030 
3031 	return HRTIMER_NORESTART;
3032 }
3033 
3034 /**
3035  * stmmac_init_coalesce - init mitigation options.
3036  * @priv: driver private structure
3037  * Description:
3038  * This inits the coalesce parameters: i.e. timer rate,
3039  * timer handler and default threshold used for enabling the
3040  * interrupt on completion bit.
3041  */
3042 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3043 {
3044 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3045 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3046 	u32 chan;
3047 
3048 	for (chan = 0; chan < tx_channel_count; chan++) {
3049 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3050 
3051 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3052 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3053 
3054 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3055 		tx_q->txtimer.function = stmmac_tx_timer;
3056 	}
3057 
3058 	for (chan = 0; chan < rx_channel_count; chan++)
3059 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3060 }
3061 
3062 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3063 {
3064 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3065 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3066 	u32 chan;
3067 
3068 	/* set TX ring length */
3069 	for (chan = 0; chan < tx_channels_count; chan++)
3070 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3071 				       (priv->dma_conf.dma_tx_size - 1), chan);
3072 
3073 	/* set RX ring length */
3074 	for (chan = 0; chan < rx_channels_count; chan++)
3075 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3076 				       (priv->dma_conf.dma_rx_size - 1), chan);
3077 }
3078 
3079 /**
3080  *  stmmac_set_tx_queue_weight - Set TX queue weight
3081  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
3083  */
3084 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3085 {
3086 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3087 	u32 weight;
3088 	u32 queue;
3089 
3090 	for (queue = 0; queue < tx_queues_count; queue++) {
3091 		weight = priv->plat->tx_queues_cfg[queue].weight;
3092 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3093 	}
3094 }
3095 
3096 /**
3097  *  stmmac_configure_cbs - Configure CBS in TX queue
3098  *  @priv: driver private structure
3099  *  Description: It is used for configuring CBS in AVB TX queues
3100  */
3101 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3102 {
3103 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3104 	u32 mode_to_use;
3105 	u32 queue;
3106 
3107 	/* queue 0 is reserved for legacy traffic */
3108 	for (queue = 1; queue < tx_queues_count; queue++) {
3109 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3110 		if (mode_to_use == MTL_QUEUE_DCB)
3111 			continue;
3112 
3113 		stmmac_config_cbs(priv, priv->hw,
3114 				priv->plat->tx_queues_cfg[queue].send_slope,
3115 				priv->plat->tx_queues_cfg[queue].idle_slope,
3116 				priv->plat->tx_queues_cfg[queue].high_credit,
3117 				priv->plat->tx_queues_cfg[queue].low_credit,
3118 				queue);
3119 	}
3120 }
3121 
3122 /**
3123  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3124  *  @priv: driver private structure
3125  *  Description: It is used for mapping RX queues to RX dma channels
3126  */
3127 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3128 {
3129 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3130 	u32 queue;
3131 	u32 chan;
3132 
3133 	for (queue = 0; queue < rx_queues_count; queue++) {
3134 		chan = priv->plat->rx_queues_cfg[queue].chan;
3135 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3136 	}
3137 }
3138 
3139 /**
3140  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3141  *  @priv: driver private structure
3142  *  Description: It is used for configuring the RX Queue Priority
3143  */
3144 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3145 {
3146 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3147 	u32 queue;
3148 	u32 prio;
3149 
3150 	for (queue = 0; queue < rx_queues_count; queue++) {
3151 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3152 			continue;
3153 
3154 		prio = priv->plat->rx_queues_cfg[queue].prio;
3155 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3156 	}
3157 }
3158 
3159 /**
3160  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3161  *  @priv: driver private structure
3162  *  Description: It is used for configuring the TX Queue Priority
3163  */
3164 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3165 {
3166 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3167 	u32 queue;
3168 	u32 prio;
3169 
3170 	for (queue = 0; queue < tx_queues_count; queue++) {
3171 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3172 			continue;
3173 
3174 		prio = priv->plat->tx_queues_cfg[queue].prio;
3175 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3176 	}
3177 }
3178 
3179 /**
3180  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3181  *  @priv: driver private structure
3182  *  Description: It is used for configuring the RX queue routing
3183  */
3184 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3185 {
3186 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3187 	u32 queue;
3188 	u8 packet;
3189 
3190 	for (queue = 0; queue < rx_queues_count; queue++) {
3191 		/* no specific packet type routing specified for the queue */
3192 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3193 			continue;
3194 
3195 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3196 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3197 	}
3198 }
3199 
3200 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3201 {
3202 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3203 		priv->rss.enable = false;
3204 		return;
3205 	}
3206 
3207 	if (priv->dev->features & NETIF_F_RXHASH)
3208 		priv->rss.enable = true;
3209 	else
3210 		priv->rss.enable = false;
3211 
3212 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3213 			     priv->plat->rx_queues_to_use);
3214 }
3215 
3216 /**
3217  *  stmmac_mtl_configuration - Configure MTL
3218  *  @priv: driver private structure
 *  Description: It is used for configuring MTL
3220  */
3221 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3222 {
3223 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3224 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3225 
3226 	if (tx_queues_count > 1)
3227 		stmmac_set_tx_queue_weight(priv);
3228 
3229 	/* Configure MTL RX algorithms */
3230 	if (rx_queues_count > 1)
3231 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3232 				priv->plat->rx_sched_algorithm);
3233 
3234 	/* Configure MTL TX algorithms */
3235 	if (tx_queues_count > 1)
3236 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3237 				priv->plat->tx_sched_algorithm);
3238 
3239 	/* Configure CBS in AVB TX queues */
3240 	if (tx_queues_count > 1)
3241 		stmmac_configure_cbs(priv);
3242 
3243 	/* Map RX MTL to DMA channels */
3244 	stmmac_rx_queue_dma_chan_map(priv);
3245 
3246 	/* Enable MAC RX Queues */
3247 	stmmac_mac_enable_rx_queues(priv);
3248 
3249 	/* Set RX priorities */
3250 	if (rx_queues_count > 1)
3251 		stmmac_mac_config_rx_queues_prio(priv);
3252 
3253 	/* Set TX priorities */
3254 	if (tx_queues_count > 1)
3255 		stmmac_mac_config_tx_queues_prio(priv);
3256 
3257 	/* Set RX routing */
3258 	if (rx_queues_count > 1)
3259 		stmmac_mac_config_rx_queues_routing(priv);
3260 
3261 	/* Receive Side Scaling */
3262 	if (rx_queues_count > 1)
3263 		stmmac_mac_config_rss(priv);
3264 }
3265 
3266 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3267 {
3268 	if (priv->dma_cap.asp) {
3269 		netdev_info(priv->dev, "Enabling Safety Features\n");
3270 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3271 					  priv->plat->safety_feat_cfg);
3272 	} else {
3273 		netdev_info(priv->dev, "No Safety Features support found\n");
3274 	}
3275 }
3276 
3277 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3278 {
3279 	char *name;
3280 
3281 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3282 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3283 
3284 	name = priv->wq_name;
3285 	sprintf(name, "%s-fpe", priv->dev->name);
3286 
3287 	priv->fpe_wq = create_singlethread_workqueue(name);
3288 	if (!priv->fpe_wq) {
3289 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3290 
3291 		return -ENOMEM;
3292 	}
	netdev_info(priv->dev, "FPE workqueue started\n");
3294 
3295 	return 0;
3296 }
3297 
3298 /**
3299  * stmmac_hw_setup - setup mac in a usable state.
3300  *  @dev : pointer to the device structure.
3301  *  @ptp_register: register PTP if set
3302  *  Description:
 *  this is the main function to set up the HW in a usable state: the DMA
 *  engine is reset, the core registers are configured (e.g. AXI,
 *  checksum features, timers) and the DMA is ready to start receiving
 *  and transmitting.
 *  Return value:
 *  0 on success and an appropriate negative integer as defined in errno.h
 *  on failure.
3310  */
3311 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3312 {
3313 	struct stmmac_priv *priv = netdev_priv(dev);
3314 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3315 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3316 	bool sph_en;
3317 	u32 chan;
3318 	int ret;
3319 
3320 	/* DMA initialization and SW reset */
3321 	ret = stmmac_init_dma_engine(priv);
3322 	if (ret < 0) {
3323 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3324 			   __func__);
3325 		return ret;
3326 	}
3327 
3328 	/* Copy the MAC addr into the HW  */
3329 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3330 
3331 	/* PS and related bits will be programmed according to the speed */
3332 	if (priv->hw->pcs) {
3333 		int speed = priv->plat->mac_port_sel_speed;
3334 
3335 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3336 		    (speed == SPEED_1000)) {
3337 			priv->hw->ps = speed;
3338 		} else {
3339 			dev_warn(priv->device, "invalid port speed\n");
3340 			priv->hw->ps = 0;
3341 		}
3342 	}
3343 
3344 	/* Initialize the MAC Core */
3345 	stmmac_core_init(priv, priv->hw, dev);
3346 
	/* Initialize MTL */
3348 	stmmac_mtl_configuration(priv);
3349 
3350 	/* Initialize Safety Features */
3351 	stmmac_safety_feat_configuration(priv);
3352 
3353 	ret = stmmac_rx_ipc(priv, priv->hw);
3354 	if (!ret) {
3355 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3356 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3357 		priv->hw->rx_csum = 0;
3358 	}
3359 
3360 	/* Enable the MAC Rx/Tx */
3361 	stmmac_mac_set(priv, priv->ioaddr, true);
3362 
3363 	/* Set the HW DMA mode and the COE */
3364 	stmmac_dma_operation_mode(priv);
3365 
3366 	stmmac_mmc_setup(priv);
3367 
3368 	if (ptp_register) {
3369 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3370 		if (ret < 0)
3371 			netdev_warn(priv->dev,
3372 				    "failed to enable PTP reference clock: %pe\n",
3373 				    ERR_PTR(ret));
3374 	}
3375 
3376 	ret = stmmac_init_ptp(priv);
3377 	if (ret == -EOPNOTSUPP)
3378 		netdev_info(priv->dev, "PTP not supported by HW\n");
3379 	else if (ret)
3380 		netdev_warn(priv->dev, "PTP init failed\n");
3381 	else if (ptp_register)
3382 		stmmac_ptp_register(priv);
3383 
3384 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3385 
3386 	/* Convert the timer from msec to usec */
3387 	if (!priv->tx_lpi_timer)
3388 		priv->tx_lpi_timer = eee_timer * 1000;
3389 
3390 	if (priv->use_riwt) {
3391 		u32 queue;
3392 
3393 		for (queue = 0; queue < rx_cnt; queue++) {
3394 			if (!priv->rx_riwt[queue])
3395 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3396 
3397 			stmmac_rx_watchdog(priv, priv->ioaddr,
3398 					   priv->rx_riwt[queue], queue);
3399 		}
3400 	}
3401 
3402 	if (priv->hw->pcs)
3403 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3404 
3405 	/* set TX and RX rings length */
3406 	stmmac_set_rings_length(priv);
3407 
3408 	/* Enable TSO */
3409 	if (priv->tso) {
3410 		for (chan = 0; chan < tx_cnt; chan++) {
3411 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3412 
3413 			/* TSO and TBS cannot co-exist */
3414 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3415 				continue;
3416 
3417 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3418 		}
3419 	}
3420 
3421 	/* Enable Split Header */
3422 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3423 	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

3427 	/* VLAN Tag Insertion */
3428 	if (priv->dma_cap.vlins)
3429 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3430 
3431 	/* TBS */
3432 	for (chan = 0; chan < tx_cnt; chan++) {
3433 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3434 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3435 
3436 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3437 	}
3438 
3439 	/* Configure real RX and TX queues */
3440 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3441 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3442 
3443 	/* Start the ball rolling... */
3444 	stmmac_start_all_dma(priv);
3445 
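	/* Frame Preemption: start the FPE workqueue and, if enabled by the
	 * platform, initiate the handshake with the link partner.
	 */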
3446 	if (priv->dma_cap.fpesel) {
3447 		stmmac_fpe_start_wq(priv);
3448 
3449 		if (priv->plat->fpe_cfg->enable)
3450 			stmmac_fpe_handshake(priv, true);
3451 	}
3452 
3453 	return 0;
3454 }
3455 
3456 static void stmmac_hw_teardown(struct net_device *dev)
3457 {
3458 	struct stmmac_priv *priv = netdev_priv(dev);
3459 
3460 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3461 }
3462 
3463 static void stmmac_free_irq(struct net_device *dev,
3464 			    enum request_irq_err irq_err, int irq_idx)
3465 {
3466 	struct stmmac_priv *priv = netdev_priv(dev);
3467 	int j;
3468 
3469 	switch (irq_err) {
3470 	case REQ_IRQ_ERR_ALL:
3471 		irq_idx = priv->plat->tx_queues_to_use;
3472 		fallthrough;
3473 	case REQ_IRQ_ERR_TX:
3474 		for (j = irq_idx - 1; j >= 0; j--) {
3475 			if (priv->tx_irq[j] > 0) {
3476 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3477 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3478 			}
3479 		}
3480 		irq_idx = priv->plat->rx_queues_to_use;
3481 		fallthrough;
3482 	case REQ_IRQ_ERR_RX:
3483 		for (j = irq_idx - 1; j >= 0; j--) {
3484 			if (priv->rx_irq[j] > 0) {
3485 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3486 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3487 			}
3488 		}
3489 
3490 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3491 			free_irq(priv->sfty_ue_irq, dev);
3492 		fallthrough;
3493 	case REQ_IRQ_ERR_SFTY_UE:
3494 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3495 			free_irq(priv->sfty_ce_irq, dev);
3496 		fallthrough;
3497 	case REQ_IRQ_ERR_SFTY_CE:
3498 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3499 			free_irq(priv->lpi_irq, dev);
3500 		fallthrough;
3501 	case REQ_IRQ_ERR_LPI:
3502 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3503 			free_irq(priv->wol_irq, dev);
3504 		fallthrough;
3505 	case REQ_IRQ_ERR_WOL:
3506 		free_irq(dev->irq, dev);
3507 		fallthrough;
3508 	case REQ_IRQ_ERR_MAC:
3509 	case REQ_IRQ_ERR_NO:
3510 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3511 		break;
3512 	}
3513 }
3514 
3515 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3516 {
3517 	struct stmmac_priv *priv = netdev_priv(dev);
3518 	enum request_irq_err irq_err;
3519 	cpumask_t cpu_mask;
3520 	int irq_idx = 0;
3521 	char *int_name;
3522 	int ret;
3523 	int i;
3524 
3525 	/* For common interrupt */
3526 	int_name = priv->int_name_mac;
3527 	sprintf(int_name, "%s:%s", dev->name, "mac");
3528 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3529 			  0, int_name, dev);
3530 	if (unlikely(ret < 0)) {
3531 		netdev_err(priv->dev,
3532 			   "%s: alloc mac MSI %d (error: %d)\n",
3533 			   __func__, dev->irq, ret);
3534 		irq_err = REQ_IRQ_ERR_MAC;
3535 		goto irq_error;
3536 	}
3537 
3538 	/* Request the Wake IRQ in case another line
3539 	 * is used for WoL
3540 	 */
3541 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3542 		int_name = priv->int_name_wol;
3543 		sprintf(int_name, "%s:%s", dev->name, "wol");
3544 		ret = request_irq(priv->wol_irq,
3545 				  stmmac_mac_interrupt,
3546 				  0, int_name, dev);
3547 		if (unlikely(ret < 0)) {
3548 			netdev_err(priv->dev,
3549 				   "%s: alloc wol MSI %d (error: %d)\n",
3550 				   __func__, priv->wol_irq, ret);
3551 			irq_err = REQ_IRQ_ERR_WOL;
3552 			goto irq_error;
3553 		}
3554 	}
3555 
3556 	/* Request the LPI IRQ in case another line
3557 	 * is used for LPI
3558 	 */
3559 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3560 		int_name = priv->int_name_lpi;
3561 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3562 		ret = request_irq(priv->lpi_irq,
3563 				  stmmac_mac_interrupt,
3564 				  0, int_name, dev);
3565 		if (unlikely(ret < 0)) {
3566 			netdev_err(priv->dev,
3567 				   "%s: alloc lpi MSI %d (error: %d)\n",
3568 				   __func__, priv->lpi_irq, ret);
3569 			irq_err = REQ_IRQ_ERR_LPI;
3570 			goto irq_error;
3571 		}
3572 	}
3573 
3574 	/* Request the Safety Feature Correctable Error line in
3575 	 * case another line is used
3576 	 */
3577 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3578 		int_name = priv->int_name_sfty_ce;
3579 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3580 		ret = request_irq(priv->sfty_ce_irq,
3581 				  stmmac_safety_interrupt,
3582 				  0, int_name, dev);
3583 		if (unlikely(ret < 0)) {
3584 			netdev_err(priv->dev,
3585 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3586 				   __func__, priv->sfty_ce_irq, ret);
3587 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3588 			goto irq_error;
3589 		}
3590 	}
3591 
3592 	/* Request the Safety Feature Uncorrectable Error line in
3593 	 * case another line is used
3594 	 */
3595 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3596 		int_name = priv->int_name_sfty_ue;
3597 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3598 		ret = request_irq(priv->sfty_ue_irq,
3599 				  stmmac_safety_interrupt,
3600 				  0, int_name, dev);
3601 		if (unlikely(ret < 0)) {
3602 			netdev_err(priv->dev,
3603 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3604 				   __func__, priv->sfty_ue_irq, ret);
3605 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3606 			goto irq_error;
3607 		}
3608 	}
3609 
3610 	/* Request Rx MSI irq */
3611 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3612 		if (i >= MTL_MAX_RX_QUEUES)
3613 			break;
3614 		if (priv->rx_irq[i] == 0)
3615 			continue;
3616 
3617 		int_name = priv->int_name_rx_irq[i];
3618 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3619 		ret = request_irq(priv->rx_irq[i],
3620 				  stmmac_msi_intr_rx,
3621 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3622 		if (unlikely(ret < 0)) {
3623 			netdev_err(priv->dev,
3624 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3625 				   __func__, i, priv->rx_irq[i], ret);
3626 			irq_err = REQ_IRQ_ERR_RX;
3627 			irq_idx = i;
3628 			goto irq_error;
3629 		}
3630 		cpumask_clear(&cpu_mask);
3631 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3632 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3633 	}
3634 
3635 	/* Request Tx MSI irq */
3636 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3637 		if (i >= MTL_MAX_TX_QUEUES)
3638 			break;
3639 		if (priv->tx_irq[i] == 0)
3640 			continue;
3641 
3642 		int_name = priv->int_name_tx_irq[i];
3643 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3644 		ret = request_irq(priv->tx_irq[i],
3645 				  stmmac_msi_intr_tx,
3646 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3647 		if (unlikely(ret < 0)) {
3648 			netdev_err(priv->dev,
3649 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3650 				   __func__, i, priv->tx_irq[i], ret);
3651 			irq_err = REQ_IRQ_ERR_TX;
3652 			irq_idx = i;
3653 			goto irq_error;
3654 		}
3655 		cpumask_clear(&cpu_mask);
3656 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3657 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3658 	}
3659 
3660 	return 0;
3661 
3662 irq_error:
3663 	stmmac_free_irq(dev, irq_err, irq_idx);
3664 	return ret;
3665 }
3666 
3667 static int stmmac_request_irq_single(struct net_device *dev)
3668 {
3669 	struct stmmac_priv *priv = netdev_priv(dev);
3670 	enum request_irq_err irq_err;
3671 	int ret;
3672 
3673 	ret = request_irq(dev->irq, stmmac_interrupt,
3674 			  IRQF_SHARED, dev->name, dev);
3675 	if (unlikely(ret < 0)) {
3676 		netdev_err(priv->dev,
3677 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3678 			   __func__, dev->irq, ret);
3679 		irq_err = REQ_IRQ_ERR_MAC;
3680 		goto irq_error;
3681 	}
3682 
3683 	/* Request the Wake IRQ in case another line
3684 	 * is used for WoL
3685 	 */
3686 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3687 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3688 				  IRQF_SHARED, dev->name, dev);
3689 		if (unlikely(ret < 0)) {
3690 			netdev_err(priv->dev,
3691 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3692 				   __func__, priv->wol_irq, ret);
3693 			irq_err = REQ_IRQ_ERR_WOL;
3694 			goto irq_error;
3695 		}
3696 	}
3697 
3698 	/* Request the LPI IRQ in case another line is used for LPI */
3699 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3700 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3701 				  IRQF_SHARED, dev->name, dev);
3702 		if (unlikely(ret < 0)) {
3703 			netdev_err(priv->dev,
3704 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3705 				   __func__, priv->lpi_irq, ret);
3706 			irq_err = REQ_IRQ_ERR_LPI;
3707 			goto irq_error;
3708 		}
3709 	}
3710 
3711 	return 0;
3712 
3713 irq_error:
3714 	stmmac_free_irq(dev, irq_err, 0);
3715 	return ret;
3716 }
3717 
3718 static int stmmac_request_irq(struct net_device *dev)
3719 {
3720 	struct stmmac_priv *priv = netdev_priv(dev);
3721 	int ret;
3722 
3723 	/* Request the IRQ lines */
3724 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3725 		ret = stmmac_request_irq_multi_msi(dev);
3726 	else
3727 		ret = stmmac_request_irq_single(dev);
3728 
3729 	return ret;
3730 }
3731 
3732 /**
3733  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3734  *  @priv: driver private structure
3735  *  @mtu: MTU to setup the dma queue and buf with
3736  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3737  *  Allocate the Tx/Rx DMA queues and initialize them.
3738  *  Return value:
3739  *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3740  */
3741 static struct stmmac_dma_conf *
3742 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3743 {
3744 	struct stmmac_dma_conf *dma_conf;
3745 	int chan, bfsize, ret;
3746 
3747 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3748 	if (!dma_conf) {
3749 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3750 			   __func__);
3751 		return ERR_PTR(-ENOMEM);
3752 	}
3753 
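	/* Use a 16KiB buffer when the MTU requires it and the HW supports it;
	 * otherwise derive the buffer size from the MTU.
	 */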
3754 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3755 	if (bfsize < 0)
3756 		bfsize = 0;
3757 
3758 	if (bfsize < BUF_SIZE_16KiB)
3759 		bfsize = stmmac_set_bfsize(mtu, 0);
3760 
3761 	dma_conf->dma_buf_sz = bfsize;
3762 	/* Choose the Tx/Rx ring sizes from the ones already defined
3763 	 * in the priv struct, if any.
3764 	 */
3765 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3766 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3767 
3768 	if (!dma_conf->dma_tx_size)
3769 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3770 	if (!dma_conf->dma_rx_size)
3771 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3772 
3773 	/* Check for TBS availability before allocating the TX descriptors */
3774 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3775 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3776 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3777 
3778 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3779 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3780 	}
3781 
3782 	ret = alloc_dma_desc_resources(priv, dma_conf);
3783 	if (ret < 0) {
3784 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3785 			   __func__);
3786 		goto alloc_error;
3787 	}
3788 
3789 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3790 	if (ret < 0) {
3791 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3792 			   __func__);
3793 		goto init_error;
3794 	}
3795 
3796 	return dma_conf;
3797 
3798 init_error:
3799 	free_dma_desc_resources(priv, dma_conf);
3800 alloc_error:
3801 	kfree(dma_conf);
3802 	return ERR_PTR(ret);
3803 }
3804 
3805 /**
3806  *  __stmmac_open - open entry point of the driver
3807  *  @dev : pointer to the device structure.
3808  *  @dma_conf : structure holding the DMA configuration and queue data
3809  *  Description:
3810  *  This function is the open entry point of the driver.
3811  *  Return value:
3812  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3813  *  file on failure.
3814  */
3815 static int __stmmac_open(struct net_device *dev,
3816 			 struct stmmac_dma_conf *dma_conf)
3817 {
3818 	struct stmmac_priv *priv = netdev_priv(dev);
3819 	int mode = priv->plat->phy_interface;
3820 	u32 chan;
3821 	int ret;
3822 
3823 	ret = pm_runtime_resume_and_get(priv->device);
3824 	if (ret < 0)
3825 		return ret;
3826 
3827 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3828 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3829 	    (!priv->hw->xpcs ||
3830 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3831 	    !priv->hw->lynx_pcs) {
3832 		ret = stmmac_init_phy(dev);
3833 		if (ret) {
3834 			netdev_err(priv->dev,
3835 				   "%s: Cannot attach to PHY (error: %d)\n",
3836 				   __func__, ret);
3837 			goto init_phy_error;
3838 		}
3839 	}
3840 
3841 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3842 
3843 	buf_sz = dma_conf->dma_buf_sz;
3844 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3845 
3846 	stmmac_reset_queues_param(priv);
3847 
3848 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3849 	    priv->plat->serdes_powerup) {
3850 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3851 		if (ret < 0) {
3852 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3853 				   __func__);
3854 			goto init_error;
3855 		}
3856 	}
3857 
3858 	ret = stmmac_hw_setup(dev, true);
3859 	if (ret < 0) {
3860 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3861 		goto init_error;
3862 	}
3863 
3864 	stmmac_init_coalesce(priv);
3865 
3866 	phylink_start(priv->phylink);
3867 	/* We may have called phylink_speed_down before */
3868 	phylink_speed_up(priv->phylink);
3869 
3870 	ret = stmmac_request_irq(dev);
3871 	if (ret)
3872 		goto irq_error;
3873 
3874 	stmmac_enable_all_queues(priv);
3875 	netif_tx_start_all_queues(priv->dev);
3876 	stmmac_enable_all_dma_irq(priv);
3877 
3878 	return 0;
3879 
3880 irq_error:
3881 	phylink_stop(priv->phylink);
3882 
3883 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3884 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3885 
3886 	stmmac_hw_teardown(dev);
3887 init_error:
3888 	phylink_disconnect_phy(priv->phylink);
3889 init_phy_error:
3890 	pm_runtime_put(priv->device);
3891 	return ret;
3892 }
3893 
3894 static int stmmac_open(struct net_device *dev)
3895 {
3896 	struct stmmac_priv *priv = netdev_priv(dev);
3897 	struct stmmac_dma_conf *dma_conf;
3898 	int ret;
3899 
3900 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3901 	if (IS_ERR(dma_conf))
3902 		return PTR_ERR(dma_conf);
3903 
3904 	ret = __stmmac_open(dev, dma_conf);
3905 	if (ret)
3906 		free_dma_desc_resources(priv, dma_conf);
3907 
3908 	kfree(dma_conf);
3909 	return ret;
3910 }
3911 
3912 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3913 {
3914 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3915 
3916 	if (priv->fpe_wq)
3917 		destroy_workqueue(priv->fpe_wq);
3918 
3919 	netdev_info(priv->dev, "FPE workqueue stopped\n");
3920 }
3921 
3922 /**
3923  *  stmmac_release - close entry point of the driver
3924  *  @dev : device pointer.
3925  *  Description:
3926  *  This is the stop entry point of the driver.
3927  */
3928 static int stmmac_release(struct net_device *dev)
3929 {
3930 	struct stmmac_priv *priv = netdev_priv(dev);
3931 	u32 chan;
3932 
3933 	if (device_may_wakeup(priv->device))
3934 		phylink_speed_down(priv->phylink, false);
3935 	/* Stop and disconnect the PHY */
3936 	phylink_stop(priv->phylink);
3937 	phylink_disconnect_phy(priv->phylink);
3938 
3939 	stmmac_disable_all_queues(priv);
3940 
3941 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3942 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3943 
3944 	netif_tx_disable(dev);
3945 
3946 	/* Free the IRQ lines */
3947 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3948 
3949 	if (priv->eee_enabled) {
3950 		priv->tx_path_in_lpi_mode = false;
3951 		del_timer_sync(&priv->eee_ctrl_timer);
3952 	}
3953 
3954 	/* Stop TX/RX DMA and clear the descriptors */
3955 	stmmac_stop_all_dma(priv);
3956 
3957 	/* Release and free the Rx/Tx resources */
3958 	free_dma_desc_resources(priv, &priv->dma_conf);
3959 
3960 	/* Disable the MAC Rx/Tx */
3961 	stmmac_mac_set(priv, priv->ioaddr, false);
3962 
3963 	/* Powerdown Serdes if there is */
3964 	if (priv->plat->serdes_powerdown)
3965 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3966 
3967 	netif_carrier_off(dev);
3968 
3969 	stmmac_release_ptp(priv);
3970 
3971 	pm_runtime_put(priv->device);
3972 
3973 	if (priv->dma_cap.fpesel)
3974 		stmmac_fpe_stop_wq(priv);
3975 
3976 	return 0;
3977 }
3978 
3979 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3980 			       struct stmmac_tx_queue *tx_q)
3981 {
3982 	u16 tag = 0x0, inner_tag = 0x0;
3983 	u32 inner_type = 0x0;
3984 	struct dma_desc *p;
3985 
3986 	if (!priv->dma_cap.vlins)
3987 		return false;
3988 	if (!skb_vlan_tag_present(skb))
3989 		return false;
3990 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3991 		inner_tag = skb_vlan_tag_get(skb);
3992 		inner_type = STMMAC_VLAN_INSERT;
3993 	}
3994 
3995 	tag = skb_vlan_tag_get(skb);
3996 
3997 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3998 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3999 	else
4000 		p = &tx_q->dma_tx[tx_q->cur_tx];
4001 
4002 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4003 		return false;
4004 
4005 	stmmac_set_tx_owner(priv, p);
4006 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4007 	return true;
4008 }
4009 
4010 /**
4011  *  stmmac_tso_allocator - allocate and fill the TSO payload descriptors
4012  *  @priv: driver private structure
4013  *  @des: buffer start address
4014  *  @total_len: total length to fill in descriptors
4015  *  @last_segment: condition for the last descriptor
4016  *  @queue: TX queue index
4017  *  Description:
4018  *  This function fills descriptors and requests new descriptors according
4019  *  to the buffer length to fill
4020  */
4021 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4022 				 int total_len, bool last_segment, u32 queue)
4023 {
4024 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4025 	struct dma_desc *desc;
4026 	u32 buff_size;
4027 	int tmp_len;
4028 
4029 	tmp_len = total_len;
4030 
4031 	while (tmp_len > 0) {
4032 		dma_addr_t curr_addr;
4033 
4034 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4035 						priv->dma_conf.dma_tx_size);
4036 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4037 
4038 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4039 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4040 		else
4041 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4042 
4043 		curr_addr = des + (total_len - tmp_len);
4044 		if (priv->dma_cap.addr64 <= 32)
4045 			desc->des0 = cpu_to_le32(curr_addr);
4046 		else
4047 			stmmac_set_desc_addr(priv, desc, curr_addr);
4048 
4049 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4050 			    TSO_MAX_BUFF_SIZE : tmp_len;
4051 
4052 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4053 				0, 1,
4054 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4055 				0, 0);
4056 
4057 		tmp_len -= TSO_MAX_BUFF_SIZE;
4058 	}
4059 }
4060 
4061 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4062 {
4063 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4064 	int desc_size;
4065 
4066 	if (likely(priv->extend_desc))
4067 		desc_size = sizeof(struct dma_extended_desc);
4068 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4069 		desc_size = sizeof(struct dma_edesc);
4070 	else
4071 		desc_size = sizeof(struct dma_desc);
4072 
4073 	/* The own bit must be the latest setting done when preparing the
4074 	 * descriptor and then a barrier is needed to make sure that
4075 	 * everything is coherent before granting ownership to the DMA engine.
4076 	 */
4077 	wmb();
4078 
4079 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4080 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4081 }
4082 
4083 /**
4084  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4085  *  @skb : the socket buffer
4086  *  @dev : device pointer
4087  *  Description: this is the transmit function that is called on TSO frames
4088  *  (support available on GMAC4 and newer chips).
4089  *  The diagram below shows the ring programming in case of TSO frames:
4090  *
4091  *  First Descriptor
4092  *   --------
4093  *   | DES0 |---> buffer1 = L2/L3/L4 header
4094  *   | DES1 |---> TCP Payload (can continue on next descr...)
4095  *   | DES2 |---> buffer 1 and 2 len
4096  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4097  *   --------
4098  *	|
4099  *     ...
4100  *	|
4101  *   --------
4102  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4103  *   | DES1 | --|
4104  *   | DES2 | --> buffer 1 and 2 len
4105  *   | DES3 |
4106  *   --------
4107  *
4108  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs programming when the MSS changes.
4109  */
4110 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4111 {
4112 	struct dma_desc *desc, *first, *mss_desc = NULL;
4113 	struct stmmac_priv *priv = netdev_priv(dev);
4114 	int nfrags = skb_shinfo(skb)->nr_frags;
4115 	u32 queue = skb_get_queue_mapping(skb);
4116 	unsigned int first_entry, tx_packets;
4117 	int tmp_pay_len = 0, first_tx;
4118 	struct stmmac_tx_queue *tx_q;
4119 	bool has_vlan, set_ic;
4120 	u8 proto_hdr_len, hdr;
4121 	unsigned long flags;
4122 	u32 pay_len, mss;
4123 	dma_addr_t des;
4124 	int i;
4125 
4126 	tx_q = &priv->dma_conf.tx_queue[queue];
4127 	first_tx = tx_q->cur_tx;
4128 
4129 	/* Compute header lengths */
4130 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4131 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4132 		hdr = sizeof(struct udphdr);
4133 	} else {
4134 		proto_hdr_len = skb_tcp_all_headers(skb);
4135 		hdr = tcp_hdrlen(skb);
4136 	}
4137 
4138 	/* Desc availability based on the threshold should be safe enough */
4139 	if (unlikely(stmmac_tx_avail(priv, queue) <
4140 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4141 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4142 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4143 								queue));
4144 			/* This is a hard error, log it. */
4145 			netdev_err(priv->dev,
4146 				   "%s: Tx Ring full when queue awake\n",
4147 				   __func__);
4148 		}
4149 		return NETDEV_TX_BUSY;
4150 	}
4151 
4152 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4153 
4154 	mss = skb_shinfo(skb)->gso_size;
4155 
4156 	/* set new MSS value if needed */
4157 	if (mss != tx_q->mss) {
4158 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4159 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4160 		else
4161 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4162 
4163 		stmmac_set_mss(priv, mss_desc, mss);
4164 		tx_q->mss = mss;
4165 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4166 						priv->dma_conf.dma_tx_size);
4167 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4168 	}
4169 
4170 	if (netif_msg_tx_queued(priv)) {
4171 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4172 			__func__, hdr, proto_hdr_len, pay_len, mss);
4173 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4174 			skb->data_len);
4175 	}
4176 
4177 	/* Check if VLAN can be inserted by HW */
4178 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4179 
4180 	first_entry = tx_q->cur_tx;
4181 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4182 
4183 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4184 		desc = &tx_q->dma_entx[first_entry].basic;
4185 	else
4186 		desc = &tx_q->dma_tx[first_entry];
4187 	first = desc;
4188 
4189 	if (has_vlan)
4190 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4191 
4192 	/* first descriptor: fill Headers on Buf1 */
4193 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4194 			     DMA_TO_DEVICE);
4195 	if (dma_mapping_error(priv->device, des))
4196 		goto dma_map_err;
4197 
4198 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4199 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4200 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4201 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4202 
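	/* With 32-bit addressing, DES0/DES1 carry the header and the payload
	 * start directly; with wider addressing the payload start is handed
	 * to the TSO allocator below instead.
	 */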
4203 	if (priv->dma_cap.addr64 <= 32) {
4204 		first->des0 = cpu_to_le32(des);
4205 
4206 		/* Fill start of payload in buff2 of first descriptor */
4207 		if (pay_len)
4208 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4209 
4210 		/* If needed take extra descriptors to fill the remaining payload */
4211 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4212 	} else {
4213 		stmmac_set_desc_addr(priv, first, des);
4214 		tmp_pay_len = pay_len;
4215 		des += proto_hdr_len;
4216 		pay_len = 0;
4217 	}
4218 
4219 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4220 
4221 	/* Prepare fragments */
4222 	for (i = 0; i < nfrags; i++) {
4223 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4224 
4225 		des = skb_frag_dma_map(priv->device, frag, 0,
4226 				       skb_frag_size(frag),
4227 				       DMA_TO_DEVICE);
4228 		if (dma_mapping_error(priv->device, des))
4229 			goto dma_map_err;
4230 
4231 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4232 				     (i == nfrags - 1), queue);
4233 
4234 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4235 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4236 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4237 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4238 	}
4239 
4240 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4241 
4242 	/* Only the last descriptor gets to point to the skb. */
4243 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4244 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4245 
4246 	/* Manage tx mitigation */
4247 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4248 	tx_q->tx_count_frames += tx_packets;
4249 
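	/* Request a completion interrupt when a HW timestamp is needed or
	 * when the frame coalescing threshold has been crossed.
	 */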
4250 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4251 		set_ic = true;
4252 	else if (!priv->tx_coal_frames[queue])
4253 		set_ic = false;
4254 	else if (tx_packets > priv->tx_coal_frames[queue])
4255 		set_ic = true;
4256 	else if ((tx_q->tx_count_frames %
4257 		  priv->tx_coal_frames[queue]) < tx_packets)
4258 		set_ic = true;
4259 	else
4260 		set_ic = false;
4261 
4262 	if (set_ic) {
4263 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4264 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4265 		else
4266 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4267 
4268 		tx_q->tx_count_frames = 0;
4269 		stmmac_set_tx_ic(priv, desc);
4270 	}
4271 
4272 	/* We've used all descriptors we need for this skb, however,
4273 	 * advance cur_tx so that it references a fresh descriptor.
4274 	 * ndo_start_xmit will fill this descriptor the next time it's
4275 	 * called and stmmac_tx_clean may clean up to this descriptor.
4276 	 */
4277 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4278 
4279 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4280 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4281 			  __func__);
4282 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4283 	}
4284 
4285 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4286 	tx_q->txq_stats.tx_bytes += skb->len;
4287 	tx_q->txq_stats.tx_tso_frames++;
4288 	tx_q->txq_stats.tx_tso_nfrags += nfrags;
4289 	if (set_ic)
4290 		tx_q->txq_stats.tx_set_ic_bit++;
4291 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4292 
4293 	if (priv->sarc_type)
4294 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4295 
4296 	skb_tx_timestamp(skb);
4297 
4298 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4299 		     priv->hwts_tx_en)) {
4300 		/* declare that device is doing timestamping */
4301 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4302 		stmmac_enable_tx_timestamp(priv, first);
4303 	}
4304 
4305 	/* Complete the first descriptor before granting the DMA */
4306 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4307 			proto_hdr_len,
4308 			pay_len,
4309 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4310 			hdr / 4, (skb->len - proto_hdr_len));
4311 
4312 	/* If context desc is used to change MSS */
4313 	if (mss_desc) {
4314 		/* Make sure that the first descriptor has been completely
4315 		 * written, including its own bit. This is because the MSS
4316 		 * descriptor actually precedes the first descriptor, so we
4317 		 * need to make sure its own bit is the last thing written.
4318 		 */
4319 		dma_wmb();
4320 		stmmac_set_tx_owner(priv, mss_desc);
4321 	}
4322 
4323 	if (netif_msg_pktdata(priv)) {
4324 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4325 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4326 			tx_q->cur_tx, first, nfrags);
4327 		pr_info(">>> frame to be transmitted: ");
4328 		print_pkt(skb->data, skb_headlen(skb));
4329 	}
4330 
4331 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4332 
4333 	stmmac_flush_tx_descriptors(priv, queue);
4334 	stmmac_tx_timer_arm(priv, queue);
4335 
4336 	return NETDEV_TX_OK;
4337 
4338 dma_map_err:
4339 	dev_err(priv->device, "Tx dma map failed\n");
4340 	dev_kfree_skb(skb);
4341 	priv->xstats.tx_dropped++;
4342 	return NETDEV_TX_OK;
4343 }
4344 
4345 /**
4346  *  stmmac_xmit - Tx entry point of the driver
4347  *  @skb : the socket buffer
4348  *  @dev : device pointer
4349  *  Description : this is the tx entry point of the driver.
4350  *  It programs the chain or the ring and supports oversized frames
4351  *  and SG feature.
4352  */
4353 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4354 {
4355 	unsigned int first_entry, tx_packets, enh_desc;
4356 	struct stmmac_priv *priv = netdev_priv(dev);
4357 	unsigned int nopaged_len = skb_headlen(skb);
4358 	int i, csum_insertion = 0, is_jumbo = 0;
4359 	u32 queue = skb_get_queue_mapping(skb);
4360 	int nfrags = skb_shinfo(skb)->nr_frags;
4361 	int gso = skb_shinfo(skb)->gso_type;
4362 	struct dma_edesc *tbs_desc = NULL;
4363 	struct dma_desc *desc, *first;
4364 	struct stmmac_tx_queue *tx_q;
4365 	bool has_vlan, set_ic;
4366 	int entry, first_tx;
4367 	unsigned long flags;
4368 	dma_addr_t des;
4369 
4370 	tx_q = &priv->dma_conf.tx_queue[queue];
4371 	first_tx = tx_q->cur_tx;
4372 
4373 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4374 		stmmac_disable_eee_mode(priv);
4375 
4376 	/* Manage oversized TCP frames for GMAC4 device */
4377 	if (skb_is_gso(skb) && priv->tso) {
4378 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4379 			return stmmac_tso_xmit(skb, dev);
4380 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4381 			return stmmac_tso_xmit(skb, dev);
4382 	}
4383 
4384 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4385 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4386 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4387 								queue));
4388 			/* This is a hard error, log it. */
4389 			netdev_err(priv->dev,
4390 				   "%s: Tx Ring full when queue awake\n",
4391 				   __func__);
4392 		}
4393 		return NETDEV_TX_BUSY;
4394 	}
4395 
4396 	/* Check if VLAN can be inserted by HW */
4397 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4398 
4399 	entry = tx_q->cur_tx;
4400 	first_entry = entry;
4401 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4402 
4403 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4404 
4405 	if (likely(priv->extend_desc))
4406 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4407 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4408 		desc = &tx_q->dma_entx[entry].basic;
4409 	else
4410 		desc = tx_q->dma_tx + entry;
4411 
4412 	first = desc;
4413 
4414 	if (has_vlan)
4415 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4416 
4417 	enh_desc = priv->plat->enh_desc;
4418 	/* To program the descriptors according to the size of the frame */
4419 	if (enh_desc)
4420 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4421 
4422 	if (unlikely(is_jumbo)) {
4423 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4424 		if (unlikely(entry < 0) && (entry != -EINVAL))
4425 			goto dma_map_err;
4426 	}
4427 
4428 	for (i = 0; i < nfrags; i++) {
4429 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4430 		int len = skb_frag_size(frag);
4431 		bool last_segment = (i == (nfrags - 1));
4432 
4433 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4434 		WARN_ON(tx_q->tx_skbuff[entry]);
4435 
4436 		if (likely(priv->extend_desc))
4437 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4438 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4439 			desc = &tx_q->dma_entx[entry].basic;
4440 		else
4441 			desc = tx_q->dma_tx + entry;
4442 
4443 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4444 				       DMA_TO_DEVICE);
4445 		if (dma_mapping_error(priv->device, des))
4446 			goto dma_map_err; /* should reuse desc w/o issues */
4447 
4448 		tx_q->tx_skbuff_dma[entry].buf = des;
4449 
4450 		stmmac_set_desc_addr(priv, desc, des);
4451 
4452 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4453 		tx_q->tx_skbuff_dma[entry].len = len;
4454 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4455 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4456 
4457 		/* Prepare the descriptor and set the own bit too */
4458 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4459 				priv->mode, 1, last_segment, skb->len);
4460 	}
4461 
4462 	/* Only the last descriptor gets to point to the skb. */
4463 	tx_q->tx_skbuff[entry] = skb;
4464 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4465 
4466 	/* According to the coalesce parameter the IC bit for the latest
4467 	 * segment is reset and the timer re-started to clean the tx status.
4468 	 * This approach takes care of the fragments: desc is the first
4469 	 * element in the non-SG case.
4470 	 */
4471 	tx_packets = (entry + 1) - first_tx;
4472 	tx_q->tx_count_frames += tx_packets;
4473 
4474 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4475 		set_ic = true;
4476 	else if (!priv->tx_coal_frames[queue])
4477 		set_ic = false;
4478 	else if (tx_packets > priv->tx_coal_frames[queue])
4479 		set_ic = true;
4480 	else if ((tx_q->tx_count_frames %
4481 		  priv->tx_coal_frames[queue]) < tx_packets)
4482 		set_ic = true;
4483 	else
4484 		set_ic = false;
4485 
4486 	if (set_ic) {
4487 		if (likely(priv->extend_desc))
4488 			desc = &tx_q->dma_etx[entry].basic;
4489 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4490 			desc = &tx_q->dma_entx[entry].basic;
4491 		else
4492 			desc = &tx_q->dma_tx[entry];
4493 
4494 		tx_q->tx_count_frames = 0;
4495 		stmmac_set_tx_ic(priv, desc);
4496 	}
4497 
4498 	/* We've used all descriptors we need for this skb, however,
4499 	 * advance cur_tx so that it references a fresh descriptor.
4500 	 * ndo_start_xmit will fill this descriptor the next time it's
4501 	 * called and stmmac_tx_clean may clean up to this descriptor.
4502 	 */
4503 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4504 	tx_q->cur_tx = entry;
4505 
4506 	if (netif_msg_pktdata(priv)) {
4507 		netdev_dbg(priv->dev,
4508 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4509 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4510 			   entry, first, nfrags);
4511 
4512 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4513 		print_pkt(skb->data, skb->len);
4514 	}
4515 
4516 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4517 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4518 			  __func__);
4519 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4520 	}
4521 
4522 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4523 	tx_q->txq_stats.tx_bytes += skb->len;
4524 	if (set_ic)
4525 		tx_q->txq_stats.tx_set_ic_bit++;
4526 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4527 
4528 	if (priv->sarc_type)
4529 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4530 
4531 	skb_tx_timestamp(skb);
4532 
4533 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4534 	 * problems because all the descriptors are actually ready to be
4535 	 * passed to the DMA engine.
4536 	 */
4537 	if (likely(!is_jumbo)) {
4538 		bool last_segment = (nfrags == 0);
4539 
4540 		des = dma_map_single(priv->device, skb->data,
4541 				     nopaged_len, DMA_TO_DEVICE);
4542 		if (dma_mapping_error(priv->device, des))
4543 			goto dma_map_err;
4544 
4545 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4546 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4547 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4548 
4549 		stmmac_set_desc_addr(priv, first, des);
4550 
4551 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4552 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4553 
4554 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4555 			     priv->hwts_tx_en)) {
4556 			/* declare that device is doing timestamping */
4557 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4558 			stmmac_enable_tx_timestamp(priv, first);
4559 		}
4560 
4561 		/* Prepare the first descriptor setting the OWN bit too */
4562 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4563 				csum_insertion, priv->mode, 0, last_segment,
4564 				skb->len);
4565 	}
4566 
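	/* With TBS enabled on this queue, program the launch time taken from
	 * skb->tstamp into the enhanced descriptor.
	 */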
4567 	if (tx_q->tbs & STMMAC_TBS_EN) {
4568 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4569 
4570 		tbs_desc = &tx_q->dma_entx[first_entry];
4571 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4572 	}
4573 
4574 	stmmac_set_tx_owner(priv, first);
4575 
4576 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4577 
4578 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4579 
4580 	stmmac_flush_tx_descriptors(priv, queue);
4581 	stmmac_tx_timer_arm(priv, queue);
4582 
4583 	return NETDEV_TX_OK;
4584 
4585 dma_map_err:
4586 	netdev_err(priv->dev, "Tx DMA map failed\n");
4587 	dev_kfree_skb(skb);
4588 	priv->xstats.tx_dropped++;
4589 	return NETDEV_TX_OK;
4590 }
4591 
4592 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4593 {
4594 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4595 	__be16 vlan_proto = veth->h_vlan_proto;
4596 	u16 vlanid;
4597 
4598 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4599 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4600 	    (vlan_proto == htons(ETH_P_8021AD) &&
4601 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4602 		/* pop the vlan tag */
4603 		vlanid = ntohs(veth->h_vlan_TCI);
4604 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4605 		skb_pull(skb, VLAN_HLEN);
4606 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4607 	}
4608 }
4609 
4610 /**
4611  * stmmac_rx_refill - refill used skb preallocated buffers
4612  * @priv: driver private structure
4613  * @queue: RX queue index
4614  * Description : this is to refill the used RX buffers from the page pool
4615  * for the reception process that is based on zero-copy.
4616  */
4617 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4618 {
4619 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4620 	int dirty = stmmac_rx_dirty(priv, queue);
4621 	unsigned int entry = rx_q->dirty_rx;
4622 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4623 
4624 	if (priv->dma_cap.host_dma_width <= 32)
4625 		gfp |= GFP_DMA32;
4626 
4627 	while (dirty-- > 0) {
4628 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4629 		struct dma_desc *p;
4630 		bool use_rx_wd;
4631 
4632 		if (priv->extend_desc)
4633 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4634 		else
4635 			p = rx_q->dma_rx + entry;
4636 
4637 		if (!buf->page) {
4638 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4639 			if (!buf->page)
4640 				break;
4641 		}
4642 
4643 		if (priv->sph && !buf->sec_page) {
4644 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4645 			if (!buf->sec_page)
4646 				break;
4647 
4648 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4649 		}
4650 
4651 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4652 
4653 		stmmac_set_desc_addr(priv, p, buf->addr);
4654 		if (priv->sph)
4655 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4656 		else
4657 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4658 		stmmac_refill_desc3(priv, rx_q, p);
4659 
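		/* Decide whether interrupt mitigation for this descriptor can
		 * rely on the RX watchdog (RIWT) instead of an immediate
		 * completion interrupt.
		 */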
4660 		rx_q->rx_count_frames++;
4661 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4662 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4663 			rx_q->rx_count_frames = 0;
4664 
4665 		use_rx_wd = !priv->rx_coal_frames[queue];
4666 		use_rx_wd |= rx_q->rx_count_frames > 0;
4667 		if (!priv->use_riwt)
4668 			use_rx_wd = false;
4669 
4670 		dma_wmb();
4671 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4672 
4673 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4674 	}
4675 	rx_q->dirty_rx = entry;
4676 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4677 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4678 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4679 }
4680 
4681 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4682 				       struct dma_desc *p,
4683 				       int status, unsigned int len)
4684 {
4685 	unsigned int plen = 0, hlen = 0;
4686 	int coe = priv->hw->rx_csum;
4687 
4688 	/* Not first descriptor, buffer is always zero */
4689 	if (priv->sph && len)
4690 		return 0;
4691 
4692 	/* First descriptor, get split header length */
4693 	stmmac_get_rx_header_len(priv, p, &hlen);
4694 	if (priv->sph && hlen) {
4695 		priv->xstats.rx_split_hdr_pkt_n++;
4696 		return hlen;
4697 	}
4698 
4699 	/* First descriptor, not last descriptor and not split header */
4700 	if (status & rx_not_ls)
4701 		return priv->dma_conf.dma_buf_sz;
4702 
4703 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4704 
4705 	/* First descriptor and last descriptor and not split header */
4706 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4707 }
4708 
4709 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4710 				       struct dma_desc *p,
4711 				       int status, unsigned int len)
4712 {
4713 	int coe = priv->hw->rx_csum;
4714 	unsigned int plen = 0;
4715 
4716 	/* Not split header, buffer is not available */
4717 	if (!priv->sph)
4718 		return 0;
4719 
4720 	/* Not last descriptor */
4721 	if (status & rx_not_ls)
4722 		return priv->dma_conf.dma_buf_sz;
4723 
4724 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4725 
4726 	/* Last descriptor */
4727 	return plen - len;
4728 }
4729 
4730 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4731 				struct xdp_frame *xdpf, bool dma_map)
4732 {
4733 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4734 	unsigned int entry = tx_q->cur_tx;
4735 	struct dma_desc *tx_desc;
4736 	dma_addr_t dma_addr;
4737 	bool set_ic;
4738 
4739 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4740 		return STMMAC_XDP_CONSUMED;
4741 
4742 	if (likely(priv->extend_desc))
4743 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4744 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4745 		tx_desc = &tx_q->dma_entx[entry].basic;
4746 	else
4747 		tx_desc = tx_q->dma_tx + entry;
4748 
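	/* Frames from ndo_xdp_xmit (dma_map == true) must be DMA-mapped here;
	 * XDP_TX frames come from the page pool and are already mapped, so
	 * only a sync for the device is needed.
	 */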
4749 	if (dma_map) {
4750 		dma_addr = dma_map_single(priv->device, xdpf->data,
4751 					  xdpf->len, DMA_TO_DEVICE);
4752 		if (dma_mapping_error(priv->device, dma_addr))
4753 			return STMMAC_XDP_CONSUMED;
4754 
4755 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4756 	} else {
4757 		struct page *page = virt_to_page(xdpf->data);
4758 
4759 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4760 			   xdpf->headroom;
4761 		dma_sync_single_for_device(priv->device, dma_addr,
4762 					   xdpf->len, DMA_BIDIRECTIONAL);
4763 
4764 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4765 	}
4766 
4767 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4768 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4769 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4770 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4771 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4772 
4773 	tx_q->xdpf[entry] = xdpf;
4774 
4775 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4776 
4777 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4778 			       true, priv->mode, true, true,
4779 			       xdpf->len);
4780 
4781 	tx_q->tx_count_frames++;
4782 
4783 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4784 		set_ic = true;
4785 	else
4786 		set_ic = false;
4787 
4788 	if (set_ic) {
4789 		unsigned long flags;
4790 		tx_q->tx_count_frames = 0;
4791 		stmmac_set_tx_ic(priv, tx_desc);
4792 		flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4793 		tx_q->txq_stats.tx_set_ic_bit++;
4794 		u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4795 	}
4796 
4797 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4798 
4799 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4800 	tx_q->cur_tx = entry;
4801 
4802 	return STMMAC_XDP_TX;
4803 }
4804 
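/* Map the current CPU to a TX queue index, wrapping around when there are
 * fewer TX queues than online CPUs.
 */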
4805 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4806 				   int cpu)
4807 {
4808 	int index = cpu;
4809 
4810 	if (unlikely(index < 0))
4811 		index = 0;
4812 
4813 	while (index >= priv->plat->tx_queues_to_use)
4814 		index -= priv->plat->tx_queues_to_use;
4815 
4816 	return index;
4817 }
4818 
4819 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4820 				struct xdp_buff *xdp)
4821 {
4822 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4823 	int cpu = smp_processor_id();
4824 	struct netdev_queue *nq;
4825 	int queue;
4826 	int res;
4827 
4828 	if (unlikely(!xdpf))
4829 		return STMMAC_XDP_CONSUMED;
4830 
4831 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4832 	nq = netdev_get_tx_queue(priv->dev, queue);
4833 
4834 	__netif_tx_lock(nq, cpu);
4835 	/* Avoid TX time-out as we are sharing this queue with the slow path */
4836 	txq_trans_cond_update(nq);
4837 
4838 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4839 	if (res == STMMAC_XDP_TX)
4840 		stmmac_flush_tx_descriptors(priv, queue);
4841 
4842 	__netif_tx_unlock(nq);
4843 
4844 	return res;
4845 }
4846 
4847 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4848 				 struct bpf_prog *prog,
4849 				 struct xdp_buff *xdp)
4850 {
4851 	u32 act;
4852 	int res;
4853 
4854 	act = bpf_prog_run_xdp(prog, xdp);
4855 	switch (act) {
4856 	case XDP_PASS:
4857 		res = STMMAC_XDP_PASS;
4858 		break;
4859 	case XDP_TX:
4860 		res = stmmac_xdp_xmit_back(priv, xdp);
4861 		break;
4862 	case XDP_REDIRECT:
4863 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4864 			res = STMMAC_XDP_CONSUMED;
4865 		else
4866 			res = STMMAC_XDP_REDIRECT;
4867 		break;
4868 	default:
4869 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4870 		fallthrough;
4871 	case XDP_ABORTED:
4872 		trace_xdp_exception(priv->dev, prog, act);
4873 		fallthrough;
4874 	case XDP_DROP:
4875 		res = STMMAC_XDP_CONSUMED;
4876 		break;
4877 	}
4878 
4879 	return res;
4880 }
4881 
4882 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4883 					   struct xdp_buff *xdp)
4884 {
4885 	struct bpf_prog *prog;
4886 	int res;
4887 
4888 	prog = READ_ONCE(priv->xdp_prog);
4889 	if (!prog) {
4890 		res = STMMAC_XDP_PASS;
4891 		goto out;
4892 	}
4893 
4894 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4895 out:
4896 	return ERR_PTR(-res);
4897 }
4898 
4899 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4900 				   int xdp_status)
4901 {
4902 	int cpu = smp_processor_id();
4903 	int queue;
4904 
4905 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4906 
4907 	if (xdp_status & STMMAC_XDP_TX)
4908 		stmmac_tx_timer_arm(priv, queue);
4909 
4910 	if (xdp_status & STMMAC_XDP_REDIRECT)
4911 		xdp_do_flush();
4912 }
4913 
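/* Copy an XSK zero-copy buffer into a freshly allocated skb so that the
 * underlying XSK buffer can be returned to the pool.
 */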
4914 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4915 					       struct xdp_buff *xdp)
4916 {
4917 	unsigned int metasize = xdp->data - xdp->data_meta;
4918 	unsigned int datasize = xdp->data_end - xdp->data;
4919 	struct sk_buff *skb;
4920 
4921 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4922 			       xdp->data_end - xdp->data_hard_start,
4923 			       GFP_ATOMIC | __GFP_NOWARN);
4924 	if (unlikely(!skb))
4925 		return NULL;
4926 
4927 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4928 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4929 	if (metasize)
4930 		skb_metadata_set(skb, metasize);
4931 
4932 	return skb;
4933 }
4934 
4935 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4936 				   struct dma_desc *p, struct dma_desc *np,
4937 				   struct xdp_buff *xdp)
4938 {
4939 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4940 	struct stmmac_channel *ch = &priv->channel[queue];
4941 	unsigned int len = xdp->data_end - xdp->data;
4942 	enum pkt_hash_types hash_type;
4943 	int coe = priv->hw->rx_csum;
4944 	unsigned long flags;
4945 	struct sk_buff *skb;
4946 	u32 hash;
4947 
4948 	skb = stmmac_construct_skb_zc(ch, xdp);
4949 	if (!skb) {
4950 		priv->xstats.rx_dropped++;
4951 		return;
4952 	}
4953 
4954 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4955 	stmmac_rx_vlan(priv->dev, skb);
4956 	skb->protocol = eth_type_trans(skb, priv->dev);
4957 
4958 	if (unlikely(!coe))
4959 		skb_checksum_none_assert(skb);
4960 	else
4961 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4962 
4963 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4964 		skb_set_hash(skb, hash, hash_type);
4965 
4966 	skb_record_rx_queue(skb, queue);
4967 	napi_gro_receive(&ch->rxtx_napi, skb);
4968 
4969 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
4970 	rx_q->rxq_stats.rx_pkt_n++;
4971 	rx_q->rxq_stats.rx_bytes += len;
4972 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
4973 }
4974 
4975 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4976 {
4977 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4978 	unsigned int entry = rx_q->dirty_rx;
4979 	struct dma_desc *rx_desc = NULL;
4980 	bool ret = true;
4981 
4982 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4983 
4984 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4985 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4986 		dma_addr_t dma_addr;
4987 		bool use_rx_wd;
4988 
4989 		if (!buf->xdp) {
4990 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4991 			if (!buf->xdp) {
4992 				ret = false;
4993 				break;
4994 			}
4995 		}
4996 
4997 		if (priv->extend_desc)
4998 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4999 		else
5000 			rx_desc = rx_q->dma_rx + entry;
5001 
5002 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5003 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5004 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5005 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5006 
5007 		rx_q->rx_count_frames++;
5008 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5009 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5010 			rx_q->rx_count_frames = 0;
5011 
5012 		use_rx_wd = !priv->rx_coal_frames[queue];
5013 		use_rx_wd |= rx_q->rx_count_frames > 0;
5014 		if (!priv->use_riwt)
5015 			use_rx_wd = false;
5016 
5017 		dma_wmb();
5018 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5019 
5020 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5021 	}
5022 
5023 	if (rx_desc) {
5024 		rx_q->dirty_rx = entry;
5025 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5026 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5027 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5028 	}
5029 
5030 	return ret;
5031 }
5032 
5033 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5034 {
5035 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5036 	 * represents the incoming packet, whereas the cb field in the same
5037 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5038 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5039 	 */
5040 	return (struct stmmac_xdp_buff *)xdp;
5041 }
5042 
5043 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5044 {
5045 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5046 	unsigned int count = 0, error = 0, len = 0;
5047 	int dirty = stmmac_rx_dirty(priv, queue);
5048 	unsigned int next_entry = rx_q->cur_rx;
5049 	u32 rx_errors = 0, rx_dropped = 0;
5050 	unsigned int desc_size;
5051 	struct bpf_prog *prog;
5052 	bool failure = false;
5053 	unsigned long flags;
5054 	int xdp_status = 0;
5055 	int status = 0;
5056 
5057 	if (netif_msg_rx_status(priv)) {
5058 		void *rx_head;
5059 
5060 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5061 		if (priv->extend_desc) {
5062 			rx_head = (void *)rx_q->dma_erx;
5063 			desc_size = sizeof(struct dma_extended_desc);
5064 		} else {
5065 			rx_head = (void *)rx_q->dma_rx;
5066 			desc_size = sizeof(struct dma_desc);
5067 		}
5068 
5069 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5070 				    rx_q->dma_rx_phy, desc_size);
5071 	}
5072 	while (count < limit) {
5073 		struct stmmac_rx_buffer *buf;
5074 		struct stmmac_xdp_buff *ctx;
5075 		unsigned int buf1_len = 0;
5076 		struct dma_desc *np, *p;
5077 		int entry;
5078 		int res;
5079 
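		/* Resume a frame whose processing spilled over from the
		 * previous NAPI poll, if any.
		 */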
5080 		if (!count && rx_q->state_saved) {
5081 			error = rx_q->state.error;
5082 			len = rx_q->state.len;
5083 		} else {
5084 			rx_q->state_saved = false;
5085 			error = 0;
5086 			len = 0;
5087 		}
5088 
5089 		if (count >= limit)
5090 			break;
5091 
5092 read_again:
5093 		buf1_len = 0;
5094 		entry = next_entry;
5095 		buf = &rx_q->buf_pool[entry];
5096 
5097 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5098 			failure = failure ||
5099 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5100 			dirty = 0;
5101 		}
5102 
5103 		if (priv->extend_desc)
5104 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5105 		else
5106 			p = rx_q->dma_rx + entry;
5107 
5108 		/* read the status of the incoming frame */
5109 		status = stmmac_rx_status(priv, &priv->xstats, p);
5110 		/* check if managed by the DMA otherwise go ahead */
5111 		if (unlikely(status & dma_own))
5112 			break;
5113 
5114 		/* Prefetch the next RX descriptor */
5115 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5116 						priv->dma_conf.dma_rx_size);
5117 		next_entry = rx_q->cur_rx;
5118 
5119 		if (priv->extend_desc)
5120 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5121 		else
5122 			np = rx_q->dma_rx + next_entry;
5123 
5124 		prefetch(np);
5125 
5126 		/* Ensure a valid XSK buffer before proceeding */
5127 		if (!buf->xdp)
5128 			break;
5129 
5130 		if (priv->extend_desc)
5131 			stmmac_rx_extended_status(priv, &priv->xstats,
5132 						  rx_q->dma_erx + entry);
5133 		if (unlikely(status == discard_frame)) {
5134 			xsk_buff_free(buf->xdp);
5135 			buf->xdp = NULL;
5136 			dirty++;
5137 			error = 1;
5138 			if (!priv->hwts_rx_en)
5139 				rx_errors++;
5140 		}
5141 
5142 		if (unlikely(error && (status & rx_not_ls)))
5143 			goto read_again;
5144 		if (unlikely(error)) {
5145 			count++;
5146 			continue;
5147 		}
5148 
5149 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5150 		if (likely(status & rx_not_ls)) {
5151 			xsk_buff_free(buf->xdp);
5152 			buf->xdp = NULL;
5153 			dirty++;
5154 			count++;
5155 			goto read_again;
5156 		}
5157 
5158 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5159 		ctx->priv = priv;
5160 		ctx->desc = p;
5161 		ctx->ndesc = np;
5162 
5163 		/* XDP ZC frames only support primary buffers for now */
5164 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5165 		len += buf1_len;
5166 
5167 		/* ACS is disabled; strip manually. */
5168 		if (likely(!(status & rx_not_ls))) {
5169 			buf1_len -= ETH_FCS_LEN;
5170 			len -= ETH_FCS_LEN;
5171 		}
5172 
5173 		/* RX buffer is good and fits into an XSK pool buffer */
5174 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5175 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5176 
5177 		prog = READ_ONCE(priv->xdp_prog);
5178 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5179 
5180 		switch (res) {
5181 		case STMMAC_XDP_PASS:
5182 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5183 			xsk_buff_free(buf->xdp);
5184 			break;
5185 		case STMMAC_XDP_CONSUMED:
5186 			xsk_buff_free(buf->xdp);
5187 			rx_dropped++;
5188 			break;
5189 		case STMMAC_XDP_TX:
5190 		case STMMAC_XDP_REDIRECT:
5191 			xdp_status |= res;
5192 			break;
5193 		}
5194 
5195 		buf->xdp = NULL;
5196 		dirty++;
5197 		count++;
5198 	}
5199 
5200 	if (status & rx_not_ls) {
5201 		rx_q->state_saved = true;
5202 		rx_q->state.error = error;
5203 		rx_q->state.len = len;
5204 	}
5205 
5206 	stmmac_finalize_xdp_rx(priv, xdp_status);
5207 
5208 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5209 	rx_q->rxq_stats.rx_pkt_n += count;
5210 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5211 
5212 	priv->xstats.rx_dropped += rx_dropped;
5213 	priv->xstats.rx_errors += rx_errors;
5214 
5215 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5216 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5217 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5218 		else
5219 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5220 
5221 		return (int)count;
5222 	}
5223 
5224 	return failure ? limit : (int)count;
5225 }
5226 
5227 /**
5228  * stmmac_rx - manage the receive process
5229  * @priv: driver private structure
5230  * @limit: NAPI budget
5231  * @queue: RX queue index.
5232  * Description : this is the function called by the NAPI poll method.
5233  * It gets all the frames inside the ring.
5234  */
5235 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5236 {
5237 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5238 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5239 	struct stmmac_channel *ch = &priv->channel[queue];
5240 	unsigned int count = 0, error = 0, len = 0;
5241 	int status = 0, coe = priv->hw->rx_csum;
5242 	unsigned int next_entry = rx_q->cur_rx;
5243 	enum dma_data_direction dma_dir;
5244 	unsigned int desc_size;
5245 	struct sk_buff *skb = NULL;
5246 	struct stmmac_xdp_buff ctx;
5247 	unsigned long flags;
5248 	int xdp_status = 0;
5249 	int buf_sz;
5250 
5251 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
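	/* Round the RX buffer size up to whole pages for xdp_init_buff() */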
5252 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5253 
5254 	if (netif_msg_rx_status(priv)) {
5255 		void *rx_head;
5256 
5257 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5258 		if (priv->extend_desc) {
5259 			rx_head = (void *)rx_q->dma_erx;
5260 			desc_size = sizeof(struct dma_extended_desc);
5261 		} else {
5262 			rx_head = (void *)rx_q->dma_rx;
5263 			desc_size = sizeof(struct dma_desc);
5264 		}
5265 
5266 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5267 				    rx_q->dma_rx_phy, desc_size);
5268 	}
5269 	while (count < limit) {
5270 		unsigned int buf1_len = 0, buf2_len = 0;
5271 		enum pkt_hash_types hash_type;
5272 		struct stmmac_rx_buffer *buf;
5273 		struct dma_desc *np, *p;
5274 		int entry;
5275 		u32 hash;
5276 
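		/* On entry, resume a frame left incomplete by the previous NAPI
		 * poll, if any.
		 */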
5277 		if (!count && rx_q->state_saved) {
5278 			skb = rx_q->state.skb;
5279 			error = rx_q->state.error;
5280 			len = rx_q->state.len;
5281 		} else {
5282 			rx_q->state_saved = false;
5283 			skb = NULL;
5284 			error = 0;
5285 			len = 0;
5286 		}
5287 
5288 		if (count >= limit)
5289 			break;
5290 
5291 read_again:
5292 		buf1_len = 0;
5293 		buf2_len = 0;
5294 		entry = next_entry;
5295 		buf = &rx_q->buf_pool[entry];
5296 
5297 		if (priv->extend_desc)
5298 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5299 		else
5300 			p = rx_q->dma_rx + entry;
5301 
5302 		/* read the status of the incoming frame */
5303 		status = stmmac_rx_status(priv, &priv->xstats, p);
5304 		/* check whether the descriptor is still owned by the DMA; otherwise go ahead */
5305 		if (unlikely(status & dma_own))
5306 			break;
5307 
5308 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5309 						priv->dma_conf.dma_rx_size);
5310 		next_entry = rx_q->cur_rx;
5311 
5312 		if (priv->extend_desc)
5313 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5314 		else
5315 			np = rx_q->dma_rx + next_entry;
5316 
5317 		prefetch(np);
5318 
5319 		if (priv->extend_desc)
5320 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5321 		if (unlikely(status == discard_frame)) {
5322 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5323 			buf->page = NULL;
5324 			error = 1;
5325 			if (!priv->hwts_rx_en)
5326 				rx_errors++;
5327 		}
5328 
5329 		if (unlikely(error && (status & rx_not_ls)))
5330 			goto read_again;
5331 		if (unlikely(error)) {
5332 			dev_kfree_skb(skb);
5333 			skb = NULL;
5334 			count++;
5335 			continue;
5336 		}
5337 
5338 		/* Buffer is good. Go on. */
5339 
5340 		prefetch(page_address(buf->page) + buf->page_offset);
5341 		if (buf->sec_page)
5342 			prefetch(page_address(buf->sec_page));
5343 
5344 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5345 		len += buf1_len;
5346 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5347 		len += buf2_len;
5348 
5349 		/* ACS is disabled; strip manually. */
5350 		if (likely(!(status & rx_not_ls))) {
5351 			if (buf2_len) {
5352 				buf2_len -= ETH_FCS_LEN;
5353 				len -= ETH_FCS_LEN;
5354 			} else if (buf1_len) {
5355 				buf1_len -= ETH_FCS_LEN;
5356 				len -= ETH_FCS_LEN;
5357 			}
5358 		}
5359 
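		/* First buffer of the frame: run the XDP program on it before
		 * building an skb.
		 */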
5360 		if (!skb) {
5361 			unsigned int pre_len, sync_len;
5362 
5363 			dma_sync_single_for_cpu(priv->device, buf->addr,
5364 						buf1_len, dma_dir);
5365 
5366 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5367 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5368 					 buf->page_offset, buf1_len, true);
5369 
5370 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5371 				  buf->page_offset;
5372 
5373 			ctx.priv = priv;
5374 			ctx.desc = p;
5375 			ctx.ndesc = np;
5376 
5377 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5378 			/* Due to xdp_adjust_tail: the DMA sync for_device
5379 			 * must cover the maximum length the CPU touched
5380 			 */
5381 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5382 				   buf->page_offset;
5383 			sync_len = max(sync_len, pre_len);
5384 
5385 			/* For verdicts other than XDP_PASS */
5386 			if (IS_ERR(skb)) {
5387 				unsigned int xdp_res = -PTR_ERR(skb);
5388 
5389 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5390 					page_pool_put_page(rx_q->page_pool,
5391 							   virt_to_head_page(ctx.xdp.data),
5392 							   sync_len, true);
5393 					buf->page = NULL;
5394 					rx_dropped++;
5395 
5396 					/* Clear skb, as it currently holds the
5397 					 * XDP verdict encoded as an error pointer.
5398 					 */
5399 					skb = NULL;
5400 
5401 					if (unlikely((status & rx_not_ls)))
5402 						goto read_again;
5403 
5404 					count++;
5405 					continue;
5406 				} else if (xdp_res & (STMMAC_XDP_TX |
5407 						      STMMAC_XDP_REDIRECT)) {
5408 					xdp_status |= xdp_res;
5409 					buf->page = NULL;
5410 					skb = NULL;
5411 					count++;
5412 					continue;
5413 				}
5414 			}
5415 		}
5416 
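		/* XDP verdict was PASS (or no program attached): allocate an skb
		 * and copy the possibly adjusted payload into it.
		 */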
5417 		if (!skb) {
5418 			/* XDP program may expand or reduce tail */
5419 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5420 
5421 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5422 			if (!skb) {
5423 				rx_dropped++;
5424 				count++;
5425 				goto drain_data;
5426 			}
5427 
5428 			/* XDP program may adjust header */
5429 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5430 			skb_put(skb, buf1_len);
5431 
5432 			/* Data payload copied into SKB, page ready for recycle */
5433 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5434 			buf->page = NULL;
5435 		} else if (buf1_len) {
5436 			dma_sync_single_for_cpu(priv->device, buf->addr,
5437 						buf1_len, dma_dir);
5438 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5439 					buf->page, buf->page_offset, buf1_len,
5440 					priv->dma_conf.dma_buf_sz);
5441 
5442 			/* Data payload appended into SKB */
5443 			skb_mark_for_recycle(skb);
5444 			buf->page = NULL;
5445 		}
5446 
5447 		if (buf2_len) {
5448 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5449 						buf2_len, dma_dir);
5450 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5451 					buf->sec_page, 0, buf2_len,
5452 					priv->dma_conf.dma_buf_sz);
5453 
5454 			/* Data payload appended into SKB */
5455 			skb_mark_for_recycle(skb);
5456 			buf->sec_page = NULL;
5457 		}
5458 
5459 drain_data:
5460 		if (likely(status & rx_not_ls))
5461 			goto read_again;
5462 		if (!skb)
5463 			continue;
5464 
5465 		/* Got entire packet into SKB. Finish it. */
5466 
5467 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5468 		stmmac_rx_vlan(priv->dev, skb);
5469 		skb->protocol = eth_type_trans(skb, priv->dev);
5470 
5471 		if (unlikely(!coe))
5472 			skb_checksum_none_assert(skb);
5473 		else
5474 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5475 
5476 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5477 			skb_set_hash(skb, hash, hash_type);
5478 
5479 		skb_record_rx_queue(skb, queue);
5480 		napi_gro_receive(&ch->rx_napi, skb);
5481 		skb = NULL;
5482 
5483 		rx_packets++;
5484 		rx_bytes += len;
5485 		count++;
5486 	}
5487 
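	/* A frame is still being assembled (descriptor chain not finished or an
	 * skb is pending): save the state so the next NAPI poll can resume it.
	 */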
5488 	if (status & rx_not_ls || skb) {
5489 		rx_q->state_saved = true;
5490 		rx_q->state.skb = skb;
5491 		rx_q->state.error = error;
5492 		rx_q->state.len = len;
5493 	}
5494 
5495 	stmmac_finalize_xdp_rx(priv, xdp_status);
5496 
5497 	stmmac_rx_refill(priv, queue);
5498 
5499 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5500 	rx_q->rxq_stats.rx_packets += rx_packets;
5501 	rx_q->rxq_stats.rx_bytes += rx_bytes;
5502 	rx_q->rxq_stats.rx_pkt_n += count;
5503 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5504 
5505 	priv->xstats.rx_dropped += rx_dropped;
5506 	priv->xstats.rx_errors += rx_errors;
5507 
5508 	return count;
5509 }
5510 
5511 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5512 {
5513 	struct stmmac_channel *ch =
5514 		container_of(napi, struct stmmac_channel, rx_napi);
5515 	struct stmmac_priv *priv = ch->priv_data;
5516 	struct stmmac_rx_queue *rx_q;
5517 	u32 chan = ch->index;
5518 	unsigned long flags;
5519 	int work_done;
5520 
5521 	rx_q = &priv->dma_conf.rx_queue[chan];
5522 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5523 	rx_q->rxq_stats.napi_poll++;
5524 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5525 
5526 	work_done = stmmac_rx(priv, budget, chan);
5527 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5528 		unsigned long flags;
5529 
5530 		spin_lock_irqsave(&ch->lock, flags);
5531 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5532 		spin_unlock_irqrestore(&ch->lock, flags);
5533 	}
5534 
5535 	return work_done;
5536 }
5537 
5538 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5539 {
5540 	struct stmmac_channel *ch =
5541 		container_of(napi, struct stmmac_channel, tx_napi);
5542 	struct stmmac_priv *priv = ch->priv_data;
5543 	struct stmmac_tx_queue *tx_q;
5544 	u32 chan = ch->index;
5545 	unsigned long flags;
5546 	int work_done;
5547 
5548 	tx_q = &priv->dma_conf.tx_queue[chan];
5549 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
5550 	tx_q->txq_stats.napi_poll++;
5551 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
5552 
5553 	work_done = stmmac_tx_clean(priv, budget, chan);
5554 	work_done = min(work_done, budget);
5555 
5556 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5557 		unsigned long flags;
5558 
5559 		spin_lock_irqsave(&ch->lock, flags);
5560 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5561 		spin_unlock_irqrestore(&ch->lock, flags);
5562 	}
5563 
5564 	return work_done;
5565 }
5566 
5567 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5568 {
5569 	struct stmmac_channel *ch =
5570 		container_of(napi, struct stmmac_channel, rxtx_napi);
5571 	struct stmmac_priv *priv = ch->priv_data;
5572 	int rx_done, tx_done, rxtx_done;
5573 	struct stmmac_rx_queue *rx_q;
5574 	struct stmmac_tx_queue *tx_q;
5575 	u32 chan = ch->index;
5576 	unsigned long flags;
5577 
5578 	rx_q = &priv->dma_conf.rx_queue[chan];
5579 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5580 	rx_q->rxq_stats.napi_poll++;
5581 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5582 
5583 	tx_q = &priv->dma_conf.tx_queue[chan];
5584 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
5585 	tx_q->txq_stats.napi_poll++;
5586 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
5587 
5588 	tx_done = stmmac_tx_clean(priv, budget, chan);
5589 	tx_done = min(tx_done, budget);
5590 
5591 	rx_done = stmmac_rx_zc(priv, budget, chan);
5592 
5593 	rxtx_done = max(tx_done, rx_done);
5594 
5595 	/* If either TX or RX work is not complete, return budget
5596 	 * and keep polling
5597 	 */
5598 	if (rxtx_done >= budget)
5599 		return budget;
5600 
5601 	/* all work done, exit the polling mode */
5602 	if (napi_complete_done(napi, rxtx_done)) {
5603 		unsigned long flags;
5604 
5605 		spin_lock_irqsave(&ch->lock, flags);
5606 		/* Both RX and TX work are complete,
5607 		 * so enable both RX & TX IRQs.
5608 		 */
5609 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5610 		spin_unlock_irqrestore(&ch->lock, flags);
5611 	}
5612 
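	/* rxtx_done is below budget here; the min() is defensive so a full
	 * budget is never reported after napi_complete_done() was attempted.
	 */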
5613 	return min(rxtx_done, budget - 1);
5614 }
5615 
5616 /**
5617  *  stmmac_tx_timeout
5618  *  @dev : Pointer to net device structure
5619  *  @txqueue: the index of the hanging transmit queue
5620  *  Description: this function is called when a packet transmission fails to
5621  *   complete within a reasonable time. The driver will mark the error in the
5622  *   netdev structure and arrange for the device to be reset to a sane state
5623  *   in order to transmit a new packet.
5624  */
5625 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5626 {
5627 	struct stmmac_priv *priv = netdev_priv(dev);
5628 
5629 	stmmac_global_err(priv);
5630 }
5631 
5632 /**
5633  *  stmmac_set_rx_mode - entry point for multicast addressing
5634  *  @dev : pointer to the device structure
5635  *  Description:
5636  *  This function is a driver entry point which gets called by the kernel
5637  *  whenever multicast addresses must be enabled/disabled.
5638  *  Return value:
5639  *  void.
5640  */
5641 static void stmmac_set_rx_mode(struct net_device *dev)
5642 {
5643 	struct stmmac_priv *priv = netdev_priv(dev);
5644 
5645 	stmmac_set_filter(priv, priv->hw, dev);
5646 }
5647 
5648 /**
5649  *  stmmac_change_mtu - entry point to change MTU size for the device.
5650  *  @dev : device pointer.
5651  *  @new_mtu : the new MTU size for the device.
5652  *  Description: the Maximum Transmission Unit (MTU) is used by the network
5653  *  layer to drive packet transmission. Ethernet has a default MTU of 1500
5654  *  octets (ETH_DATA_LEN). This value can be changed with ifconfig.
5655  *  Return value:
5656  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5657  *  file on failure.
5658  */
5659 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5660 {
5661 	struct stmmac_priv *priv = netdev_priv(dev);
5662 	int txfifosz = priv->plat->tx_fifo_size;
5663 	struct stmmac_dma_conf *dma_conf;
5664 	const int mtu = new_mtu;
5665 	int ret;
5666 
5667 	if (txfifosz == 0)
5668 		txfifosz = priv->dma_cap.tx_fifo_size;
5669 
5670 	txfifosz /= priv->plat->tx_queues_to_use;
5671 
5672 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5673 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5674 		return -EINVAL;
5675 	}
5676 
5677 	new_mtu = STMMAC_ALIGN(new_mtu);
5678 
5679 	/* Reject MTUs that do not fit the per-queue TX FIFO or exceed 16 KiB */
5680 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5681 		return -EINVAL;
5682 
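	/* If the interface is up, allocate new DMA rings sized for the new MTU
	 * and restart it; otherwise just record the new MTU below.
	 */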
5683 	if (netif_running(dev)) {
5684 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5685 		/* Try to allocate a new DMA configuration for the new MTU */
5686 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5687 		if (IS_ERR(dma_conf)) {
5688 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5689 				   mtu);
5690 			return PTR_ERR(dma_conf);
5691 		}
5692 
5693 		stmmac_release(dev);
5694 
5695 		ret = __stmmac_open(dev, dma_conf);
5696 		if (ret) {
5697 			free_dma_desc_resources(priv, dma_conf);
5698 			kfree(dma_conf);
5699 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5700 			return ret;
5701 		}
5702 
5703 		kfree(dma_conf);
5704 
5705 		stmmac_set_rx_mode(dev);
5706 	}
5707 
5708 	dev->mtu = mtu;
5709 	netdev_update_features(dev);
5710 
5711 	return 0;
5712 }
5713 
5714 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5715 					     netdev_features_t features)
5716 {
5717 	struct stmmac_priv *priv = netdev_priv(dev);
5718 
5719 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5720 		features &= ~NETIF_F_RXCSUM;
5721 
5722 	if (!priv->plat->tx_coe)
5723 		features &= ~NETIF_F_CSUM_MASK;
5724 
5725 	/* Some GMAC devices have buggy Jumbo frame support that
5726 	 * requires Tx COE to be disabled for oversized frames
5727 	 * (due to limited buffer sizes). In this case we disable
5728 	 * TX csum insertion in the TDES and do not use SF.
5729 	 */
5730 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5731 		features &= ~NETIF_F_CSUM_MASK;
5732 
5733 	/* Disable TSO if requested via ethtool */
5734 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5735 		if (features & NETIF_F_TSO)
5736 			priv->tso = true;
5737 		else
5738 			priv->tso = false;
5739 	}
5740 
5741 	return features;
5742 }
5743 
5744 static int stmmac_set_features(struct net_device *netdev,
5745 			       netdev_features_t features)
5746 {
5747 	struct stmmac_priv *priv = netdev_priv(netdev);
5748 
5749 	/* Keep the COE type if checksum offload is supported */
5750 	if (features & NETIF_F_RXCSUM)
5751 		priv->hw->rx_csum = priv->plat->rx_coe;
5752 	else
5753 		priv->hw->rx_csum = 0;
5754 	/* No check needed because rx_coe has been set before and will be
5755 	 * fixed up if there is an issue.
5756 	 */
5757 	stmmac_rx_ipc(priv, priv->hw);
5758 
5759 	if (priv->sph_cap) {
5760 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5761 		u32 chan;
5762 
5763 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5764 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5765 	}
5766 
5767 	return 0;
5768 }
5769 
5770 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5771 {
5772 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5773 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5774 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5775 	bool *hs_enable = &fpe_cfg->hs_enable;
5776 
5777 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5778 		return;
5779 
5780 	/* If LP has sent verify mPacket, LP is FPE capable */
5781 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5782 		if (*lp_state < FPE_STATE_CAPABLE)
5783 			*lp_state = FPE_STATE_CAPABLE;
5784 
5785 		/* If the user has requested FPE enable, respond quickly */
5786 		if (*hs_enable)
5787 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5788 						MPACKET_RESPONSE);
5789 	}
5790 
5791 	/* If Local has sent verify mPacket, Local is FPE capable */
5792 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5793 		if (*lo_state < FPE_STATE_CAPABLE)
5794 			*lo_state = FPE_STATE_CAPABLE;
5795 	}
5796 
5797 	/* If LP has sent response mPacket, LP is entering FPE ON */
5798 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5799 		*lp_state = FPE_STATE_ENTERING_ON;
5800 
5801 	/* If Local has sent response mPacket, Local is entering FPE ON */
5802 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5803 		*lo_state = FPE_STATE_ENTERING_ON;
5804 
5805 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5806 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5807 	    priv->fpe_wq) {
5808 		queue_work(priv->fpe_wq, &priv->fpe_task);
5809 	}
5810 }
5811 
5812 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5813 {
5814 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5815 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5816 	u32 queues_count;
5817 	u32 queue;
5818 	bool xmac;
5819 
5820 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5821 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5822 
5823 	if (priv->irq_wake)
5824 		pm_wakeup_event(priv->device, 0);
5825 
5826 	if (priv->dma_cap.estsel)
5827 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5828 				      &priv->xstats, tx_cnt);
5829 
5830 	if (priv->dma_cap.fpesel) {
5831 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5832 						   priv->dev);
5833 
5834 		stmmac_fpe_event_status(priv, status);
5835 	}
5836 
5837 	/* To handle the GMAC's own interrupts */
5838 	if ((priv->plat->has_gmac) || xmac) {
5839 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5840 
5841 		if (unlikely(status)) {
5842 			/* For LPI we need to save the tx status */
5843 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5844 				priv->tx_path_in_lpi_mode = true;
5845 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5846 				priv->tx_path_in_lpi_mode = false;
5847 		}
5848 
5849 		for (queue = 0; queue < queues_count; queue++) {
5850 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5851 							    queue);
5852 		}
5853 
5854 		/* PCS link status */
5855 		if (priv->hw->pcs &&
5856 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5857 			if (priv->xstats.pcs_link)
5858 				netif_carrier_on(priv->dev);
5859 			else
5860 				netif_carrier_off(priv->dev);
5861 		}
5862 
5863 		stmmac_timestamp_interrupt(priv, priv);
5864 	}
5865 }
5866 
5867 /**
5868  *  stmmac_interrupt - main ISR
5869  *  @irq: interrupt number.
5870  *  @dev_id: to pass the net device pointer.
5871  *  Description: this is the main driver interrupt service routine.
5872  *  It can call:
5873  *  o DMA service routine (to manage incoming frame reception and transmission
5874  *    status)
5875  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5876  *    interrupts.
5877  */
5878 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5879 {
5880 	struct net_device *dev = (struct net_device *)dev_id;
5881 	struct stmmac_priv *priv = netdev_priv(dev);
5882 
5883 	/* Check if adapter is up */
5884 	if (test_bit(STMMAC_DOWN, &priv->state))
5885 		return IRQ_HANDLED;
5886 
5887 	/* Check if a fatal error happened */
5888 	if (stmmac_safety_feat_interrupt(priv))
5889 		return IRQ_HANDLED;
5890 
5891 	/* To handle Common interrupts */
5892 	stmmac_common_interrupt(priv);
5893 
5894 	/* To handle DMA interrupts */
5895 	stmmac_dma_interrupt(priv);
5896 
5897 	return IRQ_HANDLED;
5898 }
5899 
5900 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5901 {
5902 	struct net_device *dev = (struct net_device *)dev_id;
5903 	struct stmmac_priv *priv = netdev_priv(dev);
5904 
5905 	if (unlikely(!dev)) {
5906 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5907 		return IRQ_NONE;
5908 	}
5909 
5910 	/* Check if adapter is up */
5911 	if (test_bit(STMMAC_DOWN, &priv->state))
5912 		return IRQ_HANDLED;
5913 
5914 	/* To handle Common interrupts */
5915 	stmmac_common_interrupt(priv);
5916 
5917 	return IRQ_HANDLED;
5918 }
5919 
5920 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5921 {
5922 	struct net_device *dev = (struct net_device *)dev_id;
5923 	struct stmmac_priv *priv = netdev_priv(dev);
5924 
5925 	if (unlikely(!dev)) {
5926 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5927 		return IRQ_NONE;
5928 	}
5929 
5930 	/* Check if adapter is up */
5931 	if (test_bit(STMMAC_DOWN, &priv->state))
5932 		return IRQ_HANDLED;
5933 
5934 	/* Check if a fatal error happened */
5935 	stmmac_safety_feat_interrupt(priv);
5936 
5937 	return IRQ_HANDLED;
5938 }
5939 
5940 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5941 {
5942 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5943 	struct stmmac_dma_conf *dma_conf;
5944 	int chan = tx_q->queue_index;
5945 	struct stmmac_priv *priv;
5946 	int status;
5947 
5948 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5949 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5950 
5951 	if (unlikely(!data)) {
5952 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5953 		return IRQ_NONE;
5954 	}
5955 
5956 	/* Check if adapter is up */
5957 	if (test_bit(STMMAC_DOWN, &priv->state))
5958 		return IRQ_HANDLED;
5959 
5960 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5961 
5962 	if (unlikely(status & tx_hard_error_bump_tc)) {
5963 		/* Try to bump up the dma threshold on this failure */
5964 		stmmac_bump_dma_threshold(priv, chan);
5965 	} else if (unlikely(status == tx_hard_error)) {
5966 		stmmac_tx_err(priv, chan);
5967 	}
5968 
5969 	return IRQ_HANDLED;
5970 }
5971 
5972 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5973 {
5974 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5975 	struct stmmac_dma_conf *dma_conf;
5976 	int chan = rx_q->queue_index;
5977 	struct stmmac_priv *priv;
5978 
5979 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5980 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5981 
5982 	if (unlikely(!data)) {
5983 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5984 		return IRQ_NONE;
5985 	}
5986 
5987 	/* Check if adapter is up */
5988 	if (test_bit(STMMAC_DOWN, &priv->state))
5989 		return IRQ_HANDLED;
5990 
5991 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
5992 
5993 	return IRQ_HANDLED;
5994 }
5995 
5996 #ifdef CONFIG_NET_POLL_CONTROLLER
5997 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5998  * to allow network I/O with interrupts disabled.
5999  */
6000 static void stmmac_poll_controller(struct net_device *dev)
6001 {
6002 	struct stmmac_priv *priv = netdev_priv(dev);
6003 	int i;
6004 
6005 	/* If adapter is down, do nothing */
6006 	if (test_bit(STMMAC_DOWN, &priv->state))
6007 		return;
6008 
6009 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
6010 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
6011 			stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
6012 
6013 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
6014 			stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
6015 	} else {
6016 		disable_irq(dev->irq);
6017 		stmmac_interrupt(dev->irq, dev);
6018 		enable_irq(dev->irq);
6019 	}
6020 }
6021 #endif
6022 
6023 /**
6024  *  stmmac_ioctl - Entry point for the Ioctl
6025  *  @dev: Device pointer.
6026  *  @rq: An IOCTL-specific structure that can contain a pointer to
6027  *  a proprietary structure used to pass information to the driver.
6028  *  @cmd: IOCTL command
6029  *  Description:
6030  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6031  */
6032 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6033 {
6034 	struct stmmac_priv *priv = netdev_priv(dev);
6035 	int ret = -EOPNOTSUPP;
6036 
6037 	if (!netif_running(dev))
6038 		return -EINVAL;
6039 
6040 	switch (cmd) {
6041 	case SIOCGMIIPHY:
6042 	case SIOCGMIIREG:
6043 	case SIOCSMIIREG:
6044 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6045 		break;
6046 	case SIOCSHWTSTAMP:
6047 		ret = stmmac_hwtstamp_set(dev, rq);
6048 		break;
6049 	case SIOCGHWTSTAMP:
6050 		ret = stmmac_hwtstamp_get(dev, rq);
6051 		break;
6052 	default:
6053 		break;
6054 	}
6055 
6056 	return ret;
6057 }
6058 
6059 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6060 				    void *cb_priv)
6061 {
6062 	struct stmmac_priv *priv = cb_priv;
6063 	int ret = -EOPNOTSUPP;
6064 
6065 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6066 		return ret;
6067 
6068 	__stmmac_disable_all_queues(priv);
6069 
6070 	switch (type) {
6071 	case TC_SETUP_CLSU32:
6072 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6073 		break;
6074 	case TC_SETUP_CLSFLOWER:
6075 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6076 		break;
6077 	default:
6078 		break;
6079 	}
6080 
6081 	stmmac_enable_all_queues(priv);
6082 	return ret;
6083 }
6084 
6085 static LIST_HEAD(stmmac_block_cb_list);
6086 
6087 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6088 			   void *type_data)
6089 {
6090 	struct stmmac_priv *priv = netdev_priv(ndev);
6091 
6092 	switch (type) {
6093 	case TC_QUERY_CAPS:
6094 		return stmmac_tc_query_caps(priv, priv, type_data);
6095 	case TC_SETUP_BLOCK:
6096 		return flow_block_cb_setup_simple(type_data,
6097 						  &stmmac_block_cb_list,
6098 						  stmmac_setup_tc_block_cb,
6099 						  priv, priv, true);
6100 	case TC_SETUP_QDISC_CBS:
6101 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6102 	case TC_SETUP_QDISC_TAPRIO:
6103 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6104 	case TC_SETUP_QDISC_ETF:
6105 		return stmmac_tc_setup_etf(priv, priv, type_data);
6106 	default:
6107 		return -EOPNOTSUPP;
6108 	}
6109 }
6110 
6111 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6112 			       struct net_device *sb_dev)
6113 {
6114 	int gso = skb_shinfo(skb)->gso_type;
6115 
6116 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6117 		/*
6118 		 * There is no way to determine the number of TSO/USO
6119 		 * capable queues. Always use queue 0, because if
6120 		 * TSO/USO is supported then at least this one will
6121 		 * be capable.
6122 		 */
6123 		return 0;
6124 	}
6125 
6126 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6127 }
6128 
6129 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6130 {
6131 	struct stmmac_priv *priv = netdev_priv(ndev);
6132 	int ret = 0;
6133 
6134 	ret = pm_runtime_resume_and_get(priv->device);
6135 	if (ret < 0)
6136 		return ret;
6137 
6138 	ret = eth_mac_addr(ndev, addr);
6139 	if (ret)
6140 		goto set_mac_error;
6141 
6142 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6143 
6144 set_mac_error:
6145 	pm_runtime_put(priv->device);
6146 
6147 	return ret;
6148 }
6149 
6150 #ifdef CONFIG_DEBUG_FS
6151 static struct dentry *stmmac_fs_dir;
6152 
6153 static void sysfs_display_ring(void *head, int size, int extend_desc,
6154 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6155 {
6156 	int i;
6157 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6158 	struct dma_desc *p = (struct dma_desc *)head;
6159 	dma_addr_t dma_addr;
6160 
6161 	for (i = 0; i < size; i++) {
6162 		if (extend_desc) {
6163 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6164 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6165 				   i, &dma_addr,
6166 				   le32_to_cpu(ep->basic.des0),
6167 				   le32_to_cpu(ep->basic.des1),
6168 				   le32_to_cpu(ep->basic.des2),
6169 				   le32_to_cpu(ep->basic.des3));
6170 			ep++;
6171 		} else {
6172 			dma_addr = dma_phy_addr + i * sizeof(*p);
6173 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6174 				   i, &dma_addr,
6175 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6176 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6177 			p++;
6178 		}
6179 		seq_printf(seq, "\n");
6180 	}
6181 }
6182 
6183 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6184 {
6185 	struct net_device *dev = seq->private;
6186 	struct stmmac_priv *priv = netdev_priv(dev);
6187 	u32 rx_count = priv->plat->rx_queues_to_use;
6188 	u32 tx_count = priv->plat->tx_queues_to_use;
6189 	u32 queue;
6190 
6191 	if ((dev->flags & IFF_UP) == 0)
6192 		return 0;
6193 
6194 	for (queue = 0; queue < rx_count; queue++) {
6195 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6196 
6197 		seq_printf(seq, "RX Queue %d:\n", queue);
6198 
6199 		if (priv->extend_desc) {
6200 			seq_printf(seq, "Extended descriptor ring:\n");
6201 			sysfs_display_ring((void *)rx_q->dma_erx,
6202 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6203 		} else {
6204 			seq_printf(seq, "Descriptor ring:\n");
6205 			sysfs_display_ring((void *)rx_q->dma_rx,
6206 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6207 		}
6208 	}
6209 
6210 	for (queue = 0; queue < tx_count; queue++) {
6211 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6212 
6213 		seq_printf(seq, "TX Queue %d:\n", queue);
6214 
6215 		if (priv->extend_desc) {
6216 			seq_printf(seq, "Extended descriptor ring:\n");
6217 			sysfs_display_ring((void *)tx_q->dma_etx,
6218 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6219 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6220 			seq_printf(seq, "Descriptor ring:\n");
6221 			sysfs_display_ring((void *)tx_q->dma_tx,
6222 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6223 		}
6224 	}
6225 
6226 	return 0;
6227 }
6228 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6229 
6230 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6231 {
6232 	static const char * const dwxgmac_timestamp_source[] = {
6233 		"None",
6234 		"Internal",
6235 		"External",
6236 		"Both",
6237 	};
6238 	static const char * const dwxgmac_safety_feature_desc[] = {
6239 		"No",
6240 		"All Safety Features with ECC and Parity",
6241 		"All Safety Features without ECC or Parity",
6242 		"All Safety Features with Parity Only",
6243 		"ECC Only",
6244 		"UNDEFINED",
6245 		"UNDEFINED",
6246 		"UNDEFINED",
6247 	};
6248 	struct net_device *dev = seq->private;
6249 	struct stmmac_priv *priv = netdev_priv(dev);
6250 
6251 	if (!priv->hw_cap_support) {
6252 		seq_printf(seq, "DMA HW features not supported\n");
6253 		return 0;
6254 	}
6255 
6256 	seq_printf(seq, "==============================\n");
6257 	seq_printf(seq, "\tDMA HW features\n");
6258 	seq_printf(seq, "==============================\n");
6259 
6260 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6261 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6262 	seq_printf(seq, "\t1000 Mbps: %s\n",
6263 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6264 	seq_printf(seq, "\tHalf duplex: %s\n",
6265 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6266 	if (priv->plat->has_xgmac) {
6267 		seq_printf(seq,
6268 			   "\tNumber of Additional MAC address registers: %d\n",
6269 			   priv->dma_cap.multi_addr);
6270 	} else {
6271 		seq_printf(seq, "\tHash Filter: %s\n",
6272 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6273 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6274 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6275 	}
6276 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6277 		   (priv->dma_cap.pcs) ? "Y" : "N");
6278 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6279 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6280 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6281 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6282 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6283 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6284 	seq_printf(seq, "\tRMON module: %s\n",
6285 		   (priv->dma_cap.rmon) ? "Y" : "N");
6286 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6287 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6288 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6289 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6290 	if (priv->plat->has_xgmac)
6291 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6292 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6293 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6294 		   (priv->dma_cap.eee) ? "Y" : "N");
6295 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6296 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6297 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6298 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6299 	    priv->plat->has_xgmac) {
6300 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6301 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6302 	} else {
6303 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6304 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6305 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6306 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6307 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6308 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6309 	}
6310 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6311 		   priv->dma_cap.number_rx_channel);
6312 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6313 		   priv->dma_cap.number_tx_channel);
6314 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6315 		   priv->dma_cap.number_rx_queues);
6316 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6317 		   priv->dma_cap.number_tx_queues);
6318 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6319 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6320 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6321 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6322 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6323 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6324 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6325 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6326 		   priv->dma_cap.pps_out_num);
6327 	seq_printf(seq, "\tSafety Features: %s\n",
6328 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6329 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6330 		   priv->dma_cap.frpsel ? "Y" : "N");
6331 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6332 		   priv->dma_cap.host_dma_width);
6333 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6334 		   priv->dma_cap.rssen ? "Y" : "N");
6335 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6336 		   priv->dma_cap.vlhash ? "Y" : "N");
6337 	seq_printf(seq, "\tSplit Header: %s\n",
6338 		   priv->dma_cap.sphen ? "Y" : "N");
6339 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6340 		   priv->dma_cap.vlins ? "Y" : "N");
6341 	seq_printf(seq, "\tDouble VLAN: %s\n",
6342 		   priv->dma_cap.dvlan ? "Y" : "N");
6343 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6344 		   priv->dma_cap.l3l4fnum);
6345 	seq_printf(seq, "\tARP Offloading: %s\n",
6346 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6347 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6348 		   priv->dma_cap.estsel ? "Y" : "N");
6349 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6350 		   priv->dma_cap.fpesel ? "Y" : "N");
6351 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6352 		   priv->dma_cap.tbssel ? "Y" : "N");
6353 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6354 		   priv->dma_cap.tbs_ch_num);
6355 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6356 		   priv->dma_cap.sgfsel ? "Y" : "N");
6357 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6358 		   BIT(priv->dma_cap.ttsfd) >> 1);
6359 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6360 		   priv->dma_cap.numtc);
6361 	seq_printf(seq, "\tDCB Feature: %s\n",
6362 		   priv->dma_cap.dcben ? "Y" : "N");
6363 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6364 		   priv->dma_cap.advthword ? "Y" : "N");
6365 	seq_printf(seq, "\tPTP Offload: %s\n",
6366 		   priv->dma_cap.ptoen ? "Y" : "N");
6367 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6368 		   priv->dma_cap.osten ? "Y" : "N");
6369 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6370 		   priv->dma_cap.pfcen ? "Y" : "N");
6371 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6372 		   BIT(priv->dma_cap.frpes) << 6);
6373 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6374 		   BIT(priv->dma_cap.frpbs) << 6);
6375 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6376 		   priv->dma_cap.frppipe_num);
6377 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6378 		   priv->dma_cap.nrvf_num ?
6379 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6380 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6381 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6382 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6383 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6384 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6385 		   priv->dma_cap.cbtisel ? "Y" : "N");
6386 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6387 		   priv->dma_cap.aux_snapshot_n);
6388 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6389 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6390 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6391 		   priv->dma_cap.edma ? "Y" : "N");
6392 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6393 		   priv->dma_cap.ediffc ? "Y" : "N");
6394 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6395 		   priv->dma_cap.vxn ? "Y" : "N");
6396 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6397 		   priv->dma_cap.dbgmem ? "Y" : "N");
6398 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6399 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6400 	return 0;
6401 }
6402 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6403 
6404 /* Use network device events to rename debugfs file entries.
6405  */
6406 static int stmmac_device_event(struct notifier_block *unused,
6407 			       unsigned long event, void *ptr)
6408 {
6409 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6410 	struct stmmac_priv *priv = netdev_priv(dev);
6411 
6412 	if (dev->netdev_ops != &stmmac_netdev_ops)
6413 		goto done;
6414 
6415 	switch (event) {
6416 	case NETDEV_CHANGENAME:
6417 		if (priv->dbgfs_dir)
6418 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6419 							 priv->dbgfs_dir,
6420 							 stmmac_fs_dir,
6421 							 dev->name);
6422 		break;
6423 	}
6424 done:
6425 	return NOTIFY_DONE;
6426 }
6427 
6428 static struct notifier_block stmmac_notifier = {
6429 	.notifier_call = stmmac_device_event,
6430 };
6431 
6432 static void stmmac_init_fs(struct net_device *dev)
6433 {
6434 	struct stmmac_priv *priv = netdev_priv(dev);
6435 
6436 	rtnl_lock();
6437 
6438 	/* Create per netdev entries */
6439 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6440 
6441 	/* Entry to report DMA RX/TX rings */
6442 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6443 			    &stmmac_rings_status_fops);
6444 
6445 	/* Entry to report the DMA HW features */
6446 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6447 			    &stmmac_dma_cap_fops);
6448 
6449 	rtnl_unlock();
6450 }
6451 
6452 static void stmmac_exit_fs(struct net_device *dev)
6453 {
6454 	struct stmmac_priv *priv = netdev_priv(dev);
6455 
6456 	debugfs_remove_recursive(priv->dbgfs_dir);
6457 }
6458 #endif /* CONFIG_DEBUG_FS */
6459 
6460 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6461 {
6462 	unsigned char *data = (unsigned char *)&vid_le;
6463 	unsigned char data_byte = 0;
6464 	u32 crc = ~0x0;
6465 	u32 temp = 0;
6466 	int i, bits;
6467 
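	/* Bitwise little-endian CRC-32 (polynomial 0xEDB88320) over the valid
	 * VID bits.
	 */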
6468 	bits = get_bitmask_order(VLAN_VID_MASK);
6469 	for (i = 0; i < bits; i++) {
6470 		if ((i % 8) == 0)
6471 			data_byte = data[i / 8];
6472 
6473 		temp = ((crc & 1) ^ data_byte) & 1;
6474 		crc >>= 1;
6475 		data_byte >>= 1;
6476 
6477 		if (temp)
6478 			crc ^= 0xedb88320;
6479 	}
6480 
6481 	return crc;
6482 }
6483 
6484 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6485 {
6486 	u32 crc, hash = 0;
6487 	__le16 pmatch = 0;
6488 	int count = 0;
6489 	u16 vid = 0;
6490 
6491 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6492 		__le16 vid_le = cpu_to_le16(vid);
6493 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6494 		hash |= (1 << crc);
6495 		count++;
6496 	}
6497 
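	/* No VLAN hash filtering in HW: fall back to perfect match, which can
	 * only hold a single VID (VID 0 always passes).
	 */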
6498 	if (!priv->dma_cap.vlhash) {
6499 		if (count > 2) /* VID = 0 always passes filter */
6500 			return -EOPNOTSUPP;
6501 
6502 		pmatch = cpu_to_le16(vid);
6503 		hash = 0;
6504 	}
6505 
6506 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6507 }
6508 
6509 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6510 {
6511 	struct stmmac_priv *priv = netdev_priv(ndev);
6512 	bool is_double = false;
6513 	int ret;
6514 
6515 	ret = pm_runtime_resume_and_get(priv->device);
6516 	if (ret < 0)
6517 		return ret;
6518 
6519 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6520 		is_double = true;
6521 
6522 	set_bit(vid, priv->active_vlans);
6523 	ret = stmmac_vlan_update(priv, is_double);
6524 	if (ret) {
6525 		clear_bit(vid, priv->active_vlans);
6526 		goto err_pm_put;
6527 	}
6528 
6529 	if (priv->hw->num_vlan) {
6530 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6531 		if (ret)
6532 			goto err_pm_put;
6533 	}
6534 err_pm_put:
6535 	pm_runtime_put(priv->device);
6536 
6537 	return ret;
6538 }
6539 
6540 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6541 {
6542 	struct stmmac_priv *priv = netdev_priv(ndev);
6543 	bool is_double = false;
6544 	int ret;
6545 
6546 	ret = pm_runtime_resume_and_get(priv->device);
6547 	if (ret < 0)
6548 		return ret;
6549 
6550 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6551 		is_double = true;
6552 
6553 	clear_bit(vid, priv->active_vlans);
6554 
6555 	if (priv->hw->num_vlan) {
6556 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6557 		if (ret)
6558 			goto del_vlan_error;
6559 	}
6560 
6561 	ret = stmmac_vlan_update(priv, is_double);
6562 
6563 del_vlan_error:
6564 	pm_runtime_put(priv->device);
6565 
6566 	return ret;
6567 }
6568 
6569 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6570 {
6571 	struct stmmac_priv *priv = netdev_priv(dev);
6572 
6573 	switch (bpf->command) {
6574 	case XDP_SETUP_PROG:
6575 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6576 	case XDP_SETUP_XSK_POOL:
6577 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6578 					     bpf->xsk.queue_id);
6579 	default:
6580 		return -EOPNOTSUPP;
6581 	}
6582 }
6583 
6584 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6585 			   struct xdp_frame **frames, u32 flags)
6586 {
6587 	struct stmmac_priv *priv = netdev_priv(dev);
6588 	int cpu = smp_processor_id();
6589 	struct netdev_queue *nq;
6590 	int i, nxmit = 0;
6591 	int queue;
6592 
6593 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6594 		return -ENETDOWN;
6595 
6596 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6597 		return -EINVAL;
6598 
6599 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6600 	nq = netdev_get_tx_queue(priv->dev, queue);
6601 
6602 	__netif_tx_lock(nq, cpu);
6603 	/* Avoids TX time-out as we are sharing with slow path */
6604 	txq_trans_cond_update(nq);
6605 
6606 	for (i = 0; i < num_frames; i++) {
6607 		int res;
6608 
6609 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6610 		if (res == STMMAC_XDP_CONSUMED)
6611 			break;
6612 
6613 		nxmit++;
6614 	}
6615 
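	/* Only kick the DMA and re-arm the TX coalescing timer when the caller
	 * requests a flush.
	 */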
6616 	if (flags & XDP_XMIT_FLUSH) {
6617 		stmmac_flush_tx_descriptors(priv, queue);
6618 		stmmac_tx_timer_arm(priv, queue);
6619 	}
6620 
6621 	__netif_tx_unlock(nq);
6622 
6623 	return nxmit;
6624 }
6625 
6626 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6627 {
6628 	struct stmmac_channel *ch = &priv->channel[queue];
6629 	unsigned long flags;
6630 
6631 	spin_lock_irqsave(&ch->lock, flags);
6632 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6633 	spin_unlock_irqrestore(&ch->lock, flags);
6634 
6635 	stmmac_stop_rx_dma(priv, queue);
6636 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6637 }
6638 
6639 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6640 {
6641 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6642 	struct stmmac_channel *ch = &priv->channel[queue];
6643 	unsigned long flags;
6644 	u32 buf_size;
6645 	int ret;
6646 
6647 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6648 	if (ret) {
6649 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6650 		return;
6651 	}
6652 
6653 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6654 	if (ret) {
6655 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6656 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6657 		return;
6658 	}
6659 
6660 	stmmac_reset_rx_queue(priv, queue);
6661 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6662 
6663 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6664 			    rx_q->dma_rx_phy, rx_q->queue_index);
6665 
6666 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6667 			     sizeof(struct dma_desc));
6668 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6669 			       rx_q->rx_tail_addr, rx_q->queue_index);
6670 
6671 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6672 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6673 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6674 				      buf_size,
6675 				      rx_q->queue_index);
6676 	} else {
6677 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6678 				      priv->dma_conf.dma_buf_sz,
6679 				      rx_q->queue_index);
6680 	}
6681 
6682 	stmmac_start_rx_dma(priv, queue);
6683 
6684 	spin_lock_irqsave(&ch->lock, flags);
6685 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6686 	spin_unlock_irqrestore(&ch->lock, flags);
6687 }
6688 
6689 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6690 {
6691 	struct stmmac_channel *ch = &priv->channel[queue];
6692 	unsigned long flags;
6693 
6694 	spin_lock_irqsave(&ch->lock, flags);
6695 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6696 	spin_unlock_irqrestore(&ch->lock, flags);
6697 
6698 	stmmac_stop_tx_dma(priv, queue);
6699 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6700 }
6701 
6702 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6703 {
6704 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6705 	struct stmmac_channel *ch = &priv->channel[queue];
6706 	unsigned long flags;
6707 	int ret;
6708 
6709 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6710 	if (ret) {
6711 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6712 		return;
6713 	}
6714 
6715 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6716 	if (ret) {
6717 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6718 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6719 		return;
6720 	}
6721 
6722 	stmmac_reset_tx_queue(priv, queue);
6723 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6724 
6725 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6726 			    tx_q->dma_tx_phy, tx_q->queue_index);
6727 
6728 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6729 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6730 
6731 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6732 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6733 			       tx_q->tx_tail_addr, tx_q->queue_index);
6734 
6735 	stmmac_start_tx_dma(priv, queue);
6736 
6737 	spin_lock_irqsave(&ch->lock, flags);
6738 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6739 	spin_unlock_irqrestore(&ch->lock, flags);
6740 }
6741 
6742 void stmmac_xdp_release(struct net_device *dev)
6743 {
6744 	struct stmmac_priv *priv = netdev_priv(dev);
6745 	u32 chan;
6746 
6747 	/* Ensure tx function is not running */
6748 	netif_tx_disable(dev);
6749 
6750 	/* Disable NAPI process */
6751 	stmmac_disable_all_queues(priv);
6752 
6753 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6754 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6755 
6756 	/* Free the IRQ lines */
6757 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6758 
6759 	/* Stop TX/RX DMA channels */
6760 	stmmac_stop_all_dma(priv);
6761 
6762 	/* Release and free the Rx/Tx resources */
6763 	free_dma_desc_resources(priv, &priv->dma_conf);
6764 
6765 	/* Disable the MAC Rx/Tx */
6766 	stmmac_mac_set(priv, priv->ioaddr, false);
6767 
6768 	/* set trans_start so we don't get spurious
6769 	 * watchdogs during reset
6770 	 */
6771 	netif_trans_update(dev);
6772 	netif_carrier_off(dev);
6773 }
6774 
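/* Re-allocate the DMA rings, reprogram the DMA channels and bring the MAC and
 * all queues back up; the counterpart of stmmac_xdp_release().
 */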
6775 int stmmac_xdp_open(struct net_device *dev)
6776 {
6777 	struct stmmac_priv *priv = netdev_priv(dev);
6778 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6779 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6780 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6781 	struct stmmac_rx_queue *rx_q;
6782 	struct stmmac_tx_queue *tx_q;
6783 	u32 buf_size;
6784 	bool sph_en;
6785 	u32 chan;
6786 	int ret;
6787 
6788 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6789 	if (ret < 0) {
6790 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6791 			   __func__);
6792 		goto dma_desc_error;
6793 	}
6794 
6795 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6796 	if (ret < 0) {
6797 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6798 			   __func__);
6799 		goto init_error;
6800 	}
6801 
6802 	stmmac_reset_queues_param(priv);
6803 
6804 	/* DMA CSR Channel configuration */
6805 	for (chan = 0; chan < dma_csr_ch; chan++) {
6806 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6807 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6808 	}
6809 
6810 	/* Adjust Split header */
6811 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6812 
6813 	/* DMA RX Channel Configuration */
6814 	for (chan = 0; chan < rx_cnt; chan++) {
6815 		rx_q = &priv->dma_conf.rx_queue[chan];
6816 
6817 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6818 				    rx_q->dma_rx_phy, chan);
6819 
6820 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6821 				     (rx_q->buf_alloc_num *
6822 				      sizeof(struct dma_desc));
6823 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6824 				       rx_q->rx_tail_addr, chan);
6825 
6826 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6827 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6828 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6829 					      buf_size,
6830 					      rx_q->queue_index);
6831 		} else {
6832 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6833 					      priv->dma_conf.dma_buf_sz,
6834 					      rx_q->queue_index);
6835 		}
6836 
6837 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6838 	}
6839 
6840 	/* DMA TX Channel Configuration */
6841 	for (chan = 0; chan < tx_cnt; chan++) {
6842 		tx_q = &priv->dma_conf.tx_queue[chan];
6843 
6844 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6845 				    tx_q->dma_tx_phy, chan);
6846 
6847 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6848 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6849 				       tx_q->tx_tail_addr, chan);
6850 
6851 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6852 		tx_q->txtimer.function = stmmac_tx_timer;
6853 	}
6854 
6855 	/* Enable the MAC Rx/Tx */
6856 	stmmac_mac_set(priv, priv->ioaddr, true);
6857 
6858 	/* Start Rx & Tx DMA Channels */
6859 	stmmac_start_all_dma(priv);
6860 
6861 	ret = stmmac_request_irq(dev);
6862 	if (ret)
6863 		goto irq_error;
6864 
6865 	/* Enable NAPI process */
6866 	stmmac_enable_all_queues(priv);
6867 	netif_carrier_on(dev);
6868 	netif_tx_start_all_queues(dev);
6869 	stmmac_enable_all_dma_irq(priv);
6870 
6871 	return 0;
6872 
6873 irq_error:
6874 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6875 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6876 
6877 	stmmac_hw_teardown(dev);
6878 init_error:
6879 	free_dma_desc_resources(priv, &priv->dma_conf);
6880 dma_desc_error:
6881 	return ret;
6882 }
6883 
6884 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6885 {
6886 	struct stmmac_priv *priv = netdev_priv(dev);
6887 	struct stmmac_rx_queue *rx_q;
6888 	struct stmmac_tx_queue *tx_q;
6889 	struct stmmac_channel *ch;
6890 
6891 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6892 	    !netif_carrier_ok(priv->dev))
6893 		return -ENETDOWN;
6894 
6895 	if (!stmmac_xdp_is_enabled(priv))
6896 		return -EINVAL;
6897 
6898 	if (queue >= priv->plat->rx_queues_to_use ||
6899 	    queue >= priv->plat->tx_queues_to_use)
6900 		return -EINVAL;
6901 
6902 	rx_q = &priv->dma_conf.rx_queue[queue];
6903 	tx_q = &priv->dma_conf.tx_queue[queue];
6904 	ch = &priv->channel[queue];
6905 
6906 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6907 		return -EINVAL;
6908 
6909 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6910 		/* EQoS does not have per-DMA channel SW interrupt,
6911 		 * so we schedule the rxtx NAPI straight away.
6912 		 */
6913 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6914 			__napi_schedule(&ch->rxtx_napi);
6915 	}
6916 
6917 	return 0;
6918 }
6919 
6920 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6921 {
6922 	struct stmmac_priv *priv = netdev_priv(dev);
6923 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6924 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6925 	unsigned int start;
6926 	int q;
6927 
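	/* Fold per-queue counters into the netdev stats; the u64_stats retry
	 * loop gives a consistent 64-bit snapshot on 32-bit systems.
	 */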
6928 	for (q = 0; q < tx_cnt; q++) {
6929 		struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
6930 		u64 tx_packets;
6931 		u64 tx_bytes;
6932 
6933 		do {
6934 			start = u64_stats_fetch_begin(&txq_stats->syncp);
6935 			tx_packets = txq_stats->tx_packets;
6936 			tx_bytes   = txq_stats->tx_bytes;
6937 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6938 
6939 		stats->tx_packets += tx_packets;
6940 		stats->tx_bytes += tx_bytes;
6941 	}
6942 
6943 	for (q = 0; q < rx_cnt; q++) {
6944 		struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
6945 		u64 rx_packets;
6946 		u64 rx_bytes;
6947 
6948 		do {
6949 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
6950 			rx_packets = rxq_stats->rx_packets;
6951 			rx_bytes   = rxq_stats->rx_bytes;
6952 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6953 
6954 		stats->rx_packets += rx_packets;
6955 		stats->rx_bytes += rx_bytes;
6956 	}
6957 
6958 	stats->rx_dropped = priv->xstats.rx_dropped;
6959 	stats->rx_errors = priv->xstats.rx_errors;
6960 	stats->tx_dropped = priv->xstats.tx_dropped;
6961 	stats->tx_errors = priv->xstats.tx_errors;
6962 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6963 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6964 	stats->rx_length_errors = priv->xstats.rx_length;
6965 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6966 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6967 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6968 }
6969 
6970 static const struct net_device_ops stmmac_netdev_ops = {
6971 	.ndo_open = stmmac_open,
6972 	.ndo_start_xmit = stmmac_xmit,
6973 	.ndo_stop = stmmac_release,
6974 	.ndo_change_mtu = stmmac_change_mtu,
6975 	.ndo_fix_features = stmmac_fix_features,
6976 	.ndo_set_features = stmmac_set_features,
6977 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6978 	.ndo_tx_timeout = stmmac_tx_timeout,
6979 	.ndo_eth_ioctl = stmmac_ioctl,
6980 	.ndo_get_stats64 = stmmac_get_stats64,
6981 	.ndo_setup_tc = stmmac_setup_tc,
6982 	.ndo_select_queue = stmmac_select_queue,
6983 #ifdef CONFIG_NET_POLL_CONTROLLER
6984 	.ndo_poll_controller = stmmac_poll_controller,
6985 #endif
6986 	.ndo_set_mac_address = stmmac_set_mac_address,
6987 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6988 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6989 	.ndo_bpf = stmmac_bpf,
6990 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6991 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6992 };
6993 
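/**
 * stmmac_reset_subtask - reset the device when requested
 * @priv: driver private structure
 * Description: if STMMAC_RESET_REQUESTED is set and the interface is not
 * going down, close and re-open the device under the rtnl lock to recover
 * from a fatal error.
 */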
6994 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6995 {
6996 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6997 		return;
6998 	if (test_bit(STMMAC_DOWN, &priv->state))
6999 		return;
7000 
7001 	netdev_err(priv->dev, "Reset adapter.\n");
7002 
7003 	rtnl_lock();
7004 	netif_trans_update(priv->dev);
7005 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7006 		usleep_range(1000, 2000);
7007 
7008 	set_bit(STMMAC_DOWN, &priv->state);
7009 	dev_close(priv->dev);
7010 	dev_open(priv->dev, NULL);
7011 	clear_bit(STMMAC_DOWN, &priv->state);
7012 	clear_bit(STMMAC_RESETING, &priv->state);
7013 	rtnl_unlock();
7014 }
7015 
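/**
 * stmmac_service_task - deferred service work
 * @work: work_struct embedded in the driver private structure
 * Description: runs the reset subtask and clears STMMAC_SERVICE_SCHED so
 * that the service task can be scheduled again.
 */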
7016 static void stmmac_service_task(struct work_struct *work)
7017 {
7018 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7019 			service_task);
7020 
7021 	stmmac_reset_subtask(priv);
7022 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7023 }
7024 
7025 /**
7026  *  stmmac_hw_init - Init the MAC device
7027  *  @priv: driver private structure
7028  *  Description: this function configures the MAC device according to
7029  *  some platform parameters or the HW capability register. It prepares the
7030  *  driver to use either ring or chain mode and to set up either enhanced or
7031  *  normal descriptors.
7032  */
7033 static int stmmac_hw_init(struct stmmac_priv *priv)
7034 {
7035 	int ret;
7036 
7037 	/* dwmac-sun8i only works in chain mode */
7038 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7039 		chain_mode = 1;
7040 	priv->chain_mode = chain_mode;
7041 
7042 	/* Initialize HW Interface */
7043 	ret = stmmac_hwif_init(priv);
7044 	if (ret)
7045 		return ret;
7046 
7047 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7048 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7049 	if (priv->hw_cap_support) {
7050 		dev_info(priv->device, "DMA HW capability register supported\n");
7051 
7052 		/* Some gmac/dma configuration fields passed through the
7053 		 * platform (e.g. enh_desc, tx_coe) can be overridden
7054 		 * with the values from the HW capability register
7055 		 * (if supported).
7056 		 */
7057 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7058 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7059 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7060 		priv->hw->pmt = priv->plat->pmt;
7061 		if (priv->dma_cap.hash_tb_sz) {
7062 			priv->hw->multicast_filter_bins =
7063 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7064 			priv->hw->mcast_bits_log2 =
7065 					ilog2(priv->hw->multicast_filter_bins);
7066 		}
7067 
7068 		/* TXCOE doesn't work in thresh DMA mode */
7069 		if (priv->plat->force_thresh_dma_mode)
7070 			priv->plat->tx_coe = 0;
7071 		else
7072 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7073 
7074 		/* In case of GMAC4 rx_coe is from HW cap register. */
7075 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7076 
7077 		if (priv->dma_cap.rx_coe_type2)
7078 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7079 		else if (priv->dma_cap.rx_coe_type1)
7080 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7081 
7082 	} else {
7083 		dev_info(priv->device, "No HW DMA feature register supported\n");
7084 	}
7085 
7086 	if (priv->plat->rx_coe) {
7087 		priv->hw->rx_csum = priv->plat->rx_coe;
7088 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7089 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7090 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7091 	}
7092 	if (priv->plat->tx_coe)
7093 		dev_info(priv->device, "TX Checksum insertion supported\n");
7094 
7095 	if (priv->plat->pmt) {
7096 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7097 		device_set_wakeup_capable(priv->device, 1);
7098 	}
7099 
7100 	if (priv->dma_cap.tsoen)
7101 		dev_info(priv->device, "TSO supported\n");
7102 
7103 	priv->hw->vlan_fail_q_en =
7104 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7105 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7106 
7107 	/* Run HW quirks, if any */
7108 	if (priv->hwif_quirks) {
7109 		ret = priv->hwif_quirks(priv);
7110 		if (ret)
7111 			return ret;
7112 	}
7113 
7114 	/* RX Watchdog is available in cores newer than 3.40.
7115 	 * In some cases, for example on buggy HW, this feature
7116 	 * has to be disabled; this can be done by passing the
7117 	 * riwt_off field from the platform.
7118 	 */
7119 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7120 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7121 		priv->use_riwt = 1;
7122 		dev_info(priv->device,
7123 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7124 	}
7125 
7126 	return 0;
7127 }
7128 
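/**
 * stmmac_napi_add - add the NAPI contexts for all channels
 * @dev: network device pointer
 * Description: for each channel, initialize the channel data and register
 * the RX, TX and combined RX/TX NAPI instances, depending on how many RX
 * and TX queues are in use.
 */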
7129 static void stmmac_napi_add(struct net_device *dev)
7130 {
7131 	struct stmmac_priv *priv = netdev_priv(dev);
7132 	u32 queue, maxq;
7133 
7134 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7135 
7136 	for (queue = 0; queue < maxq; queue++) {
7137 		struct stmmac_channel *ch = &priv->channel[queue];
7138 
7139 		ch->priv_data = priv;
7140 		ch->index = queue;
7141 		spin_lock_init(&ch->lock);
7142 
7143 		if (queue < priv->plat->rx_queues_to_use) {
7144 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7145 		}
7146 		if (queue < priv->plat->tx_queues_to_use) {
7147 			netif_napi_add_tx(dev, &ch->tx_napi,
7148 					  stmmac_napi_poll_tx);
7149 		}
7150 		if (queue < priv->plat->rx_queues_to_use &&
7151 		    queue < priv->plat->tx_queues_to_use) {
7152 			netif_napi_add(dev, &ch->rxtx_napi,
7153 				       stmmac_napi_poll_rxtx);
7154 		}
7155 	}
7156 }
7157 
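/**
 * stmmac_napi_del - delete the NAPI contexts for all channels
 * @dev: network device pointer
 */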
7158 static void stmmac_napi_del(struct net_device *dev)
7159 {
7160 	struct stmmac_priv *priv = netdev_priv(dev);
7161 	u32 queue, maxq;
7162 
7163 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7164 
7165 	for (queue = 0; queue < maxq; queue++) {
7166 		struct stmmac_channel *ch = &priv->channel[queue];
7167 
7168 		if (queue < priv->plat->rx_queues_to_use)
7169 			netif_napi_del(&ch->rx_napi);
7170 		if (queue < priv->plat->tx_queues_to_use)
7171 			netif_napi_del(&ch->tx_napi);
7172 		if (queue < priv->plat->rx_queues_to_use &&
7173 		    queue < priv->plat->tx_queues_to_use) {
7174 			netif_napi_del(&ch->rxtx_napi);
7175 		}
7176 	}
7177 }
7178 
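/**
 * stmmac_reinit_queues - reconfigure the number of RX/TX queues
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues to use
 * @tx_cnt: new number of TX queues to use
 * Description: releases the interface if it is running, re-creates the NAPI
 * contexts and the default RSS table for the new queue counts, then re-opens
 * the interface.
 */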
7179 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7180 {
7181 	struct stmmac_priv *priv = netdev_priv(dev);
7182 	int ret = 0, i;
7183 
7184 	if (netif_running(dev))
7185 		stmmac_release(dev);
7186 
7187 	stmmac_napi_del(dev);
7188 
7189 	priv->plat->rx_queues_to_use = rx_cnt;
7190 	priv->plat->tx_queues_to_use = tx_cnt;
7191 	if (!netif_is_rxfh_configured(dev))
7192 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7193 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7194 									rx_cnt);
7195 
7196 	stmmac_napi_add(dev);
7197 
7198 	if (netif_running(dev))
7199 		ret = stmmac_open(dev);
7200 
7201 	return ret;
7202 }
7203 
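/**
 * stmmac_reinit_ringparam - reconfigure the DMA ring sizes
 * @dev: network device pointer
 * @rx_size: new RX descriptor ring size
 * @tx_size: new TX descriptor ring size
 * Description: releases the interface if it is running, updates the ring
 * sizes and re-opens the interface so that the new rings are allocated.
 */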
7204 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7205 {
7206 	struct stmmac_priv *priv = netdev_priv(dev);
7207 	int ret = 0;
7208 
7209 	if (netif_running(dev))
7210 		stmmac_release(dev);
7211 
7212 	priv->dma_conf.dma_rx_size = rx_size;
7213 	priv->dma_conf.dma_tx_size = tx_size;
7214 
7215 	if (netif_running(dev))
7216 		ret = stmmac_open(dev);
7217 
7218 	return ret;
7219 }
7220 
7221 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
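/**
 * stmmac_fpe_lp_task - FPE link partner handshake work
 * @work: work_struct embedded in the driver private structure
 * Description: polls the local and link partner FPE states; once both sides
 * are entering the ON state it enables frame preemption in hardware,
 * otherwise it keeps sending verify mPackets until the retry budget expires
 * or the handshake is turned off.
 */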
7222 static void stmmac_fpe_lp_task(struct work_struct *work)
7223 {
7224 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7225 						fpe_task);
7226 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7227 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7228 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7229 	bool *hs_enable = &fpe_cfg->hs_enable;
7230 	bool *enable = &fpe_cfg->enable;
7231 	int retries = 20;
7232 
7233 	while (retries-- > 0) {
7234 		/* Bail out immediately if FPE handshake is OFF */
7235 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7236 			break;
7237 
7238 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7239 		    *lp_state == FPE_STATE_ENTERING_ON) {
7240 			stmmac_fpe_configure(priv, priv->ioaddr,
7241 					     priv->plat->tx_queues_to_use,
7242 					     priv->plat->rx_queues_to_use,
7243 					     *enable);
7244 
7245 			netdev_info(priv->dev, "configured FPE\n");
7246 
7247 			*lo_state = FPE_STATE_ON;
7248 			*lp_state = FPE_STATE_ON;
7249 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7250 			break;
7251 		}
7252 
7253 		if ((*lo_state == FPE_STATE_CAPABLE ||
7254 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7255 		     *lp_state != FPE_STATE_ON) {
7256 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7257 				    *lo_state, *lp_state);
7258 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7259 						MPACKET_VERIFY);
7260 		}
7261 		/* Sleep then retry */
7262 		msleep(500);
7263 	}
7264 
7265 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7266 }
7267 
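/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to abort it
 */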
7268 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7269 {
7270 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7271 		if (enable) {
7272 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7273 						MPACKET_VERIFY);
7274 		} else {
7275 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7276 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7277 		}
7278 
7279 		priv->plat->fpe_cfg->hs_enable = enable;
7280 	}
7281 }
7282 
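/**
 * stmmac_xdp_rx_timestamp - XDP metadata hook returning the RX HW timestamp
 * @_ctx: XDP metadata context, wrapping a struct stmmac_xdp_buff
 * @timestamp: where to store the timestamp, in ktime format
 * Description: reads the RX hardware timestamp from the descriptor (the
 * context/next descriptor on GMAC4 and XGMAC), applies the CDC error
 * adjustment and returns 0, or -ENODATA if RX timestamping is disabled or
 * no timestamp is available.
 */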
7283 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7284 {
7285 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7286 	struct dma_desc *desc_contains_ts = ctx->desc;
7287 	struct stmmac_priv *priv = ctx->priv;
7288 	struct dma_desc *ndesc = ctx->ndesc;
7289 	struct dma_desc *desc = ctx->desc;
7290 	u64 ns = 0;
7291 
7292 	if (!priv->hwts_rx_en)
7293 		return -ENODATA;
7294 
7295 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7296 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7297 		desc_contains_ts = ndesc;
7298 
7299 	/* Check if timestamp is available */
7300 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7301 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7302 		ns -= priv->plat->cdc_error_adj;
7303 		*timestamp = ns_to_ktime(ns);
7304 		return 0;
7305 	}
7306 
7307 	return -ENODATA;
7308 }
7309 
7310 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7311 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7312 };
7313 
7314 /**
7315  * stmmac_dvr_probe
7316  * @device: device pointer
7317  * @plat_dat: platform data pointer
7318  * @res: stmmac resource pointer
7319  * Description: this is the main probe function; it calls
7320  * alloc_etherdev and allocates and initializes the priv structure.
7321  * Return:
7322  * 0 on success, a negative errno otherwise.
7323  */
7324 int stmmac_dvr_probe(struct device *device,
7325 		     struct plat_stmmacenet_data *plat_dat,
7326 		     struct stmmac_resources *res)
7327 {
7328 	struct net_device *ndev = NULL;
7329 	struct stmmac_priv *priv;
7330 	u32 rxq;
7331 	int i, ret = 0;
7332 
7333 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7334 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7335 	if (!ndev)
7336 		return -ENOMEM;
7337 
7338 	SET_NETDEV_DEV(ndev, device);
7339 
7340 	priv = netdev_priv(ndev);
7341 	priv->device = device;
7342 	priv->dev = ndev;
7343 
7344 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7345 		u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
7346 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7347 		u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
7348 
7349 	stmmac_set_ethtool_ops(ndev);
7350 	priv->pause = pause;
7351 	priv->plat = plat_dat;
7352 	priv->ioaddr = res->addr;
7353 	priv->dev->base_addr = (unsigned long)res->addr;
7354 	priv->plat->dma_cfg->multi_msi_en =
7355 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7356 
7357 	priv->dev->irq = res->irq;
7358 	priv->wol_irq = res->wol_irq;
7359 	priv->lpi_irq = res->lpi_irq;
7360 	priv->sfty_ce_irq = res->sfty_ce_irq;
7361 	priv->sfty_ue_irq = res->sfty_ue_irq;
7362 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7363 		priv->rx_irq[i] = res->rx_irq[i];
7364 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7365 		priv->tx_irq[i] = res->tx_irq[i];
7366 
7367 	if (!is_zero_ether_addr(res->mac))
7368 		eth_hw_addr_set(priv->dev, res->mac);
7369 
7370 	dev_set_drvdata(device, priv->dev);
7371 
7372 	/* Verify driver arguments */
7373 	stmmac_verify_args();
7374 
7375 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7376 	if (!priv->af_xdp_zc_qps)
7377 		return -ENOMEM;
7378 
7379 	/* Allocate workqueue */
7380 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7381 	if (!priv->wq) {
7382 		dev_err(priv->device, "failed to create workqueue\n");
7383 		ret = -ENOMEM;
7384 		goto error_wq_init;
7385 	}
7386 
7387 	INIT_WORK(&priv->service_task, stmmac_service_task);
7388 
7389 	/* Initialize Link Partner FPE workqueue */
7390 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7391 
7392 	/* Override with kernel parameters if supplied (XXX: this
7393 	 * needs to have multiple instances)
7394 	 */
7395 	if ((phyaddr >= 0) && (phyaddr <= 31))
7396 		priv->plat->phy_addr = phyaddr;
7397 
7398 	if (priv->plat->stmmac_rst) {
7399 		ret = reset_control_assert(priv->plat->stmmac_rst);
7400 		reset_control_deassert(priv->plat->stmmac_rst);
7401 		/* Some reset controllers only have a reset callback instead
7402 		 * of the assert + deassert callback pair.
7403 		 */
7404 		if (ret == -ENOTSUPP)
7405 			reset_control_reset(priv->plat->stmmac_rst);
7406 	}
7407 
7408 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7409 	if (ret == -ENOTSUPP)
7410 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7411 			ERR_PTR(ret));
7412 
7413 	/* Init MAC and get the capabilities */
7414 	ret = stmmac_hw_init(priv);
7415 	if (ret)
7416 		goto error_hw_init;
7417 
7418 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7419 	 */
7420 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7421 		priv->plat->dma_cfg->dche = false;
7422 
7423 	stmmac_check_ether_addr(priv);
7424 
7425 	ndev->netdev_ops = &stmmac_netdev_ops;
7426 
7427 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7428 
7429 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7430 			    NETIF_F_RXCSUM;
7431 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7432 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7433 
7434 	ret = stmmac_tc_init(priv, priv);
7435 	if (!ret) {
7436 		ndev->hw_features |= NETIF_F_HW_TC;
7437 	}
7438 
7439 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7440 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7441 		if (priv->plat->has_gmac4)
7442 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7443 		priv->tso = true;
7444 		dev_info(priv->device, "TSO feature enabled\n");
7445 	}
7446 
7447 	if (priv->dma_cap.sphen &&
7448 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7449 		ndev->hw_features |= NETIF_F_GRO;
7450 		priv->sph_cap = true;
7451 		priv->sph = priv->sph_cap;
7452 		dev_info(priv->device, "SPH feature enabled\n");
7453 	}
7454 
7455 	/* Ideally our host DMA address width is the same as for the
7456 	 * device. However, it may differ and then we have to use our
7457 	 * host DMA width for allocation and the device DMA width for
7458 	 * register handling.
7459 	 */
7460 	if (priv->plat->host_dma_width)
7461 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7462 	else
7463 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7464 
7465 	if (priv->dma_cap.host_dma_width) {
7466 		ret = dma_set_mask_and_coherent(device,
7467 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7468 		if (!ret) {
7469 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7470 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7471 
7472 			/*
7473 			 * If more than 32 bits can be addressed, make sure to
7474 			 * enable enhanced addressing mode.
7475 			 */
7476 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7477 				priv->plat->dma_cfg->eame = true;
7478 		} else {
7479 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7480 			if (ret) {
7481 				dev_err(priv->device, "Failed to set DMA Mask\n");
7482 				goto error_hw_init;
7483 			}
7484 
7485 			priv->dma_cap.host_dma_width = 32;
7486 		}
7487 	}
7488 
7489 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7490 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7491 #ifdef STMMAC_VLAN_TAG_USED
7492 	/* Both mac100 and gmac support receive VLAN tag detection */
7493 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7494 	if (priv->dma_cap.vlhash) {
7495 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7496 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7497 	}
7498 	if (priv->dma_cap.vlins) {
7499 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7500 		if (priv->dma_cap.dvlan)
7501 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7502 	}
7503 #endif
7504 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7505 
7506 	priv->xstats.threshold = tc;
7507 
7508 	/* Initialize RSS */
7509 	rxq = priv->plat->rx_queues_to_use;
7510 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7511 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7512 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7513 
7514 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7515 		ndev->features |= NETIF_F_RXHASH;
7516 
7517 	ndev->vlan_features |= ndev->features;
7518 	/* TSO doesn't work on VLANs yet */
7519 	ndev->vlan_features &= ~NETIF_F_TSO;
7520 
7521 	/* MTU range: 46 - hw-specific max */
7522 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7523 	if (priv->plat->has_xgmac)
7524 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7525 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7526 		ndev->max_mtu = JUMBO_LEN;
7527 	else
7528 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7529 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7530 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7531 	 */
7532 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7533 	    (priv->plat->maxmtu >= ndev->min_mtu))
7534 		ndev->max_mtu = priv->plat->maxmtu;
7535 	else if (priv->plat->maxmtu < ndev->min_mtu)
7536 		dev_warn(priv->device,
7537 			 "%s: warning: maxmtu having invalid value (%d)\n",
7538 			 __func__, priv->plat->maxmtu);
7539 
7540 	if (flow_ctrl)
7541 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7542 
7543 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7544 
7545 	/* Setup channels NAPI */
7546 	stmmac_napi_add(ndev);
7547 
7548 	mutex_init(&priv->lock);
7549 
7550 	/* If a specific clk_csr value is passed from the platform,
7551 	 * the CSR Clock Range selection cannot be changed at run-time
7552 	 * and it is fixed. Otherwise, the driver will try to set the
7553 	 * MDC clock dynamically according to the actual CSR clock
7554 	 * input.
7555 	 */
7556 	if (priv->plat->clk_csr >= 0)
7557 		priv->clk_csr = priv->plat->clk_csr;
7558 	else
7559 		stmmac_clk_csr_set(priv);
7560 
7561 	stmmac_check_pcs_mode(priv);
7562 
7563 	pm_runtime_get_noresume(device);
7564 	pm_runtime_set_active(device);
7565 	if (!pm_runtime_enabled(device))
7566 		pm_runtime_enable(device);
7567 
7568 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7569 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7570 		/* MDIO bus Registration */
7571 		ret = stmmac_mdio_register(ndev);
7572 		if (ret < 0) {
7573 			dev_err_probe(priv->device, ret,
7574 				      "%s: MDIO bus (id: %d) registration failed\n",
7575 				      __func__, priv->plat->bus_id);
7576 			goto error_mdio_register;
7577 		}
7578 	}
7579 
7580 	if (priv->plat->speed_mode_2500)
7581 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7582 
7583 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7584 		ret = stmmac_xpcs_setup(priv->mii);
7585 		if (ret)
7586 			goto error_xpcs_setup;
7587 	}
7588 
7589 	ret = stmmac_phy_setup(priv);
7590 	if (ret) {
7591 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7592 		goto error_phy_setup;
7593 	}
7594 
7595 	ret = register_netdev(ndev);
7596 	if (ret) {
7597 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7598 			__func__, ret);
7599 		goto error_netdev_register;
7600 	}
7601 
7602 #ifdef CONFIG_DEBUG_FS
7603 	stmmac_init_fs(ndev);
7604 #endif
7605 
7606 	if (priv->plat->dump_debug_regs)
7607 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7608 
7609 	/* Let pm_runtime_put() disable the clocks.
7610 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7611 	 */
7612 	pm_runtime_put(device);
7613 
7614 	return ret;
7615 
7616 error_netdev_register:
7617 	phylink_destroy(priv->phylink);
7618 error_xpcs_setup:
7619 error_phy_setup:
7620 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7621 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7622 		stmmac_mdio_unregister(ndev);
7623 error_mdio_register:
7624 	stmmac_napi_del(ndev);
7625 error_hw_init:
7626 	destroy_workqueue(priv->wq);
7627 error_wq_init:
7628 	bitmap_free(priv->af_xdp_zc_qps);
7629 
7630 	return ret;
7631 }
7632 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7633 
7634 /**
7635  * stmmac_dvr_remove
7636  * @dev: device pointer
7637  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7638  * changes the link status and releases the DMA descriptor rings.
7639  */
7640 void stmmac_dvr_remove(struct device *dev)
7641 {
7642 	struct net_device *ndev = dev_get_drvdata(dev);
7643 	struct stmmac_priv *priv = netdev_priv(ndev);
7644 
7645 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7646 
7647 	pm_runtime_get_sync(dev);
7648 
7649 	stmmac_stop_all_dma(priv);
7650 	stmmac_mac_set(priv, priv->ioaddr, false);
7651 	netif_carrier_off(ndev);
7652 	unregister_netdev(ndev);
7653 
7654 #ifdef CONFIG_DEBUG_FS
7655 	stmmac_exit_fs(ndev);
7656 #endif
7657 	phylink_destroy(priv->phylink);
7658 	if (priv->plat->stmmac_rst)
7659 		reset_control_assert(priv->plat->stmmac_rst);
7660 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7661 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7662 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7663 		stmmac_mdio_unregister(ndev);
7664 	destroy_workqueue(priv->wq);
7665 	mutex_destroy(&priv->lock);
7666 	bitmap_free(priv->af_xdp_zc_qps);
7667 
7668 	pm_runtime_disable(dev);
7669 	pm_runtime_put_noidle(dev);
7670 }
7671 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7672 
7673 /**
7674  * stmmac_suspend - suspend callback
7675  * @dev: device pointer
7676  * Description: this function suspends the device; it is called by the
7677  * platform driver to stop the network queue, release the resources,
7678  * program the PMT register (for WoL) and clean up driver resources.
7679  */
7680 int stmmac_suspend(struct device *dev)
7681 {
7682 	struct net_device *ndev = dev_get_drvdata(dev);
7683 	struct stmmac_priv *priv = netdev_priv(ndev);
7684 	u32 chan;
7685 
7686 	if (!ndev || !netif_running(ndev))
7687 		return 0;
7688 
7689 	mutex_lock(&priv->lock);
7690 
7691 	netif_device_detach(ndev);
7692 
7693 	stmmac_disable_all_queues(priv);
7694 
7695 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7696 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7697 
7698 	if (priv->eee_enabled) {
7699 		priv->tx_path_in_lpi_mode = false;
7700 		del_timer_sync(&priv->eee_ctrl_timer);
7701 	}
7702 
7703 	/* Stop TX/RX DMA */
7704 	stmmac_stop_all_dma(priv);
7705 
7706 	if (priv->plat->serdes_powerdown)
7707 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7708 
7709 	/* Enable Power down mode by programming the PMT regs */
7710 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7711 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7712 		priv->irq_wake = 1;
7713 	} else {
7714 		stmmac_mac_set(priv, priv->ioaddr, false);
7715 		pinctrl_pm_select_sleep_state(priv->device);
7716 	}
7717 
7718 	mutex_unlock(&priv->lock);
7719 
7720 	rtnl_lock();
7721 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7722 		phylink_suspend(priv->phylink, true);
7723 	} else {
7724 		if (device_may_wakeup(priv->device))
7725 			phylink_speed_down(priv->phylink, false);
7726 		phylink_suspend(priv->phylink, false);
7727 	}
7728 	rtnl_unlock();
7729 
7730 	if (priv->dma_cap.fpesel) {
7731 		/* Disable FPE */
7732 		stmmac_fpe_configure(priv, priv->ioaddr,
7733 				     priv->plat->tx_queues_to_use,
7734 				     priv->plat->rx_queues_to_use, false);
7735 
7736 		stmmac_fpe_handshake(priv, false);
7737 		stmmac_fpe_stop_wq(priv);
7738 	}
7739 
7740 	priv->speed = SPEED_UNKNOWN;
7741 	return 0;
7742 }
7743 EXPORT_SYMBOL_GPL(stmmac_suspend);
7744 
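/**
 * stmmac_reset_rx_queue - reset the RX queue indexes
 * @priv: driver private structure
 * @queue: RX queue index
 */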
7745 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7746 {
7747 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7748 
7749 	rx_q->cur_rx = 0;
7750 	rx_q->dirty_rx = 0;
7751 }
7752 
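/**
 * stmmac_reset_tx_queue - reset the TX queue indexes, MSS and BQL state
 * @priv: driver private structure
 * @queue: TX queue index
 */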
7753 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7754 {
7755 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7756 
7757 	tx_q->cur_tx = 0;
7758 	tx_q->dirty_tx = 0;
7759 	tx_q->mss = 0;
7760 
7761 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7762 }
7763 
7764 /**
7765  * stmmac_reset_queues_param - reset queue parameters
7766  * @priv: device pointer
7767  */
7768 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7769 {
7770 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7771 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7772 	u32 queue;
7773 
7774 	for (queue = 0; queue < rx_cnt; queue++)
7775 		stmmac_reset_rx_queue(priv, queue);
7776 
7777 	for (queue = 0; queue < tx_cnt; queue++)
7778 		stmmac_reset_tx_queue(priv, queue);
7779 }
7780 
7781 /**
7782  * stmmac_resume - resume callback
7783  * @dev: device pointer
7784  * Description: on resume, this function is invoked to set up the DMA and the
7785  * CORE in a usable state.
7786  */
7787 int stmmac_resume(struct device *dev)
7788 {
7789 	struct net_device *ndev = dev_get_drvdata(dev);
7790 	struct stmmac_priv *priv = netdev_priv(ndev);
7791 	int ret;
7792 
7793 	if (!netif_running(ndev))
7794 		return 0;
7795 
7796 	/* The Power Down bit in the PM register is cleared
7797 	 * automatically as soon as a magic packet or a Wake-up frame
7798 	 * is received. Anyway, it's better to manually clear
7799 	 * this bit because it can generate problems while resuming
7800 	 * from other devices (e.g. serial console).
7801 	 */
7802 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7803 		mutex_lock(&priv->lock);
7804 		stmmac_pmt(priv, priv->hw, 0);
7805 		mutex_unlock(&priv->lock);
7806 		priv->irq_wake = 0;
7807 	} else {
7808 		pinctrl_pm_select_default_state(priv->device);
7809 		/* reset the phy so that it's ready */
7810 		if (priv->mii)
7811 			stmmac_mdio_reset(priv->mii);
7812 	}
7813 
7814 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7815 	    priv->plat->serdes_powerup) {
7816 		ret = priv->plat->serdes_powerup(ndev,
7817 						 priv->plat->bsp_priv);
7818 
7819 		if (ret < 0)
7820 			return ret;
7821 	}
7822 
7823 	rtnl_lock();
7824 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7825 		phylink_resume(priv->phylink);
7826 	} else {
7827 		phylink_resume(priv->phylink);
7828 		if (device_may_wakeup(priv->device))
7829 			phylink_speed_up(priv->phylink);
7830 	}
7831 	rtnl_unlock();
7832 
7833 	rtnl_lock();
7834 	mutex_lock(&priv->lock);
7835 
7836 	stmmac_reset_queues_param(priv);
7837 
7838 	stmmac_free_tx_skbufs(priv);
7839 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7840 
7841 	stmmac_hw_setup(ndev, false);
7842 	stmmac_init_coalesce(priv);
7843 	stmmac_set_rx_mode(ndev);
7844 
7845 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7846 
7847 	stmmac_enable_all_queues(priv);
7848 	stmmac_enable_all_dma_irq(priv);
7849 
7850 	mutex_unlock(&priv->lock);
7851 	rtnl_unlock();
7852 
7853 	netif_device_attach(ndev);
7854 
7855 	return 0;
7856 }
7857 EXPORT_SYMBOL_GPL(stmmac_resume);
7858 
7859 #ifndef MODULE
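/**
 * stmmac_cmdline_opt - parse the built-in "stmmaceth=" command line options
 * @str: comma-separated list of option:value pairs
 * Description: when the driver is built in, each recognized option overrides
 * the corresponding module parameter (debug, phyaddr, buf_sz, tc, watchdog,
 * flow_ctrl, pause, eee_timer, chain_mode).
 */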
7860 static int __init stmmac_cmdline_opt(char *str)
7861 {
7862 	char *opt;
7863 
7864 	if (!str || !*str)
7865 		return 1;
7866 	while ((opt = strsep(&str, ",")) != NULL) {
7867 		if (!strncmp(opt, "debug:", 6)) {
7868 			if (kstrtoint(opt + 6, 0, &debug))
7869 				goto err;
7870 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7871 			if (kstrtoint(opt + 8, 0, &phyaddr))
7872 				goto err;
7873 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7874 			if (kstrtoint(opt + 7, 0, &buf_sz))
7875 				goto err;
7876 		} else if (!strncmp(opt, "tc:", 3)) {
7877 			if (kstrtoint(opt + 3, 0, &tc))
7878 				goto err;
7879 		} else if (!strncmp(opt, "watchdog:", 9)) {
7880 			if (kstrtoint(opt + 9, 0, &watchdog))
7881 				goto err;
7882 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7883 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7884 				goto err;
7885 		} else if (!strncmp(opt, "pause:", 6)) {
7886 			if (kstrtoint(opt + 6, 0, &pause))
7887 				goto err;
7888 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7889 			if (kstrtoint(opt + 10, 0, &eee_timer))
7890 				goto err;
7891 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7892 			if (kstrtoint(opt + 11, 0, &chain_mode))
7893 				goto err;
7894 		}
7895 	}
7896 	return 1;
7897 
7898 err:
7899 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7900 	return 1;
7901 }
7902 
7903 __setup("stmmaceth=", stmmac_cmdline_opt);
7904 #endif /* MODULE */
7905 
7906 static int __init stmmac_init(void)
7907 {
7908 #ifdef CONFIG_DEBUG_FS
7909 	/* Create debugfs main directory if it doesn't exist yet */
7910 	if (!stmmac_fs_dir)
7911 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7912 	register_netdevice_notifier(&stmmac_notifier);
7913 #endif
7914 
7915 	return 0;
7916 }
7917 
7918 static void __exit stmmac_exit(void)
7919 {
7920 #ifdef CONFIG_DEBUG_FS
7921 	unregister_netdevice_notifier(&stmmac_notifier);
7922 	debugfs_remove_recursive(stmmac_fs_dir);
7923 #endif
7924 }
7925 
7926 module_init(stmmac_init)
7927 module_exit(stmmac_exit)
7928 
7929 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7930 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7931 MODULE_LICENSE("GPL");
7932