1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
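/* Per-packet verdict flags returned by the XDP RX path */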
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver uses the ring mode to manage TX and RX descriptors,
122  * but the user can force use of the chain mode instead of the ring.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid for all
308 	 * cases except the ones handled below.
309 	 * For clock rates higher than the IEEE 802.3 specified frequency
310 	 * we cannot estimate the proper divider because the frequency of
311 	 * clk_csr_i is not known, so the default divider is left
312 	 * unchanged.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
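	/* The sun8i glue layer uses its own CSR clock divider encoding, so
	 * override the value chosen above when that platform flag is set.
	 */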
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
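	/* XGMAC cores likewise use their own CSR divider encoding to cover
	 * their higher CSR clock rates.
	 */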
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
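	/* Free descriptors are those between cur_tx (next descriptor the
	 * driver will use) and dirty_tx (next descriptor to be reclaimed),
	 * minus one so the ring never appears completely full.
	 */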
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
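	/* Descriptors between dirty_rx and cur_rx (with wrap-around) have
	 * been consumed and still need to be refilled with fresh buffers.
	 */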
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter LPI mode
405  * @priv: driver private structure
406  * Description: verify that all TX queues are idle and, if so, enter LPI
407  * mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have finished their work */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: exit LPI and disable EEE when the TX path is in LPI
433  * state. This is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and we are not already in LPI state,
452  *  then the MAC transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts
468  *  the related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* When using the PCS we cannot access the PHY registers at this
475 	 * stage, so extra features such as EEE are not supported.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
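	/* Prefer the HW LPI entry timer when the core supports it (GMAC4)
	 * and the requested timer value fits; otherwise fall back to the
	 * SW timer.
	 */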
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function reads the timestamp from the descriptor, performs some
534  * sanity checks and passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function reads the received packet's timestamp from the descriptor
577  * and passes it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and a negative errno on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* do not time stamp any incoming packet */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping.
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1 any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * Will be rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate the default addend value. The formula is:
858 	 * addend = (2^32) / freq_div_ratio,
859 	 * where freq_div_ratio = 1e9 / sec_inc
860 	 * (sec_inc being the sub-second increment, in ns)
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: verify whether the HW supports PTPv1 or PTPv2 by looking
880  * at the HW capability register, and initialize the timestamping counter
881  * accordingly.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex mode
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
968 	} else {
969 		*lo_state = FPE_STATE_OFF;
970 		*lp_state = FPE_STATE_OFF;
971 	}
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	priv->eee_active = false;
981 	priv->tx_lpi_enabled = false;
982 	priv->eee_enabled = stmmac_eee_init(priv);
983 	stmmac_set_eee_pls(priv, priv->hw, false);
984 
985 	if (priv->dma_cap.fpesel)
986 		stmmac_fpe_link_state_handle(priv, false);
987 }
988 
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 			       struct phy_device *phy,
991 			       unsigned int mode, phy_interface_t interface,
992 			       int speed, int duplex,
993 			       bool tx_pause, bool rx_pause)
994 {
995 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 	u32 old_ctrl, ctrl;
997 
998 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 	    priv->plat->serdes_powerup)
1000 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001 
1002 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004 
1005 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 		switch (speed) {
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_5000:
1011 			ctrl |= priv->hw->link.xgmii.speed5000;
1012 			break;
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.xgmii.speed2500;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 		switch (speed) {
1021 		case SPEED_100000:
1022 			ctrl |= priv->hw->link.xlgmii.speed100000;
1023 			break;
1024 		case SPEED_50000:
1025 			ctrl |= priv->hw->link.xlgmii.speed50000;
1026 			break;
1027 		case SPEED_40000:
1028 			ctrl |= priv->hw->link.xlgmii.speed40000;
1029 			break;
1030 		case SPEED_25000:
1031 			ctrl |= priv->hw->link.xlgmii.speed25000;
1032 			break;
1033 		case SPEED_10000:
1034 			ctrl |= priv->hw->link.xgmii.speed10000;
1035 			break;
1036 		case SPEED_2500:
1037 			ctrl |= priv->hw->link.speed2500;
1038 			break;
1039 		case SPEED_1000:
1040 			ctrl |= priv->hw->link.speed1000;
1041 			break;
1042 		default:
1043 			return;
1044 		}
1045 	} else {
1046 		switch (speed) {
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		case SPEED_100:
1054 			ctrl |= priv->hw->link.speed100;
1055 			break;
1056 		case SPEED_10:
1057 			ctrl |= priv->hw->link.speed10;
1058 			break;
1059 		default:
1060 			return;
1061 		}
1062 	}
1063 
1064 	priv->speed = speed;
1065 
1066 	if (priv->plat->fix_mac_speed)
1067 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068 
1069 	if (!duplex)
1070 		ctrl &= ~priv->hw->link.duplex;
1071 	else
1072 		ctrl |= priv->hw->link.duplex;
1073 
1074 	/* Flow Control operation */
1075 	if (rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_AUTO;
1077 	else if (rx_pause && !tx_pause)
1078 		priv->flow_ctrl = FLOW_RX;
1079 	else if (!rx_pause && tx_pause)
1080 		priv->flow_ctrl = FLOW_TX;
1081 	else
1082 		priv->flow_ctrl = FLOW_OFF;
1083 
1084 	stmmac_mac_flow_ctrl(priv, duplex);
1085 
1086 	if (ctrl != old_ctrl)
1087 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088 
1089 	stmmac_mac_set(priv, priv->ioaddr, true);
1090 	if (phy && priv->dma_cap.eee) {
1091 		priv->eee_active =
1092 			phy_init_eee(phy, !(priv->plat->flags &
1093 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 		priv->eee_enabled = stmmac_eee_init(priv);
1095 		priv->tx_lpi_enabled = priv->eee_enabled;
1096 		stmmac_set_eee_pls(priv, priv->hw, true);
1097 	}
1098 
1099 	if (priv->dma_cap.fpesel)
1100 		stmmac_fpe_link_state_handle(priv, true);
1101 
1102 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 		stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105 
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 	.mac_select_pcs = stmmac_mac_select_pcs,
1108 	.mac_config = stmmac_mac_config,
1109 	.mac_link_down = stmmac_mac_link_down,
1110 	.mac_link_up = stmmac_mac_link_up,
1111 };
1112 
1113 /**
1114  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1115  * @priv: driver private structure
1116  * Description: verify whether the HW supports the PCS, i.e. the
1117  * Physical Coding Sublayer interface that can be used when the MAC is
1118  * configured for the TBI, RTBI, or SGMII PHY interface.
1119  */
1120 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1121 {
1122 	int interface = priv->plat->mac_interface;
1123 
1124 	if (priv->dma_cap.pcs) {
1125 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1129 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_RGMII;
1131 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1132 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1133 			priv->hw->pcs = STMMAC_PCS_SGMII;
1134 		}
1135 	}
1136 }
1137 
1138 /**
1139  * stmmac_init_phy - PHY initialization
1140  * @dev: net device structure
1141  * Description: it initializes the driver's PHY state, and attaches the PHY
1142  * to the MAC driver.
1143  *  Return value:
1144  *  0 on success
1145  */
1146 static int stmmac_init_phy(struct net_device *dev)
1147 {
1148 	struct stmmac_priv *priv = netdev_priv(dev);
1149 	struct fwnode_handle *phy_fwnode;
1150 	struct fwnode_handle *fwnode;
1151 	int ret;
1152 
1153 	if (!phylink_expects_phy(priv->phylink))
1154 		return 0;
1155 
1156 	fwnode = priv->plat->port_node;
1157 	if (!fwnode)
1158 		fwnode = dev_fwnode(priv->device);
1159 
1160 	if (fwnode)
1161 		phy_fwnode = fwnode_get_phy_node(fwnode);
1162 	else
1163 		phy_fwnode = NULL;
1164 
1165 	/* Some DT bindings do not set up the PHY handle. Let's try to
1166 	 * parse it manually.
1167 	 */
1168 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1169 		int addr = priv->plat->phy_addr;
1170 		struct phy_device *phydev;
1171 
1172 		if (addr < 0) {
1173 			netdev_err(priv->dev, "no phy found\n");
1174 			return -ENODEV;
1175 		}
1176 
1177 		phydev = mdiobus_get_phy(priv->mii, addr);
1178 		if (!phydev) {
1179 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1180 			return -ENODEV;
1181 		}
1182 
1183 		ret = phylink_connect_phy(priv->phylink, phydev);
1184 	} else {
1185 		fwnode_handle_put(phy_fwnode);
1186 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 	}
1188 
1189 	if (!priv->plat->pmt) {
1190 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191 
1192 		phylink_ethtool_get_wol(priv->phylink, &wol);
1193 		device_set_wakeup_capable(priv->device, !!wol.supported);
1194 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 static int stmmac_phy_setup(struct stmmac_priv *priv)
1201 {
1202 	struct stmmac_mdio_bus_data *mdio_bus_data;
1203 	int mode = priv->plat->phy_interface;
1204 	struct fwnode_handle *fwnode;
1205 	struct phylink *phylink;
1206 	int max_speed;
1207 
1208 	priv->phylink_config.dev = &priv->dev->dev;
1209 	priv->phylink_config.type = PHYLINK_NETDEV;
1210 	priv->phylink_config.mac_managed_pm = true;
1211 
1212 	mdio_bus_data = priv->plat->mdio_bus_data;
1213 	if (mdio_bus_data)
1214 		priv->phylink_config.ovr_an_inband =
1215 			mdio_bus_data->xpcs_an_inband;
1216 
1217 	/* Set the platform/firmware specified interface mode. Note, phylink
1218 	 * deals with the PHY interface mode, not the MAC interface mode.
1219 	 */
1220 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1221 
1222 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1223 	if (priv->hw->xpcs)
1224 		xpcs_get_interfaces(priv->hw->xpcs,
1225 				    priv->phylink_config.supported_interfaces);
1226 
1227 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1228 						MAC_10FD | MAC_100FD |
1229 						MAC_1000FD;
1230 
1231 	/* Half-duplex can only work with a single queue */
1232 	if (priv->plat->tx_queues_to_use <= 1)
1233 		priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
1234 							 MAC_1000HD;
1235 
1236 	/* Get the MAC specific capabilities */
1237 	stmmac_mac_phylink_get_caps(priv);
1238 
1239 	max_speed = priv->plat->max_speed;
1240 	if (max_speed)
1241 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1242 
1243 	fwnode = priv->plat->port_node;
1244 	if (!fwnode)
1245 		fwnode = dev_fwnode(priv->device);
1246 
1247 	phylink = phylink_create(&priv->phylink_config, fwnode,
1248 				 mode, &stmmac_phylink_mac_ops);
1249 	if (IS_ERR(phylink))
1250 		return PTR_ERR(phylink);
1251 
1252 	priv->phylink = phylink;
1253 	return 0;
1254 }
1255 
1256 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1257 				    struct stmmac_dma_conf *dma_conf)
1258 {
1259 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1260 	unsigned int desc_size;
1261 	void *head_rx;
1262 	u32 queue;
1263 
1264 	/* Display RX rings */
1265 	for (queue = 0; queue < rx_cnt; queue++) {
1266 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1267 
1268 		pr_info("\tRX Queue %u rings\n", queue);
1269 
1270 		if (priv->extend_desc) {
1271 			head_rx = (void *)rx_q->dma_erx;
1272 			desc_size = sizeof(struct dma_extended_desc);
1273 		} else {
1274 			head_rx = (void *)rx_q->dma_rx;
1275 			desc_size = sizeof(struct dma_desc);
1276 		}
1277 
1278 		/* Display RX ring */
1279 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1280 				    rx_q->dma_rx_phy, desc_size);
1281 	}
1282 }
1283 
1284 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1285 				    struct stmmac_dma_conf *dma_conf)
1286 {
1287 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1288 	unsigned int desc_size;
1289 	void *head_tx;
1290 	u32 queue;
1291 
1292 	/* Display TX rings */
1293 	for (queue = 0; queue < tx_cnt; queue++) {
1294 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1295 
1296 		pr_info("\tTX Queue %u rings\n", queue);
1297 
1298 		if (priv->extend_desc) {
1299 			head_tx = (void *)tx_q->dma_etx;
1300 			desc_size = sizeof(struct dma_extended_desc);
1301 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1302 			head_tx = (void *)tx_q->dma_entx;
1303 			desc_size = sizeof(struct dma_edesc);
1304 		} else {
1305 			head_tx = (void *)tx_q->dma_tx;
1306 			desc_size = sizeof(struct dma_desc);
1307 		}
1308 
1309 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1310 				    tx_q->dma_tx_phy, desc_size);
1311 	}
1312 }
1313 
1314 static void stmmac_display_rings(struct stmmac_priv *priv,
1315 				 struct stmmac_dma_conf *dma_conf)
1316 {
1317 	/* Display RX ring */
1318 	stmmac_display_rx_rings(priv, dma_conf);
1319 
1320 	/* Display TX ring */
1321 	stmmac_display_tx_rings(priv, dma_conf);
1322 }
1323 
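/* Pick a DMA buffer size large enough to hold a frame for the given MTU,
 * rounded up to the next supported bucket.
 */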
1324 static int stmmac_set_bfsize(int mtu, int bufsize)
1325 {
1326 	int ret = bufsize;
1327 
1328 	if (mtu >= BUF_SIZE_8KiB)
1329 		ret = BUF_SIZE_16KiB;
1330 	else if (mtu >= BUF_SIZE_4KiB)
1331 		ret = BUF_SIZE_8KiB;
1332 	else if (mtu >= BUF_SIZE_2KiB)
1333 		ret = BUF_SIZE_4KiB;
1334 	else if (mtu > DEFAULT_BUFSIZE)
1335 		ret = BUF_SIZE_2KiB;
1336 	else
1337 		ret = DEFAULT_BUFSIZE;
1338 
1339 	return ret;
1340 }
1341 
1342 /**
1343  * stmmac_clear_rx_descriptors - clear RX descriptors
1344  * @priv: driver private structure
1345  * @dma_conf: structure to take the dma data
1346  * @queue: RX queue index
1347  * Description: this function is called to clear the RX descriptors,
1348  * whether basic or extended descriptors are in use.
1349  */
1350 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1351 					struct stmmac_dma_conf *dma_conf,
1352 					u32 queue)
1353 {
1354 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1355 	int i;
1356 
1357 	/* Clear the RX descriptors */
1358 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1359 		if (priv->extend_desc)
1360 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1361 					priv->use_riwt, priv->mode,
1362 					(i == dma_conf->dma_rx_size - 1),
1363 					dma_conf->dma_buf_sz);
1364 		else
1365 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1366 					priv->use_riwt, priv->mode,
1367 					(i == dma_conf->dma_rx_size - 1),
1368 					dma_conf->dma_buf_sz);
1369 }
1370 
1371 /**
1372  * stmmac_clear_tx_descriptors - clear tx descriptors
1373  * @priv: driver private structure
1374  * @dma_conf: structure to take the dma data
1375  * @queue: TX queue index.
1376  * Description: this function is called to clear the TX descriptors,
1377  * whether basic or extended descriptors are in use.
1378  */
1379 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1380 					struct stmmac_dma_conf *dma_conf,
1381 					u32 queue)
1382 {
1383 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1384 	int i;
1385 
1386 	/* Clear the TX descriptors */
1387 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1388 		int last = (i == (dma_conf->dma_tx_size - 1));
1389 		struct dma_desc *p;
1390 
1391 		if (priv->extend_desc)
1392 			p = &tx_q->dma_etx[i].basic;
1393 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1394 			p = &tx_q->dma_entx[i].basic;
1395 		else
1396 			p = &tx_q->dma_tx[i];
1397 
1398 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1399 	}
1400 }
1401 
1402 /**
1403  * stmmac_clear_descriptors - clear descriptors
1404  * @priv: driver private structure
1405  * @dma_conf: structure to take the dma data
1406  * Description: this function is called to clear the TX and RX descriptors,
1407  * whether basic or extended descriptors are in use.
1408  */
1409 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1410 				     struct stmmac_dma_conf *dma_conf)
1411 {
1412 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1413 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1414 	u32 queue;
1415 
1416 	/* Clear the RX descriptors */
1417 	for (queue = 0; queue < rx_queue_cnt; queue++)
1418 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1419 
1420 	/* Clear the TX descriptors */
1421 	for (queue = 0; queue < tx_queue_cnt; queue++)
1422 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1423 }
1424 
1425 /**
1426  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1427  * @priv: driver private structure
1428  * @dma_conf: structure to take the dma data
1429  * @p: descriptor pointer
1430  * @i: descriptor index
1431  * @flags: gfp flag
1432  * @queue: RX queue index
1433  * Description: this function is called to allocate a receive buffer, perform
1434  * the DMA mapping and init the descriptor.
1435  */
1436 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1437 				  struct stmmac_dma_conf *dma_conf,
1438 				  struct dma_desc *p,
1439 				  int i, gfp_t flags, u32 queue)
1440 {
1441 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1442 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1443 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1444 
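	/* Limit allocations to 32-bit addressable memory when the host DMA
	 * width does not exceed 32 bits.
	 */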
1445 	if (priv->dma_cap.host_dma_width <= 32)
1446 		gfp |= GFP_DMA32;
1447 
1448 	if (!buf->page) {
1449 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1450 		if (!buf->page)
1451 			return -ENOMEM;
1452 		buf->page_offset = stmmac_rx_offset(priv);
1453 	}
1454 
1455 	if (priv->sph && !buf->sec_page) {
1456 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1457 		if (!buf->sec_page)
1458 			return -ENOMEM;
1459 
1460 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1461 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1462 	} else {
1463 		buf->sec_page = NULL;
1464 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1465 	}
1466 
1467 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1468 
1469 	stmmac_set_desc_addr(priv, p, buf->addr);
1470 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1471 		stmmac_init_desc3(priv, p);
1472 
1473 	return 0;
1474 }
1475 
1476 /**
1477  * stmmac_free_rx_buffer - free an RX dma buffer
1478  * @priv: private structure
1479  * @rx_q: RX queue
1480  * @i: buffer index.
1481  */
1482 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1483 				  struct stmmac_rx_queue *rx_q,
1484 				  int i)
1485 {
1486 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1487 
1488 	if (buf->page)
1489 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1490 	buf->page = NULL;
1491 
1492 	if (buf->sec_page)
1493 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1494 	buf->sec_page = NULL;
1495 }
1496 
1497 /**
1498  * stmmac_free_tx_buffer - free a TX dma buffer
1499  * @priv: private structure
1500  * @dma_conf: structure to take the dma data
1501  * @queue: TX queue index
1502  * @i: buffer index.
1503  */
1504 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1505 				  struct stmmac_dma_conf *dma_conf,
1506 				  u32 queue, int i)
1507 {
1508 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1509 
1510 	if (tx_q->tx_skbuff_dma[i].buf &&
1511 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1512 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1513 			dma_unmap_page(priv->device,
1514 				       tx_q->tx_skbuff_dma[i].buf,
1515 				       tx_q->tx_skbuff_dma[i].len,
1516 				       DMA_TO_DEVICE);
1517 		else
1518 			dma_unmap_single(priv->device,
1519 					 tx_q->tx_skbuff_dma[i].buf,
1520 					 tx_q->tx_skbuff_dma[i].len,
1521 					 DMA_TO_DEVICE);
1522 	}
1523 
1524 	if (tx_q->xdpf[i] &&
1525 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1526 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1527 		xdp_return_frame(tx_q->xdpf[i]);
1528 		tx_q->xdpf[i] = NULL;
1529 	}
1530 
1531 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1532 		tx_q->xsk_frames_done++;
1533 
1534 	if (tx_q->tx_skbuff[i] &&
1535 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1536 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1537 		tx_q->tx_skbuff[i] = NULL;
1538 	}
1539 
1540 	tx_q->tx_skbuff_dma[i].buf = 0;
1541 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1542 }
1543 
1544 /**
1545  * dma_free_rx_skbufs - free RX dma buffers
1546  * @priv: private structure
1547  * @dma_conf: structure to take the dma data
1548  * @queue: RX queue index
1549  */
1550 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1551 			       struct stmmac_dma_conf *dma_conf,
1552 			       u32 queue)
1553 {
1554 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1555 	int i;
1556 
1557 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1558 		stmmac_free_rx_buffer(priv, rx_q, i);
1559 }
1560 
1561 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1562 				   struct stmmac_dma_conf *dma_conf,
1563 				   u32 queue, gfp_t flags)
1564 {
1565 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1566 	int i;
1567 
1568 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1569 		struct dma_desc *p;
1570 		int ret;
1571 
1572 		if (priv->extend_desc)
1573 			p = &((rx_q->dma_erx + i)->basic);
1574 		else
1575 			p = rx_q->dma_rx + i;
1576 
1577 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1578 					     queue);
1579 		if (ret)
1580 			return ret;
1581 
1582 		rx_q->buf_alloc_num++;
1583 	}
1584 
1585 	return 0;
1586 }
1587 
1588 /**
1589  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1590  * @priv: private structure
1591  * @dma_conf: structure to take the dma data
1592  * @queue: RX queue index
1593  */
1594 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1595 				struct stmmac_dma_conf *dma_conf,
1596 				u32 queue)
1597 {
1598 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1599 	int i;
1600 
1601 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1602 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1603 
1604 		if (!buf->xdp)
1605 			continue;
1606 
1607 		xsk_buff_free(buf->xdp);
1608 		buf->xdp = NULL;
1609 	}
1610 }
1611 
1612 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1613 				      struct stmmac_dma_conf *dma_conf,
1614 				      u32 queue)
1615 {
1616 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1617 	int i;
1618 
1619 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1620 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1621 	 * use this macro to make sure there are no size violations.
1622 	 */
1623 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1624 
1625 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1626 		struct stmmac_rx_buffer *buf;
1627 		dma_addr_t dma_addr;
1628 		struct dma_desc *p;
1629 
1630 		if (priv->extend_desc)
1631 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1632 		else
1633 			p = rx_q->dma_rx + i;
1634 
1635 		buf = &rx_q->buf_pool[i];
1636 
1637 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1638 		if (!buf->xdp)
1639 			return -ENOMEM;
1640 
1641 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1642 		stmmac_set_desc_addr(priv, p, dma_addr);
1643 		rx_q->buf_alloc_num++;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1650 {
1651 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1652 		return NULL;
1653 
1654 	return xsk_get_pool_from_qid(priv->dev, queue);
1655 }
1656 
1657 /**
1658  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1659  * @priv: driver private structure
1660  * @dma_conf: structure to take the dma data
1661  * @queue: RX queue index
1662  * @flags: gfp flag.
1663  * Description: this function initializes the DMA RX descriptors
1664  * and allocates the socket buffers. It supports the chained and ring
1665  * modes.
1666  */
1667 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1668 				    struct stmmac_dma_conf *dma_conf,
1669 				    u32 queue, gfp_t flags)
1670 {
1671 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1672 	int ret;
1673 
1674 	netif_dbg(priv, probe, priv->dev,
1675 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1676 		  (u32)rx_q->dma_rx_phy);
1677 
1678 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1679 
1680 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1681 
1682 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1683 
1684 	if (rx_q->xsk_pool) {
1685 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1686 						   MEM_TYPE_XSK_BUFF_POOL,
1687 						   NULL));
1688 		netdev_info(priv->dev,
1689 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1690 			    rx_q->queue_index);
1691 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1692 	} else {
1693 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1694 						   MEM_TYPE_PAGE_POOL,
1695 						   rx_q->page_pool));
1696 		netdev_info(priv->dev,
1697 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1698 			    rx_q->queue_index);
1699 	}
1700 
1701 	if (rx_q->xsk_pool) {
1702 		/* RX XDP ZC buffer pool may not be populated, e.g.
1703 		 * xdpsock TX-only.
1704 		 */
1705 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1706 	} else {
1707 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1708 		if (ret < 0)
1709 			return -ENOMEM;
1710 	}
1711 
1712 	/* Setup the chained descriptor addresses */
1713 	if (priv->mode == STMMAC_CHAIN_MODE) {
1714 		if (priv->extend_desc)
1715 			stmmac_mode_init(priv, rx_q->dma_erx,
1716 					 rx_q->dma_rx_phy,
1717 					 dma_conf->dma_rx_size, 1);
1718 		else
1719 			stmmac_mode_init(priv, rx_q->dma_rx,
1720 					 rx_q->dma_rx_phy,
1721 					 dma_conf->dma_rx_size, 0);
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 static int init_dma_rx_desc_rings(struct net_device *dev,
1728 				  struct stmmac_dma_conf *dma_conf,
1729 				  gfp_t flags)
1730 {
1731 	struct stmmac_priv *priv = netdev_priv(dev);
1732 	u32 rx_count = priv->plat->rx_queues_to_use;
1733 	int queue;
1734 	int ret;
1735 
1736 	/* RX INITIALIZATION */
1737 	netif_dbg(priv, probe, priv->dev,
1738 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1739 
1740 	for (queue = 0; queue < rx_count; queue++) {
1741 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1742 		if (ret)
1743 			goto err_init_rx_buffers;
1744 	}
1745 
1746 	return 0;
1747 
1748 err_init_rx_buffers:
1749 	while (queue >= 0) {
1750 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1751 
1752 		if (rx_q->xsk_pool)
1753 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1754 		else
1755 			dma_free_rx_skbufs(priv, dma_conf, queue);
1756 
1757 		rx_q->buf_alloc_num = 0;
1758 		rx_q->xsk_pool = NULL;
1759 
1760 		queue--;
1761 	}
1762 
1763 	return ret;
1764 }
1765 
1766 /**
1767  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1768  * @priv: driver private structure
1769  * @dma_conf: structure to take the dma data
1770  * @queue: TX queue index
1771  * Description: this function initializes the DMA TX descriptors
1772  * and allocates the socket buffers. It supports the chained and ring
1773  * modes.
1774  */
1775 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1776 				    struct stmmac_dma_conf *dma_conf,
1777 				    u32 queue)
1778 {
1779 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1780 	int i;
1781 
1782 	netif_dbg(priv, probe, priv->dev,
1783 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1784 		  (u32)tx_q->dma_tx_phy);
1785 
1786 	/* Setup the chained descriptor addresses */
1787 	if (priv->mode == STMMAC_CHAIN_MODE) {
1788 		if (priv->extend_desc)
1789 			stmmac_mode_init(priv, tx_q->dma_etx,
1790 					 tx_q->dma_tx_phy,
1791 					 dma_conf->dma_tx_size, 1);
1792 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1793 			stmmac_mode_init(priv, tx_q->dma_tx,
1794 					 tx_q->dma_tx_phy,
1795 					 dma_conf->dma_tx_size, 0);
1796 	}
1797 
1798 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1799 
1800 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1801 		struct dma_desc *p;
1802 
1803 		if (priv->extend_desc)
1804 			p = &((tx_q->dma_etx + i)->basic);
1805 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1806 			p = &((tx_q->dma_entx + i)->basic);
1807 		else
1808 			p = tx_q->dma_tx + i;
1809 
1810 		stmmac_clear_desc(priv, p);
1811 
1812 		tx_q->tx_skbuff_dma[i].buf = 0;
1813 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1814 		tx_q->tx_skbuff_dma[i].len = 0;
1815 		tx_q->tx_skbuff_dma[i].last_segment = false;
1816 		tx_q->tx_skbuff[i] = NULL;
1817 	}
1818 
1819 	return 0;
1820 }
1821 
1822 static int init_dma_tx_desc_rings(struct net_device *dev,
1823 				  struct stmmac_dma_conf *dma_conf)
1824 {
1825 	struct stmmac_priv *priv = netdev_priv(dev);
1826 	u32 tx_queue_cnt;
1827 	u32 queue;
1828 
1829 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1830 
1831 	for (queue = 0; queue < tx_queue_cnt; queue++)
1832 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1833 
1834 	return 0;
1835 }
1836 
1837 /**
1838  * init_dma_desc_rings - init the RX/TX descriptor rings
1839  * @dev: net device structure
1840  * @dma_conf: structure to take the dma data
1841  * @flags: gfp flag.
1842  * Description: this function initializes the DMA RX/TX descriptors
1843  * and allocates the socket buffers. It supports the chained and ring
1844  * modes.
1845  */
1846 static int init_dma_desc_rings(struct net_device *dev,
1847 			       struct stmmac_dma_conf *dma_conf,
1848 			       gfp_t flags)
1849 {
1850 	struct stmmac_priv *priv = netdev_priv(dev);
1851 	int ret;
1852 
1853 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1854 	if (ret)
1855 		return ret;
1856 
1857 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1858 
1859 	stmmac_clear_descriptors(priv, dma_conf);
1860 
1861 	if (netif_msg_hw(priv))
1862 		stmmac_display_rings(priv, dma_conf);
1863 
1864 	return ret;
1865 }
1866 
1867 /**
1868  * dma_free_tx_skbufs - free TX dma buffers
1869  * @priv: private structure
1870  * @dma_conf: structure to take the dma data
1871  * @queue: TX queue index
1872  */
1873 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1874 			       struct stmmac_dma_conf *dma_conf,
1875 			       u32 queue)
1876 {
1877 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1878 	int i;
1879 
1880 	tx_q->xsk_frames_done = 0;
1881 
1882 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1883 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1884 
1885 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1886 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1887 		tx_q->xsk_frames_done = 0;
1888 		tx_q->xsk_pool = NULL;
1889 	}
1890 }
1891 
1892 /**
1893  * stmmac_free_tx_skbufs - free TX skb buffers
1894  * @priv: private structure
1895  */
1896 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1897 {
1898 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1899 	u32 queue;
1900 
1901 	for (queue = 0; queue < tx_queue_cnt; queue++)
1902 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1903 }
1904 
1905 /**
1906  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1907  * @priv: private structure
1908  * @dma_conf: structure to take the dma data
1909  * @queue: RX queue index
1910  */
1911 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1912 					 struct stmmac_dma_conf *dma_conf,
1913 					 u32 queue)
1914 {
1915 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1916 
1917 	/* Release the DMA RX socket buffers */
1918 	if (rx_q->xsk_pool)
1919 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1920 	else
1921 		dma_free_rx_skbufs(priv, dma_conf, queue);
1922 
1923 	rx_q->buf_alloc_num = 0;
1924 	rx_q->xsk_pool = NULL;
1925 
1926 	/* Free DMA regions of consistent memory previously allocated */
1927 	if (!priv->extend_desc)
1928 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1929 				  sizeof(struct dma_desc),
1930 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1931 	else
1932 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1933 				  sizeof(struct dma_extended_desc),
1934 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1935 
1936 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1937 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1938 
1939 	kfree(rx_q->buf_pool);
1940 	if (rx_q->page_pool)
1941 		page_pool_destroy(rx_q->page_pool);
1942 }
1943 
1944 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1945 				       struct stmmac_dma_conf *dma_conf)
1946 {
1947 	u32 rx_count = priv->plat->rx_queues_to_use;
1948 	u32 queue;
1949 
1950 	/* Free RX queue resources */
1951 	for (queue = 0; queue < rx_count; queue++)
1952 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1953 }
1954 
1955 /**
1956  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1957  * @priv: private structure
1958  * @dma_conf: structure to take the dma data
1959  * @queue: TX queue index
1960  */
1961 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1962 					 struct stmmac_dma_conf *dma_conf,
1963 					 u32 queue)
1964 {
1965 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1966 	size_t size;
1967 	void *addr;
1968 
1969 	/* Release the DMA TX socket buffers */
1970 	dma_free_tx_skbufs(priv, dma_conf, queue);
1971 
1972 	if (priv->extend_desc) {
1973 		size = sizeof(struct dma_extended_desc);
1974 		addr = tx_q->dma_etx;
1975 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1976 		size = sizeof(struct dma_edesc);
1977 		addr = tx_q->dma_entx;
1978 	} else {
1979 		size = sizeof(struct dma_desc);
1980 		addr = tx_q->dma_tx;
1981 	}
1982 
1983 	size *= dma_conf->dma_tx_size;
1984 
1985 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1986 
1987 	kfree(tx_q->tx_skbuff_dma);
1988 	kfree(tx_q->tx_skbuff);
1989 }
1990 
1991 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1992 				       struct stmmac_dma_conf *dma_conf)
1993 {
1994 	u32 tx_count = priv->plat->tx_queues_to_use;
1995 	u32 queue;
1996 
1997 	/* Free TX queue resources */
1998 	for (queue = 0; queue < tx_count; queue++)
1999 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2000 }
2001 
2002 /**
2003  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2004  * @priv: private structure
2005  * @dma_conf: structure to take the dma data
2006  * @queue: RX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the RX path. For reception it
 * pre-allocates the RX buffers (page pool) in order to allow the zero-copy
 * mechanism.
2011  */
2012 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2013 					 struct stmmac_dma_conf *dma_conf,
2014 					 u32 queue)
2015 {
2016 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2017 	struct stmmac_channel *ch = &priv->channel[queue];
2018 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2019 	struct page_pool_params pp_params = { 0 };
2020 	unsigned int num_pages;
2021 	unsigned int napi_id;
2022 	int ret;
2023 
2024 	rx_q->queue_index = queue;
2025 	rx_q->priv_data = priv;
2026 
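	/* One pool buffer per RX descriptor; use higher-order pages when the
	 * configured buffer size exceeds PAGE_SIZE and let the page pool
	 * handle DMA mapping and sync-for-device.
	 */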
2027 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2028 	pp_params.pool_size = dma_conf->dma_rx_size;
2029 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2030 	pp_params.order = ilog2(num_pages);
2031 	pp_params.nid = dev_to_node(priv->device);
2032 	pp_params.dev = priv->device;
2033 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2034 	pp_params.offset = stmmac_rx_offset(priv);
2035 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2036 
2037 	rx_q->page_pool = page_pool_create(&pp_params);
2038 	if (IS_ERR(rx_q->page_pool)) {
2039 		ret = PTR_ERR(rx_q->page_pool);
2040 		rx_q->page_pool = NULL;
2041 		return ret;
2042 	}
2043 
2044 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2045 				 sizeof(*rx_q->buf_pool),
2046 				 GFP_KERNEL);
2047 	if (!rx_q->buf_pool)
2048 		return -ENOMEM;
2049 
2050 	if (priv->extend_desc) {
2051 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2052 						   dma_conf->dma_rx_size *
2053 						   sizeof(struct dma_extended_desc),
2054 						   &rx_q->dma_rx_phy,
2055 						   GFP_KERNEL);
2056 		if (!rx_q->dma_erx)
2057 			return -ENOMEM;
2058 
2059 	} else {
2060 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2061 						  dma_conf->dma_rx_size *
2062 						  sizeof(struct dma_desc),
2063 						  &rx_q->dma_rx_phy,
2064 						  GFP_KERNEL);
2065 		if (!rx_q->dma_rx)
2066 			return -ENOMEM;
2067 	}
2068 
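	/* Register the XDP RxQ against the NAPI instance that will service it:
	 * the combined rxtx NAPI for AF_XDP zero-copy queues, the plain RX
	 * NAPI otherwise.
	 */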
2069 	if (stmmac_xdp_is_enabled(priv) &&
2070 	    test_bit(queue, priv->af_xdp_zc_qps))
2071 		napi_id = ch->rxtx_napi.napi_id;
2072 	else
2073 		napi_id = ch->rx_napi.napi_id;
2074 
2075 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2076 			       rx_q->queue_index,
2077 			       napi_id);
2078 	if (ret) {
2079 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2080 		return -EINVAL;
2081 	}
2082 
2083 	return 0;
2084 }
2085 
2086 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2087 				       struct stmmac_dma_conf *dma_conf)
2088 {
2089 	u32 rx_count = priv->plat->rx_queues_to_use;
2090 	u32 queue;
2091 	int ret;
2092 
2093 	/* RX queues buffers and DMA */
2094 	for (queue = 0; queue < rx_count; queue++) {
2095 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2096 		if (ret)
2097 			goto err_dma;
2098 	}
2099 
2100 	return 0;
2101 
2102 err_dma:
2103 	free_dma_rx_desc_resources(priv, dma_conf);
2104 
2105 	return ret;
2106 }
2107 
2108 /**
2109  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2110  * @priv: private structure
2111  * @dma_conf: structure to take the dma data
2112  * @queue: TX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX path, i.e. the TX
 * descriptor ring and the bookkeeping arrays used to track the queued
 * buffers.
2117  */
2118 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2119 					 struct stmmac_dma_conf *dma_conf,
2120 					 u32 queue)
2121 {
2122 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2123 	size_t size;
2124 	void *addr;
2125 
2126 	tx_q->queue_index = queue;
2127 	tx_q->priv_data = priv;
2128 
2129 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2130 				      sizeof(*tx_q->tx_skbuff_dma),
2131 				      GFP_KERNEL);
2132 	if (!tx_q->tx_skbuff_dma)
2133 		return -ENOMEM;
2134 
2135 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2136 				  sizeof(struct sk_buff *),
2137 				  GFP_KERNEL);
2138 	if (!tx_q->tx_skbuff)
2139 		return -ENOMEM;
2140 
2141 	if (priv->extend_desc)
2142 		size = sizeof(struct dma_extended_desc);
2143 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2144 		size = sizeof(struct dma_edesc);
2145 	else
2146 		size = sizeof(struct dma_desc);
2147 
2148 	size *= dma_conf->dma_tx_size;
2149 
2150 	addr = dma_alloc_coherent(priv->device, size,
2151 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2152 	if (!addr)
2153 		return -ENOMEM;
2154 
2155 	if (priv->extend_desc)
2156 		tx_q->dma_etx = addr;
2157 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2158 		tx_q->dma_entx = addr;
2159 	else
2160 		tx_q->dma_tx = addr;
2161 
2162 	return 0;
2163 }
2164 
2165 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2166 				       struct stmmac_dma_conf *dma_conf)
2167 {
2168 	u32 tx_count = priv->plat->tx_queues_to_use;
2169 	u32 queue;
2170 	int ret;
2171 
2172 	/* TX queues buffers and DMA */
2173 	for (queue = 0; queue < tx_count; queue++) {
2174 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2175 		if (ret)
2176 			goto err_dma;
2177 	}
2178 
2179 	return 0;
2180 
2181 err_dma:
2182 	free_dma_tx_desc_resources(priv, dma_conf);
2183 	return ret;
2184 }
2185 
2186 /**
2187  * alloc_dma_desc_resources - alloc TX/RX resources.
2188  * @priv: private structure
2189  * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX buffers in order to
 * allow the zero-copy mechanism.
2194  */
2195 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2196 				    struct stmmac_dma_conf *dma_conf)
2197 {
2198 	/* RX Allocation */
2199 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2200 
2201 	if (ret)
2202 		return ret;
2203 
2204 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2205 
2206 	return ret;
2207 }
2208 
2209 /**
2210  * free_dma_desc_resources - free dma desc resources
2211  * @priv: private structure
2212  * @dma_conf: structure to take the dma data
2213  */
2214 static void free_dma_desc_resources(struct stmmac_priv *priv,
2215 				    struct stmmac_dma_conf *dma_conf)
2216 {
2217 	/* Release the DMA TX socket buffers */
2218 	free_dma_tx_desc_resources(priv, dma_conf);
2219 
2220 	/* Release the DMA RX socket buffers later
2221 	 * to ensure all pending XDP_TX buffers are returned.
2222 	 */
2223 	free_dma_rx_desc_resources(priv, dma_conf);
2224 }
2225 
2226 /**
2227  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2228  *  @priv: driver private structure
2229  *  Description: It is used for enabling the rx queues in the MAC
2230  */
2231 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2232 {
2233 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2234 	int queue;
2235 	u8 mode;
2236 
2237 	for (queue = 0; queue < rx_queues_count; queue++) {
2238 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2239 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2240 	}
2241 }
2242 
2243 /**
2244  * stmmac_start_rx_dma - start RX DMA channel
2245  * @priv: driver private structure
2246  * @chan: RX channel index
2247  * Description:
 * This starts an RX DMA channel
2249  */
2250 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2251 {
2252 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2253 	stmmac_start_rx(priv, priv->ioaddr, chan);
2254 }
2255 
2256 /**
2257  * stmmac_start_tx_dma - start TX DMA channel
2258  * @priv: driver private structure
2259  * @chan: TX channel index
2260  * Description:
2261  * This starts a TX DMA channel
2262  */
2263 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2264 {
2265 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2266 	stmmac_start_tx(priv, priv->ioaddr, chan);
2267 }
2268 
2269 /**
2270  * stmmac_stop_rx_dma - stop RX DMA channel
2271  * @priv: driver private structure
2272  * @chan: RX channel index
2273  * Description:
 * This stops an RX DMA channel
2275  */
2276 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2277 {
2278 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2279 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2280 }
2281 
2282 /**
2283  * stmmac_stop_tx_dma - stop TX DMA channel
2284  * @priv: driver private structure
2285  * @chan: TX channel index
2286  * Description:
2287  * This stops a TX DMA channel
2288  */
2289 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2290 {
2291 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2292 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2293 }
2294 
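/* Enable the RX and TX DMA interrupts on every DMA CSR channel */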
2295 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2296 {
2297 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2298 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2299 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2300 	u32 chan;
2301 
2302 	for (chan = 0; chan < dma_csr_ch; chan++) {
2303 		struct stmmac_channel *ch = &priv->channel[chan];
2304 		unsigned long flags;
2305 
2306 		spin_lock_irqsave(&ch->lock, flags);
2307 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2308 		spin_unlock_irqrestore(&ch->lock, flags);
2309 	}
2310 }
2311 
2312 /**
2313  * stmmac_start_all_dma - start all RX and TX DMA channels
2314  * @priv: driver private structure
2315  * Description:
2316  * This starts all the RX and TX DMA channels
2317  */
2318 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2319 {
2320 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2321 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2322 	u32 chan = 0;
2323 
2324 	for (chan = 0; chan < rx_channels_count; chan++)
2325 		stmmac_start_rx_dma(priv, chan);
2326 
2327 	for (chan = 0; chan < tx_channels_count; chan++)
2328 		stmmac_start_tx_dma(priv, chan);
2329 }
2330 
2331 /**
2332  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2333  * @priv: driver private structure
2334  * Description:
2335  * This stops the RX and TX DMA channels
2336  */
2337 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2338 {
2339 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2340 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2341 	u32 chan = 0;
2342 
2343 	for (chan = 0; chan < rx_channels_count; chan++)
2344 		stmmac_stop_rx_dma(priv, chan);
2345 
2346 	for (chan = 0; chan < tx_channels_count; chan++)
2347 		stmmac_stop_tx_dma(priv, chan);
2348 }
2349 
2350 /**
2351  *  stmmac_dma_operation_mode - HW DMA operation mode
2352  *  @priv: driver private structure
2353  *  Description: it is used for configuring the DMA operation mode register in
2354  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2355  */
2356 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2357 {
2358 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2359 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2360 	int rxfifosz = priv->plat->rx_fifo_size;
2361 	int txfifosz = priv->plat->tx_fifo_size;
2362 	u32 txmode = 0;
2363 	u32 rxmode = 0;
2364 	u32 chan = 0;
2365 	u8 qmode = 0;
2366 
2367 	if (rxfifosz == 0)
2368 		rxfifosz = priv->dma_cap.rx_fifo_size;
2369 	if (txfifosz == 0)
2370 		txfifosz = priv->dma_cap.tx_fifo_size;
2371 
2372 	/* Adjust for real per queue fifo size */
2373 	rxfifosz /= rx_channels_count;
2374 	txfifosz /= tx_channels_count;
2375 
2376 	if (priv->plat->force_thresh_dma_mode) {
2377 		txmode = tc;
2378 		rxmode = tc;
2379 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2380 		/*
2381 		 * In case of GMAC, SF mode can be enabled
2382 		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE is actually supported;
		 * 2) there is no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
2386 		 */
2387 		txmode = SF_DMA_MODE;
2388 		rxmode = SF_DMA_MODE;
2389 		priv->xstats.threshold = SF_DMA_MODE;
2390 	} else {
2391 		txmode = tc;
2392 		rxmode = SF_DMA_MODE;
2393 	}
2394 
2395 	/* configure all channels */
2396 	for (chan = 0; chan < rx_channels_count; chan++) {
2397 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2398 		u32 buf_size;
2399 
2400 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2401 
2402 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2403 				rxfifosz, qmode);
2404 
2405 		if (rx_q->xsk_pool) {
2406 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2407 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2408 					      buf_size,
2409 					      chan);
2410 		} else {
2411 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2412 					      priv->dma_conf.dma_buf_sz,
2413 					      chan);
2414 		}
2415 	}
2416 
2417 	for (chan = 0; chan < tx_channels_count; chan++) {
2418 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2419 
2420 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2421 				txfifosz, qmode);
2422 	}
2423 }
2424 
2425 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2426 {
2427 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2428 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2429 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2430 	unsigned int entry = tx_q->cur_tx;
2431 	struct dma_desc *tx_desc = NULL;
2432 	struct xdp_desc xdp_desc;
2433 	bool work_done = true;
2434 	u32 tx_set_ic_bit = 0;
2435 	unsigned long flags;
2436 
2437 	/* Avoids TX time-out as we are sharing with slow path */
2438 	txq_trans_cond_update(nq);
2439 
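	/* Never submit more frames than there are free TX descriptors */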
2440 	budget = min(budget, stmmac_tx_avail(priv, queue));
2441 
2442 	while (budget-- > 0) {
2443 		dma_addr_t dma_addr;
2444 		bool set_ic;
2445 
		/* We share the ring with the slow path, so stop XSK TX desc
		 * submission when the available TX ring space drops below the
		 * threshold.
2448 		 */
2449 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2450 		    !netif_carrier_ok(priv->dev)) {
2451 			work_done = false;
2452 			break;
2453 		}
2454 
2455 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2456 			break;
2457 
2458 		if (likely(priv->extend_desc))
2459 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2460 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2461 			tx_desc = &tx_q->dma_entx[entry].basic;
2462 		else
2463 			tx_desc = tx_q->dma_tx + entry;
2464 
2465 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2466 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2467 
2468 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2469 
		/* To return an XDP buffer to the XSK pool, we simply call
		 * xsk_tx_completed(), so we don't need to fill up
2472 		 * 'buf' and 'xdpf'.
2473 		 */
2474 		tx_q->tx_skbuff_dma[entry].buf = 0;
2475 		tx_q->xdpf[entry] = NULL;
2476 
2477 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2478 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2479 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2480 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2481 
2482 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2483 
2484 		tx_q->tx_count_frames++;
2485 
2486 		if (!priv->tx_coal_frames[queue])
2487 			set_ic = false;
2488 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2489 			set_ic = true;
2490 		else
2491 			set_ic = false;
2492 
2493 		if (set_ic) {
2494 			tx_q->tx_count_frames = 0;
2495 			stmmac_set_tx_ic(priv, tx_desc);
2496 			tx_set_ic_bit++;
2497 		}
2498 
2499 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2500 				       true, priv->mode, true, true,
2501 				       xdp_desc.len);
2502 
2503 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2504 
2505 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2506 		entry = tx_q->cur_tx;
2507 	}
2508 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
2509 	tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
2510 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
2511 
2512 	if (tx_desc) {
2513 		stmmac_flush_tx_descriptors(priv, queue);
2514 		xsk_tx_release(pool);
2515 	}
2516 
	/* Return true only if both conditions are met:
	 *  a) TX budget is still available
	 *  b) work_done == true, i.e. the XSK TX desc peek found nothing
	 *     (no more pending XSK TX for transmission)
2521 	 */
2522 	return !!budget && work_done;
2523 }
2524 
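/* Bump the DMA threshold (tc) by 64 after a threshold-related TX failure and
 * reprogram the channel's operation mode; no-op when running in
 * Store-and-Forward mode.
 */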
2525 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2526 {
2527 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2528 		tc += 64;
2529 
2530 		if (priv->plat->force_thresh_dma_mode)
2531 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2532 		else
2533 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2534 						      chan);
2535 
2536 		priv->xstats.threshold = tc;
2537 	}
2538 }
2539 
2540 /**
2541  * stmmac_tx_clean - to manage the transmission completion
2542  * @priv: driver private structure
2543  * @budget: napi budget limiting this functions packet handling
2544  * @queue: TX queue index
2545  * Description: it reclaims the transmit resources after transmission completes.
2546  */
2547 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2548 {
2549 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2550 	unsigned int bytes_compl = 0, pkts_compl = 0;
2551 	unsigned int entry, xmits = 0, count = 0;
2552 	u32 tx_packets = 0, tx_errors = 0;
2553 	unsigned long flags;
2554 
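	/* Serialise against the xmit path for this queue while reclaiming */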
2555 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2556 
2557 	tx_q->xsk_frames_done = 0;
2558 
2559 	entry = tx_q->dirty_tx;
2560 
	/* Try to clean all completed TX frames in one shot */
2562 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2563 		struct xdp_frame *xdpf;
2564 		struct sk_buff *skb;
2565 		struct dma_desc *p;
2566 		int status;
2567 
2568 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2569 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2570 			xdpf = tx_q->xdpf[entry];
2571 			skb = NULL;
2572 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2573 			xdpf = NULL;
2574 			skb = tx_q->tx_skbuff[entry];
2575 		} else {
2576 			xdpf = NULL;
2577 			skb = NULL;
2578 		}
2579 
2580 		if (priv->extend_desc)
2581 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2582 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2583 			p = &tx_q->dma_entx[entry].basic;
2584 		else
2585 			p = tx_q->dma_tx + entry;
2586 
2587 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2588 		/* Check if the descriptor is owned by the DMA */
2589 		if (unlikely(status & tx_dma_own))
2590 			break;
2591 
2592 		count++;
2593 
2594 		/* Make sure descriptor fields are read after reading
2595 		 * the own bit.
2596 		 */
2597 		dma_rmb();
2598 
2599 		/* Just consider the last segment and ...*/
2600 		if (likely(!(status & tx_not_ls))) {
2601 			/* ... verify the status error condition */
2602 			if (unlikely(status & tx_err)) {
2603 				tx_errors++;
2604 				if (unlikely(status & tx_err_bump_tc))
2605 					stmmac_bump_dma_threshold(priv, queue);
2606 			} else {
2607 				tx_packets++;
2608 			}
2609 			if (skb)
2610 				stmmac_get_tx_hwtstamp(priv, p, skb);
2611 		}
2612 
2613 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2614 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2615 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2616 				dma_unmap_page(priv->device,
2617 					       tx_q->tx_skbuff_dma[entry].buf,
2618 					       tx_q->tx_skbuff_dma[entry].len,
2619 					       DMA_TO_DEVICE);
2620 			else
2621 				dma_unmap_single(priv->device,
2622 						 tx_q->tx_skbuff_dma[entry].buf,
2623 						 tx_q->tx_skbuff_dma[entry].len,
2624 						 DMA_TO_DEVICE);
2625 			tx_q->tx_skbuff_dma[entry].buf = 0;
2626 			tx_q->tx_skbuff_dma[entry].len = 0;
2627 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2628 		}
2629 
2630 		stmmac_clean_desc3(priv, tx_q, p);
2631 
2632 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2633 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2634 
2635 		if (xdpf &&
2636 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2637 			xdp_return_frame_rx_napi(xdpf);
2638 			tx_q->xdpf[entry] = NULL;
2639 		}
2640 
2641 		if (xdpf &&
2642 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2643 			xdp_return_frame(xdpf);
2644 			tx_q->xdpf[entry] = NULL;
2645 		}
2646 
2647 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2648 			tx_q->xsk_frames_done++;
2649 
2650 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2651 			if (likely(skb)) {
2652 				pkts_compl++;
2653 				bytes_compl += skb->len;
2654 				dev_consume_skb_any(skb);
2655 				tx_q->tx_skbuff[entry] = NULL;
2656 			}
2657 		}
2658 
2659 		stmmac_release_tx_desc(priv, p, priv->mode);
2660 
2661 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2662 	}
2663 	tx_q->dirty_tx = entry;
2664 
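	/* Report completed work to the BQL layer for this netdev queue */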
2665 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2666 				  pkts_compl, bytes_compl);
2667 
2668 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2669 								queue))) &&
2670 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2671 
2672 		netif_dbg(priv, tx_done, priv->dev,
2673 			  "%s: restart transmit\n", __func__);
2674 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2675 	}
2676 
2677 	if (tx_q->xsk_pool) {
2678 		bool work_done;
2679 
2680 		if (tx_q->xsk_frames_done)
2681 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2682 
2683 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2684 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2685 
		/* For XSK TX, we try to send as many frames as possible.
		 * If the XSK work is done (XSK TX desc queue empty and budget
		 * still available), return "budget - 1" to re-enable the TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
2690 		 */
2691 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2692 					       STMMAC_XSK_TX_BUDGET_MAX);
2693 		if (work_done)
2694 			xmits = budget - 1;
2695 		else
2696 			xmits = budget;
2697 	}
2698 
2699 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2700 	    priv->eee_sw_timer_en) {
2701 		if (stmmac_enable_eee_mode(priv))
2702 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2703 	}
2704 
2705 	/* We still have pending packets, let's call for a new scheduling */
2706 	if (tx_q->dirty_tx != tx_q->cur_tx)
2707 		hrtimer_start(&tx_q->txtimer,
2708 			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2709 			      HRTIMER_MODE_REL);
2710 
2711 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
2712 	tx_q->txq_stats.tx_packets += tx_packets;
2713 	tx_q->txq_stats.tx_pkt_n += tx_packets;
2714 	tx_q->txq_stats.tx_clean++;
2715 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
2716 
2717 	priv->xstats.tx_errors += tx_errors;
2718 
2719 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2720 
2721 	/* Combine decisions from TX clean and XSK TX */
2722 	return max(count, xmits);
2723 }
2724 
2725 /**
2726  * stmmac_tx_err - to manage the tx error
2727  * @priv: driver private structure
2728  * @chan: channel index
2729  * Description: it cleans the descriptors and restarts the transmission
2730  * in case of transmission errors.
2731  */
2732 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2733 {
2734 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2735 
2736 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2737 
2738 	stmmac_stop_tx_dma(priv, chan);
2739 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2740 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2741 	stmmac_reset_tx_queue(priv, chan);
2742 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2743 			    tx_q->dma_tx_phy, chan);
2744 	stmmac_start_tx_dma(priv, chan);
2745 
2746 	priv->xstats.tx_errors++;
2747 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2748 }
2749 
2750 /**
2751  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2752  *  @priv: driver private structure
2753  *  @txmode: TX operating mode
2754  *  @rxmode: RX operating mode
2755  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2758  *  mode.
2759  */
2760 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2761 					  u32 rxmode, u32 chan)
2762 {
2763 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2764 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2765 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2766 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2767 	int rxfifosz = priv->plat->rx_fifo_size;
2768 	int txfifosz = priv->plat->tx_fifo_size;
2769 
2770 	if (rxfifosz == 0)
2771 		rxfifosz = priv->dma_cap.rx_fifo_size;
2772 	if (txfifosz == 0)
2773 		txfifosz = priv->dma_cap.tx_fifo_size;
2774 
2775 	/* Adjust for real per queue fifo size */
2776 	rxfifosz /= rx_channels_count;
2777 	txfifosz /= tx_channels_count;
2778 
2779 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2780 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2781 }
2782 
2783 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2784 {
2785 	int ret;
2786 
2787 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2788 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2789 	if (ret && (ret != -EINVAL)) {
2790 		stmmac_global_err(priv);
2791 		return true;
2792 	}
2793 
2794 	return false;
2795 }
2796 
2797 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2798 {
2799 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2800 						 &priv->xstats, chan, dir);
2801 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2802 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2803 	struct stmmac_channel *ch = &priv->channel[chan];
2804 	struct napi_struct *rx_napi;
2805 	struct napi_struct *tx_napi;
2806 	unsigned long flags;
2807 
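	/* Queues bound to an XSK pool are serviced by the combined rxtx NAPI */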
2808 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2809 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2810 
2811 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2812 		if (napi_schedule_prep(rx_napi)) {
2813 			spin_lock_irqsave(&ch->lock, flags);
2814 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2815 			spin_unlock_irqrestore(&ch->lock, flags);
2816 			__napi_schedule(rx_napi);
2817 		}
2818 	}
2819 
2820 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2821 		if (napi_schedule_prep(tx_napi)) {
2822 			spin_lock_irqsave(&ch->lock, flags);
2823 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2824 			spin_unlock_irqrestore(&ch->lock, flags);
2825 			__napi_schedule(tx_napi);
2826 		}
2827 	}
2828 
2829 	return status;
2830 }
2831 
2832 /**
2833  * stmmac_dma_interrupt - DMA ISR
2834  * @priv: driver private structure
2835  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
2838  */
2839 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2840 {
2841 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2842 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2843 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2844 				tx_channel_count : rx_channel_count;
2845 	u32 chan;
2846 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2847 
2848 	/* Make sure we never check beyond our status buffer. */
2849 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2850 		channels_to_check = ARRAY_SIZE(status);
2851 
2852 	for (chan = 0; chan < channels_to_check; chan++)
2853 		status[chan] = stmmac_napi_check(priv, chan,
2854 						 DMA_DIR_RXTX);
2855 
2856 	for (chan = 0; chan < tx_channel_count; chan++) {
2857 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2858 			/* Try to bump up the dma threshold on this failure */
2859 			stmmac_bump_dma_threshold(priv, chan);
2860 		} else if (unlikely(status[chan] == tx_hard_error)) {
2861 			stmmac_tx_err(priv, chan);
2862 		}
2863 	}
2864 }
2865 
2866 /**
 * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq; the counters are managed in SW.
2870  */
2871 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2872 {
2873 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2874 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2875 
2876 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2877 
2878 	if (priv->dma_cap.rmon) {
2879 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2880 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2881 	} else
2882 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2883 }
2884 
2885 /**
2886  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2887  * @priv: driver private structure
2888  * Description:
 *  newer GMAC chip generations have a register to indicate the
 *  presence of optional features/functions.
 *  This can also be used to override the values passed through the
 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2893  */
2894 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2895 {
2896 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2897 }
2898 
2899 /**
2900  * stmmac_check_ether_addr - check if the MAC addr is valid
2901  * @priv: driver private structure
2902  * Description:
 * it verifies that the MAC address is valid; in case it is not, it
 * generates a random MAC address
2905  */
2906 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2907 {
2908 	u8 addr[ETH_ALEN];
2909 
2910 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2911 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2912 		if (is_valid_ether_addr(addr))
2913 			eth_hw_addr_set(priv->dev, addr);
2914 		else
2915 			eth_hw_addr_random(priv->dev);
2916 		dev_info(priv->device, "device MAC address %pM\n",
2917 			 priv->dev->dev_addr);
2918 	}
2919 }
2920 
2921 /**
2922  * stmmac_init_dma_engine - DMA init.
2923  * @priv: driver private structure
2924  * Description:
 * It initializes the DMA by invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
2928  */
2929 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2930 {
2931 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2932 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2933 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2934 	struct stmmac_rx_queue *rx_q;
2935 	struct stmmac_tx_queue *tx_q;
2936 	u32 chan = 0;
2937 	int atds = 0;
2938 	int ret = 0;
2939 
2940 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2941 		dev_err(priv->device, "Invalid DMA configuration\n");
2942 		return -EINVAL;
2943 	}
2944 
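	/* Use the Alternate Descriptor Size (ATDS) when extended descriptors
	 * are in use with ring mode.
	 */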
2945 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2946 		atds = 1;
2947 
2948 	ret = stmmac_reset(priv, priv->ioaddr);
2949 	if (ret) {
2950 		dev_err(priv->device, "Failed to reset the dma\n");
2951 		return ret;
2952 	}
2953 
2954 	/* DMA Configuration */
2955 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2956 
2957 	if (priv->plat->axi)
2958 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2959 
2960 	/* DMA CSR Channel configuration */
2961 	for (chan = 0; chan < dma_csr_ch; chan++) {
2962 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2963 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2964 	}
2965 
2966 	/* DMA RX Channel Configuration */
2967 	for (chan = 0; chan < rx_channels_count; chan++) {
2968 		rx_q = &priv->dma_conf.rx_queue[chan];
2969 
2970 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2971 				    rx_q->dma_rx_phy, chan);
2972 
2973 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2974 				     (rx_q->buf_alloc_num *
2975 				      sizeof(struct dma_desc));
2976 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2977 				       rx_q->rx_tail_addr, chan);
2978 	}
2979 
2980 	/* DMA TX Channel Configuration */
2981 	for (chan = 0; chan < tx_channels_count; chan++) {
2982 		tx_q = &priv->dma_conf.tx_queue[chan];
2983 
2984 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2985 				    tx_q->dma_tx_phy, chan);
2986 
2987 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2988 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2989 				       tx_q->tx_tail_addr, chan);
2990 	}
2991 
2992 	return ret;
2993 }
2994 
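/* (Re)arm the per-queue TX coalescing timer */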
2995 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2996 {
2997 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2998 
2999 	hrtimer_start(&tx_q->txtimer,
3000 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
3001 		      HRTIMER_MODE_REL);
3002 }
3003 
3004 /**
3005  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the hrtimer embedded in the TX queue
 * Description:
 * This is the timer handler that schedules the TX NAPI, which in turn
 * runs stmmac_tx_clean.
3009  */
3010 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3011 {
3012 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3013 	struct stmmac_priv *priv = tx_q->priv_data;
3014 	struct stmmac_channel *ch;
3015 	struct napi_struct *napi;
3016 
3017 	ch = &priv->channel[tx_q->queue_index];
3018 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3019 
3020 	if (likely(napi_schedule_prep(napi))) {
3021 		unsigned long flags;
3022 
3023 		spin_lock_irqsave(&ch->lock, flags);
3024 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3025 		spin_unlock_irqrestore(&ch->lock, flags);
3026 		__napi_schedule(napi);
3027 	}
3028 
3029 	return HRTIMER_NORESTART;
3030 }
3031 
3032 /**
3033  * stmmac_init_coalesce - init mitigation options.
3034  * @priv: driver private structure
3035  * Description:
 * This initializes the coalesce parameters: i.e. timer rate,
 * timer handler and the default threshold used for enabling the
 * interrupt-on-completion bit.
3039  */
3040 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3041 {
3042 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3043 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3044 	u32 chan;
3045 
3046 	for (chan = 0; chan < tx_channel_count; chan++) {
3047 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3048 
3049 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3050 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3051 
3052 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3053 		tx_q->txtimer.function = stmmac_tx_timer;
3054 	}
3055 
3056 	for (chan = 0; chan < rx_channel_count; chan++)
3057 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3058 }
3059 
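/* Program the TX and RX descriptor ring lengths into the DMA channels */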
3060 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3061 {
3062 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3063 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3064 	u32 chan;
3065 
3066 	/* set TX ring length */
3067 	for (chan = 0; chan < tx_channels_count; chan++)
3068 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3069 				       (priv->dma_conf.dma_tx_size - 1), chan);
3070 
3071 	/* set RX ring length */
3072 	for (chan = 0; chan < rx_channels_count; chan++)
3073 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3074 				       (priv->dma_conf.dma_rx_size - 1), chan);
3075 }
3076 
3077 /**
3078  *  stmmac_set_tx_queue_weight - Set TX queue weight
3079  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
3081  */
3082 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3083 {
3084 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3085 	u32 weight;
3086 	u32 queue;
3087 
3088 	for (queue = 0; queue < tx_queues_count; queue++) {
3089 		weight = priv->plat->tx_queues_cfg[queue].weight;
3090 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3091 	}
3092 }
3093 
3094 /**
3095  *  stmmac_configure_cbs - Configure CBS in TX queue
3096  *  @priv: driver private structure
3097  *  Description: It is used for configuring CBS in AVB TX queues
3098  */
3099 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3100 {
3101 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3102 	u32 mode_to_use;
3103 	u32 queue;
3104 
3105 	/* queue 0 is reserved for legacy traffic */
3106 	for (queue = 1; queue < tx_queues_count; queue++) {
3107 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3108 		if (mode_to_use == MTL_QUEUE_DCB)
3109 			continue;
3110 
3111 		stmmac_config_cbs(priv, priv->hw,
3112 				priv->plat->tx_queues_cfg[queue].send_slope,
3113 				priv->plat->tx_queues_cfg[queue].idle_slope,
3114 				priv->plat->tx_queues_cfg[queue].high_credit,
3115 				priv->plat->tx_queues_cfg[queue].low_credit,
3116 				queue);
3117 	}
3118 }
3119 
3120 /**
3121  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3122  *  @priv: driver private structure
3123  *  Description: It is used for mapping RX queues to RX dma channels
3124  */
3125 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3126 {
3127 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3128 	u32 queue;
3129 	u32 chan;
3130 
3131 	for (queue = 0; queue < rx_queues_count; queue++) {
3132 		chan = priv->plat->rx_queues_cfg[queue].chan;
3133 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3134 	}
3135 }
3136 
3137 /**
3138  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3139  *  @priv: driver private structure
3140  *  Description: It is used for configuring the RX Queue Priority
3141  */
3142 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3143 {
3144 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3145 	u32 queue;
3146 	u32 prio;
3147 
3148 	for (queue = 0; queue < rx_queues_count; queue++) {
3149 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3150 			continue;
3151 
3152 		prio = priv->plat->rx_queues_cfg[queue].prio;
3153 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3154 	}
3155 }
3156 
3157 /**
3158  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3159  *  @priv: driver private structure
3160  *  Description: It is used for configuring the TX Queue Priority
3161  */
3162 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3163 {
3164 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3165 	u32 queue;
3166 	u32 prio;
3167 
3168 	for (queue = 0; queue < tx_queues_count; queue++) {
3169 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3170 			continue;
3171 
3172 		prio = priv->plat->tx_queues_cfg[queue].prio;
3173 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3174 	}
3175 }
3176 
3177 /**
3178  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3179  *  @priv: driver private structure
3180  *  Description: It is used for configuring the RX queue routing
3181  */
3182 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3183 {
3184 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3185 	u32 queue;
3186 	u8 packet;
3187 
3188 	for (queue = 0; queue < rx_queues_count; queue++) {
3189 		/* no specific packet type routing specified for the queue */
3190 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3191 			continue;
3192 
3193 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3194 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3195 	}
3196 }
3197 
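/* Enable RSS only when both the HW capability and the platform allow it and
 * the netdev has NETIF_F_RXHASH set; otherwise keep it disabled.
 */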
3198 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3199 {
3200 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3201 		priv->rss.enable = false;
3202 		return;
3203 	}
3204 
3205 	if (priv->dev->features & NETIF_F_RXHASH)
3206 		priv->rss.enable = true;
3207 	else
3208 		priv->rss.enable = false;
3209 
3210 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3211 			     priv->plat->rx_queues_to_use);
3212 }
3213 
3214 /**
3215  *  stmmac_mtl_configuration - Configure MTL
3216  *  @priv: driver private structure
 *  Description: It is used for configuring MTL
3218  */
3219 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3220 {
3221 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3222 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3223 
3224 	if (tx_queues_count > 1)
3225 		stmmac_set_tx_queue_weight(priv);
3226 
3227 	/* Configure MTL RX algorithms */
3228 	if (rx_queues_count > 1)
3229 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3230 				priv->plat->rx_sched_algorithm);
3231 
3232 	/* Configure MTL TX algorithms */
3233 	if (tx_queues_count > 1)
3234 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3235 				priv->plat->tx_sched_algorithm);
3236 
3237 	/* Configure CBS in AVB TX queues */
3238 	if (tx_queues_count > 1)
3239 		stmmac_configure_cbs(priv);
3240 
3241 	/* Map RX MTL to DMA channels */
3242 	stmmac_rx_queue_dma_chan_map(priv);
3243 
3244 	/* Enable MAC RX Queues */
3245 	stmmac_mac_enable_rx_queues(priv);
3246 
3247 	/* Set RX priorities */
3248 	if (rx_queues_count > 1)
3249 		stmmac_mac_config_rx_queues_prio(priv);
3250 
3251 	/* Set TX priorities */
3252 	if (tx_queues_count > 1)
3253 		stmmac_mac_config_tx_queues_prio(priv);
3254 
3255 	/* Set RX routing */
3256 	if (rx_queues_count > 1)
3257 		stmmac_mac_config_rx_queues_routing(priv);
3258 
3259 	/* Receive Side Scaling */
3260 	if (rx_queues_count > 1)
3261 		stmmac_mac_config_rss(priv);
3262 }
3263 
3264 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3265 {
3266 	if (priv->dma_cap.asp) {
3267 		netdev_info(priv->dev, "Enabling Safety Features\n");
3268 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3269 					  priv->plat->safety_feat_cfg);
3270 	} else {
3271 		netdev_info(priv->dev, "No Safety Features support found\n");
3272 	}
3273 }
3274 
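/* Create the single-threaded workqueue used to handle the FPE
 * (Frame Preemption) handshake task.
 */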
3275 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3276 {
3277 	char *name;
3278 
3279 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3280 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3281 
3282 	name = priv->wq_name;
3283 	sprintf(name, "%s-fpe", priv->dev->name);
3284 
3285 	priv->fpe_wq = create_singlethread_workqueue(name);
3286 	if (!priv->fpe_wq) {
3287 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3288 
3289 		return -ENOMEM;
3290 	}
	netdev_info(priv->dev, "FPE workqueue started\n");
3292 
3293 	return 0;
3294 }
3295 
3296 /**
3297  * stmmac_hw_setup - setup mac in a usable state.
 *  @dev: pointer to the device structure.
 *  @ptp_register: register PTP if set
 *  Description:
 *  this is the main function to set up the HW in a usable state: the DMA
 *  engine is reset, the core registers are configured (e.g. AXI,
 *  checksum features, timers) and the DMA is ready to start receiving
 *  and transmitting.
3305  *  Return value:
3306  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3307  *  file on failure.
3308  */
3309 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3310 {
3311 	struct stmmac_priv *priv = netdev_priv(dev);
3312 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3313 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3314 	bool sph_en;
3315 	u32 chan;
3316 	int ret;
3317 
3318 	/* DMA initialization and SW reset */
3319 	ret = stmmac_init_dma_engine(priv);
3320 	if (ret < 0) {
3321 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3322 			   __func__);
3323 		return ret;
3324 	}
3325 
3326 	/* Copy the MAC addr into the HW  */
3327 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3328 
3329 	/* PS and related bits will be programmed according to the speed */
3330 	if (priv->hw->pcs) {
3331 		int speed = priv->plat->mac_port_sel_speed;
3332 
3333 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3334 		    (speed == SPEED_1000)) {
3335 			priv->hw->ps = speed;
3336 		} else {
3337 			dev_warn(priv->device, "invalid port speed\n");
3338 			priv->hw->ps = 0;
3339 		}
3340 	}
3341 
3342 	/* Initialize the MAC Core */
3343 	stmmac_core_init(priv, priv->hw, dev);
3344 
3345 	/* Initialize MTL*/
3346 	stmmac_mtl_configuration(priv);
3347 
3348 	/* Initialize Safety Features */
3349 	stmmac_safety_feat_configuration(priv);
3350 
3351 	ret = stmmac_rx_ipc(priv, priv->hw);
3352 	if (!ret) {
3353 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3354 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3355 		priv->hw->rx_csum = 0;
3356 	}
3357 
3358 	/* Enable the MAC Rx/Tx */
3359 	stmmac_mac_set(priv, priv->ioaddr, true);
3360 
3361 	/* Set the HW DMA mode and the COE */
3362 	stmmac_dma_operation_mode(priv);
3363 
3364 	stmmac_mmc_setup(priv);
3365 
3366 	if (ptp_register) {
3367 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3368 		if (ret < 0)
3369 			netdev_warn(priv->dev,
3370 				    "failed to enable PTP reference clock: %pe\n",
3371 				    ERR_PTR(ret));
3372 	}
3373 
3374 	ret = stmmac_init_ptp(priv);
3375 	if (ret == -EOPNOTSUPP)
3376 		netdev_info(priv->dev, "PTP not supported by HW\n");
3377 	else if (ret)
3378 		netdev_warn(priv->dev, "PTP init failed\n");
3379 	else if (ptp_register)
3380 		stmmac_ptp_register(priv);
3381 
3382 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3383 
3384 	/* Convert the timer from msec to usec */
3385 	if (!priv->tx_lpi_timer)
3386 		priv->tx_lpi_timer = eee_timer * 1000;
3387 
3388 	if (priv->use_riwt) {
3389 		u32 queue;
3390 
3391 		for (queue = 0; queue < rx_cnt; queue++) {
3392 			if (!priv->rx_riwt[queue])
3393 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3394 
3395 			stmmac_rx_watchdog(priv, priv->ioaddr,
3396 					   priv->rx_riwt[queue], queue);
3397 		}
3398 	}
3399 
3400 	if (priv->hw->pcs)
3401 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3402 
3403 	/* set TX and RX rings length */
3404 	stmmac_set_rings_length(priv);
3405 
3406 	/* Enable TSO */
3407 	if (priv->tso) {
3408 		for (chan = 0; chan < tx_cnt; chan++) {
3409 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3410 
3411 			/* TSO and TBS cannot co-exist */
3412 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3413 				continue;
3414 
3415 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3416 		}
3417 	}
3418 
3419 	/* Enable Split Header */
3420 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3421 	for (chan = 0; chan < rx_cnt; chan++)
3422 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3423 
3424 
3425 	/* VLAN Tag Insertion */
3426 	if (priv->dma_cap.vlins)
3427 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3428 
3429 	/* TBS */
3430 	for (chan = 0; chan < tx_cnt; chan++) {
3431 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3432 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3433 
3434 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3435 	}
3436 
3437 	/* Configure real RX and TX queues */
3438 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3439 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3440 
3441 	/* Start the ball rolling... */
3442 	stmmac_start_all_dma(priv);
3443 
3444 	if (priv->dma_cap.fpesel) {
3445 		stmmac_fpe_start_wq(priv);
3446 
3447 		if (priv->plat->fpe_cfg->enable)
3448 			stmmac_fpe_handshake(priv, true);
3449 	}
3450 
3451 	return 0;
3452 }
3453 
3454 static void stmmac_hw_teardown(struct net_device *dev)
3455 {
3456 	struct stmmac_priv *priv = netdev_priv(dev);
3457 
3458 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3459 }
3460 
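/* Release the device IRQs; irq_err indicates how far the IRQ request
 * sequence progressed, so only the lines that were actually requested
 * are freed.
 */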
3461 static void stmmac_free_irq(struct net_device *dev,
3462 			    enum request_irq_err irq_err, int irq_idx)
3463 {
3464 	struct stmmac_priv *priv = netdev_priv(dev);
3465 	int j;
3466 
3467 	switch (irq_err) {
3468 	case REQ_IRQ_ERR_ALL:
3469 		irq_idx = priv->plat->tx_queues_to_use;
3470 		fallthrough;
3471 	case REQ_IRQ_ERR_TX:
3472 		for (j = irq_idx - 1; j >= 0; j--) {
3473 			if (priv->tx_irq[j] > 0) {
3474 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3475 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3476 			}
3477 		}
3478 		irq_idx = priv->plat->rx_queues_to_use;
3479 		fallthrough;
3480 	case REQ_IRQ_ERR_RX:
3481 		for (j = irq_idx - 1; j >= 0; j--) {
3482 			if (priv->rx_irq[j] > 0) {
3483 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3484 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3485 			}
3486 		}
3487 
3488 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3489 			free_irq(priv->sfty_ue_irq, dev);
3490 		fallthrough;
3491 	case REQ_IRQ_ERR_SFTY_UE:
3492 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3493 			free_irq(priv->sfty_ce_irq, dev);
3494 		fallthrough;
3495 	case REQ_IRQ_ERR_SFTY_CE:
3496 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3497 			free_irq(priv->lpi_irq, dev);
3498 		fallthrough;
3499 	case REQ_IRQ_ERR_LPI:
3500 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3501 			free_irq(priv->wol_irq, dev);
3502 		fallthrough;
3503 	case REQ_IRQ_ERR_WOL:
3504 		free_irq(dev->irq, dev);
3505 		fallthrough;
3506 	case REQ_IRQ_ERR_MAC:
3507 	case REQ_IRQ_ERR_NO:
3508 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3509 		break;
3510 	}
3511 }
3512 
3513 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3514 {
3515 	struct stmmac_priv *priv = netdev_priv(dev);
3516 	enum request_irq_err irq_err;
3517 	cpumask_t cpu_mask;
3518 	int irq_idx = 0;
3519 	char *int_name;
3520 	int ret;
3521 	int i;
3522 
3523 	/* For common interrupt */
3524 	int_name = priv->int_name_mac;
3525 	sprintf(int_name, "%s:%s", dev->name, "mac");
3526 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3527 			  0, int_name, dev);
3528 	if (unlikely(ret < 0)) {
3529 		netdev_err(priv->dev,
3530 			   "%s: alloc mac MSI %d (error: %d)\n",
3531 			   __func__, dev->irq, ret);
3532 		irq_err = REQ_IRQ_ERR_MAC;
3533 		goto irq_error;
3534 	}
3535 
3536 	/* Request the Wake IRQ in case another line
3537 	 * is used for WoL
3538 	 */
3539 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3540 		int_name = priv->int_name_wol;
3541 		sprintf(int_name, "%s:%s", dev->name, "wol");
3542 		ret = request_irq(priv->wol_irq,
3543 				  stmmac_mac_interrupt,
3544 				  0, int_name, dev);
3545 		if (unlikely(ret < 0)) {
3546 			netdev_err(priv->dev,
3547 				   "%s: alloc wol MSI %d (error: %d)\n",
3548 				   __func__, priv->wol_irq, ret);
3549 			irq_err = REQ_IRQ_ERR_WOL;
3550 			goto irq_error;
3551 		}
3552 	}
3553 
3554 	/* Request the LPI IRQ in case another line
3555 	 * is used for LPI
3556 	 */
3557 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3558 		int_name = priv->int_name_lpi;
3559 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3560 		ret = request_irq(priv->lpi_irq,
3561 				  stmmac_mac_interrupt,
3562 				  0, int_name, dev);
3563 		if (unlikely(ret < 0)) {
3564 			netdev_err(priv->dev,
3565 				   "%s: alloc lpi MSI %d (error: %d)\n",
3566 				   __func__, priv->lpi_irq, ret);
3567 			irq_err = REQ_IRQ_ERR_LPI;
3568 			goto irq_error;
3569 		}
3570 	}
3571 
3572 	/* Request the Safety Feature Correctable Error line in
3573 	 * case another line is used
3574 	 */
3575 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3576 		int_name = priv->int_name_sfty_ce;
3577 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3578 		ret = request_irq(priv->sfty_ce_irq,
3579 				  stmmac_safety_interrupt,
3580 				  0, int_name, dev);
3581 		if (unlikely(ret < 0)) {
3582 			netdev_err(priv->dev,
3583 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3584 				   __func__, priv->sfty_ce_irq, ret);
3585 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3586 			goto irq_error;
3587 		}
3588 	}
3589 
3590 	/* Request the Safety Feature Uncorrectable Error line in
3591 	 * case another line is used
3592 	 */
3593 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3594 		int_name = priv->int_name_sfty_ue;
3595 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3596 		ret = request_irq(priv->sfty_ue_irq,
3597 				  stmmac_safety_interrupt,
3598 				  0, int_name, dev);
3599 		if (unlikely(ret < 0)) {
3600 			netdev_err(priv->dev,
3601 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3602 				   __func__, priv->sfty_ue_irq, ret);
3603 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3604 			goto irq_error;
3605 		}
3606 	}
3607 
3608 	/* Request Rx MSI irq */
3609 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3610 		if (i >= MTL_MAX_RX_QUEUES)
3611 			break;
3612 		if (priv->rx_irq[i] == 0)
3613 			continue;
3614 
3615 		int_name = priv->int_name_rx_irq[i];
3616 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3617 		ret = request_irq(priv->rx_irq[i],
3618 				  stmmac_msi_intr_rx,
3619 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3620 		if (unlikely(ret < 0)) {
3621 			netdev_err(priv->dev,
3622 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3623 				   __func__, i, priv->rx_irq[i], ret);
3624 			irq_err = REQ_IRQ_ERR_RX;
3625 			irq_idx = i;
3626 			goto irq_error;
3627 		}
3628 		cpumask_clear(&cpu_mask);
3629 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3630 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3631 	}
3632 
3633 	/* Request Tx MSI irq */
3634 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3635 		if (i >= MTL_MAX_TX_QUEUES)
3636 			break;
3637 		if (priv->tx_irq[i] == 0)
3638 			continue;
3639 
3640 		int_name = priv->int_name_tx_irq[i];
3641 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3642 		ret = request_irq(priv->tx_irq[i],
3643 				  stmmac_msi_intr_tx,
3644 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3645 		if (unlikely(ret < 0)) {
3646 			netdev_err(priv->dev,
3647 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3648 				   __func__, i, priv->tx_irq[i], ret);
3649 			irq_err = REQ_IRQ_ERR_TX;
3650 			irq_idx = i;
3651 			goto irq_error;
3652 		}
3653 		cpumask_clear(&cpu_mask);
3654 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3655 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3656 	}
3657 
3658 	return 0;
3659 
3660 irq_error:
3661 	stmmac_free_irq(dev, irq_err, irq_idx);
3662 	return ret;
3663 }
3664 
3665 static int stmmac_request_irq_single(struct net_device *dev)
3666 {
3667 	struct stmmac_priv *priv = netdev_priv(dev);
3668 	enum request_irq_err irq_err;
3669 	int ret;
3670 
3671 	ret = request_irq(dev->irq, stmmac_interrupt,
3672 			  IRQF_SHARED, dev->name, dev);
3673 	if (unlikely(ret < 0)) {
3674 		netdev_err(priv->dev,
3675 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3676 			   __func__, dev->irq, ret);
3677 		irq_err = REQ_IRQ_ERR_MAC;
3678 		goto irq_error;
3679 	}
3680 
3681 	/* Request the Wake IRQ in case another line
3682 	 * is used for WoL
3683 	 */
3684 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3685 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3686 				  IRQF_SHARED, dev->name, dev);
3687 		if (unlikely(ret < 0)) {
3688 			netdev_err(priv->dev,
3689 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3690 				   __func__, priv->wol_irq, ret);
3691 			irq_err = REQ_IRQ_ERR_WOL;
3692 			goto irq_error;
3693 		}
3694 	}
3695 
3696 	/* Request the LPI IRQ in case another line is used for LPI */
3697 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3698 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3699 				  IRQF_SHARED, dev->name, dev);
3700 		if (unlikely(ret < 0)) {
3701 			netdev_err(priv->dev,
3702 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3703 				   __func__, priv->lpi_irq, ret);
3704 			irq_err = REQ_IRQ_ERR_LPI;
3705 			goto irq_error;
3706 		}
3707 	}
3708 
3709 	return 0;
3710 
3711 irq_error:
3712 	stmmac_free_irq(dev, irq_err, 0);
3713 	return ret;
3714 }
3715 
3716 static int stmmac_request_irq(struct net_device *dev)
3717 {
3718 	struct stmmac_priv *priv = netdev_priv(dev);
3719 	int ret;
3720 
3721 	/* Request the IRQ lines */
3722 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3723 		ret = stmmac_request_irq_multi_msi(dev);
3724 	else
3725 		ret = stmmac_request_irq_single(dev);
3726 
3727 	return ret;
3728 }
3729 
3730 /**
3731  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3732  *  @priv: driver private structure
3733  *  @mtu: MTU to setup the dma queue and buf with
3734  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3735  *  Allocate the Tx/Rx DMA queues and initialize them.
3736  *  Return value:
3737  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3738  */
3739 static struct stmmac_dma_conf *
3740 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3741 {
3742 	struct stmmac_dma_conf *dma_conf;
3743 	int chan, bfsize, ret;
3744 
3745 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3746 	if (!dma_conf) {
3747 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3748 			   __func__);
3749 		return ERR_PTR(-ENOMEM);
3750 	}
3751 
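	/* Pick the DMA buffer size: try the 16KiB setting first and otherwise
	 * derive the size from the MTU.
	 */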
3752 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3753 	if (bfsize < 0)
3754 		bfsize = 0;
3755 
3756 	if (bfsize < BUF_SIZE_16KiB)
3757 		bfsize = stmmac_set_bfsize(mtu, 0);
3758 
3759 	dma_conf->dma_buf_sz = bfsize;
3760 	/* Choose the tx/rx ring sizes from the ones already defined in the
3761 	 * priv struct, if set.
3762 	 */
3763 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3764 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3765 
3766 	if (!dma_conf->dma_tx_size)
3767 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3768 	if (!dma_conf->dma_rx_size)
3769 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3770 
3771 	/* Earlier check for TBS */
3772 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3773 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3774 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3775 
3776 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3777 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3778 	}
3779 
3780 	ret = alloc_dma_desc_resources(priv, dma_conf);
3781 	if (ret < 0) {
3782 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3783 			   __func__);
3784 		goto alloc_error;
3785 	}
3786 
3787 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3788 	if (ret < 0) {
3789 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3790 			   __func__);
3791 		goto init_error;
3792 	}
3793 
3794 	return dma_conf;
3795 
3796 init_error:
3797 	free_dma_desc_resources(priv, dma_conf);
3798 alloc_error:
3799 	kfree(dma_conf);
3800 	return ERR_PTR(ret);
3801 }
3802 
3803 /**
3804  *  __stmmac_open - open entry point of the driver
3805  *  @dev : pointer to the device structure.
3806  *  @dma_conf :  structure to take the dma data
3807  *  Description:
3808  *  This function is the open entry point of the driver.
3809  *  Return value:
3810  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3811  *  file on failure.
3812  */
3813 static int __stmmac_open(struct net_device *dev,
3814 			 struct stmmac_dma_conf *dma_conf)
3815 {
3816 	struct stmmac_priv *priv = netdev_priv(dev);
3817 	int mode = priv->plat->phy_interface;
3818 	u32 chan;
3819 	int ret;
3820 
3821 	ret = pm_runtime_resume_and_get(priv->device);
3822 	if (ret < 0)
3823 		return ret;
3824 
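	/* Attach a PHY only when the link is not entirely managed by a PCS
	 * (TBI/RTBI, an XPCS doing C73 autoneg, or a Lynx PCS).
	 */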
3825 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3826 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3827 	    (!priv->hw->xpcs ||
3828 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3829 	    !priv->hw->lynx_pcs) {
3830 		ret = stmmac_init_phy(dev);
3831 		if (ret) {
3832 			netdev_err(priv->dev,
3833 				   "%s: Cannot attach to PHY (error: %d)\n",
3834 				   __func__, ret);
3835 			goto init_phy_error;
3836 		}
3837 	}
3838 
3839 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3840 
3841 	buf_sz = dma_conf->dma_buf_sz;
3842 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3843 
3844 	stmmac_reset_queues_param(priv);
3845 
3846 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3847 	    priv->plat->serdes_powerup) {
3848 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3849 		if (ret < 0) {
3850 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3851 				   __func__);
3852 			goto init_error;
3853 		}
3854 	}
3855 
3856 	ret = stmmac_hw_setup(dev, true);
3857 	if (ret < 0) {
3858 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3859 		goto init_error;
3860 	}
3861 
3862 	stmmac_init_coalesce(priv);
3863 
3864 	phylink_start(priv->phylink);
3865 	/* We may have called phylink_speed_down before */
3866 	phylink_speed_up(priv->phylink);
3867 
3868 	ret = stmmac_request_irq(dev);
3869 	if (ret)
3870 		goto irq_error;
3871 
3872 	stmmac_enable_all_queues(priv);
3873 	netif_tx_start_all_queues(priv->dev);
3874 	stmmac_enable_all_dma_irq(priv);
3875 
3876 	return 0;
3877 
3878 irq_error:
3879 	phylink_stop(priv->phylink);
3880 
3881 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3882 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3883 
3884 	stmmac_hw_teardown(dev);
3885 init_error:
3886 	phylink_disconnect_phy(priv->phylink);
3887 init_phy_error:
3888 	pm_runtime_put(priv->device);
3889 	return ret;
3890 }
3891 
3892 static int stmmac_open(struct net_device *dev)
3893 {
3894 	struct stmmac_priv *priv = netdev_priv(dev);
3895 	struct stmmac_dma_conf *dma_conf;
3896 	int ret;
3897 
3898 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3899 	if (IS_ERR(dma_conf))
3900 		return PTR_ERR(dma_conf);
3901 
3902 	ret = __stmmac_open(dev, dma_conf);
3903 	if (ret)
3904 		free_dma_desc_resources(priv, dma_conf);
3905 
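	/* __stmmac_open() copies dma_conf into priv->dma_conf, so the
	 * temporary structure can be freed unconditionally here.
	 */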
3906 	kfree(dma_conf);
3907 	return ret;
3908 }
3909 
3910 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3911 {
3912 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3913 
3914 	if (priv->fpe_wq)
3915 		destroy_workqueue(priv->fpe_wq);
3916 
3917 	netdev_info(priv->dev, "FPE workqueue stop");
3918 }
3919 
3920 /**
3921  *  stmmac_release - close entry point of the driver
3922  *  @dev : device pointer.
3923  *  Description:
3924  *  This is the stop entry point of the driver.
3925  */
3926 static int stmmac_release(struct net_device *dev)
3927 {
3928 	struct stmmac_priv *priv = netdev_priv(dev);
3929 	u32 chan;
3930 
3931 	if (device_may_wakeup(priv->device))
3932 		phylink_speed_down(priv->phylink, false);
3933 	/* Stop and disconnect the PHY */
3934 	phylink_stop(priv->phylink);
3935 	phylink_disconnect_phy(priv->phylink);
3936 
3937 	stmmac_disable_all_queues(priv);
3938 
3939 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3940 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3941 
3942 	netif_tx_disable(dev);
3943 
3944 	/* Free the IRQ lines */
3945 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3946 
3947 	if (priv->eee_enabled) {
3948 		priv->tx_path_in_lpi_mode = false;
3949 		del_timer_sync(&priv->eee_ctrl_timer);
3950 	}
3951 
3952 	/* Stop TX/RX DMA and clear the descriptors */
3953 	stmmac_stop_all_dma(priv);
3954 
3955 	/* Release and free the Rx/Tx resources */
3956 	free_dma_desc_resources(priv, &priv->dma_conf);
3957 
3958 	/* Disable the MAC Rx/Tx */
3959 	stmmac_mac_set(priv, priv->ioaddr, false);
3960 
3961 	/* Power down the SerDes if present */
3962 	if (priv->plat->serdes_powerdown)
3963 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3964 
3965 	netif_carrier_off(dev);
3966 
3967 	stmmac_release_ptp(priv);
3968 
3969 	pm_runtime_put(priv->device);
3970 
3971 	if (priv->dma_cap.fpesel)
3972 		stmmac_fpe_stop_wq(priv);
3973 
3974 	return 0;
3975 }
3976 
3977 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3978 			       struct stmmac_tx_queue *tx_q)
3979 {
3980 	u16 tag = 0x0, inner_tag = 0x0;
3981 	u32 inner_type = 0x0;
3982 	struct dma_desc *p;
3983 
3984 	if (!priv->dma_cap.vlins)
3985 		return false;
3986 	if (!skb_vlan_tag_present(skb))
3987 		return false;
3988 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3989 		inner_tag = skb_vlan_tag_get(skb);
3990 		inner_type = STMMAC_VLAN_INSERT;
3991 	}
3992 
3993 	tag = skb_vlan_tag_get(skb);
3994 
3995 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3996 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3997 	else
3998 		p = &tx_q->dma_tx[tx_q->cur_tx];
3999 
4000 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4001 		return false;
4002 
4003 	stmmac_set_tx_owner(priv, p);
4004 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4005 	return true;
4006 }
4007 
4008 /**
4009  *  stmmac_tso_allocator - allocate and fill TSO descriptors for the payload
4010  *  @priv: driver private structure
4011  *  @des: buffer start address
4012  *  @total_len: total length to fill in descriptors
4013  *  @last_segment: condition for the last descriptor
4014  *  @queue: TX queue index
4015  *  Description:
4016  *  This function fills descriptors and requests new ones according to the
4017  *  buffer length to fill.
4018  */
4019 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4020 				 int total_len, bool last_segment, u32 queue)
4021 {
4022 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4023 	struct dma_desc *desc;
4024 	u32 buff_size;
4025 	int tmp_len;
4026 
4027 	tmp_len = total_len;
4028 
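	/* Split the payload into TSO_MAX_BUFF_SIZE chunks, one descriptor per
	 * chunk; only the final chunk of the last segment sets the Last
	 * Segment bit.
	 */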
4029 	while (tmp_len > 0) {
4030 		dma_addr_t curr_addr;
4031 
4032 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4033 						priv->dma_conf.dma_tx_size);
4034 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4035 
4036 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4037 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4038 		else
4039 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4040 
4041 		curr_addr = des + (total_len - tmp_len);
4042 		if (priv->dma_cap.addr64 <= 32)
4043 			desc->des0 = cpu_to_le32(curr_addr);
4044 		else
4045 			stmmac_set_desc_addr(priv, desc, curr_addr);
4046 
4047 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4048 			    TSO_MAX_BUFF_SIZE : tmp_len;
4049 
4050 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4051 				0, 1,
4052 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4053 				0, 0);
4054 
4055 		tmp_len -= TSO_MAX_BUFF_SIZE;
4056 	}
4057 }
4058 
4059 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4060 {
4061 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4062 	int desc_size;
4063 
4064 	if (likely(priv->extend_desc))
4065 		desc_size = sizeof(struct dma_extended_desc);
4066 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4067 		desc_size = sizeof(struct dma_edesc);
4068 	else
4069 		desc_size = sizeof(struct dma_desc);
4070 
4071 	/* The own bit must be the last thing written when preparing the
4072 	 * descriptor, and a barrier is needed to make sure everything
4073 	 * is coherent before handing ownership to the DMA engine.
4074 	 */
4075 	wmb();
4076 
4077 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4078 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4079 }
4080 
4081 /**
4082  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4083  *  @skb : the socket buffer
4084  *  @dev : device pointer
4085  *  Description: this is the transmit function that is called on TSO frames
4086  *  (support available on GMAC4 and newer chips).
4087  *  The diagram below shows the ring programming in the case of TSO frames:
4088  *
4089  *  First Descriptor
4090  *   --------
4091  *   | DES0 |---> buffer1 = L2/L3/L4 header
4092  *   | DES1 |---> TCP Payload (can continue on next descr...)
4093  *   | DES2 |---> buffer 1 and 2 len
4094  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4095  *   --------
4096  *	|
4097  *     ...
4098  *	|
4099  *   --------
4100  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4101  *   | DES1 | --|
4102  *   | DES2 | --> buffer 1 and 2 len
4103  *   | DES3 |
4104  *   --------
4105  *
4106  * The MSS is fixed per TSO frame, so the TDES3 context field is only reprogrammed when it changes.
4107  */
4108 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4109 {
4110 	struct dma_desc *desc, *first, *mss_desc = NULL;
4111 	struct stmmac_priv *priv = netdev_priv(dev);
4112 	int nfrags = skb_shinfo(skb)->nr_frags;
4113 	u32 queue = skb_get_queue_mapping(skb);
4114 	unsigned int first_entry, tx_packets;
4115 	int tmp_pay_len = 0, first_tx;
4116 	struct stmmac_tx_queue *tx_q;
4117 	bool has_vlan, set_ic;
4118 	u8 proto_hdr_len, hdr;
4119 	unsigned long flags;
4120 	u32 pay_len, mss;
4121 	dma_addr_t des;
4122 	int i;
4123 
4124 	tx_q = &priv->dma_conf.tx_queue[queue];
4125 	first_tx = tx_q->cur_tx;
4126 
4127 	/* Compute header lengths */
4128 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4129 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4130 		hdr = sizeof(struct udphdr);
4131 	} else {
4132 		proto_hdr_len = skb_tcp_all_headers(skb);
4133 		hdr = tcp_hdrlen(skb);
4134 	}
4135 
4136 	/* Descriptor availability based on the threshold should be safe enough */
4137 	if (unlikely(stmmac_tx_avail(priv, queue) <
4138 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4139 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4140 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4141 								queue));
4142 			/* This is a hard error, log it. */
4143 			netdev_err(priv->dev,
4144 				   "%s: Tx Ring full when queue awake\n",
4145 				   __func__);
4146 		}
4147 		return NETDEV_TX_BUSY;
4148 	}
4149 
4150 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4151 
4152 	mss = skb_shinfo(skb)->gso_size;
4153 
4154 	/* set new MSS value if needed */
4155 	if (mss != tx_q->mss) {
4156 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4157 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4158 		else
4159 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4160 
4161 		stmmac_set_mss(priv, mss_desc, mss);
4162 		tx_q->mss = mss;
4163 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4164 						priv->dma_conf.dma_tx_size);
4165 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4166 	}
4167 
4168 	if (netif_msg_tx_queued(priv)) {
4169 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4170 			__func__, hdr, proto_hdr_len, pay_len, mss);
4171 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4172 			skb->data_len);
4173 	}
4174 
4175 	/* Check if VLAN can be inserted by HW */
4176 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4177 
4178 	first_entry = tx_q->cur_tx;
4179 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4180 
4181 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4182 		desc = &tx_q->dma_entx[first_entry].basic;
4183 	else
4184 		desc = &tx_q->dma_tx[first_entry];
4185 	first = desc;
4186 
4187 	if (has_vlan)
4188 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4189 
4190 	/* first descriptor: fill Headers on Buf1 */
4191 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4192 			     DMA_TO_DEVICE);
4193 	if (dma_mapping_error(priv->device, des))
4194 		goto dma_map_err;
4195 
4196 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4197 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4198 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4199 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4200 
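	/* With 32-bit descriptor addresses, buffer 2 of the first descriptor
	 * can already point at the start of the payload; with wider addresses
	 * the whole payload is mapped by stmmac_tso_allocator() below.
	 */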
4201 	if (priv->dma_cap.addr64 <= 32) {
4202 		first->des0 = cpu_to_le32(des);
4203 
4204 		/* Fill start of payload in buff2 of first descriptor */
4205 		if (pay_len)
4206 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4207 
4208 		/* If needed take extra descriptors to fill the remaining payload */
4209 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4210 	} else {
4211 		stmmac_set_desc_addr(priv, first, des);
4212 		tmp_pay_len = pay_len;
4213 		des += proto_hdr_len;
4214 		pay_len = 0;
4215 	}
4216 
4217 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4218 
4219 	/* Prepare fragments */
4220 	for (i = 0; i < nfrags; i++) {
4221 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4222 
4223 		des = skb_frag_dma_map(priv->device, frag, 0,
4224 				       skb_frag_size(frag),
4225 				       DMA_TO_DEVICE);
4226 		if (dma_mapping_error(priv->device, des))
4227 			goto dma_map_err;
4228 
4229 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4230 				     (i == nfrags - 1), queue);
4231 
4232 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4233 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4234 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4235 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4236 	}
4237 
4238 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4239 
4240 	/* Only the last descriptor gets to point to the skb. */
4241 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4242 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4243 
4244 	/* Manage tx mitigation */
4245 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4246 	tx_q->tx_count_frames += tx_packets;
4247 
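	/* Set the Interrupt on Completion bit if a HW timestamp is requested,
	 * otherwise roughly once every tx_coal_frames packets (TX mitigation).
	 */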
4248 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4249 		set_ic = true;
4250 	else if (!priv->tx_coal_frames[queue])
4251 		set_ic = false;
4252 	else if (tx_packets > priv->tx_coal_frames[queue])
4253 		set_ic = true;
4254 	else if ((tx_q->tx_count_frames %
4255 		  priv->tx_coal_frames[queue]) < tx_packets)
4256 		set_ic = true;
4257 	else
4258 		set_ic = false;
4259 
4260 	if (set_ic) {
4261 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4262 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4263 		else
4264 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4265 
4266 		tx_q->tx_count_frames = 0;
4267 		stmmac_set_tx_ic(priv, desc);
4268 	}
4269 
4270 	/* We've used all descriptors we need for this skb, however,
4271 	 * advance cur_tx so that it references a fresh descriptor.
4272 	 * ndo_start_xmit will fill this descriptor the next time it's
4273 	 * called and stmmac_tx_clean may clean up to this descriptor.
4274 	 */
4275 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4276 
4277 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4278 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4279 			  __func__);
4280 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4281 	}
4282 
4283 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4284 	tx_q->txq_stats.tx_bytes += skb->len;
4285 	tx_q->txq_stats.tx_tso_frames++;
4286 	tx_q->txq_stats.tx_tso_nfrags += nfrags;
4287 	if (set_ic)
4288 		tx_q->txq_stats.tx_set_ic_bit++;
4289 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4290 
4291 	if (priv->sarc_type)
4292 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4293 
4294 	skb_tx_timestamp(skb);
4295 
4296 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4297 		     priv->hwts_tx_en)) {
4298 		/* declare that device is doing timestamping */
4299 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4300 		stmmac_enable_tx_timestamp(priv, first);
4301 	}
4302 
4303 	/* Complete the first descriptor before granting the DMA */
4304 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4305 			proto_hdr_len,
4306 			pay_len,
4307 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4308 			hdr / 4, (skb->len - proto_hdr_len));
4309 
4310 	/* If context desc is used to change MSS */
4311 	if (mss_desc) {
4312 		/* Make sure that the first descriptor has been completely
4313 		 * written, including its own bit. This is because the MSS
4314 		 * descriptor actually comes before the first descriptor, so
4315 		 * we need to make sure that its own bit is the last thing written.
4316 		 */
4317 		dma_wmb();
4318 		stmmac_set_tx_owner(priv, mss_desc);
4319 	}
4320 
4321 	if (netif_msg_pktdata(priv)) {
4322 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4323 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4324 			tx_q->cur_tx, first, nfrags);
4325 		pr_info(">>> frame to be transmitted: ");
4326 		print_pkt(skb->data, skb_headlen(skb));
4327 	}
4328 
4329 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4330 
4331 	stmmac_flush_tx_descriptors(priv, queue);
4332 	stmmac_tx_timer_arm(priv, queue);
4333 
4334 	return NETDEV_TX_OK;
4335 
4336 dma_map_err:
4337 	dev_err(priv->device, "Tx dma map failed\n");
4338 	dev_kfree_skb(skb);
4339 	priv->xstats.tx_dropped++;
4340 	return NETDEV_TX_OK;
4341 }
4342 
4343 /**
4344  *  stmmac_xmit - Tx entry point of the driver
4345  *  @skb : the socket buffer
4346  *  @dev : device pointer
4347  *  Description : this is the tx entry point of the driver.
4348  *  It programs the chain or the ring and supports oversized frames
4349  *  and SG feature.
4350  */
4351 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4352 {
4353 	unsigned int first_entry, tx_packets, enh_desc;
4354 	struct stmmac_priv *priv = netdev_priv(dev);
4355 	unsigned int nopaged_len = skb_headlen(skb);
4356 	int i, csum_insertion = 0, is_jumbo = 0;
4357 	u32 queue = skb_get_queue_mapping(skb);
4358 	int nfrags = skb_shinfo(skb)->nr_frags;
4359 	int gso = skb_shinfo(skb)->gso_type;
4360 	struct dma_edesc *tbs_desc = NULL;
4361 	struct dma_desc *desc, *first;
4362 	struct stmmac_tx_queue *tx_q;
4363 	bool has_vlan, set_ic;
4364 	int entry, first_tx;
4365 	unsigned long flags;
4366 	dma_addr_t des;
4367 
4368 	tx_q = &priv->dma_conf.tx_queue[queue];
4369 	first_tx = tx_q->cur_tx;
4370 
4371 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4372 		stmmac_disable_eee_mode(priv);
4373 
4374 	/* Manage oversized TCP frames for GMAC4 device */
4375 	if (skb_is_gso(skb) && priv->tso) {
4376 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4377 			return stmmac_tso_xmit(skb, dev);
4378 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4379 			return stmmac_tso_xmit(skb, dev);
4380 	}
4381 
4382 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4383 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4384 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4385 								queue));
4386 			/* This is a hard error, log it. */
4387 			netdev_err(priv->dev,
4388 				   "%s: Tx Ring full when queue awake\n",
4389 				   __func__);
4390 		}
4391 		return NETDEV_TX_BUSY;
4392 	}
4393 
4394 	/* Check if VLAN can be inserted by HW */
4395 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4396 
4397 	entry = tx_q->cur_tx;
4398 	first_entry = entry;
4399 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4400 
4401 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4402 
4403 	if (likely(priv->extend_desc))
4404 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4405 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4406 		desc = &tx_q->dma_entx[entry].basic;
4407 	else
4408 		desc = tx_q->dma_tx + entry;
4409 
4410 	first = desc;
4411 
4412 	if (has_vlan)
4413 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4414 
4415 	enh_desc = priv->plat->enh_desc;
4416 	/* To program the descriptors according to the size of the frame */
4417 	if (enh_desc)
4418 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4419 
4420 	if (unlikely(is_jumbo)) {
4421 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4422 		if (unlikely(entry < 0) && (entry != -EINVAL))
4423 			goto dma_map_err;
4424 	}
4425 
4426 	for (i = 0; i < nfrags; i++) {
4427 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4428 		int len = skb_frag_size(frag);
4429 		bool last_segment = (i == (nfrags - 1));
4430 
4431 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4432 		WARN_ON(tx_q->tx_skbuff[entry]);
4433 
4434 		if (likely(priv->extend_desc))
4435 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4436 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4437 			desc = &tx_q->dma_entx[entry].basic;
4438 		else
4439 			desc = tx_q->dma_tx + entry;
4440 
4441 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4442 				       DMA_TO_DEVICE);
4443 		if (dma_mapping_error(priv->device, des))
4444 			goto dma_map_err; /* should reuse desc w/o issues */
4445 
4446 		tx_q->tx_skbuff_dma[entry].buf = des;
4447 
4448 		stmmac_set_desc_addr(priv, desc, des);
4449 
4450 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4451 		tx_q->tx_skbuff_dma[entry].len = len;
4452 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4453 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4454 
4455 		/* Prepare the descriptor and set the own bit too */
4456 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4457 				priv->mode, 1, last_segment, skb->len);
4458 	}
4459 
4460 	/* Only the last descriptor gets to point to the skb. */
4461 	tx_q->tx_skbuff[entry] = skb;
4462 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4463 
4464 	/* According to the coalesce parameter, the IC bit for the latest
4465 	 * segment is set or cleared and the timer is re-started to clean the
4466 	 * tx status. This approach takes care of the fragments: desc is the
4467 	 * first element in case of no SG.
4468 	 */
4469 	tx_packets = (entry + 1) - first_tx;
4470 	tx_q->tx_count_frames += tx_packets;
4471 
4472 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4473 		set_ic = true;
4474 	else if (!priv->tx_coal_frames[queue])
4475 		set_ic = false;
4476 	else if (tx_packets > priv->tx_coal_frames[queue])
4477 		set_ic = true;
4478 	else if ((tx_q->tx_count_frames %
4479 		  priv->tx_coal_frames[queue]) < tx_packets)
4480 		set_ic = true;
4481 	else
4482 		set_ic = false;
4483 
4484 	if (set_ic) {
4485 		if (likely(priv->extend_desc))
4486 			desc = &tx_q->dma_etx[entry].basic;
4487 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4488 			desc = &tx_q->dma_entx[entry].basic;
4489 		else
4490 			desc = &tx_q->dma_tx[entry];
4491 
4492 		tx_q->tx_count_frames = 0;
4493 		stmmac_set_tx_ic(priv, desc);
4494 	}
4495 
4496 	/* We've used all descriptors we need for this skb, however,
4497 	 * advance cur_tx so that it references a fresh descriptor.
4498 	 * ndo_start_xmit will fill this descriptor the next time it's
4499 	 * called and stmmac_tx_clean may clean up to this descriptor.
4500 	 */
4501 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4502 	tx_q->cur_tx = entry;
4503 
4504 	if (netif_msg_pktdata(priv)) {
4505 		netdev_dbg(priv->dev,
4506 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4507 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4508 			   entry, first, nfrags);
4509 
4510 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4511 		print_pkt(skb->data, skb->len);
4512 	}
4513 
4514 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4515 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4516 			  __func__);
4517 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4518 	}
4519 
4520 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4521 	tx_q->txq_stats.tx_bytes += skb->len;
4522 	if (set_ic)
4523 		tx_q->txq_stats.tx_set_ic_bit++;
4524 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4525 
4526 	if (priv->sarc_type)
4527 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4528 
4529 	skb_tx_timestamp(skb);
4530 
4531 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4532 	 * problems because all the descriptors are actually ready to be
4533 	 * passed to the DMA engine.
4534 	 */
4535 	if (likely(!is_jumbo)) {
4536 		bool last_segment = (nfrags == 0);
4537 
4538 		des = dma_map_single(priv->device, skb->data,
4539 				     nopaged_len, DMA_TO_DEVICE);
4540 		if (dma_mapping_error(priv->device, des))
4541 			goto dma_map_err;
4542 
4543 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4544 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4545 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4546 
4547 		stmmac_set_desc_addr(priv, first, des);
4548 
4549 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4550 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4551 
4552 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4553 			     priv->hwts_tx_en)) {
4554 			/* declare that device is doing timestamping */
4555 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4556 			stmmac_enable_tx_timestamp(priv, first);
4557 		}
4558 
4559 		/* Prepare the first descriptor setting the OWN bit too */
4560 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4561 				csum_insertion, priv->mode, 0, last_segment,
4562 				skb->len);
4563 	}
4564 
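	/* With TBS enabled, program the launch time from skb->tstamp into the
	 * enhanced descriptor.
	 */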
4565 	if (tx_q->tbs & STMMAC_TBS_EN) {
4566 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4567 
4568 		tbs_desc = &tx_q->dma_entx[first_entry];
4569 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4570 	}
4571 
4572 	stmmac_set_tx_owner(priv, first);
4573 
4574 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4575 
4576 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4577 
4578 	stmmac_flush_tx_descriptors(priv, queue);
4579 	stmmac_tx_timer_arm(priv, queue);
4580 
4581 	return NETDEV_TX_OK;
4582 
4583 dma_map_err:
4584 	netdev_err(priv->dev, "Tx DMA map failed\n");
4585 	dev_kfree_skb(skb);
4586 	priv->xstats.tx_dropped++;
4587 	return NETDEV_TX_OK;
4588 }
4589 
4590 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4591 {
4592 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4593 	__be16 vlan_proto = veth->h_vlan_proto;
4594 	u16 vlanid;
4595 
4596 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4597 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4598 	    (vlan_proto == htons(ETH_P_8021AD) &&
4599 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4600 		/* pop the vlan tag */
4601 		vlanid = ntohs(veth->h_vlan_TCI);
4602 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4603 		skb_pull(skb, VLAN_HLEN);
4604 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4605 	}
4606 }
4607 
4608 /**
4609  * stmmac_rx_refill - refill used preallocated RX buffers
4610  * @priv: driver private structure
4611  * @queue: RX queue index
4612  * Description : this is to refill the RX ring with fresh page-pool buffers
4613  * for the reception process that is based on zero-copy.
4614  */
4615 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4616 {
4617 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4618 	int dirty = stmmac_rx_dirty(priv, queue);
4619 	unsigned int entry = rx_q->dirty_rx;
4620 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4621 
4622 	if (priv->dma_cap.host_dma_width <= 32)
4623 		gfp |= GFP_DMA32;
4624 
4625 	while (dirty-- > 0) {
4626 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4627 		struct dma_desc *p;
4628 		bool use_rx_wd;
4629 
4630 		if (priv->extend_desc)
4631 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4632 		else
4633 			p = rx_q->dma_rx + entry;
4634 
4635 		if (!buf->page) {
4636 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4637 			if (!buf->page)
4638 				break;
4639 		}
4640 
4641 		if (priv->sph && !buf->sec_page) {
4642 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4643 			if (!buf->sec_page)
4644 				break;
4645 
4646 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4647 		}
4648 
4649 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4650 
4651 		stmmac_set_desc_addr(priv, p, buf->addr);
4652 		if (priv->sph)
4653 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4654 		else
4655 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4656 		stmmac_refill_desc3(priv, rx_q, p);
4657 
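		/* use_rx_wd selects watchdog(RIWT)-based interrupt mitigation
		 * for this descriptor instead of an immediate completion
		 * interrupt; it is forced off when RIWT is not in use.
		 */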
4658 		rx_q->rx_count_frames++;
4659 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4660 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4661 			rx_q->rx_count_frames = 0;
4662 
4663 		use_rx_wd = !priv->rx_coal_frames[queue];
4664 		use_rx_wd |= rx_q->rx_count_frames > 0;
4665 		if (!priv->use_riwt)
4666 			use_rx_wd = false;
4667 
4668 		dma_wmb();
4669 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4670 
4671 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4672 	}
4673 	rx_q->dirty_rx = entry;
4674 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4675 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4676 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4677 }
4678 
4679 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4680 				       struct dma_desc *p,
4681 				       int status, unsigned int len)
4682 {
4683 	unsigned int plen = 0, hlen = 0;
4684 	int coe = priv->hw->rx_csum;
4685 
4686 	/* Not first descriptor, buffer is always zero */
4687 	if (priv->sph && len)
4688 		return 0;
4689 
4690 	/* First descriptor, get split header length */
4691 	stmmac_get_rx_header_len(priv, p, &hlen);
4692 	if (priv->sph && hlen) {
4693 		priv->xstats.rx_split_hdr_pkt_n++;
4694 		return hlen;
4695 	}
4696 
4697 	/* First descriptor, not last descriptor and not split header */
4698 	if (status & rx_not_ls)
4699 		return priv->dma_conf.dma_buf_sz;
4700 
4701 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4702 
4703 	/* First descriptor and last descriptor and not split header */
4704 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4705 }
4706 
4707 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4708 				       struct dma_desc *p,
4709 				       int status, unsigned int len)
4710 {
4711 	int coe = priv->hw->rx_csum;
4712 	unsigned int plen = 0;
4713 
4714 	/* Not split header, buffer is not available */
4715 	if (!priv->sph)
4716 		return 0;
4717 
4718 	/* Not last descriptor */
4719 	if (status & rx_not_ls)
4720 		return priv->dma_conf.dma_buf_sz;
4721 
4722 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4723 
4724 	/* Last descriptor */
4725 	return plen - len;
4726 }
4727 
4728 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4729 				struct xdp_frame *xdpf, bool dma_map)
4730 {
4731 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4732 	unsigned int entry = tx_q->cur_tx;
4733 	struct dma_desc *tx_desc;
4734 	dma_addr_t dma_addr;
4735 	bool set_ic;
4736 
4737 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4738 		return STMMAC_XDP_CONSUMED;
4739 
4740 	if (likely(priv->extend_desc))
4741 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4742 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4743 		tx_desc = &tx_q->dma_entx[entry].basic;
4744 	else
4745 		tx_desc = tx_q->dma_tx + entry;
4746 
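	/* For ndo_xdp_xmit (dma_map == true) the frame data must be freshly
	 * DMA mapped; for XDP_TX the data lives in an already mapped
	 * page_pool page, so only a DMA sync is needed.
	 */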
4747 	if (dma_map) {
4748 		dma_addr = dma_map_single(priv->device, xdpf->data,
4749 					  xdpf->len, DMA_TO_DEVICE);
4750 		if (dma_mapping_error(priv->device, dma_addr))
4751 			return STMMAC_XDP_CONSUMED;
4752 
4753 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4754 	} else {
4755 		struct page *page = virt_to_page(xdpf->data);
4756 
4757 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4758 			   xdpf->headroom;
4759 		dma_sync_single_for_device(priv->device, dma_addr,
4760 					   xdpf->len, DMA_BIDIRECTIONAL);
4761 
4762 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4763 	}
4764 
4765 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4766 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4767 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4768 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4769 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4770 
4771 	tx_q->xdpf[entry] = xdpf;
4772 
4773 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4774 
4775 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4776 			       true, priv->mode, true, true,
4777 			       xdpf->len);
4778 
4779 	tx_q->tx_count_frames++;
4780 
4781 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4782 		set_ic = true;
4783 	else
4784 		set_ic = false;
4785 
4786 	if (set_ic) {
4787 		unsigned long flags;

4788 		tx_q->tx_count_frames = 0;
4789 		stmmac_set_tx_ic(priv, tx_desc);
4790 		flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4791 		tx_q->txq_stats.tx_set_ic_bit++;
4792 		u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4793 	}
4794 
4795 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4796 
4797 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4798 	tx_q->cur_tx = entry;
4799 
4800 	return STMMAC_XDP_TX;
4801 }
4802 
4803 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4804 				   int cpu)
4805 {
4806 	int index = cpu;
4807 
4808 	if (unlikely(index < 0))
4809 		index = 0;
4810 
4811 	while (index >= priv->plat->tx_queues_to_use)
4812 		index -= priv->plat->tx_queues_to_use;
4813 
4814 	return index;
4815 }
4816 
4817 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4818 				struct xdp_buff *xdp)
4819 {
4820 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4821 	int cpu = smp_processor_id();
4822 	struct netdev_queue *nq;
4823 	int queue;
4824 	int res;
4825 
4826 	if (unlikely(!xdpf))
4827 		return STMMAC_XDP_CONSUMED;
4828 
4829 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4830 	nq = netdev_get_tx_queue(priv->dev, queue);
4831 
4832 	__netif_tx_lock(nq, cpu);
4833 	/* Avoids TX time-out as we are sharing with slow path */
4834 	txq_trans_cond_update(nq);
4835 
4836 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4837 	if (res == STMMAC_XDP_TX)
4838 		stmmac_flush_tx_descriptors(priv, queue);
4839 
4840 	__netif_tx_unlock(nq);
4841 
4842 	return res;
4843 }
4844 
4845 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4846 				 struct bpf_prog *prog,
4847 				 struct xdp_buff *xdp)
4848 {
4849 	u32 act;
4850 	int res;
4851 
4852 	act = bpf_prog_run_xdp(prog, xdp);
4853 	switch (act) {
4854 	case XDP_PASS:
4855 		res = STMMAC_XDP_PASS;
4856 		break;
4857 	case XDP_TX:
4858 		res = stmmac_xdp_xmit_back(priv, xdp);
4859 		break;
4860 	case XDP_REDIRECT:
4861 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4862 			res = STMMAC_XDP_CONSUMED;
4863 		else
4864 			res = STMMAC_XDP_REDIRECT;
4865 		break;
4866 	default:
4867 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4868 		fallthrough;
4869 	case XDP_ABORTED:
4870 		trace_xdp_exception(priv->dev, prog, act);
4871 		fallthrough;
4872 	case XDP_DROP:
4873 		res = STMMAC_XDP_CONSUMED;
4874 		break;
4875 	}
4876 
4877 	return res;
4878 }
4879 
4880 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4881 					   struct xdp_buff *xdp)
4882 {
4883 	struct bpf_prog *prog;
4884 	int res;
4885 
4886 	prog = READ_ONCE(priv->xdp_prog);
4887 	if (!prog) {
4888 		res = STMMAC_XDP_PASS;
4889 		goto out;
4890 	}
4891 
4892 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4893 out:
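	/* res is a positive STMMAC_XDP_* verdict: STMMAC_XDP_PASS (0) becomes
	 * a NULL return, the other verdicts are encoded as an error pointer
	 * so the caller can tell the verdict apart from a real skb.
	 */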
4894 	return ERR_PTR(-res);
4895 }
4896 
4897 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4898 				   int xdp_status)
4899 {
4900 	int cpu = smp_processor_id();
4901 	int queue;
4902 
4903 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4904 
4905 	if (xdp_status & STMMAC_XDP_TX)
4906 		stmmac_tx_timer_arm(priv, queue);
4907 
4908 	if (xdp_status & STMMAC_XDP_REDIRECT)
4909 		xdp_do_flush();
4910 }
4911 
4912 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4913 					       struct xdp_buff *xdp)
4914 {
4915 	unsigned int metasize = xdp->data - xdp->data_meta;
4916 	unsigned int datasize = xdp->data_end - xdp->data;
4917 	struct sk_buff *skb;
4918 
4919 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4920 			       xdp->data_end - xdp->data_hard_start,
4921 			       GFP_ATOMIC | __GFP_NOWARN);
4922 	if (unlikely(!skb))
4923 		return NULL;
4924 
4925 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4926 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4927 	if (metasize)
4928 		skb_metadata_set(skb, metasize);
4929 
4930 	return skb;
4931 }
4932 
4933 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4934 				   struct dma_desc *p, struct dma_desc *np,
4935 				   struct xdp_buff *xdp)
4936 {
4937 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4938 	struct stmmac_channel *ch = &priv->channel[queue];
4939 	unsigned int len = xdp->data_end - xdp->data;
4940 	enum pkt_hash_types hash_type;
4941 	int coe = priv->hw->rx_csum;
4942 	unsigned long flags;
4943 	struct sk_buff *skb;
4944 	u32 hash;
4945 
4946 	skb = stmmac_construct_skb_zc(ch, xdp);
4947 	if (!skb) {
4948 		priv->xstats.rx_dropped++;
4949 		return;
4950 	}
4951 
4952 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4953 	stmmac_rx_vlan(priv->dev, skb);
4954 	skb->protocol = eth_type_trans(skb, priv->dev);
4955 
4956 	if (unlikely(!coe))
4957 		skb_checksum_none_assert(skb);
4958 	else
4959 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4960 
4961 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4962 		skb_set_hash(skb, hash, hash_type);
4963 
4964 	skb_record_rx_queue(skb, queue);
4965 	napi_gro_receive(&ch->rxtx_napi, skb);
4966 
4967 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
4968 	rx_q->rxq_stats.rx_pkt_n++;
4969 	rx_q->rxq_stats.rx_bytes += len;
4970 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
4971 }
4972 
4973 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4974 {
4975 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4976 	unsigned int entry = rx_q->dirty_rx;
4977 	struct dma_desc *rx_desc = NULL;
4978 	bool ret = true;
4979 
4980 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4981 
4982 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4983 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4984 		dma_addr_t dma_addr;
4985 		bool use_rx_wd;
4986 
4987 		if (!buf->xdp) {
4988 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4989 			if (!buf->xdp) {
4990 				ret = false;
4991 				break;
4992 			}
4993 		}
4994 
4995 		if (priv->extend_desc)
4996 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4997 		else
4998 			rx_desc = rx_q->dma_rx + entry;
4999 
5000 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5001 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5002 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5003 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5004 
5005 		rx_q->rx_count_frames++;
5006 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5007 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5008 			rx_q->rx_count_frames = 0;
5009 
5010 		use_rx_wd = !priv->rx_coal_frames[queue];
5011 		use_rx_wd |= rx_q->rx_count_frames > 0;
5012 		if (!priv->use_riwt)
5013 			use_rx_wd = false;
5014 
5015 		dma_wmb();
5016 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5017 
5018 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5019 	}
5020 
5021 	if (rx_desc) {
5022 		rx_q->dirty_rx = entry;
5023 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5024 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5025 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5026 	}
5027 
5028 	return ret;
5029 }
5030 
5031 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5032 {
5033 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5034 	 * to represent incoming packet, whereas cb field in the same structure
5035 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5036 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5037 	 */
5038 	return (struct stmmac_xdp_buff *)xdp;
5039 }
5040 
5041 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5042 {
5043 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5044 	unsigned int count = 0, error = 0, len = 0;
5045 	int dirty = stmmac_rx_dirty(priv, queue);
5046 	unsigned int next_entry = rx_q->cur_rx;
5047 	u32 rx_errors = 0, rx_dropped = 0;
5048 	unsigned int desc_size;
5049 	struct bpf_prog *prog;
5050 	bool failure = false;
5051 	unsigned long flags;
5052 	int xdp_status = 0;
5053 	int status = 0;
5054 
5055 	if (netif_msg_rx_status(priv)) {
5056 		void *rx_head;
5057 
5058 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5059 		if (priv->extend_desc) {
5060 			rx_head = (void *)rx_q->dma_erx;
5061 			desc_size = sizeof(struct dma_extended_desc);
5062 		} else {
5063 			rx_head = (void *)rx_q->dma_rx;
5064 			desc_size = sizeof(struct dma_desc);
5065 		}
5066 
5067 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5068 				    rx_q->dma_rx_phy, desc_size);
5069 	}
5070 	while (count < limit) {
5071 		struct stmmac_rx_buffer *buf;
5072 		struct stmmac_xdp_buff *ctx;
5073 		unsigned int buf1_len = 0;
5074 		struct dma_desc *np, *p;
5075 		int entry;
5076 		int res;
5077 
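		/* Restore the per-queue state saved when a previous NAPI run
		 * stopped in the middle of a multi-descriptor frame.
		 */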
5078 		if (!count && rx_q->state_saved) {
5079 			error = rx_q->state.error;
5080 			len = rx_q->state.len;
5081 		} else {
5082 			rx_q->state_saved = false;
5083 			error = 0;
5084 			len = 0;
5085 		}
5086 
5087 		if (count >= limit)
5088 			break;
5089 
5090 read_again:
5091 		buf1_len = 0;
5092 		entry = next_entry;
5093 		buf = &rx_q->buf_pool[entry];
5094 
5095 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5096 			failure = failure ||
5097 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5098 			dirty = 0;
5099 		}
5100 
5101 		if (priv->extend_desc)
5102 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5103 		else
5104 			p = rx_q->dma_rx + entry;
5105 
5106 		/* read the status of the incoming frame */
5107 		status = stmmac_rx_status(priv, &priv->xstats, p);
5108 		/* check if managed by the DMA otherwise go ahead */
5109 		if (unlikely(status & dma_own))
5110 			break;
5111 
5112 		/* Prefetch the next RX descriptor */
5113 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5114 						priv->dma_conf.dma_rx_size);
5115 		next_entry = rx_q->cur_rx;
5116 
5117 		if (priv->extend_desc)
5118 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5119 		else
5120 			np = rx_q->dma_rx + next_entry;
5121 
5122 		prefetch(np);
5123 
5124 		/* Ensure a valid XSK buffer before proceeding */
5125 		if (!buf->xdp)
5126 			break;
5127 
5128 		if (priv->extend_desc)
5129 			stmmac_rx_extended_status(priv, &priv->xstats,
5130 						  rx_q->dma_erx + entry);
5131 		if (unlikely(status == discard_frame)) {
5132 			xsk_buff_free(buf->xdp);
5133 			buf->xdp = NULL;
5134 			dirty++;
5135 			error = 1;
5136 			if (!priv->hwts_rx_en)
5137 				rx_errors++;
5138 		}
5139 
5140 		if (unlikely(error && (status & rx_not_ls)))
5141 			goto read_again;
5142 		if (unlikely(error)) {
5143 			count++;
5144 			continue;
5145 		}
5146 
5147 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5148 		if (likely(status & rx_not_ls)) {
5149 			xsk_buff_free(buf->xdp);
5150 			buf->xdp = NULL;
5151 			dirty++;
5152 			count++;
5153 			goto read_again;
5154 		}
5155 
5156 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5157 		ctx->priv = priv;
5158 		ctx->desc = p;
5159 		ctx->ndesc = np;
5160 
5161 		/* XDP ZC frames only support primary buffers for now */
5162 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5163 		len += buf1_len;
5164 
5165 		/* ACS is disabled; strip manually. */
5166 		if (likely(!(status & rx_not_ls))) {
5167 			buf1_len -= ETH_FCS_LEN;
5168 			len -= ETH_FCS_LEN;
5169 		}
5170 
5171 		/* RX buffer is good and fits into an XSK pool buffer */
5172 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5173 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5174 
5175 		prog = READ_ONCE(priv->xdp_prog);
5176 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5177 
5178 		switch (res) {
5179 		case STMMAC_XDP_PASS:
5180 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5181 			xsk_buff_free(buf->xdp);
5182 			break;
5183 		case STMMAC_XDP_CONSUMED:
5184 			xsk_buff_free(buf->xdp);
5185 			rx_dropped++;
5186 			break;
5187 		case STMMAC_XDP_TX:
5188 		case STMMAC_XDP_REDIRECT:
5189 			xdp_status |= res;
5190 			break;
5191 		}
5192 
5193 		buf->xdp = NULL;
5194 		dirty++;
5195 		count++;
5196 	}
5197 
5198 	if (status & rx_not_ls) {
5199 		rx_q->state_saved = true;
5200 		rx_q->state.error = error;
5201 		rx_q->state.len = len;
5202 	}
5203 
5204 	stmmac_finalize_xdp_rx(priv, xdp_status);
5205 
5206 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5207 	rx_q->rxq_stats.rx_pkt_n += count;
5208 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5209 
5210 	priv->xstats.rx_dropped += rx_dropped;
5211 	priv->xstats.rx_errors += rx_errors;
5212 
5213 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5214 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5215 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5216 		else
5217 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5218 
5219 		return (int)count;
5220 	}
5221 
5222 	return failure ? limit : (int)count;
5223 }
5224 
5225 /**
5226  * stmmac_rx - manage the receive process
5227  * @priv: driver private structure
5228  * @limit: napi budget
5229  * @queue: RX queue index.
5230  * Description : this is the function called by the napi poll method.
5231  * It gets all the frames inside the ring.
5232  */
5233 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5234 {
5235 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5236 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5237 	struct stmmac_channel *ch = &priv->channel[queue];
5238 	unsigned int count = 0, error = 0, len = 0;
5239 	int status = 0, coe = priv->hw->rx_csum;
5240 	unsigned int next_entry = rx_q->cur_rx;
5241 	enum dma_data_direction dma_dir;
5242 	unsigned int desc_size;
5243 	struct sk_buff *skb = NULL;
5244 	struct stmmac_xdp_buff ctx;
5245 	unsigned long flags;
5246 	int xdp_status = 0;
5247 	int buf_sz;
5248 
5249 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5250 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5251 
5252 	if (netif_msg_rx_status(priv)) {
5253 		void *rx_head;
5254 
5255 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5256 		if (priv->extend_desc) {
5257 			rx_head = (void *)rx_q->dma_erx;
5258 			desc_size = sizeof(struct dma_extended_desc);
5259 		} else {
5260 			rx_head = (void *)rx_q->dma_rx;
5261 			desc_size = sizeof(struct dma_desc);
5262 		}
5263 
5264 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5265 				    rx_q->dma_rx_phy, desc_size);
5266 	}
5267 	while (count < limit) {
5268 		unsigned int buf1_len = 0, buf2_len = 0;
5269 		enum pkt_hash_types hash_type;
5270 		struct stmmac_rx_buffer *buf;
5271 		struct dma_desc *np, *p;
5272 		int entry;
5273 		u32 hash;
5274 
5275 		if (!count && rx_q->state_saved) {
5276 			skb = rx_q->state.skb;
5277 			error = rx_q->state.error;
5278 			len = rx_q->state.len;
5279 		} else {
5280 			rx_q->state_saved = false;
5281 			skb = NULL;
5282 			error = 0;
5283 			len = 0;
5284 		}
5285 
5286 		if (count >= limit)
5287 			break;
5288 
5289 read_again:
5290 		buf1_len = 0;
5291 		buf2_len = 0;
5292 		entry = next_entry;
5293 		buf = &rx_q->buf_pool[entry];
5294 
5295 		if (priv->extend_desc)
5296 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5297 		else
5298 			p = rx_q->dma_rx + entry;
5299 
5300 		/* read the status of the incoming frame */
5301 		status = stmmac_rx_status(priv, &priv->xstats, p);
5302 		/* check if the descriptor is still owned by the DMA, otherwise go ahead */
5303 		if (unlikely(status & dma_own))
5304 			break;
5305 
5306 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5307 						priv->dma_conf.dma_rx_size);
5308 		next_entry = rx_q->cur_rx;
5309 
5310 		if (priv->extend_desc)
5311 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5312 		else
5313 			np = rx_q->dma_rx + next_entry;
5314 
5315 		prefetch(np);
5316 
5317 		if (priv->extend_desc)
5318 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5319 		if (unlikely(status == discard_frame)) {
5320 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5321 			buf->page = NULL;
5322 			error = 1;
5323 			if (!priv->hwts_rx_en)
5324 				rx_errors++;
5325 		}
5326 
5327 		if (unlikely(error && (status & rx_not_ls)))
5328 			goto read_again;
5329 		if (unlikely(error)) {
5330 			dev_kfree_skb(skb);
5331 			skb = NULL;
5332 			count++;
5333 			continue;
5334 		}
5335 
5336 		/* Buffer is good. Go on. */
5337 
5338 		prefetch(page_address(buf->page) + buf->page_offset);
5339 		if (buf->sec_page)
5340 			prefetch(page_address(buf->sec_page));
5341 
5342 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5343 		len += buf1_len;
5344 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5345 		len += buf2_len;
5346 
5347 		/* ACS is disabled; strip manually. */
5348 		if (likely(!(status & rx_not_ls))) {
5349 			if (buf2_len) {
5350 				buf2_len -= ETH_FCS_LEN;
5351 				len -= ETH_FCS_LEN;
5352 			} else if (buf1_len) {
5353 				buf1_len -= ETH_FCS_LEN;
5354 				len -= ETH_FCS_LEN;
5355 			}
5356 		}
5357 
5358 		if (!skb) {
5359 			unsigned int pre_len, sync_len;
5360 
5361 			dma_sync_single_for_cpu(priv->device, buf->addr,
5362 						buf1_len, dma_dir);
5363 
5364 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5365 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5366 					 buf->page_offset, buf1_len, true);
5367 
5368 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5369 				  buf->page_offset;
5370 
5371 			ctx.priv = priv;
5372 			ctx.desc = p;
5373 			ctx.ndesc = np;
5374 
5375 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5376 			/* Due to xdp_adjust_tail(): the DMA sync for_device
5377 			 * must cover the max length the CPU touched.
5378 			 */
5379 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5380 				   buf->page_offset;
5381 			sync_len = max(sync_len, pre_len);
5382 
5383 			/* For any verdict other than XDP_PASS */
5384 			if (IS_ERR(skb)) {
5385 				unsigned int xdp_res = -PTR_ERR(skb);
5386 
5387 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5388 					page_pool_put_page(rx_q->page_pool,
5389 							   virt_to_head_page(ctx.xdp.data),
5390 							   sync_len, true);
5391 					buf->page = NULL;
5392 					rx_dropped++;
5393 
5394 					/* Clear skb, as it was set to carry the
5395 					 * status returned by the XDP program.
5396 					 */
5397 					skb = NULL;
5398 
5399 					if (unlikely((status & rx_not_ls)))
5400 						goto read_again;
5401 
5402 					count++;
5403 					continue;
5404 				} else if (xdp_res & (STMMAC_XDP_TX |
5405 						      STMMAC_XDP_REDIRECT)) {
5406 					xdp_status |= xdp_res;
5407 					buf->page = NULL;
5408 					skb = NULL;
5409 					count++;
5410 					continue;
5411 				}
5412 			}
5413 		}
5414 
5415 		if (!skb) {
5416 			/* XDP program may expand or reduce tail */
5417 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5418 
5419 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5420 			if (!skb) {
5421 				rx_dropped++;
5422 				count++;
5423 				goto drain_data;
5424 			}
5425 
5426 			/* XDP program may adjust header */
5427 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5428 			skb_put(skb, buf1_len);
5429 
5430 			/* Data payload copied into SKB, page ready for recycle */
5431 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5432 			buf->page = NULL;
5433 		} else if (buf1_len) {
5434 			dma_sync_single_for_cpu(priv->device, buf->addr,
5435 						buf1_len, dma_dir);
5436 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5437 					buf->page, buf->page_offset, buf1_len,
5438 					priv->dma_conf.dma_buf_sz);
5439 
5440 			/* Data payload appended into SKB */
5441 			skb_mark_for_recycle(skb);
5442 			buf->page = NULL;
5443 		}
5444 
5445 		if (buf2_len) {
5446 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5447 						buf2_len, dma_dir);
5448 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5449 					buf->sec_page, 0, buf2_len,
5450 					priv->dma_conf.dma_buf_sz);
5451 
5452 			/* Data payload appended into SKB */
5453 			skb_mark_for_recycle(skb);
5454 			buf->sec_page = NULL;
5455 		}
5456 
5457 drain_data:
5458 		if (likely(status & rx_not_ls))
5459 			goto read_again;
5460 		if (!skb)
5461 			continue;
5462 
5463 		/* Got entire packet into SKB. Finish it. */
5464 
5465 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5466 		stmmac_rx_vlan(priv->dev, skb);
5467 		skb->protocol = eth_type_trans(skb, priv->dev);
5468 
5469 		if (unlikely(!coe))
5470 			skb_checksum_none_assert(skb);
5471 		else
5472 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5473 
5474 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5475 			skb_set_hash(skb, hash, hash_type);
5476 
5477 		skb_record_rx_queue(skb, queue);
5478 		napi_gro_receive(&ch->rx_napi, skb);
5479 		skb = NULL;
5480 
5481 		rx_packets++;
5482 		rx_bytes += len;
5483 		count++;
5484 	}
5485 
5486 	if (status & rx_not_ls || skb) {
5487 		rx_q->state_saved = true;
5488 		rx_q->state.skb = skb;
5489 		rx_q->state.error = error;
5490 		rx_q->state.len = len;
5491 	}
5492 
5493 	stmmac_finalize_xdp_rx(priv, xdp_status);
5494 
5495 	stmmac_rx_refill(priv, queue);
5496 
5497 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5498 	rx_q->rxq_stats.rx_packets += rx_packets;
5499 	rx_q->rxq_stats.rx_bytes += rx_bytes;
5500 	rx_q->rxq_stats.rx_pkt_n += count;
5501 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5502 
5503 	priv->xstats.rx_dropped += rx_dropped;
5504 	priv->xstats.rx_errors += rx_errors;
5505 
5506 	return count;
5507 }
5508 
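/* RX NAPI poll handler: bump the per-queue napi_poll counter, process up to
 * @budget frames via stmmac_rx() and, once all work is done, re-enable the
 * RX DMA interrupt for this channel.
 */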
5509 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5510 {
5511 	struct stmmac_channel *ch =
5512 		container_of(napi, struct stmmac_channel, rx_napi);
5513 	struct stmmac_priv *priv = ch->priv_data;
5514 	struct stmmac_rx_queue *rx_q;
5515 	u32 chan = ch->index;
5516 	unsigned long flags;
5517 	int work_done;
5518 
5519 	rx_q = &priv->dma_conf.rx_queue[chan];
5520 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5521 	rx_q->rxq_stats.napi_poll++;
5522 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5523 
5524 	work_done = stmmac_rx(priv, budget, chan);
5525 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5526 		unsigned long flags;
5527 
5528 		spin_lock_irqsave(&ch->lock, flags);
5529 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5530 		spin_unlock_irqrestore(&ch->lock, flags);
5531 	}
5532 
5533 	return work_done;
5534 }
5535 
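/* TX NAPI poll handler: bump the per-queue napi_poll counter, reclaim
 * completed TX descriptors via stmmac_tx_clean() and, once all work is done,
 * re-enable the TX DMA interrupt for this channel.
 */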
5536 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5537 {
5538 	struct stmmac_channel *ch =
5539 		container_of(napi, struct stmmac_channel, tx_napi);
5540 	struct stmmac_priv *priv = ch->priv_data;
5541 	struct stmmac_tx_queue *tx_q;
5542 	u32 chan = ch->index;
5543 	unsigned long flags;
5544 	int work_done;
5545 
5546 	tx_q = &priv->dma_conf.tx_queue[chan];
5547 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
5548 	tx_q->txq_stats.napi_poll++;
5549 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
5550 
5551 	work_done = stmmac_tx_clean(priv, budget, chan);
5552 	work_done = min(work_done, budget);
5553 
5554 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5555 		unsigned long flags;
5556 
5557 		spin_lock_irqsave(&ch->lock, flags);
5558 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5559 		spin_unlock_irqrestore(&ch->lock, flags);
5560 	}
5561 
5562 	return work_done;
5563 }
5564 
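/* Combined RX/TX NAPI poll handler used for XDP zero-copy (XSK) channels:
 * clean the TX ring, run the zero-copy RX path and re-enable both DMA
 * interrupts only when neither direction has exhausted the budget.
 */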
5565 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5566 {
5567 	struct stmmac_channel *ch =
5568 		container_of(napi, struct stmmac_channel, rxtx_napi);
5569 	struct stmmac_priv *priv = ch->priv_data;
5570 	int rx_done, tx_done, rxtx_done;
5571 	struct stmmac_rx_queue *rx_q;
5572 	struct stmmac_tx_queue *tx_q;
5573 	u32 chan = ch->index;
5574 	unsigned long flags;
5575 
5576 	rx_q = &priv->dma_conf.rx_queue[chan];
5577 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5578 	rx_q->rxq_stats.napi_poll++;
5579 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5580 
5581 	tx_q = &priv->dma_conf.tx_queue[chan];
5582 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
5583 	tx_q->txq_stats.napi_poll++;
5584 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
5585 
5586 	tx_done = stmmac_tx_clean(priv, budget, chan);
5587 	tx_done = min(tx_done, budget);
5588 
5589 	rx_done = stmmac_rx_zc(priv, budget, chan);
5590 
5591 	rxtx_done = max(tx_done, rx_done);
5592 
5593 	/* If either TX or RX work is not complete, return budget
5594 	 * and keep polling
5595 	 */
5596 	if (rxtx_done >= budget)
5597 		return budget;
5598 
5599 	/* all work done, exit the polling mode */
5600 	if (napi_complete_done(napi, rxtx_done)) {
5601 		unsigned long flags;
5602 
5603 		spin_lock_irqsave(&ch->lock, flags);
5604 		/* Both RX and TX work are complete,
5605 		 * so enable both RX and TX IRQs.
5606 		 */
5607 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5608 		spin_unlock_irqrestore(&ch->lock, flags);
5609 	}
5610 
5611 	return min(rxtx_done, budget - 1);
5612 }
5613 
5614 /**
5615  *  stmmac_tx_timeout
5616  *  @dev : Pointer to net device structure
5617  *  @txqueue: the index of the hanging transmit queue
5618  *  Description: this function is called when a packet transmission fails to
5619  *   complete within a reasonable time. The driver will mark the error in the
5620  *   netdev structure and arrange for the device to be reset to a sane state
5621  *   in order to transmit a new packet.
5622  */
5623 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5624 {
5625 	struct stmmac_priv *priv = netdev_priv(dev);
5626 
5627 	stmmac_global_err(priv);
5628 }
5629 
5630 /**
5631  *  stmmac_set_rx_mode - entry point for multicast addressing
5632  *  @dev : pointer to the device structure
5633  *  Description:
5634  *  This function is a driver entry point which gets called by the kernel
5635  *  whenever multicast addresses must be enabled/disabled.
5636  *  Return value:
5637  *  void.
5638  */
5639 static void stmmac_set_rx_mode(struct net_device *dev)
5640 {
5641 	struct stmmac_priv *priv = netdev_priv(dev);
5642 
5643 	stmmac_set_filter(priv, priv->hw, dev);
5644 }
5645 
5646 /**
5647  *  stmmac_change_mtu - entry point to change MTU size for the device.
5648  *  @dev : device pointer.
5649  *  @new_mtu : the new MTU size for the device.
5650  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5651  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5652  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5653  *  Return value:
5654  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5655  *  file on failure.
5656  */
5657 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5658 {
5659 	struct stmmac_priv *priv = netdev_priv(dev);
5660 	int txfifosz = priv->plat->tx_fifo_size;
5661 	struct stmmac_dma_conf *dma_conf;
5662 	const int mtu = new_mtu;
5663 	int ret;
5664 
5665 	if (txfifosz == 0)
5666 		txfifosz = priv->dma_cap.tx_fifo_size;
5667 
5668 	txfifosz /= priv->plat->tx_queues_to_use;
5669 
5670 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5671 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5672 		return -EINVAL;
5673 	}
5674 
5675 	new_mtu = STMMAC_ALIGN(new_mtu);
5676 
5677 	/* Reject if the TX FIFO is too small or the MTU too large */
5678 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5679 		return -EINVAL;
5680 
5681 	if (netif_running(dev)) {
5682 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5683 		/* Try to allocate the new DMA conf with the new mtu */
5684 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5685 		if (IS_ERR(dma_conf)) {
5686 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5687 				   mtu);
5688 			return PTR_ERR(dma_conf);
5689 		}
5690 
5691 		stmmac_release(dev);
5692 
5693 		ret = __stmmac_open(dev, dma_conf);
5694 		if (ret) {
5695 			free_dma_desc_resources(priv, dma_conf);
5696 			kfree(dma_conf);
5697 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5698 			return ret;
5699 		}
5700 
5701 		kfree(dma_conf);
5702 
5703 		stmmac_set_rx_mode(dev);
5704 	}
5705 
5706 	dev->mtu = mtu;
5707 	netdev_update_features(dev);
5708 
5709 	return 0;
5710 }
5711 
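/* Drop features the hardware cannot support in its current configuration:
 * RX/TX checksum offload when COE is unavailable (or with bugged jumbo
 * frame support) and cache the TSO state requested through ethtool.
 */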
5712 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5713 					     netdev_features_t features)
5714 {
5715 	struct stmmac_priv *priv = netdev_priv(dev);
5716 
5717 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5718 		features &= ~NETIF_F_RXCSUM;
5719 
5720 	if (!priv->plat->tx_coe)
5721 		features &= ~NETIF_F_CSUM_MASK;
5722 
5723 	/* Some GMAC devices have a bugged Jumbo frame support that
5724 	 * needs to have the Tx COE disabled for oversized frames
5725 	 * (due to limited buffer sizes). In this case we disable
5726 	 * the TX csum insertion in the TDES and not use SF.
5727 	 */
5728 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5729 		features &= ~NETIF_F_CSUM_MASK;
5730 
5731 	/* Disable tso if asked by ethtool */
5732 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5733 		if (features & NETIF_F_TSO)
5734 			priv->tso = true;
5735 		else
5736 			priv->tso = false;
5737 	}
5738 
5739 	return features;
5740 }
5741 
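/* Apply the requested features: program RX checksum offload (IPC) in the
 * MAC and update Split Header on all RX channels accordingly.
 */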
5742 static int stmmac_set_features(struct net_device *netdev,
5743 			       netdev_features_t features)
5744 {
5745 	struct stmmac_priv *priv = netdev_priv(netdev);
5746 
5747 	/* Keep the COE type only if RX checksum offload is supported */
5748 	if (features & NETIF_F_RXCSUM)
5749 		priv->hw->rx_csum = priv->plat->rx_coe;
5750 	else
5751 		priv->hw->rx_csum = 0;
5752 	/* No check needed because rx_coe has been set before and it will be
5753 	 * fixed in case of an issue.
5754 	 */
5755 	stmmac_rx_ipc(priv, priv->hw);
5756 
5757 	if (priv->sph_cap) {
5758 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5759 		u32 chan;
5760 
5761 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5762 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5763 	}
5764 
5765 	return 0;
5766 }
5767 
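/* Track the Frame Preemption (FPE) verification handshake: update the local
 * and link-partner states from the mPacket events reported by the hardware,
 * answer a received verify mPacket and kick the FPE workqueue to complete
 * the handshake.
 */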
5768 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5769 {
5770 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5771 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5772 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5773 	bool *hs_enable = &fpe_cfg->hs_enable;
5774 
5775 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5776 		return;
5777 
5778 	/* If LP has sent verify mPacket, LP is FPE capable */
5779 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5780 		if (*lp_state < FPE_STATE_CAPABLE)
5781 			*lp_state = FPE_STATE_CAPABLE;
5782 
5783 		/* If the user has requested FPE enable, respond quickly */
5784 		if (*hs_enable)
5785 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5786 						MPACKET_RESPONSE);
5787 	}
5788 
5789 	/* If Local has sent verify mPacket, Local is FPE capable */
5790 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5791 		if (*lo_state < FPE_STATE_CAPABLE)
5792 			*lo_state = FPE_STATE_CAPABLE;
5793 	}
5794 
5795 	/* If LP has sent response mPacket, LP is entering FPE ON */
5796 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5797 		*lp_state = FPE_STATE_ENTERING_ON;
5798 
5799 	/* If Local has sent response mPacket, Local is entering FPE ON */
5800 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5801 		*lo_state = FPE_STATE_ENTERING_ON;
5802 
5803 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5804 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5805 	    priv->fpe_wq) {
5806 		queue_work(priv->fpe_wq, &priv->fpe_task);
5807 	}
5808 }
5809 
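/* Handle the non-DMA interrupt sources shared by all queues: EST, FPE,
 * MAC/MTL status (including LPI entry/exit), PCS link changes and the
 * timestamp interrupt.
 */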
5810 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5811 {
5812 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5813 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5814 	u32 queues_count;
5815 	u32 queue;
5816 	bool xmac;
5817 
5818 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5819 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5820 
5821 	if (priv->irq_wake)
5822 		pm_wakeup_event(priv->device, 0);
5823 
5824 	if (priv->dma_cap.estsel)
5825 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5826 				      &priv->xstats, tx_cnt);
5827 
5828 	if (priv->dma_cap.fpesel) {
5829 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5830 						   priv->dev);
5831 
5832 		stmmac_fpe_event_status(priv, status);
5833 	}
5834 
5835 	/* To handle the GMAC's own interrupts */
5836 	if ((priv->plat->has_gmac) || xmac) {
5837 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5838 
5839 		if (unlikely(status)) {
5840 			/* For LPI we need to save the tx status */
5841 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5842 				priv->tx_path_in_lpi_mode = true;
5843 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5844 				priv->tx_path_in_lpi_mode = false;
5845 		}
5846 
5847 		for (queue = 0; queue < queues_count; queue++) {
5848 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5849 							    queue);
5850 		}
5851 
5852 		/* PCS link status */
5853 		if (priv->hw->pcs &&
5854 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5855 			if (priv->xstats.pcs_link)
5856 				netif_carrier_on(priv->dev);
5857 			else
5858 				netif_carrier_off(priv->dev);
5859 		}
5860 
5861 		stmmac_timestamp_interrupt(priv, priv);
5862 	}
5863 }
5864 
5865 /**
5866  *  stmmac_interrupt - main ISR
5867  *  @irq: interrupt number.
5868  *  @dev_id: to pass the net device pointer.
5869  *  Description: this is the main driver interrupt service routine.
5870  *  It can call:
5871  *  o DMA service routine (to manage incoming frame reception and transmission
5872  *    status)
5873  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5874  *    interrupts.
5875  */
5876 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5877 {
5878 	struct net_device *dev = (struct net_device *)dev_id;
5879 	struct stmmac_priv *priv = netdev_priv(dev);
5880 
5881 	/* Check if adapter is up */
5882 	if (test_bit(STMMAC_DOWN, &priv->state))
5883 		return IRQ_HANDLED;
5884 
5885 	/* Check if a fatal error happened */
5886 	if (stmmac_safety_feat_interrupt(priv))
5887 		return IRQ_HANDLED;
5888 
5889 	/* To handle Common interrupts */
5890 	stmmac_common_interrupt(priv);
5891 
5892 	/* To handle DMA interrupts */
5893 	stmmac_dma_interrupt(priv);
5894 
5895 	return IRQ_HANDLED;
5896 }
5897 
5898 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5899 {
5900 	struct net_device *dev = (struct net_device *)dev_id;
5901 	struct stmmac_priv *priv = netdev_priv(dev);
5902 
5903 	if (unlikely(!dev)) {
5904 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5905 		return IRQ_NONE;
5906 	}
5907 
5908 	/* Check if adapter is up */
5909 	if (test_bit(STMMAC_DOWN, &priv->state))
5910 		return IRQ_HANDLED;
5911 
5912 	/* To handle Common interrupts */
5913 	stmmac_common_interrupt(priv);
5914 
5915 	return IRQ_HANDLED;
5916 }
5917 
5918 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5919 {
5920 	struct net_device *dev = (struct net_device *)dev_id;
5921 	struct stmmac_priv *priv = netdev_priv(dev);
5922 
5923 	if (unlikely(!dev)) {
5924 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5925 		return IRQ_NONE;
5926 	}
5927 
5928 	/* Check if adapter is up */
5929 	if (test_bit(STMMAC_DOWN, &priv->state))
5930 		return IRQ_HANDLED;
5931 
5932 	/* Check if a fatal error happened */
5933 	stmmac_safety_feat_interrupt(priv);
5934 
5935 	return IRQ_HANDLED;
5936 }
5937 
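/* Per-channel TX MSI handler: schedule NAPI for the channel and handle TX
 * DMA errors (DMA threshold bump or channel error recovery).
 */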
5938 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5939 {
5940 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5941 	struct stmmac_dma_conf *dma_conf;
5942 	int chan = tx_q->queue_index;
5943 	struct stmmac_priv *priv;
5944 	int status;
5945 
5946 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5947 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5948 
5949 	if (unlikely(!data)) {
5950 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5951 		return IRQ_NONE;
5952 	}
5953 
5954 	/* Check if adapter is up */
5955 	if (test_bit(STMMAC_DOWN, &priv->state))
5956 		return IRQ_HANDLED;
5957 
5958 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5959 
5960 	if (unlikely(status & tx_hard_error_bump_tc)) {
5961 		/* Try to bump up the dma threshold on this failure */
5962 		stmmac_bump_dma_threshold(priv, chan);
5963 	} else if (unlikely(status == tx_hard_error)) {
5964 		stmmac_tx_err(priv, chan);
5965 	}
5966 
5967 	return IRQ_HANDLED;
5968 }
5969 
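/* Per-channel RX MSI handler: simply schedule NAPI for the channel */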
5970 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5971 {
5972 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5973 	struct stmmac_dma_conf *dma_conf;
5974 	int chan = rx_q->queue_index;
5975 	struct stmmac_priv *priv;
5976 
5977 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5978 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5979 
5980 	if (unlikely(!data)) {
5981 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5982 		return IRQ_NONE;
5983 	}
5984 
5985 	/* Check if adapter is up */
5986 	if (test_bit(STMMAC_DOWN, &priv->state))
5987 		return IRQ_HANDLED;
5988 
5989 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
5990 
5991 	return IRQ_HANDLED;
5992 }
5993 
5994 #ifdef CONFIG_NET_POLL_CONTROLLER
5995 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5996  * to allow network I/O with interrupts disabled.
5997  */
5998 static void stmmac_poll_controller(struct net_device *dev)
5999 {
6000 	struct stmmac_priv *priv = netdev_priv(dev);
6001 	int i;
6002 
6003 	/* If adapter is down, do nothing */
6004 	if (test_bit(STMMAC_DOWN, &priv->state))
6005 		return;
6006 
6007 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
6008 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
6009 			stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
6010 
6011 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
6012 			stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
6013 	} else {
6014 		disable_irq(dev->irq);
6015 		stmmac_interrupt(dev->irq, dev);
6016 		enable_irq(dev->irq);
6017 	}
6018 }
6019 #endif
6020 
6021 /**
6022  *  stmmac_ioctl - Entry point for the Ioctl
6023  *  @dev: Device pointer.
6024  *  @rq: An IOCTL-specific structure that can contain a pointer to
6025  *  a proprietary structure used to pass information to the driver.
6026  *  @cmd: IOCTL command
6027  *  Description:
6028  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6029  */
6030 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6031 {
6032 	struct stmmac_priv *priv = netdev_priv(dev);
6033 	int ret = -EOPNOTSUPP;
6034 
6035 	if (!netif_running(dev))
6036 		return -EINVAL;
6037 
6038 	switch (cmd) {
6039 	case SIOCGMIIPHY:
6040 	case SIOCGMIIREG:
6041 	case SIOCSMIIREG:
6042 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6043 		break;
6044 	case SIOCSHWTSTAMP:
6045 		ret = stmmac_hwtstamp_set(dev, rq);
6046 		break;
6047 	case SIOCGHWTSTAMP:
6048 		ret = stmmac_hwtstamp_get(dev, rq);
6049 		break;
6050 	default:
6051 		break;
6052 	}
6053 
6054 	return ret;
6055 }
6056 
6057 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6058 				    void *cb_priv)
6059 {
6060 	struct stmmac_priv *priv = cb_priv;
6061 	int ret = -EOPNOTSUPP;
6062 
6063 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6064 		return ret;
6065 
6066 	__stmmac_disable_all_queues(priv);
6067 
6068 	switch (type) {
6069 	case TC_SETUP_CLSU32:
6070 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6071 		break;
6072 	case TC_SETUP_CLSFLOWER:
6073 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6074 		break;
6075 	default:
6076 		break;
6077 	}
6078 
6079 	stmmac_enable_all_queues(priv);
6080 	return ret;
6081 }
6082 
6083 static LIST_HEAD(stmmac_block_cb_list);
6084 
6085 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6086 			   void *type_data)
6087 {
6088 	struct stmmac_priv *priv = netdev_priv(ndev);
6089 
6090 	switch (type) {
6091 	case TC_QUERY_CAPS:
6092 		return stmmac_tc_query_caps(priv, priv, type_data);
6093 	case TC_SETUP_BLOCK:
6094 		return flow_block_cb_setup_simple(type_data,
6095 						  &stmmac_block_cb_list,
6096 						  stmmac_setup_tc_block_cb,
6097 						  priv, priv, true);
6098 	case TC_SETUP_QDISC_CBS:
6099 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6100 	case TC_SETUP_QDISC_TAPRIO:
6101 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6102 	case TC_SETUP_QDISC_ETF:
6103 		return stmmac_tc_setup_etf(priv, priv, type_data);
6104 	default:
6105 		return -EOPNOTSUPP;
6106 	}
6107 }
6108 
6109 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6110 			       struct net_device *sb_dev)
6111 {
6112 	int gso = skb_shinfo(skb)->gso_type;
6113 
6114 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6115 		/*
6116 		 * There is no way to determine the number of TSO/USO
6117 		 * capable queues. Always use queue 0, because if
6118 		 * TSO/USO is supported then at least this one will
6119 		 * be capable.
6120 		 */
6121 		return 0;
6122 	}
6123 
6124 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6125 }
6126 
6127 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6128 {
6129 	struct stmmac_priv *priv = netdev_priv(ndev);
6130 	int ret = 0;
6131 
6132 	ret = pm_runtime_resume_and_get(priv->device);
6133 	if (ret < 0)
6134 		return ret;
6135 
6136 	ret = eth_mac_addr(ndev, addr);
6137 	if (ret)
6138 		goto set_mac_error;
6139 
6140 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6141 
6142 set_mac_error:
6143 	pm_runtime_put(priv->device);
6144 
6145 	return ret;
6146 }
6147 
6148 #ifdef CONFIG_DEBUG_FS
6149 static struct dentry *stmmac_fs_dir;
6150 
6151 static void sysfs_display_ring(void *head, int size, int extend_desc,
6152 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6153 {
6154 	int i;
6155 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6156 	struct dma_desc *p = (struct dma_desc *)head;
6157 	dma_addr_t dma_addr;
6158 
6159 	for (i = 0; i < size; i++) {
6160 		if (extend_desc) {
6161 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6162 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6163 				   i, &dma_addr,
6164 				   le32_to_cpu(ep->basic.des0),
6165 				   le32_to_cpu(ep->basic.des1),
6166 				   le32_to_cpu(ep->basic.des2),
6167 				   le32_to_cpu(ep->basic.des3));
6168 			ep++;
6169 		} else {
6170 			dma_addr = dma_phy_addr + i * sizeof(*p);
6171 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6172 				   i, &dma_addr,
6173 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6174 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6175 			p++;
6176 		}
6177 		seq_printf(seq, "\n");
6178 	}
6179 }
6180 
6181 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6182 {
6183 	struct net_device *dev = seq->private;
6184 	struct stmmac_priv *priv = netdev_priv(dev);
6185 	u32 rx_count = priv->plat->rx_queues_to_use;
6186 	u32 tx_count = priv->plat->tx_queues_to_use;
6187 	u32 queue;
6188 
6189 	if ((dev->flags & IFF_UP) == 0)
6190 		return 0;
6191 
6192 	for (queue = 0; queue < rx_count; queue++) {
6193 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6194 
6195 		seq_printf(seq, "RX Queue %d:\n", queue);
6196 
6197 		if (priv->extend_desc) {
6198 			seq_printf(seq, "Extended descriptor ring:\n");
6199 			sysfs_display_ring((void *)rx_q->dma_erx,
6200 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6201 		} else {
6202 			seq_printf(seq, "Descriptor ring:\n");
6203 			sysfs_display_ring((void *)rx_q->dma_rx,
6204 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6205 		}
6206 	}
6207 
6208 	for (queue = 0; queue < tx_count; queue++) {
6209 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6210 
6211 		seq_printf(seq, "TX Queue %d:\n", queue);
6212 
6213 		if (priv->extend_desc) {
6214 			seq_printf(seq, "Extended descriptor ring:\n");
6215 			sysfs_display_ring((void *)tx_q->dma_etx,
6216 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6217 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6218 			seq_printf(seq, "Descriptor ring:\n");
6219 			sysfs_display_ring((void *)tx_q->dma_tx,
6220 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6221 		}
6222 	}
6223 
6224 	return 0;
6225 }
6226 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6227 
6228 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6229 {
6230 	static const char * const dwxgmac_timestamp_source[] = {
6231 		"None",
6232 		"Internal",
6233 		"External",
6234 		"Both",
6235 	};
6236 	static const char * const dwxgmac_safety_feature_desc[] = {
6237 		"No",
6238 		"All Safety Features with ECC and Parity",
6239 		"All Safety Features without ECC or Parity",
6240 		"All Safety Features with Parity Only",
6241 		"ECC Only",
6242 		"UNDEFINED",
6243 		"UNDEFINED",
6244 		"UNDEFINED",
6245 	};
6246 	struct net_device *dev = seq->private;
6247 	struct stmmac_priv *priv = netdev_priv(dev);
6248 
6249 	if (!priv->hw_cap_support) {
6250 		seq_printf(seq, "DMA HW features not supported\n");
6251 		return 0;
6252 	}
6253 
6254 	seq_printf(seq, "==============================\n");
6255 	seq_printf(seq, "\tDMA HW features\n");
6256 	seq_printf(seq, "==============================\n");
6257 
6258 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6259 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6260 	seq_printf(seq, "\t1000 Mbps: %s\n",
6261 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6262 	seq_printf(seq, "\tHalf duplex: %s\n",
6263 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6264 	if (priv->plat->has_xgmac) {
6265 		seq_printf(seq,
6266 			   "\tNumber of Additional MAC address registers: %d\n",
6267 			   priv->dma_cap.multi_addr);
6268 	} else {
6269 		seq_printf(seq, "\tHash Filter: %s\n",
6270 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6271 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6272 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6273 	}
6274 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6275 		   (priv->dma_cap.pcs) ? "Y" : "N");
6276 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6277 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6278 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6279 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6280 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6281 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6282 	seq_printf(seq, "\tRMON module: %s\n",
6283 		   (priv->dma_cap.rmon) ? "Y" : "N");
6284 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6285 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6286 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6287 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6288 	if (priv->plat->has_xgmac)
6289 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6290 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6291 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6292 		   (priv->dma_cap.eee) ? "Y" : "N");
6293 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6294 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6295 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6296 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6297 	    priv->plat->has_xgmac) {
6298 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6299 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6300 	} else {
6301 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6302 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6303 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6304 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6305 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6306 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6307 	}
6308 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6309 		   priv->dma_cap.number_rx_channel);
6310 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6311 		   priv->dma_cap.number_tx_channel);
6312 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6313 		   priv->dma_cap.number_rx_queues);
6314 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6315 		   priv->dma_cap.number_tx_queues);
6316 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6317 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6318 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6319 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6320 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6321 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6322 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6323 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6324 		   priv->dma_cap.pps_out_num);
6325 	seq_printf(seq, "\tSafety Features: %s\n",
6326 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6327 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6328 		   priv->dma_cap.frpsel ? "Y" : "N");
6329 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6330 		   priv->dma_cap.host_dma_width);
6331 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6332 		   priv->dma_cap.rssen ? "Y" : "N");
6333 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6334 		   priv->dma_cap.vlhash ? "Y" : "N");
6335 	seq_printf(seq, "\tSplit Header: %s\n",
6336 		   priv->dma_cap.sphen ? "Y" : "N");
6337 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6338 		   priv->dma_cap.vlins ? "Y" : "N");
6339 	seq_printf(seq, "\tDouble VLAN: %s\n",
6340 		   priv->dma_cap.dvlan ? "Y" : "N");
6341 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6342 		   priv->dma_cap.l3l4fnum);
6343 	seq_printf(seq, "\tARP Offloading: %s\n",
6344 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6345 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6346 		   priv->dma_cap.estsel ? "Y" : "N");
6347 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6348 		   priv->dma_cap.fpesel ? "Y" : "N");
6349 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6350 		   priv->dma_cap.tbssel ? "Y" : "N");
6351 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6352 		   priv->dma_cap.tbs_ch_num);
6353 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6354 		   priv->dma_cap.sgfsel ? "Y" : "N");
6355 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6356 		   BIT(priv->dma_cap.ttsfd) >> 1);
6357 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6358 		   priv->dma_cap.numtc);
6359 	seq_printf(seq, "\tDCB Feature: %s\n",
6360 		   priv->dma_cap.dcben ? "Y" : "N");
6361 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6362 		   priv->dma_cap.advthword ? "Y" : "N");
6363 	seq_printf(seq, "\tPTP Offload: %s\n",
6364 		   priv->dma_cap.ptoen ? "Y" : "N");
6365 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6366 		   priv->dma_cap.osten ? "Y" : "N");
6367 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6368 		   priv->dma_cap.pfcen ? "Y" : "N");
6369 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6370 		   BIT(priv->dma_cap.frpes) << 6);
6371 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6372 		   BIT(priv->dma_cap.frpbs) << 6);
6373 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6374 		   priv->dma_cap.frppipe_num);
6375 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6376 		   priv->dma_cap.nrvf_num ?
6377 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6378 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6379 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6380 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6381 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6382 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6383 		   priv->dma_cap.cbtisel ? "Y" : "N");
6384 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6385 		   priv->dma_cap.aux_snapshot_n);
6386 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6387 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6388 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6389 		   priv->dma_cap.edma ? "Y" : "N");
6390 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6391 		   priv->dma_cap.ediffc ? "Y" : "N");
6392 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6393 		   priv->dma_cap.vxn ? "Y" : "N");
6394 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6395 		   priv->dma_cap.dbgmem ? "Y" : "N");
6396 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6397 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6398 	return 0;
6399 }
6400 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6401 
6402 /* Use network device events to rename debugfs file entries.
6403  */
6404 static int stmmac_device_event(struct notifier_block *unused,
6405 			       unsigned long event, void *ptr)
6406 {
6407 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6408 	struct stmmac_priv *priv = netdev_priv(dev);
6409 
6410 	if (dev->netdev_ops != &stmmac_netdev_ops)
6411 		goto done;
6412 
6413 	switch (event) {
6414 	case NETDEV_CHANGENAME:
6415 		if (priv->dbgfs_dir)
6416 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6417 							 priv->dbgfs_dir,
6418 							 stmmac_fs_dir,
6419 							 dev->name);
6420 		break;
6421 	}
6422 done:
6423 	return NOTIFY_DONE;
6424 }
6425 
6426 static struct notifier_block stmmac_notifier = {
6427 	.notifier_call = stmmac_device_event,
6428 };
6429 
6430 static void stmmac_init_fs(struct net_device *dev)
6431 {
6432 	struct stmmac_priv *priv = netdev_priv(dev);
6433 
6434 	rtnl_lock();
6435 
6436 	/* Create per netdev entries */
6437 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6438 
6439 	/* Entry to report DMA RX/TX rings */
6440 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6441 			    &stmmac_rings_status_fops);
6442 
6443 	/* Entry to report the DMA HW features */
6444 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6445 			    &stmmac_dma_cap_fops);
6446 
6447 	rtnl_unlock();
6448 }
6449 
6450 static void stmmac_exit_fs(struct net_device *dev)
6451 {
6452 	struct stmmac_priv *priv = netdev_priv(dev);
6453 
6454 	debugfs_remove_recursive(priv->dbgfs_dir);
6455 }
6456 #endif /* CONFIG_DEBUG_FS */
6457 
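/* Compute the little-endian CRC-32 over the 12 VID bits of a VLAN tag,
 * as used by the hardware VLAN hash filter.
 */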
6458 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6459 {
6460 	unsigned char *data = (unsigned char *)&vid_le;
6461 	unsigned char data_byte = 0;
6462 	u32 crc = ~0x0;
6463 	u32 temp = 0;
6464 	int i, bits;
6465 
6466 	bits = get_bitmask_order(VLAN_VID_MASK);
6467 	for (i = 0; i < bits; i++) {
6468 		if ((i % 8) == 0)
6469 			data_byte = data[i / 8];
6470 
6471 		temp = ((crc & 1) ^ data_byte) & 1;
6472 		crc >>= 1;
6473 		data_byte >>= 1;
6474 
6475 		if (temp)
6476 			crc ^= 0xedb88320;
6477 	}
6478 
6479 	return crc;
6480 }
6481 
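/* Rebuild the VLAN filter from the active VLAN bitmap: program the VLAN
 * hash when the hardware supports it, otherwise fall back to a single
 * perfect-match VID.
 */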
6482 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6483 {
6484 	u32 crc, hash = 0;
6485 	__le16 pmatch = 0;
6486 	int count = 0;
6487 	u16 vid = 0;
6488 
6489 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6490 		__le16 vid_le = cpu_to_le16(vid);
6491 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6492 		hash |= (1 << crc);
6493 		count++;
6494 	}
6495 
6496 	if (!priv->dma_cap.vlhash) {
6497 		if (count > 2) /* VID = 0 always passes filter */
6498 			return -EOPNOTSUPP;
6499 
6500 		pmatch = cpu_to_le16(vid);
6501 		hash = 0;
6502 	}
6503 
6504 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6505 }
6506 
6507 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6508 {
6509 	struct stmmac_priv *priv = netdev_priv(ndev);
6510 	bool is_double = false;
6511 	int ret;
6512 
6513 	ret = pm_runtime_resume_and_get(priv->device);
6514 	if (ret < 0)
6515 		return ret;
6516 
6517 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6518 		is_double = true;
6519 
6520 	set_bit(vid, priv->active_vlans);
6521 	ret = stmmac_vlan_update(priv, is_double);
6522 	if (ret) {
6523 		clear_bit(vid, priv->active_vlans);
6524 		goto err_pm_put;
6525 	}
6526 
6527 	if (priv->hw->num_vlan) {
6528 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6529 		if (ret)
6530 			goto err_pm_put;
6531 	}
6532 err_pm_put:
6533 	pm_runtime_put(priv->device);
6534 
6535 	return ret;
6536 }
6537 
6538 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6539 {
6540 	struct stmmac_priv *priv = netdev_priv(ndev);
6541 	bool is_double = false;
6542 	int ret;
6543 
6544 	ret = pm_runtime_resume_and_get(priv->device);
6545 	if (ret < 0)
6546 		return ret;
6547 
6548 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6549 		is_double = true;
6550 
6551 	clear_bit(vid, priv->active_vlans);
6552 
6553 	if (priv->hw->num_vlan) {
6554 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6555 		if (ret)
6556 			goto del_vlan_error;
6557 	}
6558 
6559 	ret = stmmac_vlan_update(priv, is_double);
6560 
6561 del_vlan_error:
6562 	pm_runtime_put(priv->device);
6563 
6564 	return ret;
6565 }
6566 
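/* .ndo_bpf handler: attach/detach an XDP program or set up an XSK buffer
 * pool for a queue.
 */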
6567 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6568 {
6569 	struct stmmac_priv *priv = netdev_priv(dev);
6570 
6571 	switch (bpf->command) {
6572 	case XDP_SETUP_PROG:
6573 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6574 	case XDP_SETUP_XSK_POOL:
6575 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6576 					     bpf->xsk.queue_id);
6577 	default:
6578 		return -EOPNOTSUPP;
6579 	}
6580 }
6581 
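/* .ndo_xdp_xmit handler: transmit XDP frames on the XDP TX queue mapped to
 * the current CPU, sharing the netdev queue lock with the slow path.
 */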
6582 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6583 			   struct xdp_frame **frames, u32 flags)
6584 {
6585 	struct stmmac_priv *priv = netdev_priv(dev);
6586 	int cpu = smp_processor_id();
6587 	struct netdev_queue *nq;
6588 	int i, nxmit = 0;
6589 	int queue;
6590 
6591 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6592 		return -ENETDOWN;
6593 
6594 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6595 		return -EINVAL;
6596 
6597 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6598 	nq = netdev_get_tx_queue(priv->dev, queue);
6599 
6600 	__netif_tx_lock(nq, cpu);
6601 	/* Avoids TX time-out as we are sharing with slow path */
6602 	txq_trans_cond_update(nq);
6603 
6604 	for (i = 0; i < num_frames; i++) {
6605 		int res;
6606 
6607 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6608 		if (res == STMMAC_XDP_CONSUMED)
6609 			break;
6610 
6611 		nxmit++;
6612 	}
6613 
6614 	if (flags & XDP_XMIT_FLUSH) {
6615 		stmmac_flush_tx_descriptors(priv, queue);
6616 		stmmac_tx_timer_arm(priv, queue);
6617 	}
6618 
6619 	__netif_tx_unlock(nq);
6620 
6621 	return nxmit;
6622 }
6623 
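/* The helpers below tear down and rebuild a single RX/TX queue at runtime,
 * e.g. when an XSK buffer pool is attached to or detached from it.
 */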
6624 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6625 {
6626 	struct stmmac_channel *ch = &priv->channel[queue];
6627 	unsigned long flags;
6628 
6629 	spin_lock_irqsave(&ch->lock, flags);
6630 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6631 	spin_unlock_irqrestore(&ch->lock, flags);
6632 
6633 	stmmac_stop_rx_dma(priv, queue);
6634 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6635 }
6636 
6637 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6638 {
6639 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6640 	struct stmmac_channel *ch = &priv->channel[queue];
6641 	unsigned long flags;
6642 	u32 buf_size;
6643 	int ret;
6644 
6645 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6646 	if (ret) {
6647 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6648 		return;
6649 	}
6650 
6651 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6652 	if (ret) {
6653 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6654 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6655 		return;
6656 	}
6657 
6658 	stmmac_reset_rx_queue(priv, queue);
6659 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6660 
6661 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6662 			    rx_q->dma_rx_phy, rx_q->queue_index);
6663 
6664 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6665 			     sizeof(struct dma_desc));
6666 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6667 			       rx_q->rx_tail_addr, rx_q->queue_index);
6668 
6669 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6670 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6671 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6672 				      buf_size,
6673 				      rx_q->queue_index);
6674 	} else {
6675 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6676 				      priv->dma_conf.dma_buf_sz,
6677 				      rx_q->queue_index);
6678 	}
6679 
6680 	stmmac_start_rx_dma(priv, queue);
6681 
6682 	spin_lock_irqsave(&ch->lock, flags);
6683 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6684 	spin_unlock_irqrestore(&ch->lock, flags);
6685 }
6686 
6687 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6688 {
6689 	struct stmmac_channel *ch = &priv->channel[queue];
6690 	unsigned long flags;
6691 
6692 	spin_lock_irqsave(&ch->lock, flags);
6693 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6694 	spin_unlock_irqrestore(&ch->lock, flags);
6695 
6696 	stmmac_stop_tx_dma(priv, queue);
6697 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6698 }
6699 
6700 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6701 {
6702 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6703 	struct stmmac_channel *ch = &priv->channel[queue];
6704 	unsigned long flags;
6705 	int ret;
6706 
6707 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6708 	if (ret) {
6709 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6710 		return;
6711 	}
6712 
6713 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6714 	if (ret) {
6715 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6716 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6717 		return;
6718 	}
6719 
6720 	stmmac_reset_tx_queue(priv, queue);
6721 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6722 
6723 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6724 			    tx_q->dma_tx_phy, tx_q->queue_index);
6725 
6726 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6727 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6728 
6729 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6730 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6731 			       tx_q->tx_tail_addr, tx_q->queue_index);
6732 
6733 	stmmac_start_tx_dma(priv, queue);
6734 
6735 	spin_lock_irqsave(&ch->lock, flags);
6736 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6737 	spin_unlock_irqrestore(&ch->lock, flags);
6738 }
6739 
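/* Tear down the whole data path (NAPI, TX timers, IRQs, DMA channels and
 * descriptor resources), typically before installing or removing an XDP
 * program requires the interface to be rebuilt.
 */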
6740 void stmmac_xdp_release(struct net_device *dev)
6741 {
6742 	struct stmmac_priv *priv = netdev_priv(dev);
6743 	u32 chan;
6744 
6745 	/* Ensure tx function is not running */
6746 	netif_tx_disable(dev);
6747 
6748 	/* Disable NAPI process */
6749 	stmmac_disable_all_queues(priv);
6750 
6751 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6752 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6753 
6754 	/* Free the IRQ lines */
6755 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6756 
6757 	/* Stop TX/RX DMA channels */
6758 	stmmac_stop_all_dma(priv);
6759 
6760 	/* Release and free the Rx/Tx resources */
6761 	free_dma_desc_resources(priv, &priv->dma_conf);
6762 
6763 	/* Disable the MAC Rx/Tx */
6764 	stmmac_mac_set(priv, priv->ioaddr, false);
6765 
6766 	/* set trans_start so we don't get spurious
6767 	 * watchdogs during reset
6768 	 */
6769 	netif_trans_update(dev);
6770 	netif_carrier_off(dev);
6771 }
6772 
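/* Counterpart of stmmac_xdp_release(): rebuild the descriptor rings,
 * reprogram the DMA RX/TX channels (honouring any attached XSK pool buffer
 * size), re-request IRQs and restart the MAC, NAPI and TX queues.
 */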
6773 int stmmac_xdp_open(struct net_device *dev)
6774 {
6775 	struct stmmac_priv *priv = netdev_priv(dev);
6776 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6777 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6778 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6779 	struct stmmac_rx_queue *rx_q;
6780 	struct stmmac_tx_queue *tx_q;
6781 	u32 buf_size;
6782 	bool sph_en;
6783 	u32 chan;
6784 	int ret;
6785 
6786 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6787 	if (ret < 0) {
6788 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6789 			   __func__);
6790 		goto dma_desc_error;
6791 	}
6792 
6793 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6794 	if (ret < 0) {
6795 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6796 			   __func__);
6797 		goto init_error;
6798 	}
6799 
6800 	stmmac_reset_queues_param(priv);
6801 
6802 	/* DMA CSR Channel configuration */
6803 	for (chan = 0; chan < dma_csr_ch; chan++) {
6804 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6805 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6806 	}
6807 
6808 	/* Adjust Split header */
6809 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6810 
6811 	/* DMA RX Channel Configuration */
6812 	for (chan = 0; chan < rx_cnt; chan++) {
6813 		rx_q = &priv->dma_conf.rx_queue[chan];
6814 
6815 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6816 				    rx_q->dma_rx_phy, chan);
6817 
6818 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6819 				     (rx_q->buf_alloc_num *
6820 				      sizeof(struct dma_desc));
6821 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6822 				       rx_q->rx_tail_addr, chan);
6823 
6824 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6825 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6826 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6827 					      buf_size,
6828 					      rx_q->queue_index);
6829 		} else {
6830 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6831 					      priv->dma_conf.dma_buf_sz,
6832 					      rx_q->queue_index);
6833 		}
6834 
6835 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6836 	}
6837 
6838 	/* DMA TX Channel Configuration */
6839 	for (chan = 0; chan < tx_cnt; chan++) {
6840 		tx_q = &priv->dma_conf.tx_queue[chan];
6841 
6842 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6843 				    tx_q->dma_tx_phy, chan);
6844 
6845 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6846 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6847 				       tx_q->tx_tail_addr, chan);
6848 
6849 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6850 		tx_q->txtimer.function = stmmac_tx_timer;
6851 	}
6852 
6853 	/* Enable the MAC Rx/Tx */
6854 	stmmac_mac_set(priv, priv->ioaddr, true);
6855 
6856 	/* Start Rx & Tx DMA Channels */
6857 	stmmac_start_all_dma(priv);
6858 
6859 	ret = stmmac_request_irq(dev);
6860 	if (ret)
6861 		goto irq_error;
6862 
6863 	/* Enable NAPI process */
6864 	stmmac_enable_all_queues(priv);
6865 	netif_carrier_on(dev);
6866 	netif_tx_start_all_queues(dev);
6867 	stmmac_enable_all_dma_irq(priv);
6868 
6869 	return 0;
6870 
6871 irq_error:
6872 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6873 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6874 
6875 	stmmac_hw_teardown(dev);
6876 init_error:
6877 	free_dma_desc_resources(priv, &priv->dma_conf);
6878 dma_desc_error:
6879 	return ret;
6880 }
6881 
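/* .ndo_xsk_wakeup handler: kick the combined RX/TX NAPI of the given queue
 * so pending AF_XDP RX/TX work gets processed.
 */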
6882 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6883 {
6884 	struct stmmac_priv *priv = netdev_priv(dev);
6885 	struct stmmac_rx_queue *rx_q;
6886 	struct stmmac_tx_queue *tx_q;
6887 	struct stmmac_channel *ch;
6888 
6889 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6890 	    !netif_carrier_ok(priv->dev))
6891 		return -ENETDOWN;
6892 
6893 	if (!stmmac_xdp_is_enabled(priv))
6894 		return -EINVAL;
6895 
6896 	if (queue >= priv->plat->rx_queues_to_use ||
6897 	    queue >= priv->plat->tx_queues_to_use)
6898 		return -EINVAL;
6899 
6900 	rx_q = &priv->dma_conf.rx_queue[queue];
6901 	tx_q = &priv->dma_conf.tx_queue[queue];
6902 	ch = &priv->channel[queue];
6903 
6904 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6905 		return -EINVAL;
6906 
6907 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6908 		/* EQoS does not have a per-DMA-channel SW interrupt,
6909 		 * so we schedule the RX/TX NAPI straight away.
6910 		 */
6911 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6912 			__napi_schedule(&ch->rxtx_napi);
6913 	}
6914 
6915 	return 0;
6916 }
6917 
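/* .ndo_get_stats64 handler: aggregate the per-queue u64_stats counters and
 * the extended MAC error statistics into rtnl_link_stats64.
 */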
6918 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6919 {
6920 	struct stmmac_priv *priv = netdev_priv(dev);
6921 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6922 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6923 	unsigned int start;
6924 	int q;
6925 
6926 	for (q = 0; q < tx_cnt; q++) {
6927 		struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
6928 		u64 tx_packets;
6929 		u64 tx_bytes;
6930 
6931 		do {
6932 			start = u64_stats_fetch_begin(&txq_stats->syncp);
6933 			tx_packets = txq_stats->tx_packets;
6934 			tx_bytes   = txq_stats->tx_bytes;
6935 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6936 
6937 		stats->tx_packets += tx_packets;
6938 		stats->tx_bytes += tx_bytes;
6939 	}
6940 
6941 	for (q = 0; q < rx_cnt; q++) {
6942 		struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
6943 		u64 rx_packets;
6944 		u64 rx_bytes;
6945 
6946 		do {
6947 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
6948 			rx_packets = rxq_stats->rx_packets;
6949 			rx_bytes   = rxq_stats->rx_bytes;
6950 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6951 
6952 		stats->rx_packets += rx_packets;
6953 		stats->rx_bytes += rx_bytes;
6954 	}
6955 
6956 	stats->rx_dropped = priv->xstats.rx_dropped;
6957 	stats->rx_errors = priv->xstats.rx_errors;
6958 	stats->tx_dropped = priv->xstats.tx_dropped;
6959 	stats->tx_errors = priv->xstats.tx_errors;
6960 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6961 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6962 	stats->rx_length_errors = priv->xstats.rx_length;
6963 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6964 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6965 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6966 }
6967 
6968 static const struct net_device_ops stmmac_netdev_ops = {
6969 	.ndo_open = stmmac_open,
6970 	.ndo_start_xmit = stmmac_xmit,
6971 	.ndo_stop = stmmac_release,
6972 	.ndo_change_mtu = stmmac_change_mtu,
6973 	.ndo_fix_features = stmmac_fix_features,
6974 	.ndo_set_features = stmmac_set_features,
6975 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6976 	.ndo_tx_timeout = stmmac_tx_timeout,
6977 	.ndo_eth_ioctl = stmmac_ioctl,
6978 	.ndo_get_stats64 = stmmac_get_stats64,
6979 	.ndo_setup_tc = stmmac_setup_tc,
6980 	.ndo_select_queue = stmmac_select_queue,
6981 #ifdef CONFIG_NET_POLL_CONTROLLER
6982 	.ndo_poll_controller = stmmac_poll_controller,
6983 #endif
6984 	.ndo_set_mac_address = stmmac_set_mac_address,
6985 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6986 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6987 	.ndo_bpf = stmmac_bpf,
6988 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6989 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6990 };
6991 
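/* Restart the interface from the service workqueue once a fault handler
 * (typically the TX timeout path) has set STMMAC_RESET_REQUESTED. The
 * close/open cycle is performed under RTNL, mirroring what userspace
 * would do.
 */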
6992 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6993 {
6994 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6995 		return;
6996 	if (test_bit(STMMAC_DOWN, &priv->state))
6997 		return;
6998 
6999 	netdev_err(priv->dev, "Reset adapter.\n");
7000 
7001 	rtnl_lock();
7002 	netif_trans_update(priv->dev);
7003 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7004 		usleep_range(1000, 2000);
7005 
7006 	set_bit(STMMAC_DOWN, &priv->state);
7007 	dev_close(priv->dev);
7008 	dev_open(priv->dev, NULL);
7009 	clear_bit(STMMAC_DOWN, &priv->state);
7010 	clear_bit(STMMAC_RESETING, &priv->state);
7011 	rtnl_unlock();
7012 }
7013 
7014 static void stmmac_service_task(struct work_struct *work)
7015 {
7016 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7017 			service_task);
7018 
7019 	stmmac_reset_subtask(priv);
7020 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7021 }
7022 
7023 /**
7024  *  stmmac_hw_init - Init the MAC device
7025  *  @priv: driver private structure
7026  *  Description: this function is to configure the MAC device according to
7027  *  some platform parameters or the HW capability register. It prepares the
7028  *  driver to use either ring or chain modes and to setup either enhanced or
7029  *  normal descriptors.
7030  */
7031 static int stmmac_hw_init(struct stmmac_priv *priv)
7032 {
7033 	int ret;
7034 
	/* dwmac-sun8i only works in chain mode */
7036 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7037 		chain_mode = 1;
7038 	priv->chain_mode = chain_mode;
7039 
7040 	/* Initialize HW Interface */
7041 	ret = stmmac_hwif_init(priv);
7042 	if (ret)
7043 		return ret;
7044 
	/* Get the HW capabilities (supported on GMAC cores newer than 3.50a) */
7046 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7047 	if (priv->hw_cap_support) {
7048 		dev_info(priv->device, "DMA HW capability register supported\n");
7049 
		/* Some GMAC/DMA configuration fields passed through the
		 * platform data (e.g. enh_desc, tx_coe) can be overridden
		 * here with the values read from the HW capability
		 * register, if supported.
		 */
7055 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7056 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7057 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7058 		priv->hw->pmt = priv->plat->pmt;
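		/* hash_tb_sz encodes the multicast hash table size as a
		 * power of two: BIT(n) << 5 yields 32 * 2^n bins (64, 128,
		 * 256, ... for n >= 1), and mcast_bits_log2 caches its log2
		 * for later bit indexing.
		 */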
7059 		if (priv->dma_cap.hash_tb_sz) {
7060 			priv->hw->multicast_filter_bins =
7061 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7062 			priv->hw->mcast_bits_log2 =
7063 					ilog2(priv->hw->multicast_filter_bins);
7064 		}
7065 
7066 		/* TXCOE doesn't work in thresh DMA mode */
7067 		if (priv->plat->force_thresh_dma_mode)
7068 			priv->plat->tx_coe = 0;
7069 		else
7070 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7071 
		/* For GMAC4, rx_coe is taken from the HW capability register. */
7073 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7074 
7075 		if (priv->dma_cap.rx_coe_type2)
7076 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7077 		else if (priv->dma_cap.rx_coe_type1)
7078 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7079 
7080 	} else {
7081 		dev_info(priv->device, "No HW DMA feature register supported\n");
7082 	}
7083 
7084 	if (priv->plat->rx_coe) {
7085 		priv->hw->rx_csum = priv->plat->rx_coe;
7086 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7087 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7088 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7089 	}
7090 	if (priv->plat->tx_coe)
7091 		dev_info(priv->device, "TX Checksum insertion supported\n");
7092 
7093 	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On LAN supported\n");
7095 		device_set_wakeup_capable(priv->device, 1);
7096 	}
7097 
7098 	if (priv->dma_cap.tsoen)
7099 		dev_info(priv->device, "TSO supported\n");
7100 
7101 	priv->hw->vlan_fail_q_en =
7102 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7103 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7104 
7105 	/* Run HW quirks, if any */
7106 	if (priv->hwif_quirks) {
7107 		ret = priv->hwif_quirks(priv);
7108 		if (ret)
7109 			return ret;
7110 	}
7111 
	/* The RX Watchdog is available on cores newer than 3.40. On some
	 * buggy HW this feature has to be disabled, which can be done by
	 * setting the riwt_off field in the platform data.
	 */
7117 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7118 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7119 		priv->use_riwt = 1;
7120 		dev_info(priv->device,
7121 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7122 	}
7123 
7124 	return 0;
7125 }
7126 
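/* Each channel can expose up to three NAPI contexts: rx_napi and tx_napi
 * for the regular datapath, plus a combined rxtx_napi for queues that have
 * both an RX and a TX ring, which is the context used by the XSK
 * zero-copy path (see stmmac_xsk_wakeup()).
 */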
7127 static void stmmac_napi_add(struct net_device *dev)
7128 {
7129 	struct stmmac_priv *priv = netdev_priv(dev);
7130 	u32 queue, maxq;
7131 
7132 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7133 
7134 	for (queue = 0; queue < maxq; queue++) {
7135 		struct stmmac_channel *ch = &priv->channel[queue];
7136 
7137 		ch->priv_data = priv;
7138 		ch->index = queue;
7139 		spin_lock_init(&ch->lock);
7140 
7141 		if (queue < priv->plat->rx_queues_to_use) {
7142 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7143 		}
7144 		if (queue < priv->plat->tx_queues_to_use) {
7145 			netif_napi_add_tx(dev, &ch->tx_napi,
7146 					  stmmac_napi_poll_tx);
7147 		}
7148 		if (queue < priv->plat->rx_queues_to_use &&
7149 		    queue < priv->plat->tx_queues_to_use) {
7150 			netif_napi_add(dev, &ch->rxtx_napi,
7151 				       stmmac_napi_poll_rxtx);
7152 		}
7153 	}
7154 }
7155 
7156 static void stmmac_napi_del(struct net_device *dev)
7157 {
7158 	struct stmmac_priv *priv = netdev_priv(dev);
7159 	u32 queue, maxq;
7160 
7161 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7162 
7163 	for (queue = 0; queue < maxq; queue++) {
7164 		struct stmmac_channel *ch = &priv->channel[queue];
7165 
7166 		if (queue < priv->plat->rx_queues_to_use)
7167 			netif_napi_del(&ch->rx_napi);
7168 		if (queue < priv->plat->tx_queues_to_use)
7169 			netif_napi_del(&ch->tx_napi);
7170 		if (queue < priv->plat->rx_queues_to_use &&
7171 		    queue < priv->plat->tx_queues_to_use) {
7172 			netif_napi_del(&ch->rxtx_napi);
7173 		}
7174 	}
7175 }
7176 
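/* The two helpers below re-apply queue and ring configuration at runtime
 * (their callers live in the ethtool ops): the interface is torn down if
 * it is running, the new values are recorded, and stmmac_open() then
 * rebuilds the DMA rings with the updated parameters.
 */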
7177 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7178 {
7179 	struct stmmac_priv *priv = netdev_priv(dev);
7180 	int ret = 0, i;
7181 
7182 	if (netif_running(dev))
7183 		stmmac_release(dev);
7184 
7185 	stmmac_napi_del(dev);
7186 
7187 	priv->plat->rx_queues_to_use = rx_cnt;
7188 	priv->plat->tx_queues_to_use = tx_cnt;
7189 	if (!netif_is_rxfh_configured(dev))
7190 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7191 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7192 									rx_cnt);
7193 
7194 	stmmac_napi_add(dev);
7195 
7196 	if (netif_running(dev))
7197 		ret = stmmac_open(dev);
7198 
7199 	return ret;
7200 }
7201 
7202 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7203 {
7204 	struct stmmac_priv *priv = netdev_priv(dev);
7205 	int ret = 0;
7206 
7207 	if (netif_running(dev))
7208 		stmmac_release(dev);
7209 
7210 	priv->dma_conf.dma_rx_size = rx_size;
7211 	priv->dma_conf.dma_tx_size = tx_size;
7212 
7213 	if (netif_running(dev))
7214 		ret = stmmac_open(dev);
7215 
7216 	return ret;
7217 }
7218 
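/* IEEE 802.3 Frame Preemption (FPE) handshake: preemption is only enabled
 * once both the local station and the link partner have reached the
 * ENTERING_ON state. The worker below polls the two state variables for up
 * to 20 x 500 ms, re-sending verify mPackets while it waits.
 */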
#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7220 static void stmmac_fpe_lp_task(struct work_struct *work)
7221 {
7222 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7223 						fpe_task);
7224 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7225 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7226 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7227 	bool *hs_enable = &fpe_cfg->hs_enable;
7228 	bool *enable = &fpe_cfg->enable;
7229 	int retries = 20;
7230 
7231 	while (retries-- > 0) {
7232 		/* Bail out immediately if FPE handshake is OFF */
7233 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7234 			break;
7235 
7236 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7237 		    *lp_state == FPE_STATE_ENTERING_ON) {
7238 			stmmac_fpe_configure(priv, priv->ioaddr,
7239 					     priv->plat->tx_queues_to_use,
7240 					     priv->plat->rx_queues_to_use,
7241 					     *enable);
7242 
7243 			netdev_info(priv->dev, "configured FPE\n");
7244 
7245 			*lo_state = FPE_STATE_ON;
7246 			*lp_state = FPE_STATE_ON;
			netdev_info(priv->dev, "FPE handshake complete: both stations ON\n");
7248 			break;
7249 		}
7250 
7251 		if ((*lo_state == FPE_STATE_CAPABLE ||
7252 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7253 		     *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7255 				    *lo_state, *lp_state);
7256 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7257 						MPACKET_VERIFY);
7258 		}
7259 		/* Sleep then retry */
7260 		msleep(500);
7261 	}
7262 
7263 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7264 }
7265 
7266 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7267 {
7268 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7269 		if (enable) {
7270 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7271 						MPACKET_VERIFY);
7272 		} else {
7273 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7274 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7275 		}
7276 
7277 		priv->plat->fpe_cfg->hs_enable = enable;
7278 	}
7279 }
7280 
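/* XDP RX metadata hook: exposing this through xdp_metadata_ops lets XDP
 * programs fetch the hardware RX timestamp via the
 * bpf_xdp_metadata_rx_timestamp() kfunc.
 */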
7281 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7282 {
7283 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7284 	struct dma_desc *desc_contains_ts = ctx->desc;
7285 	struct stmmac_priv *priv = ctx->priv;
7286 	struct dma_desc *ndesc = ctx->ndesc;
7287 	struct dma_desc *desc = ctx->desc;
7288 	u64 ns = 0;
7289 
7290 	if (!priv->hwts_rx_en)
7291 		return -ENODATA;
7292 
7293 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7294 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7295 		desc_contains_ts = ndesc;
7296 
7297 	/* Check if timestamp is available */
7298 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7299 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7300 		ns -= priv->plat->cdc_error_adj;
7301 		*timestamp = ns_to_ktime(ns);
7302 		return 0;
7303 	}
7304 
7305 	return -ENODATA;
7306 }
7307 
7308 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7309 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7310 };
7311 
7312 /**
7313  * stmmac_dvr_probe
7314  * @device: device pointer
7315  * @plat_dat: platform data pointer
7316  * @res: stmmac resource pointer
 * Description: this is the main probe function; it allocates the
 * net_device via alloc_etherdev, sets up the driver private structure
 * and registers the device.
 * Return: 0 on success, otherwise a negative errno.
7321  */
7322 int stmmac_dvr_probe(struct device *device,
7323 		     struct plat_stmmacenet_data *plat_dat,
7324 		     struct stmmac_resources *res)
7325 {
7326 	struct net_device *ndev = NULL;
7327 	struct stmmac_priv *priv;
7328 	u32 rxq;
7329 	int i, ret = 0;
7330 
7331 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7332 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7333 	if (!ndev)
7334 		return -ENOMEM;
7335 
7336 	SET_NETDEV_DEV(ndev, device);
7337 
7338 	priv = netdev_priv(ndev);
7339 	priv->device = device;
7340 	priv->dev = ndev;
7341 
7342 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7343 		u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
7344 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7345 		u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
7346 
7347 	stmmac_set_ethtool_ops(ndev);
7348 	priv->pause = pause;
7349 	priv->plat = plat_dat;
7350 	priv->ioaddr = res->addr;
7351 	priv->dev->base_addr = (unsigned long)res->addr;
7352 	priv->plat->dma_cfg->multi_msi_en =
7353 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7354 
7355 	priv->dev->irq = res->irq;
7356 	priv->wol_irq = res->wol_irq;
7357 	priv->lpi_irq = res->lpi_irq;
7358 	priv->sfty_ce_irq = res->sfty_ce_irq;
7359 	priv->sfty_ue_irq = res->sfty_ue_irq;
7360 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7361 		priv->rx_irq[i] = res->rx_irq[i];
7362 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7363 		priv->tx_irq[i] = res->tx_irq[i];
7364 
7365 	if (!is_zero_ether_addr(res->mac))
7366 		eth_hw_addr_set(priv->dev, res->mac);
7367 
7368 	dev_set_drvdata(device, priv->dev);
7369 
7370 	/* Verify driver arguments */
7371 	stmmac_verify_args();
7372 
7373 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7374 	if (!priv->af_xdp_zc_qps)
7375 		return -ENOMEM;
7376 
7377 	/* Allocate workqueue */
7378 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7379 	if (!priv->wq) {
7380 		dev_err(priv->device, "failed to create workqueue\n");
7381 		ret = -ENOMEM;
7382 		goto error_wq_init;
7383 	}
7384 
7385 	INIT_WORK(&priv->service_task, stmmac_service_task);
7386 
7387 	/* Initialize Link Partner FPE workqueue */
7388 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7389 
	/* Override with kernel parameters if supplied.
	 * XXX: this needs to support multiple instances.
	 */
7393 	if ((phyaddr >= 0) && (phyaddr <= 31))
7394 		priv->plat->phy_addr = phyaddr;
7395 
7396 	if (priv->plat->stmmac_rst) {
7397 		ret = reset_control_assert(priv->plat->stmmac_rst);
7398 		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers provide only a reset callback
		 * instead of the assert + deassert callback pair.
		 */
7402 		if (ret == -ENOTSUPP)
7403 			reset_control_reset(priv->plat->stmmac_rst);
7404 	}
7405 
7406 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7407 	if (ret == -ENOTSUPP)
7408 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7409 			ERR_PTR(ret));
7410 
7411 	/* Init MAC and get the capabilities */
7412 	ret = stmmac_hw_init(priv);
7413 	if (ret)
7414 		goto error_hw_init;
7415 
7416 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7417 	 */
7418 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7419 		priv->plat->dma_cfg->dche = false;
7420 
7421 	stmmac_check_ether_addr(priv);
7422 
7423 	ndev->netdev_ops = &stmmac_netdev_ops;
7424 
7425 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7426 
7427 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7428 			    NETIF_F_RXCSUM;
7429 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7430 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7431 
7432 	ret = stmmac_tc_init(priv, priv);
7433 	if (!ret) {
7434 		ndev->hw_features |= NETIF_F_HW_TC;
7435 	}
7436 
7437 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7438 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7439 		if (priv->plat->has_gmac4)
7440 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7441 		priv->tso = true;
7442 		dev_info(priv->device, "TSO feature enabled\n");
7443 	}
7444 
7445 	if (priv->dma_cap.sphen &&
7446 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7447 		ndev->hw_features |= NETIF_F_GRO;
7448 		priv->sph_cap = true;
7449 		priv->sph = priv->sph_cap;
7450 		dev_info(priv->device, "SPH feature enabled\n");
7451 	}
7452 
7453 	/* Ideally our host DMA address width is the same as for the
7454 	 * device. However, it may differ and then we have to use our
7455 	 * host DMA width for allocation and the device DMA width for
7456 	 * register handling.
7457 	 */
7458 	if (priv->plat->host_dma_width)
7459 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7460 	else
7461 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7462 
7463 	if (priv->dma_cap.host_dma_width) {
7464 		ret = dma_set_mask_and_coherent(device,
7465 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7466 		if (!ret) {
7467 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7468 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7469 
7470 			/*
7471 			 * If more than 32 bits can be addressed, make sure to
7472 			 * enable enhanced addressing mode.
7473 			 */
7474 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7475 				priv->plat->dma_cfg->eame = true;
7476 		} else {
7477 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7478 			if (ret) {
7479 				dev_err(priv->device, "Failed to set DMA Mask\n");
7480 				goto error_hw_init;
7481 			}
7482 
7483 			priv->dma_cap.host_dma_width = 32;
7484 		}
7485 	}
7486 
7487 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7488 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7489 #ifdef STMMAC_VLAN_TAG_USED
7490 	/* Both mac100 and gmac support receive VLAN tag detection */
7491 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7492 	if (priv->dma_cap.vlhash) {
7493 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7494 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7495 	}
7496 	if (priv->dma_cap.vlins) {
7497 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7498 		if (priv->dma_cap.dvlan)
7499 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7500 	}
7501 #endif
7502 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7503 
7504 	priv->xstats.threshold = tc;
7505 
7506 	/* Initialize RSS */
7507 	rxq = priv->plat->rx_queues_to_use;
7508 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7509 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7510 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7511 
7512 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7513 		ndev->features |= NETIF_F_RXHASH;
7514 
7515 	ndev->vlan_features |= ndev->features;
7516 	/* TSO doesn't work on VLANs yet */
7517 	ndev->vlan_features &= ~NETIF_F_TSO;
7518 
7519 	/* MTU range: 46 - hw-specific max */
7520 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7521 	if (priv->plat->has_xgmac)
7522 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7523 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7524 		ndev->max_mtu = JUMBO_LEN;
7525 	else
7526 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu is larger than
	 * ndev->max_mtu, nor if plat->maxmtu is below ndev->min_mtu, which
	 * is an invalid range.
	 */
7530 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7531 	    (priv->plat->maxmtu >= ndev->min_mtu))
7532 		ndev->max_mtu = priv->plat->maxmtu;
7533 	else if (priv->plat->maxmtu < ndev->min_mtu)
7534 		dev_warn(priv->device,
7535 			 "%s: warning: maxmtu having invalid value (%d)\n",
7536 			 __func__, priv->plat->maxmtu);
7537 
7538 	if (flow_ctrl)
7539 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7540 
7541 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7542 
7543 	/* Setup channels NAPI */
7544 	stmmac_napi_add(ndev);
7545 
7546 	mutex_init(&priv->lock);
7547 
	/* If a specific clk_csr value is passed from the platform, the CSR
	 * Clock Range selection is fixed and cannot be changed at run-time.
	 * Otherwise, the driver tries to set the MDC clock dynamically
	 * according to the actual CSR input clock.
	 */
7554 	if (priv->plat->clk_csr >= 0)
7555 		priv->clk_csr = priv->plat->clk_csr;
7556 	else
7557 		stmmac_clk_csr_set(priv);
7558 
7559 	stmmac_check_pcs_mode(priv);
7560 
7561 	pm_runtime_get_noresume(device);
7562 	pm_runtime_set_active(device);
7563 	if (!pm_runtime_enabled(device))
7564 		pm_runtime_enable(device);
7565 
7566 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7567 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7568 		/* MDIO bus Registration */
7569 		ret = stmmac_mdio_register(ndev);
7570 		if (ret < 0) {
7571 			dev_err_probe(priv->device, ret,
7572 				      "%s: MDIO bus (id: %d) registration failed\n",
7573 				      __func__, priv->plat->bus_id);
7574 			goto error_mdio_register;
7575 		}
7576 	}
7577 
7578 	if (priv->plat->speed_mode_2500)
7579 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7580 
7581 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7582 		ret = stmmac_xpcs_setup(priv->mii);
7583 		if (ret)
7584 			goto error_xpcs_setup;
7585 	}
7586 
7587 	ret = stmmac_phy_setup(priv);
7588 	if (ret) {
7589 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7590 		goto error_phy_setup;
7591 	}
7592 
7593 	ret = register_netdev(ndev);
7594 	if (ret) {
7595 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7596 			__func__, ret);
7597 		goto error_netdev_register;
7598 	}
7599 
7600 #ifdef CONFIG_DEBUG_FS
7601 	stmmac_init_fs(ndev);
7602 #endif
7603 
7604 	if (priv->plat->dump_debug_regs)
7605 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7606 
7607 	/* Let pm_runtime_put() disable the clocks.
7608 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7609 	 */
7610 	pm_runtime_put(device);
7611 
7612 	return ret;
7613 
7614 error_netdev_register:
7615 	phylink_destroy(priv->phylink);
7616 error_xpcs_setup:
7617 error_phy_setup:
7618 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7619 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7620 		stmmac_mdio_unregister(ndev);
7621 error_mdio_register:
7622 	stmmac_napi_del(ndev);
7623 error_hw_init:
7624 	destroy_workqueue(priv->wq);
7625 error_wq_init:
7626 	bitmap_free(priv->af_xdp_zc_qps);
7627 
7628 	return ret;
7629 }
7630 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7631 
7632 /**
7633  * stmmac_dvr_remove
7634  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
7637  */
7638 void stmmac_dvr_remove(struct device *dev)
7639 {
7640 	struct net_device *ndev = dev_get_drvdata(dev);
7641 	struct stmmac_priv *priv = netdev_priv(ndev);
7642 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7644 
7645 	pm_runtime_get_sync(dev);
7646 
7647 	stmmac_stop_all_dma(priv);
7648 	stmmac_mac_set(priv, priv->ioaddr, false);
7649 	netif_carrier_off(ndev);
7650 	unregister_netdev(ndev);
7651 
7652 #ifdef CONFIG_DEBUG_FS
7653 	stmmac_exit_fs(ndev);
7654 #endif
7655 	phylink_destroy(priv->phylink);
7656 	if (priv->plat->stmmac_rst)
7657 		reset_control_assert(priv->plat->stmmac_rst);
7658 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7659 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7660 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7661 		stmmac_mdio_unregister(ndev);
7662 	destroy_workqueue(priv->wq);
7663 	mutex_destroy(&priv->lock);
7664 	bitmap_free(priv->af_xdp_zc_qps);
7665 
7666 	pm_runtime_disable(dev);
7667 	pm_runtime_put_noidle(dev);
7668 }
7669 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7670 
7671 /**
7672  * stmmac_suspend - suspend callback
7673  * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queues, program the PMT register
 * (for WoL) and release the driver resources.
7677  */
7678 int stmmac_suspend(struct device *dev)
7679 {
7680 	struct net_device *ndev = dev_get_drvdata(dev);
7681 	struct stmmac_priv *priv = netdev_priv(ndev);
7682 	u32 chan;
7683 
7684 	if (!ndev || !netif_running(ndev))
7685 		return 0;
7686 
7687 	mutex_lock(&priv->lock);
7688 
7689 	netif_device_detach(ndev);
7690 
7691 	stmmac_disable_all_queues(priv);
7692 
7693 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7694 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7695 
7696 	if (priv->eee_enabled) {
7697 		priv->tx_path_in_lpi_mode = false;
7698 		del_timer_sync(&priv->eee_ctrl_timer);
7699 	}
7700 
7701 	/* Stop TX/RX DMA */
7702 	stmmac_stop_all_dma(priv);
7703 
7704 	if (priv->plat->serdes_powerdown)
7705 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7706 
7707 	/* Enable Power down mode by programming the PMT regs */
7708 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7709 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7710 		priv->irq_wake = 1;
7711 	} else {
7712 		stmmac_mac_set(priv, priv->ioaddr, false);
7713 		pinctrl_pm_select_sleep_state(priv->device);
7714 	}
7715 
7716 	mutex_unlock(&priv->lock);
7717 
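	/* phylink_suspend() and phylink_speed_down() expect the RTNL lock
	 * to be held, hence it is taken here rather than priv->lock.
	 */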
7718 	rtnl_lock();
7719 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7720 		phylink_suspend(priv->phylink, true);
7721 	} else {
7722 		if (device_may_wakeup(priv->device))
7723 			phylink_speed_down(priv->phylink, false);
7724 		phylink_suspend(priv->phylink, false);
7725 	}
7726 	rtnl_unlock();
7727 
7728 	if (priv->dma_cap.fpesel) {
7729 		/* Disable FPE */
7730 		stmmac_fpe_configure(priv, priv->ioaddr,
7731 				     priv->plat->tx_queues_to_use,
7732 				     priv->plat->rx_queues_to_use, false);
7733 
7734 		stmmac_fpe_handshake(priv, false);
7735 		stmmac_fpe_stop_wq(priv);
7736 	}
7737 
7738 	priv->speed = SPEED_UNKNOWN;
7739 	return 0;
7740 }
7741 EXPORT_SYMBOL_GPL(stmmac_suspend);
7742 
7743 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7744 {
7745 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7746 
7747 	rx_q->cur_rx = 0;
7748 	rx_q->dirty_rx = 0;
7749 }
7750 
7751 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7752 {
7753 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7754 
7755 	tx_q->cur_tx = 0;
7756 	tx_q->dirty_tx = 0;
7757 	tx_q->mss = 0;
7758 
7759 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7760 }
7761 
7762 /**
7763  * stmmac_reset_queues_param - reset queue parameters
7764  * @priv: device pointer
7765  */
7766 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7767 {
7768 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7769 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7770 	u32 queue;
7771 
7772 	for (queue = 0; queue < rx_cnt; queue++)
7773 		stmmac_reset_rx_queue(priv, queue);
7774 
7775 	for (queue = 0; queue < tx_cnt; queue++)
7776 		stmmac_reset_tx_queue(priv, queue);
7777 }
7778 
7779 /**
7780  * stmmac_resume - resume callback
7781  * @dev: device pointer
 * Description: on resume, this function is invoked to bring the DMA and the
 * core back into a usable state.
7784  */
7785 int stmmac_resume(struct device *dev)
7786 {
7787 	struct net_device *ndev = dev_get_drvdata(dev);
7788 	struct stmmac_priv *priv = netdev_priv(ndev);
7789 	int ret;
7790 
7791 	if (!netif_running(ndev))
7792 		return 0;
7793 
	/* The Power Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * It is still better to clear it manually, because it can cause
	 * problems when resuming from another device (e.g. a serial
	 * console).
	 */
7800 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7801 		mutex_lock(&priv->lock);
7802 		stmmac_pmt(priv, priv->hw, 0);
7803 		mutex_unlock(&priv->lock);
7804 		priv->irq_wake = 0;
7805 	} else {
7806 		pinctrl_pm_select_default_state(priv->device);
7807 		/* reset the phy so that it's ready */
7808 		if (priv->mii)
7809 			stmmac_mdio_reset(priv->mii);
7810 	}
7811 
7812 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7813 	    priv->plat->serdes_powerup) {
7814 		ret = priv->plat->serdes_powerup(ndev,
7815 						 priv->plat->bsp_priv);
7816 
7817 		if (ret < 0)
7818 			return ret;
7819 	}
7820 
7821 	rtnl_lock();
7822 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7823 		phylink_resume(priv->phylink);
7824 	} else {
7825 		phylink_resume(priv->phylink);
7826 		if (device_may_wakeup(priv->device))
7827 			phylink_speed_up(priv->phylink);
7828 	}
7829 	rtnl_unlock();
7830 
7831 	rtnl_lock();
7832 	mutex_lock(&priv->lock);
7833 
7834 	stmmac_reset_queues_param(priv);
7835 
7836 	stmmac_free_tx_skbufs(priv);
7837 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7838 
7839 	stmmac_hw_setup(ndev, false);
7840 	stmmac_init_coalesce(priv);
7841 	stmmac_set_rx_mode(ndev);
7842 
7843 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7844 
7845 	stmmac_enable_all_queues(priv);
7846 	stmmac_enable_all_dma_irq(priv);
7847 
7848 	mutex_unlock(&priv->lock);
7849 	rtnl_unlock();
7850 
7851 	netif_device_attach(ndev);
7852 
7853 	return 0;
7854 }
7855 EXPORT_SYMBOL_GPL(stmmac_resume);
7856 
7857 #ifndef MODULE
7858 static int __init stmmac_cmdline_opt(char *str)
7859 {
7860 	char *opt;
7861 
7862 	if (!str || !*str)
7863 		return 1;
7864 	while ((opt = strsep(&str, ",")) != NULL) {
7865 		if (!strncmp(opt, "debug:", 6)) {
7866 			if (kstrtoint(opt + 6, 0, &debug))
7867 				goto err;
7868 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7869 			if (kstrtoint(opt + 8, 0, &phyaddr))
7870 				goto err;
7871 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7872 			if (kstrtoint(opt + 7, 0, &buf_sz))
7873 				goto err;
7874 		} else if (!strncmp(opt, "tc:", 3)) {
7875 			if (kstrtoint(opt + 3, 0, &tc))
7876 				goto err;
7877 		} else if (!strncmp(opt, "watchdog:", 9)) {
7878 			if (kstrtoint(opt + 9, 0, &watchdog))
7879 				goto err;
7880 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7881 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7882 				goto err;
7883 		} else if (!strncmp(opt, "pause:", 6)) {
7884 			if (kstrtoint(opt + 6, 0, &pause))
7885 				goto err;
7886 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7887 			if (kstrtoint(opt + 10, 0, &eee_timer))
7888 				goto err;
7889 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7890 			if (kstrtoint(opt + 11, 0, &chain_mode))
7891 				goto err;
7892 		}
7893 	}
7894 	return 1;
7895 
7896 err:
	pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
7898 	return 1;
7899 }
7900 
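/* For built-in (non-module) configurations, the parameters above can be
 * overridden from the kernel command line, e.g.:
 *
 *	stmmaceth=debug:16,watchdog:5000,chain_mode:1
 *
 * Unknown keys are silently ignored; malformed values only log an error.
 */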
7901 __setup("stmmaceth=", stmmac_cmdline_opt);
7902 #endif /* MODULE */
7903 
7904 static int __init stmmac_init(void)
7905 {
7906 #ifdef CONFIG_DEBUG_FS
7907 	/* Create debugfs main directory if it doesn't exist yet */
7908 	if (!stmmac_fs_dir)
7909 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7910 	register_netdevice_notifier(&stmmac_notifier);
7911 #endif
7912 
7913 	return 0;
7914 }
7915 
7916 static void __exit stmmac_exit(void)
7917 {
7918 #ifdef CONFIG_DEBUG_FS
7919 	unregister_netdevice_notifier(&stmmac_notifier);
7920 	debugfs_remove_recursive(stmmac_fs_dir);
7921 #endif
7922 }
7923 
7924 module_init(stmmac_init)
7925 module_exit(stmmac_exit)
7926 
7927 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7928 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7929 MODULE_LICENSE("GPL");
7930