1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
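/* Worked example for STMMAC_ALIGN() above (illustrative only): assuming
 * SMP_CACHE_BYTES == 64, STMMAC_ALIGN(1500) expands to
 * ALIGN(ALIGN(1500, 64), 16) = ALIGN(1536, 16) = 1536, i.e. lengths are
 * rounded up to a cache line first and then to a 16-byte boundary.
 */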
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allows the user to force use of the chain instead of the ring.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
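/* Minimal usage sketch (illustrative only, not part of the driver): a
 * hypothetical runtime-PM callback could gate the bus clocks like this:
 *
 *	static int example_runtime_suspend(struct device *dev)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(dev);
 *
 *		return stmmac_bus_clks_config(netdev_priv(ndev), false);
 *	}
 *
 * Enabling takes stmmac_clk, then pclk, then the platform clks_config()
 * hook; disabling releases the clocks and then calls the hook, mirroring
 * the enable path above.
 */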
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid for
308 	 * all cases except the ones handled below.
309 	 * For clock rates higher than the IEEE 802.3 specified range we
310 	 * cannot estimate the proper divider, because the frequency of
311 	 * clk_csr_i is not known, so the default divider is left
312 	 * unchanged.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
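/* Example mapping for the generic (non-sun8i, non-XGMAC) path above: a CSR
 * clock of 125 MHz falls in the 100-150 MHz band, so priv->clk_csr is set
 * to STMMAC_CSR_100_150M and the MDC divider is picked accordingly by the
 * MAC core (numbers are illustrative only).
 */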
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
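/* Worked example for the ring arithmetic above (illustrative numbers):
 * with dma_tx_size = 512, cur_tx = 10 and dirty_tx = 5 the producer is
 * ahead of the cleaner, so avail = 512 - 10 + 5 - 1 = 506 descriptors;
 * one slot is always kept free so cur_tx never catches up with dirty_tx.
 */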
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
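/* Analogous RX example (illustrative numbers): with dma_rx_size = 512,
 * cur_rx = 20 and dirty_rx = 500 (i.e. cur_rx has wrapped), dirty equals
 * 512 - 500 + 20 = 32 descriptors waiting to be refilled.
 */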
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter LPI mode
405  * @priv: driver private structure
406  * Description: this function verifies that all TX queues are idle and, if so,
407  * enters LPI mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the LPI state
433  * is active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and we are not already in the LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* When using the PCS we cannot access the PHY registers at this stage,
475 	 * so we do not support extra features such as EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
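/* Note on the two EEE paths above: on GMAC4-class hardware with an LPI entry
 * timer (tx_lpi_timer <= STMMAC_ET_MAX) the MAC enters LPI autonomously and
 * the software eee_ctrl_timer is stopped; otherwise the driver falls back to
 * the software timer, which stmmac_eee_ctrl_timer() re-arms while TX work is
 * still pending.
 */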
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function reads the timestamp from the descriptor, performs some
534  * sanity checks and passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function reads the received packet's timestamp from the descriptor
577  * and passes it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL-specific structure that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate negative errno on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01,
662 			 * which leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping.
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
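/* Userspace reaches this handler through the SIOCSHWTSTAMP ioctl. A minimal
 * sketch of the caller side (illustrative only, error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (char *)&cfg };
 *
 *	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "eth0");
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter may have been widened to the closest filter the
 * hardware actually supports (see the switch statement above).
 */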
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL-specific structure that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * It is rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate the default addend value; the formula is:
858 	 * addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate
859 	 * so that the 32-bit accumulator clocked at clk_ptp_rate overflows
860 	 * (adding sec_inc ns each time) 1e9 / sec_inc times per second.
861 	 */
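	/* Illustrative numbers (assuming fine correction, where sec_inc is
	 * programmed to twice the PTP clock period): with a 50 MHz
	 * clk_ptp_rate, sec_inc = 40 ns, 1e9 / sec_inc = 25 MHz, and the
	 * addend becomes 2^32 * 25 MHz / 50 MHz = 2^31, i.e. mid-range.
	 */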
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
968 	} else {
969 		*lo_state = FPE_STATE_OFF;
970 		*lp_state = FPE_STATE_OFF;
971 	}
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	priv->eee_active = false;
981 	priv->tx_lpi_enabled = false;
982 	priv->eee_enabled = stmmac_eee_init(priv);
983 	stmmac_set_eee_pls(priv, priv->hw, false);
984 
985 	if (priv->dma_cap.fpesel)
986 		stmmac_fpe_link_state_handle(priv, false);
987 }
988 
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 			       struct phy_device *phy,
991 			       unsigned int mode, phy_interface_t interface,
992 			       int speed, int duplex,
993 			       bool tx_pause, bool rx_pause)
994 {
995 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 	u32 old_ctrl, ctrl;
997 
998 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 	    priv->plat->serdes_powerup)
1000 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001 
1002 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004 
1005 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 		switch (speed) {
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_5000:
1011 			ctrl |= priv->hw->link.xgmii.speed5000;
1012 			break;
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.xgmii.speed2500;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 		switch (speed) {
1021 		case SPEED_100000:
1022 			ctrl |= priv->hw->link.xlgmii.speed100000;
1023 			break;
1024 		case SPEED_50000:
1025 			ctrl |= priv->hw->link.xlgmii.speed50000;
1026 			break;
1027 		case SPEED_40000:
1028 			ctrl |= priv->hw->link.xlgmii.speed40000;
1029 			break;
1030 		case SPEED_25000:
1031 			ctrl |= priv->hw->link.xlgmii.speed25000;
1032 			break;
1033 		case SPEED_10000:
1034 			ctrl |= priv->hw->link.xgmii.speed10000;
1035 			break;
1036 		case SPEED_2500:
1037 			ctrl |= priv->hw->link.speed2500;
1038 			break;
1039 		case SPEED_1000:
1040 			ctrl |= priv->hw->link.speed1000;
1041 			break;
1042 		default:
1043 			return;
1044 		}
1045 	} else {
1046 		switch (speed) {
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		case SPEED_100:
1054 			ctrl |= priv->hw->link.speed100;
1055 			break;
1056 		case SPEED_10:
1057 			ctrl |= priv->hw->link.speed10;
1058 			break;
1059 		default:
1060 			return;
1061 		}
1062 	}
1063 
1064 	priv->speed = speed;
1065 
1066 	if (priv->plat->fix_mac_speed)
1067 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068 
1069 	if (!duplex)
1070 		ctrl &= ~priv->hw->link.duplex;
1071 	else
1072 		ctrl |= priv->hw->link.duplex;
1073 
1074 	/* Flow Control operation */
1075 	if (rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_AUTO;
1077 	else if (rx_pause && !tx_pause)
1078 		priv->flow_ctrl = FLOW_RX;
1079 	else if (!rx_pause && tx_pause)
1080 		priv->flow_ctrl = FLOW_TX;
1081 	else
1082 		priv->flow_ctrl = FLOW_OFF;
1083 
1084 	stmmac_mac_flow_ctrl(priv, duplex);
1085 
1086 	if (ctrl != old_ctrl)
1087 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088 
1089 	stmmac_mac_set(priv, priv->ioaddr, true);
1090 	if (phy && priv->dma_cap.eee) {
1091 		priv->eee_active =
1092 			phy_init_eee(phy, !(priv->plat->flags &
1093 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 		priv->eee_enabled = stmmac_eee_init(priv);
1095 		priv->tx_lpi_enabled = priv->eee_enabled;
1096 		stmmac_set_eee_pls(priv, priv->hw, true);
1097 	}
1098 
1099 	if (priv->dma_cap.fpesel)
1100 		stmmac_fpe_link_state_handle(priv, true);
1101 
1102 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 		stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105 
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 	.mac_select_pcs = stmmac_mac_select_pcs,
1108 	.mac_config = stmmac_mac_config,
1109 	.mac_link_down = stmmac_mac_link_down,
1110 	.mac_link_up = stmmac_mac_link_up,
1111 };
1112 
1113 /**
1114  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1115  * @priv: driver private structure
1116  * Description: this is to verify if the HW supports the PCS.
1117  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1118  * configured for the TBI, RTBI, or SGMII PHY interface.
1119  */
1120 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1121 {
1122 	int interface = priv->plat->interface;
1123 
1124 	if (priv->dma_cap.pcs) {
1125 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1129 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_RGMII;
1131 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1132 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1133 			priv->hw->pcs = STMMAC_PCS_SGMII;
1134 		}
1135 	}
1136 }
1137 
1138 /**
1139  * stmmac_init_phy - PHY initialization
1140  * @dev: net device structure
1141  * Description: it initializes the driver's PHY state, and attaches the PHY
1142  * to the mac driver.
1143  *  Return value:
1144  *  0 on success, a negative errno on failure
1145  */
1146 static int stmmac_init_phy(struct net_device *dev)
1147 {
1148 	struct stmmac_priv *priv = netdev_priv(dev);
1149 	struct fwnode_handle *phy_fwnode;
1150 	struct fwnode_handle *fwnode;
1151 	int ret;
1152 
1153 	if (!phylink_expects_phy(priv->phylink))
1154 		return 0;
1155 
1156 	fwnode = priv->plat->port_node;
1157 	if (!fwnode)
1158 		fwnode = dev_fwnode(priv->device);
1159 
1160 	if (fwnode)
1161 		phy_fwnode = fwnode_get_phy_node(fwnode);
1162 	else
1163 		phy_fwnode = NULL;
1164 
1165 	/* Some DT bindings do not set up the PHY handle, so try to
1166 	 * parse it manually.
1167 	 */
1168 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1169 		int addr = priv->plat->phy_addr;
1170 		struct phy_device *phydev;
1171 
1172 		if (addr < 0) {
1173 			netdev_err(priv->dev, "no phy found\n");
1174 			return -ENODEV;
1175 		}
1176 
1177 		phydev = mdiobus_get_phy(priv->mii, addr);
1178 		if (!phydev) {
1179 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1180 			return -ENODEV;
1181 		}
1182 
1183 		ret = phylink_connect_phy(priv->phylink, phydev);
1184 	} else {
1185 		fwnode_handle_put(phy_fwnode);
1186 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 	}
1188 
1189 	if (!priv->plat->pmt) {
1190 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191 
1192 		phylink_ethtool_get_wol(priv->phylink, &wol);
1193 		device_set_wakeup_capable(priv->device, !!wol.supported);
1194 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 static int stmmac_phy_setup(struct stmmac_priv *priv)
1201 {
1202 	struct stmmac_mdio_bus_data *mdio_bus_data;
1203 	int mode = priv->plat->phy_interface;
1204 	struct fwnode_handle *fwnode;
1205 	struct phylink *phylink;
1206 	int max_speed;
1207 
1208 	priv->phylink_config.dev = &priv->dev->dev;
1209 	priv->phylink_config.type = PHYLINK_NETDEV;
1210 	priv->phylink_config.mac_managed_pm = true;
1211 
1212 	mdio_bus_data = priv->plat->mdio_bus_data;
1213 	if (mdio_bus_data)
1214 		priv->phylink_config.ovr_an_inband =
1215 			mdio_bus_data->xpcs_an_inband;
1216 
1217 	/* Set the platform/firmware specified interface mode */
1218 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1219 
1220 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1221 	if (priv->hw->xpcs)
1222 		xpcs_get_interfaces(priv->hw->xpcs,
1223 				    priv->phylink_config.supported_interfaces);
1224 
1225 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1226 						MAC_10FD | MAC_100FD |
1227 						MAC_1000FD;
1228 
1229 	/* Half-duplex can only work with a single queue */
1230 	if (priv->plat->tx_queues_to_use <= 1)
1231 		priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
1232 							 MAC_1000HD;
1233 
1234 	/* Get the MAC specific capabilities */
1235 	stmmac_mac_phylink_get_caps(priv);
1236 
1237 	max_speed = priv->plat->max_speed;
1238 	if (max_speed)
1239 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1240 
1241 	fwnode = priv->plat->port_node;
1242 	if (!fwnode)
1243 		fwnode = dev_fwnode(priv->device);
1244 
1245 	phylink = phylink_create(&priv->phylink_config, fwnode,
1246 				 mode, &stmmac_phylink_mac_ops);
1247 	if (IS_ERR(phylink))
1248 		return PTR_ERR(phylink);
1249 
1250 	priv->phylink = phylink;
1251 	return 0;
1252 }
1253 
1254 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1255 				    struct stmmac_dma_conf *dma_conf)
1256 {
1257 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1258 	unsigned int desc_size;
1259 	void *head_rx;
1260 	u32 queue;
1261 
1262 	/* Display RX rings */
1263 	for (queue = 0; queue < rx_cnt; queue++) {
1264 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1265 
1266 		pr_info("\tRX Queue %u rings\n", queue);
1267 
1268 		if (priv->extend_desc) {
1269 			head_rx = (void *)rx_q->dma_erx;
1270 			desc_size = sizeof(struct dma_extended_desc);
1271 		} else {
1272 			head_rx = (void *)rx_q->dma_rx;
1273 			desc_size = sizeof(struct dma_desc);
1274 		}
1275 
1276 		/* Display RX ring */
1277 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1278 				    rx_q->dma_rx_phy, desc_size);
1279 	}
1280 }
1281 
1282 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1283 				    struct stmmac_dma_conf *dma_conf)
1284 {
1285 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1286 	unsigned int desc_size;
1287 	void *head_tx;
1288 	u32 queue;
1289 
1290 	/* Display TX rings */
1291 	for (queue = 0; queue < tx_cnt; queue++) {
1292 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1293 
1294 		pr_info("\tTX Queue %d rings\n", queue);
1295 
1296 		if (priv->extend_desc) {
1297 			head_tx = (void *)tx_q->dma_etx;
1298 			desc_size = sizeof(struct dma_extended_desc);
1299 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1300 			head_tx = (void *)tx_q->dma_entx;
1301 			desc_size = sizeof(struct dma_edesc);
1302 		} else {
1303 			head_tx = (void *)tx_q->dma_tx;
1304 			desc_size = sizeof(struct dma_desc);
1305 		}
1306 
1307 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1308 				    tx_q->dma_tx_phy, desc_size);
1309 	}
1310 }
1311 
1312 static void stmmac_display_rings(struct stmmac_priv *priv,
1313 				 struct stmmac_dma_conf *dma_conf)
1314 {
1315 	/* Display RX ring */
1316 	stmmac_display_rx_rings(priv, dma_conf);
1317 
1318 	/* Display TX ring */
1319 	stmmac_display_tx_rings(priv, dma_conf);
1320 }
1321 
1322 static int stmmac_set_bfsize(int mtu, int bufsize)
1323 {
1324 	int ret = bufsize;
1325 
1326 	if (mtu >= BUF_SIZE_8KiB)
1327 		ret = BUF_SIZE_16KiB;
1328 	else if (mtu >= BUF_SIZE_4KiB)
1329 		ret = BUF_SIZE_8KiB;
1330 	else if (mtu >= BUF_SIZE_2KiB)
1331 		ret = BUF_SIZE_4KiB;
1332 	else if (mtu > DEFAULT_BUFSIZE)
1333 		ret = BUF_SIZE_2KiB;
1334 	else
1335 		ret = DEFAULT_BUFSIZE;
1336 
1337 	return ret;
1338 }
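/* Example of the MTU-to-buffer mapping above: an MTU of 3000 bytes is >=
 * BUF_SIZE_2KiB but < BUF_SIZE_4KiB, so the DMA buffer size becomes
 * BUF_SIZE_4KiB, while a standard 1500-byte MTU keeps DEFAULT_BUFSIZE (1536).
 */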
1339 
1340 /**
1341  * stmmac_clear_rx_descriptors - clear RX descriptors
1342  * @priv: driver private structure
1343  * @dma_conf: structure to take the dma data
1344  * @queue: RX queue index
1345  * Description: this function is called to clear the RX descriptors
1346  * whether basic or extended descriptors are used.
1347  */
1348 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1349 					struct stmmac_dma_conf *dma_conf,
1350 					u32 queue)
1351 {
1352 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1353 	int i;
1354 
1355 	/* Clear the RX descriptors */
1356 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1357 		if (priv->extend_desc)
1358 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1359 					priv->use_riwt, priv->mode,
1360 					(i == dma_conf->dma_rx_size - 1),
1361 					dma_conf->dma_buf_sz);
1362 		else
1363 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1364 					priv->use_riwt, priv->mode,
1365 					(i == dma_conf->dma_rx_size - 1),
1366 					dma_conf->dma_buf_sz);
1367 }
1368 
1369 /**
1370  * stmmac_clear_tx_descriptors - clear tx descriptors
1371  * @priv: driver private structure
1372  * @dma_conf: structure to take the dma data
1373  * @queue: TX queue index.
1374  * Description: this function is called to clear the TX descriptors
1375  * whether basic or extended descriptors are used.
1376  */
1377 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1378 					struct stmmac_dma_conf *dma_conf,
1379 					u32 queue)
1380 {
1381 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1382 	int i;
1383 
1384 	/* Clear the TX descriptors */
1385 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1386 		int last = (i == (dma_conf->dma_tx_size - 1));
1387 		struct dma_desc *p;
1388 
1389 		if (priv->extend_desc)
1390 			p = &tx_q->dma_etx[i].basic;
1391 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1392 			p = &tx_q->dma_entx[i].basic;
1393 		else
1394 			p = &tx_q->dma_tx[i];
1395 
1396 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1397 	}
1398 }
1399 
1400 /**
1401  * stmmac_clear_descriptors - clear descriptors
1402  * @priv: driver private structure
1403  * @dma_conf: structure to take the dma data
1404  * Description: this function is called to clear the TX and RX descriptors
1405  * whether basic or extended descriptors are used.
1406  */
1407 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1408 				     struct stmmac_dma_conf *dma_conf)
1409 {
1410 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1411 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1412 	u32 queue;
1413 
1414 	/* Clear the RX descriptors */
1415 	for (queue = 0; queue < rx_queue_cnt; queue++)
1416 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1417 
1418 	/* Clear the TX descriptors */
1419 	for (queue = 0; queue < tx_queue_cnt; queue++)
1420 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1421 }
1422 
1423 /**
1424  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1425  * @priv: driver private structure
1426  * @dma_conf: structure to take the dma data
1427  * @p: descriptor pointer
1428  * @i: descriptor index
1429  * @flags: gfp flag
1430  * @queue: RX queue index
1431  * Description: this function is called to allocate a receive buffer, perform
1432  * the DMA mapping and init the descriptor.
1433  */
1434 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1435 				  struct stmmac_dma_conf *dma_conf,
1436 				  struct dma_desc *p,
1437 				  int i, gfp_t flags, u32 queue)
1438 {
1439 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1440 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1441 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1442 
1443 	if (priv->dma_cap.host_dma_width <= 32)
1444 		gfp |= GFP_DMA32;
1445 
1446 	if (!buf->page) {
1447 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1448 		if (!buf->page)
1449 			return -ENOMEM;
1450 		buf->page_offset = stmmac_rx_offset(priv);
1451 	}
1452 
1453 	if (priv->sph && !buf->sec_page) {
1454 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1455 		if (!buf->sec_page)
1456 			return -ENOMEM;
1457 
1458 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1459 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1460 	} else {
1461 		buf->sec_page = NULL;
1462 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1463 	}
1464 
1465 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1466 
1467 	stmmac_set_desc_addr(priv, p, buf->addr);
1468 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1469 		stmmac_init_desc3(priv, p);
1470 
1471 	return 0;
1472 }
1473 
1474 /**
1475  * stmmac_free_rx_buffer - free RX dma buffers
1476  * @priv: private structure
1477  * @rx_q: RX queue
1478  * @i: buffer index.
1479  */
1480 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1481 				  struct stmmac_rx_queue *rx_q,
1482 				  int i)
1483 {
1484 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1485 
1486 	if (buf->page)
1487 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1488 	buf->page = NULL;
1489 
1490 	if (buf->sec_page)
1491 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1492 	buf->sec_page = NULL;
1493 }
1494 
1495 /**
1496  * stmmac_free_tx_buffer - free TX dma buffers
1497  * @priv: private structure
1498  * @dma_conf: structure to take the dma data
1499  * @queue: TX queue index
1500  * @i: buffer index.
1501  */
1502 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1503 				  struct stmmac_dma_conf *dma_conf,
1504 				  u32 queue, int i)
1505 {
1506 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1507 
1508 	if (tx_q->tx_skbuff_dma[i].buf &&
1509 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1510 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1511 			dma_unmap_page(priv->device,
1512 				       tx_q->tx_skbuff_dma[i].buf,
1513 				       tx_q->tx_skbuff_dma[i].len,
1514 				       DMA_TO_DEVICE);
1515 		else
1516 			dma_unmap_single(priv->device,
1517 					 tx_q->tx_skbuff_dma[i].buf,
1518 					 tx_q->tx_skbuff_dma[i].len,
1519 					 DMA_TO_DEVICE);
1520 	}
1521 
1522 	if (tx_q->xdpf[i] &&
1523 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1524 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1525 		xdp_return_frame(tx_q->xdpf[i]);
1526 		tx_q->xdpf[i] = NULL;
1527 	}
1528 
1529 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1530 		tx_q->xsk_frames_done++;
1531 
1532 	if (tx_q->tx_skbuff[i] &&
1533 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1534 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1535 		tx_q->tx_skbuff[i] = NULL;
1536 	}
1537 
1538 	tx_q->tx_skbuff_dma[i].buf = 0;
1539 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1540 }
1541 
1542 /**
1543  * dma_free_rx_skbufs - free RX dma buffers
1544  * @priv: private structure
1545  * @dma_conf: structure to take the dma data
1546  * @queue: RX queue index
1547  */
1548 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1549 			       struct stmmac_dma_conf *dma_conf,
1550 			       u32 queue)
1551 {
1552 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1553 	int i;
1554 
1555 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1556 		stmmac_free_rx_buffer(priv, rx_q, i);
1557 }
1558 
1559 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1560 				   struct stmmac_dma_conf *dma_conf,
1561 				   u32 queue, gfp_t flags)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1567 		struct dma_desc *p;
1568 		int ret;
1569 
1570 		if (priv->extend_desc)
1571 			p = &((rx_q->dma_erx + i)->basic);
1572 		else
1573 			p = rx_q->dma_rx + i;
1574 
1575 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1576 					     queue);
1577 		if (ret)
1578 			return ret;
1579 
1580 		rx_q->buf_alloc_num++;
1581 	}
1582 
1583 	return 0;
1584 }
1585 
1586 /**
1587  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1588  * @priv: private structure
1589  * @dma_conf: structure to take the dma data
1590  * @queue: RX queue index
1591  */
1592 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1593 				struct stmmac_dma_conf *dma_conf,
1594 				u32 queue)
1595 {
1596 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1597 	int i;
1598 
1599 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1600 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1601 
1602 		if (!buf->xdp)
1603 			continue;
1604 
1605 		xsk_buff_free(buf->xdp);
1606 		buf->xdp = NULL;
1607 	}
1608 }
1609 
1610 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1611 				      struct stmmac_dma_conf *dma_conf,
1612 				      u32 queue)
1613 {
1614 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1615 	int i;
1616 
1617 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1618 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1619 	 * use this macro to make sure there are no size violations.
1620 	 */
1621 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1622 
1623 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1624 		struct stmmac_rx_buffer *buf;
1625 		dma_addr_t dma_addr;
1626 		struct dma_desc *p;
1627 
1628 		if (priv->extend_desc)
1629 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1630 		else
1631 			p = rx_q->dma_rx + i;
1632 
1633 		buf = &rx_q->buf_pool[i];
1634 
1635 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1636 		if (!buf->xdp)
1637 			return -ENOMEM;
1638 
1639 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1640 		stmmac_set_desc_addr(priv, p, dma_addr);
1641 		rx_q->buf_alloc_num++;
1642 	}
1643 
1644 	return 0;
1645 }
1646 
1647 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1648 {
1649 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1650 		return NULL;
1651 
1652 	return xsk_get_pool_from_qid(priv->dev, queue);
1653 }
1654 
1655 /**
1656  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1657  * @priv: driver private structure
1658  * @dma_conf: structure to take the dma data
1659  * @queue: RX queue index
1660  * @flags: gfp flag.
1661  * Description: this function initializes the DMA RX descriptors
1662  * and allocates the socket buffers. It supports the chained and ring
1663  * modes.
1664  */
1665 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1666 				    struct stmmac_dma_conf *dma_conf,
1667 				    u32 queue, gfp_t flags)
1668 {
1669 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1670 	int ret;
1671 
1672 	netif_dbg(priv, probe, priv->dev,
1673 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1674 		  (u32)rx_q->dma_rx_phy);
1675 
1676 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1677 
1678 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1679 
1680 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1681 
1682 	if (rx_q->xsk_pool) {
1683 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1684 						   MEM_TYPE_XSK_BUFF_POOL,
1685 						   NULL));
1686 		netdev_info(priv->dev,
1687 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1688 			    rx_q->queue_index);
1689 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1690 	} else {
1691 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1692 						   MEM_TYPE_PAGE_POOL,
1693 						   rx_q->page_pool));
1694 		netdev_info(priv->dev,
1695 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1696 			    rx_q->queue_index);
1697 	}
1698 
1699 	if (rx_q->xsk_pool) {
1700 		/* RX XDP ZC buffer pool may not be populated, e.g.
1701 		 * xdpsock TX-only.
1702 		 */
1703 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1704 	} else {
1705 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1706 		if (ret < 0)
1707 			return -ENOMEM;
1708 	}
1709 
1710 	/* Setup the chained descriptor addresses */
1711 	if (priv->mode == STMMAC_CHAIN_MODE) {
1712 		if (priv->extend_desc)
1713 			stmmac_mode_init(priv, rx_q->dma_erx,
1714 					 rx_q->dma_rx_phy,
1715 					 dma_conf->dma_rx_size, 1);
1716 		else
1717 			stmmac_mode_init(priv, rx_q->dma_rx,
1718 					 rx_q->dma_rx_phy,
1719 					 dma_conf->dma_rx_size, 0);
1720 	}
1721 
1722 	return 0;
1723 }
1724 
1725 static int init_dma_rx_desc_rings(struct net_device *dev,
1726 				  struct stmmac_dma_conf *dma_conf,
1727 				  gfp_t flags)
1728 {
1729 	struct stmmac_priv *priv = netdev_priv(dev);
1730 	u32 rx_count = priv->plat->rx_queues_to_use;
1731 	int queue;
1732 	int ret;
1733 
1734 	/* RX INITIALIZATION */
1735 	netif_dbg(priv, probe, priv->dev,
1736 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1737 
1738 	for (queue = 0; queue < rx_count; queue++) {
1739 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1740 		if (ret)
1741 			goto err_init_rx_buffers;
1742 	}
1743 
1744 	return 0;
1745 
1746 err_init_rx_buffers:
1747 	while (queue >= 0) {
1748 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1749 
1750 		if (rx_q->xsk_pool)
1751 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1752 		else
1753 			dma_free_rx_skbufs(priv, dma_conf, queue);
1754 
1755 		rx_q->buf_alloc_num = 0;
1756 		rx_q->xsk_pool = NULL;
1757 
1758 		queue--;
1759 	}
1760 
1761 	return ret;
1762 }
1763 
1764 /**
1765  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1766  * @priv: driver private structure
1767  * @dma_conf: structure to take the dma data
1768  * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors and
 * clears the per-descriptor buffer bookkeeping. It supports the chained
 * and ring modes.
1772  */
1773 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1774 				    struct stmmac_dma_conf *dma_conf,
1775 				    u32 queue)
1776 {
1777 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1778 	int i;
1779 
1780 	netif_dbg(priv, probe, priv->dev,
1781 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1782 		  (u32)tx_q->dma_tx_phy);
1783 
1784 	/* Setup the chained descriptor addresses */
1785 	if (priv->mode == STMMAC_CHAIN_MODE) {
1786 		if (priv->extend_desc)
1787 			stmmac_mode_init(priv, tx_q->dma_etx,
1788 					 tx_q->dma_tx_phy,
1789 					 dma_conf->dma_tx_size, 1);
1790 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1791 			stmmac_mode_init(priv, tx_q->dma_tx,
1792 					 tx_q->dma_tx_phy,
1793 					 dma_conf->dma_tx_size, 0);
1794 	}
1795 
1796 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1797 
1798 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1799 		struct dma_desc *p;
1800 
1801 		if (priv->extend_desc)
1802 			p = &((tx_q->dma_etx + i)->basic);
1803 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1804 			p = &((tx_q->dma_entx + i)->basic);
1805 		else
1806 			p = tx_q->dma_tx + i;
1807 
1808 		stmmac_clear_desc(priv, p);
1809 
1810 		tx_q->tx_skbuff_dma[i].buf = 0;
1811 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1812 		tx_q->tx_skbuff_dma[i].len = 0;
1813 		tx_q->tx_skbuff_dma[i].last_segment = false;
1814 		tx_q->tx_skbuff[i] = NULL;
1815 	}
1816 
1817 	return 0;
1818 }
1819 
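/**
 * init_dma_tx_desc_rings - init the TX descriptor rings of all queues
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: it initializes the TX descriptor ring of every queue in use.
 */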
1820 static int init_dma_tx_desc_rings(struct net_device *dev,
1821 				  struct stmmac_dma_conf *dma_conf)
1822 {
1823 	struct stmmac_priv *priv = netdev_priv(dev);
1824 	u32 tx_queue_cnt;
1825 	u32 queue;
1826 
1827 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1828 
1829 	for (queue = 0; queue < tx_queue_cnt; queue++)
1830 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1831 
1832 	return 0;
1833 }
1834 
1835 /**
1836  * init_dma_desc_rings - init the RX/TX descriptor rings
1837  * @dev: net device structure
1838  * @dma_conf: structure to take the dma data
1839  * @flags: gfp flag.
1840  * Description: this function initializes the DMA RX/TX descriptors
1841  * and allocates the socket buffers. It supports the chained and ring
1842  * modes.
1843  */
1844 static int init_dma_desc_rings(struct net_device *dev,
1845 			       struct stmmac_dma_conf *dma_conf,
1846 			       gfp_t flags)
1847 {
1848 	struct stmmac_priv *priv = netdev_priv(dev);
1849 	int ret;
1850 
1851 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1852 	if (ret)
1853 		return ret;
1854 
1855 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1856 
1857 	stmmac_clear_descriptors(priv, dma_conf);
1858 
1859 	if (netif_msg_hw(priv))
1860 		stmmac_display_rings(priv, dma_conf);
1861 
1862 	return ret;
1863 }
1864 
1865 /**
1866  * dma_free_tx_skbufs - free TX dma buffers
1867  * @priv: private structure
1868  * @dma_conf: structure to take the dma data
1869  * @queue: TX queue index
1870  */
1871 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1872 			       struct stmmac_dma_conf *dma_conf,
1873 			       u32 queue)
1874 {
1875 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1876 	int i;
1877 
1878 	tx_q->xsk_frames_done = 0;
1879 
1880 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1881 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1882 
1883 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1884 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1885 		tx_q->xsk_frames_done = 0;
1886 		tx_q->xsk_pool = NULL;
1887 	}
1888 }
1889 
1890 /**
1891  * stmmac_free_tx_skbufs - free TX skb buffers
1892  * @priv: private structure
1893  */
1894 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1895 {
1896 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1897 	u32 queue;
1898 
1899 	for (queue = 0; queue < tx_queue_cnt; queue++)
1900 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1901 }
1902 
1903 /**
1904  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1905  * @priv: private structure
1906  * @dma_conf: structure to take the dma data
1907  * @queue: RX queue index
1908  */
1909 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1910 					 struct stmmac_dma_conf *dma_conf,
1911 					 u32 queue)
1912 {
1913 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1914 
1915 	/* Release the DMA RX socket buffers */
1916 	if (rx_q->xsk_pool)
1917 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1918 	else
1919 		dma_free_rx_skbufs(priv, dma_conf, queue);
1920 
1921 	rx_q->buf_alloc_num = 0;
1922 	rx_q->xsk_pool = NULL;
1923 
1924 	/* Free DMA regions of consistent memory previously allocated */
1925 	if (!priv->extend_desc)
1926 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1927 				  sizeof(struct dma_desc),
1928 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1929 	else
1930 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1931 				  sizeof(struct dma_extended_desc),
1932 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1933 
1934 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1935 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1936 
1937 	kfree(rx_q->buf_pool);
1938 	if (rx_q->page_pool)
1939 		page_pool_destroy(rx_q->page_pool);
1940 }
1941 
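/**
 * free_dma_rx_desc_resources - free RX dma desc resources of all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */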
1942 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1943 				       struct stmmac_dma_conf *dma_conf)
1944 {
1945 	u32 rx_count = priv->plat->rx_queues_to_use;
1946 	u32 queue;
1947 
1948 	/* Free RX queue resources */
1949 	for (queue = 0; queue < rx_count; queue++)
1950 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1951 }
1952 
1953 /**
1954  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1955  * @priv: private structure
1956  * @dma_conf: structure to take the dma data
1957  * @queue: TX queue index
1958  */
1959 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1960 					 struct stmmac_dma_conf *dma_conf,
1961 					 u32 queue)
1962 {
1963 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1964 	size_t size;
1965 	void *addr;
1966 
1967 	/* Release the DMA TX socket buffers */
1968 	dma_free_tx_skbufs(priv, dma_conf, queue);
1969 
1970 	if (priv->extend_desc) {
1971 		size = sizeof(struct dma_extended_desc);
1972 		addr = tx_q->dma_etx;
1973 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1974 		size = sizeof(struct dma_edesc);
1975 		addr = tx_q->dma_entx;
1976 	} else {
1977 		size = sizeof(struct dma_desc);
1978 		addr = tx_q->dma_tx;
1979 	}
1980 
1981 	size *= dma_conf->dma_tx_size;
1982 
1983 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1984 
1985 	kfree(tx_q->tx_skbuff_dma);
1986 	kfree(tx_q->tx_skbuff);
1987 }
1988 
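/**
 * free_dma_tx_desc_resources - free TX dma desc resources of all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */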
1989 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1990 				       struct stmmac_dma_conf *dma_conf)
1991 {
1992 	u32 tx_count = priv->plat->tx_queues_to_use;
1993 	u32 queue;
1994 
1995 	/* Free TX queue resources */
1996 	for (queue = 0; queue < tx_count; queue++)
1997 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1998 }
1999 
2000 /**
2001  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2002  * @priv: private structure
2003  * @dma_conf: structure to take the dma data
2004  * @queue: RX queue index
 * Description: according to which descriptor can be used (extended or basic),
 * this function allocates the per-queue RX resources: the page pool, the
 * buffer bookkeeping array, the descriptor ring and the XDP RX queue info.
2009  */
2010 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2011 					 struct stmmac_dma_conf *dma_conf,
2012 					 u32 queue)
2013 {
2014 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2015 	struct stmmac_channel *ch = &priv->channel[queue];
2016 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2017 	struct page_pool_params pp_params = { 0 };
2018 	unsigned int num_pages;
2019 	unsigned int napi_id;
2020 	int ret;
2021 
2022 	rx_q->queue_index = queue;
2023 	rx_q->priv_data = priv;
2024 
2025 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2026 	pp_params.pool_size = dma_conf->dma_rx_size;
2027 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2028 	pp_params.order = ilog2(num_pages);
2029 	pp_params.nid = dev_to_node(priv->device);
2030 	pp_params.dev = priv->device;
2031 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2032 	pp_params.offset = stmmac_rx_offset(priv);
2033 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2034 
2035 	rx_q->page_pool = page_pool_create(&pp_params);
2036 	if (IS_ERR(rx_q->page_pool)) {
2037 		ret = PTR_ERR(rx_q->page_pool);
2038 		rx_q->page_pool = NULL;
2039 		return ret;
2040 	}
2041 
2042 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2043 				 sizeof(*rx_q->buf_pool),
2044 				 GFP_KERNEL);
2045 	if (!rx_q->buf_pool)
2046 		return -ENOMEM;
2047 
2048 	if (priv->extend_desc) {
2049 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2050 						   dma_conf->dma_rx_size *
2051 						   sizeof(struct dma_extended_desc),
2052 						   &rx_q->dma_rx_phy,
2053 						   GFP_KERNEL);
2054 		if (!rx_q->dma_erx)
2055 			return -ENOMEM;
2056 
2057 	} else {
2058 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2059 						  dma_conf->dma_rx_size *
2060 						  sizeof(struct dma_desc),
2061 						  &rx_q->dma_rx_phy,
2062 						  GFP_KERNEL);
2063 		if (!rx_q->dma_rx)
2064 			return -ENOMEM;
2065 	}
2066 
2067 	if (stmmac_xdp_is_enabled(priv) &&
2068 	    test_bit(queue, priv->af_xdp_zc_qps))
2069 		napi_id = ch->rxtx_napi.napi_id;
2070 	else
2071 		napi_id = ch->rx_napi.napi_id;
2072 
2073 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2074 			       rx_q->queue_index,
2075 			       napi_id);
2076 	if (ret) {
2077 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2078 		return -EINVAL;
2079 	}
2080 
2081 	return 0;
2082 }
2083 
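/**
 * alloc_dma_rx_desc_resources - alloc RX resources of all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it allocates the per-queue RX resources; on failure all the
 * RX resources allocated so far are released.
 */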
2084 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2085 				       struct stmmac_dma_conf *dma_conf)
2086 {
2087 	u32 rx_count = priv->plat->rx_queues_to_use;
2088 	u32 queue;
2089 	int ret;
2090 
2091 	/* RX queues buffers and DMA */
2092 	for (queue = 0; queue < rx_count; queue++) {
2093 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2094 		if (ret)
2095 			goto err_dma;
2096 	}
2097 
2098 	return 0;
2099 
2100 err_dma:
2101 	free_dma_rx_desc_resources(priv, dma_conf);
2102 
2103 	return ret;
2104 }
2105 
2106 /**
2107  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2108  * @priv: private structure
2109  * @dma_conf: structure to take the dma data
2110  * @queue: TX queue index
 * Description: according to which descriptor can be used (extended or basic),
 * this function allocates the per-queue TX resources: the tx_skbuff
 * bookkeeping arrays and the descriptor ring.
2115  */
2116 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2117 					 struct stmmac_dma_conf *dma_conf,
2118 					 u32 queue)
2119 {
2120 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2121 	size_t size;
2122 	void *addr;
2123 
2124 	tx_q->queue_index = queue;
2125 	tx_q->priv_data = priv;
2126 
2127 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2128 				      sizeof(*tx_q->tx_skbuff_dma),
2129 				      GFP_KERNEL);
2130 	if (!tx_q->tx_skbuff_dma)
2131 		return -ENOMEM;
2132 
2133 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2134 				  sizeof(struct sk_buff *),
2135 				  GFP_KERNEL);
2136 	if (!tx_q->tx_skbuff)
2137 		return -ENOMEM;
2138 
2139 	if (priv->extend_desc)
2140 		size = sizeof(struct dma_extended_desc);
2141 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2142 		size = sizeof(struct dma_edesc);
2143 	else
2144 		size = sizeof(struct dma_desc);
2145 
2146 	size *= dma_conf->dma_tx_size;
2147 
2148 	addr = dma_alloc_coherent(priv->device, size,
2149 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2150 	if (!addr)
2151 		return -ENOMEM;
2152 
2153 	if (priv->extend_desc)
2154 		tx_q->dma_etx = addr;
2155 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2156 		tx_q->dma_entx = addr;
2157 	else
2158 		tx_q->dma_tx = addr;
2159 
2160 	return 0;
2161 }
2162 
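/**
 * alloc_dma_tx_desc_resources - alloc TX resources of all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it allocates the per-queue TX resources; on failure all the
 * TX resources allocated so far are released.
 */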
2163 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2164 				       struct stmmac_dma_conf *dma_conf)
2165 {
2166 	u32 tx_count = priv->plat->tx_queues_to_use;
2167 	u32 queue;
2168 	int ret;
2169 
2170 	/* TX queues buffers and DMA */
2171 	for (queue = 0; queue < tx_count; queue++) {
2172 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2173 		if (ret)
2174 			goto err_dma;
2175 	}
2176 
2177 	return 0;
2178 
2179 err_dma:
2180 	free_dma_tx_desc_resources(priv, dma_conf);
2181 	return ret;
2182 }
2183 
2184 /**
2185  * alloc_dma_desc_resources - alloc TX/RX resources.
2186  * @priv: private structure
2187  * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extended or basic),
 * this function allocates the resources for the TX and RX paths. In the case
 * of reception, for example, it pre-allocates the RX buffers in order to
 * allow a zero-copy mechanism.
2192  */
2193 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2194 				    struct stmmac_dma_conf *dma_conf)
2195 {
2196 	/* RX Allocation */
2197 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2198 
2199 	if (ret)
2200 		return ret;
2201 
2202 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2203 
2204 	return ret;
2205 }
2206 
2207 /**
2208  * free_dma_desc_resources - free dma desc resources
2209  * @priv: private structure
2210  * @dma_conf: structure to take the dma data
2211  */
2212 static void free_dma_desc_resources(struct stmmac_priv *priv,
2213 				    struct stmmac_dma_conf *dma_conf)
2214 {
2215 	/* Release the DMA TX socket buffers */
2216 	free_dma_tx_desc_resources(priv, dma_conf);
2217 
2218 	/* Release the DMA RX socket buffers later
2219 	 * to ensure all pending XDP_TX buffers are returned.
2220 	 */
2221 	free_dma_rx_desc_resources(priv, dma_conf);
2222 }
2223 
2224 /**
2225  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2226  *  @priv: driver private structure
2227  *  Description: It is used for enabling the rx queues in the MAC
2228  */
2229 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2230 {
2231 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2232 	int queue;
2233 	u8 mode;
2234 
2235 	for (queue = 0; queue < rx_queues_count; queue++) {
2236 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2237 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2238 	}
2239 }
2240 
2241 /**
2242  * stmmac_start_rx_dma - start RX DMA channel
2243  * @priv: driver private structure
2244  * @chan: RX channel index
2245  * Description:
 * This starts an RX DMA channel
2247  */
2248 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2249 {
2250 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2251 	stmmac_start_rx(priv, priv->ioaddr, chan);
2252 }
2253 
2254 /**
2255  * stmmac_start_tx_dma - start TX DMA channel
2256  * @priv: driver private structure
2257  * @chan: TX channel index
2258  * Description:
2259  * This starts a TX DMA channel
2260  */
2261 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2262 {
2263 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2264 	stmmac_start_tx(priv, priv->ioaddr, chan);
2265 }
2266 
2267 /**
2268  * stmmac_stop_rx_dma - stop RX DMA channel
2269  * @priv: driver private structure
2270  * @chan: RX channel index
2271  * Description:
 * This stops an RX DMA channel
2273  */
2274 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2275 {
2276 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2277 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2278 }
2279 
2280 /**
2281  * stmmac_stop_tx_dma - stop TX DMA channel
2282  * @priv: driver private structure
2283  * @chan: TX channel index
2284  * Description:
2285  * This stops a TX DMA channel
2286  */
2287 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2288 {
2289 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2290 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2291 }
2292 
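/* Enable the RX and TX DMA interrupts on every channel in use */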
2293 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2294 {
2295 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2296 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2297 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2298 	u32 chan;
2299 
2300 	for (chan = 0; chan < dma_csr_ch; chan++) {
2301 		struct stmmac_channel *ch = &priv->channel[chan];
2302 		unsigned long flags;
2303 
2304 		spin_lock_irqsave(&ch->lock, flags);
2305 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2306 		spin_unlock_irqrestore(&ch->lock, flags);
2307 	}
2308 }
2309 
2310 /**
2311  * stmmac_start_all_dma - start all RX and TX DMA channels
2312  * @priv: driver private structure
2313  * Description:
2314  * This starts all the RX and TX DMA channels
2315  */
2316 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2317 {
2318 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2319 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2320 	u32 chan = 0;
2321 
2322 	for (chan = 0; chan < rx_channels_count; chan++)
2323 		stmmac_start_rx_dma(priv, chan);
2324 
2325 	for (chan = 0; chan < tx_channels_count; chan++)
2326 		stmmac_start_tx_dma(priv, chan);
2327 }
2328 
2329 /**
2330  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2331  * @priv: driver private structure
2332  * Description:
2333  * This stops the RX and TX DMA channels
2334  */
2335 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2336 {
2337 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2338 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2339 	u32 chan = 0;
2340 
2341 	for (chan = 0; chan < rx_channels_count; chan++)
2342 		stmmac_stop_rx_dma(priv, chan);
2343 
2344 	for (chan = 0; chan < tx_channels_count; chan++)
2345 		stmmac_stop_tx_dma(priv, chan);
2346 }
2347 
2348 /**
2349  *  stmmac_dma_operation_mode - HW DMA operation mode
2350  *  @priv: driver private structure
2351  *  Description: it is used for configuring the DMA operation mode register in
2352  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2353  */
2354 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2355 {
2356 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2357 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2358 	int rxfifosz = priv->plat->rx_fifo_size;
2359 	int txfifosz = priv->plat->tx_fifo_size;
2360 	u32 txmode = 0;
2361 	u32 rxmode = 0;
2362 	u32 chan = 0;
2363 	u8 qmode = 0;
2364 
2365 	if (rxfifosz == 0)
2366 		rxfifosz = priv->dma_cap.rx_fifo_size;
2367 	if (txfifosz == 0)
2368 		txfifosz = priv->dma_cap.tx_fifo_size;
2369 
2370 	/* Adjust for real per queue fifo size */
2371 	rxfifosz /= rx_channels_count;
2372 	txfifosz /= tx_channels_count;
2373 
2374 	if (priv->plat->force_thresh_dma_mode) {
2375 		txmode = tc;
2376 		rxmode = tc;
2377 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2378 		/*
		 * In the case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE actually being supported;
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
2384 		 */
2385 		txmode = SF_DMA_MODE;
2386 		rxmode = SF_DMA_MODE;
2387 		priv->xstats.threshold = SF_DMA_MODE;
2388 	} else {
2389 		txmode = tc;
2390 		rxmode = SF_DMA_MODE;
2391 	}
2392 
2393 	/* configure all channels */
2394 	for (chan = 0; chan < rx_channels_count; chan++) {
2395 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2396 		u32 buf_size;
2397 
2398 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2399 
2400 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2401 				rxfifosz, qmode);
2402 
2403 		if (rx_q->xsk_pool) {
2404 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2405 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2406 					      buf_size,
2407 					      chan);
2408 		} else {
2409 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2410 					      priv->dma_conf.dma_buf_sz,
2411 					      chan);
2412 		}
2413 	}
2414 
2415 	for (chan = 0; chan < tx_channels_count; chan++) {
2416 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2417 
2418 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2419 				txfifosz, qmode);
2420 	}
2421 }
2422 
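/**
 * stmmac_xdp_xmit_zc - transmit frames from the XSK TX ring (zero-copy)
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: it pulls descriptors from the XSK pool and places them on
 * the TX ring shared with the slow path. Returns true when the budget was
 * not exhausted and no more XSK TX descriptors are pending.
 */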
2423 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2424 {
2425 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2426 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2427 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2428 	unsigned int entry = tx_q->cur_tx;
2429 	struct dma_desc *tx_desc = NULL;
2430 	struct xdp_desc xdp_desc;
2431 	bool work_done = true;
2432 	u32 tx_set_ic_bit = 0;
2433 	unsigned long flags;
2434 
2435 	/* Avoids TX time-out as we are sharing with slow path */
2436 	txq_trans_cond_update(nq);
2437 
2438 	budget = min(budget, stmmac_tx_avail(priv, queue));
2439 
2440 	while (budget-- > 0) {
2441 		dma_addr_t dma_addr;
2442 		bool set_ic;
2443 
		/* We share the ring with the slow path, so stop XSK TX desc
		 * submission when the available TX ring space drops below the
		 * threshold.
2446 		 */
2447 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2448 		    !netif_carrier_ok(priv->dev)) {
2449 			work_done = false;
2450 			break;
2451 		}
2452 
2453 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2454 			break;
2455 
2456 		if (likely(priv->extend_desc))
2457 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2458 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2459 			tx_desc = &tx_q->dma_entx[entry].basic;
2460 		else
2461 			tx_desc = tx_q->dma_tx + entry;
2462 
2463 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2464 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2465 
2466 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2467 
		/* To return the XDP buffer to the XSK pool, we simply call
2469 		 * xsk_tx_completed(), so we don't need to fill up
2470 		 * 'buf' and 'xdpf'.
2471 		 */
2472 		tx_q->tx_skbuff_dma[entry].buf = 0;
2473 		tx_q->xdpf[entry] = NULL;
2474 
2475 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2476 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2477 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2478 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2479 
2480 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2481 
2482 		tx_q->tx_count_frames++;
2483 
2484 		if (!priv->tx_coal_frames[queue])
2485 			set_ic = false;
2486 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2487 			set_ic = true;
2488 		else
2489 			set_ic = false;
2490 
2491 		if (set_ic) {
2492 			tx_q->tx_count_frames = 0;
2493 			stmmac_set_tx_ic(priv, tx_desc);
2494 			tx_set_ic_bit++;
2495 		}
2496 
2497 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2498 				       true, priv->mode, true, true,
2499 				       xdp_desc.len);
2500 
2501 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2502 
2503 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2504 		entry = tx_q->cur_tx;
2505 	}
2506 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
2507 	tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
2508 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
2509 
2510 	if (tx_desc) {
2511 		stmmac_flush_tx_descriptors(priv, queue);
2512 		xsk_tx_release(pool);
2513 	}
2514 
	/* Return true if both of the following conditions are met:
	 *  a) TX budget is still available
	 *  b) work_done is true, i.e. the XSK TX desc peek found no more
	 *     pending XSK TX frames for transmission
2519 	 */
2520 	return !!budget && work_done;
2521 }
2522 
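/**
 * stmmac_bump_dma_threshold - bump up the DMA threshold of a channel
 * @priv: driver private structure
 * @chan: channel index
 * Description: when running in threshold mode, it increases the threshold
 * by 64 (up to 256) and reprograms the channel operation mode. Typically
 * called on TX errors that suggest the threshold is too low.
 */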
2523 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2524 {
2525 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2526 		tc += 64;
2527 
2528 		if (priv->plat->force_thresh_dma_mode)
2529 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2530 		else
2531 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2532 						      chan);
2533 
2534 		priv->xstats.threshold = tc;
2535 	}
2536 }
2537 
2538 /**
2539  * stmmac_tx_clean - to manage the transmission completion
2540  * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
2542  * @queue: TX queue index
2543  * Description: it reclaims the transmit resources after transmission completes.
2544  */
2545 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2546 {
2547 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2548 	unsigned int bytes_compl = 0, pkts_compl = 0;
2549 	unsigned int entry, xmits = 0, count = 0;
2550 	u32 tx_packets = 0, tx_errors = 0;
2551 	unsigned long flags;
2552 
2553 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2554 
2555 	tx_q->xsk_frames_done = 0;
2556 
2557 	entry = tx_q->dirty_tx;
2558 
	/* Try to clean all completed TX frames in one shot */
2560 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2561 		struct xdp_frame *xdpf;
2562 		struct sk_buff *skb;
2563 		struct dma_desc *p;
2564 		int status;
2565 
2566 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2567 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2568 			xdpf = tx_q->xdpf[entry];
2569 			skb = NULL;
2570 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2571 			xdpf = NULL;
2572 			skb = tx_q->tx_skbuff[entry];
2573 		} else {
2574 			xdpf = NULL;
2575 			skb = NULL;
2576 		}
2577 
2578 		if (priv->extend_desc)
2579 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2580 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2581 			p = &tx_q->dma_entx[entry].basic;
2582 		else
2583 			p = tx_q->dma_tx + entry;
2584 
2585 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2586 		/* Check if the descriptor is owned by the DMA */
2587 		if (unlikely(status & tx_dma_own))
2588 			break;
2589 
2590 		count++;
2591 
2592 		/* Make sure descriptor fields are read after reading
2593 		 * the own bit.
2594 		 */
2595 		dma_rmb();
2596 
2597 		/* Just consider the last segment and ...*/
2598 		if (likely(!(status & tx_not_ls))) {
2599 			/* ... verify the status error condition */
2600 			if (unlikely(status & tx_err)) {
2601 				tx_errors++;
2602 				if (unlikely(status & tx_err_bump_tc))
2603 					stmmac_bump_dma_threshold(priv, queue);
2604 			} else {
2605 				tx_packets++;
2606 			}
2607 			if (skb)
2608 				stmmac_get_tx_hwtstamp(priv, p, skb);
2609 		}
2610 
2611 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2612 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2613 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2614 				dma_unmap_page(priv->device,
2615 					       tx_q->tx_skbuff_dma[entry].buf,
2616 					       tx_q->tx_skbuff_dma[entry].len,
2617 					       DMA_TO_DEVICE);
2618 			else
2619 				dma_unmap_single(priv->device,
2620 						 tx_q->tx_skbuff_dma[entry].buf,
2621 						 tx_q->tx_skbuff_dma[entry].len,
2622 						 DMA_TO_DEVICE);
2623 			tx_q->tx_skbuff_dma[entry].buf = 0;
2624 			tx_q->tx_skbuff_dma[entry].len = 0;
2625 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2626 		}
2627 
2628 		stmmac_clean_desc3(priv, tx_q, p);
2629 
2630 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2631 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2632 
2633 		if (xdpf &&
2634 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2635 			xdp_return_frame_rx_napi(xdpf);
2636 			tx_q->xdpf[entry] = NULL;
2637 		}
2638 
2639 		if (xdpf &&
2640 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2641 			xdp_return_frame(xdpf);
2642 			tx_q->xdpf[entry] = NULL;
2643 		}
2644 
2645 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2646 			tx_q->xsk_frames_done++;
2647 
2648 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2649 			if (likely(skb)) {
2650 				pkts_compl++;
2651 				bytes_compl += skb->len;
2652 				dev_consume_skb_any(skb);
2653 				tx_q->tx_skbuff[entry] = NULL;
2654 			}
2655 		}
2656 
2657 		stmmac_release_tx_desc(priv, p, priv->mode);
2658 
2659 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2660 	}
2661 	tx_q->dirty_tx = entry;
2662 
2663 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2664 				  pkts_compl, bytes_compl);
2665 
2666 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2667 								queue))) &&
2668 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2669 
2670 		netif_dbg(priv, tx_done, priv->dev,
2671 			  "%s: restart transmit\n", __func__);
2672 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2673 	}
2674 
2675 	if (tx_q->xsk_pool) {
2676 		bool work_done;
2677 
2678 		if (tx_q->xsk_frames_done)
2679 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2680 
2681 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2682 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2683 
2684 		/* For XSK TX, we try to send as many as possible.
2685 		 * If XSK work done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to re-enable the TX IRQ.
2687 		 * Else, return "budget" to make NAPI continue polling.
2688 		 */
2689 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2690 					       STMMAC_XSK_TX_BUDGET_MAX);
2691 		if (work_done)
2692 			xmits = budget - 1;
2693 		else
2694 			xmits = budget;
2695 	}
2696 
2697 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2698 	    priv->eee_sw_timer_en) {
2699 		if (stmmac_enable_eee_mode(priv))
2700 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2701 	}
2702 
2703 	/* We still have pending packets, let's call for a new scheduling */
2704 	if (tx_q->dirty_tx != tx_q->cur_tx)
2705 		hrtimer_start(&tx_q->txtimer,
2706 			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2707 			      HRTIMER_MODE_REL);
2708 
2709 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
2710 	tx_q->txq_stats.tx_packets += tx_packets;
2711 	tx_q->txq_stats.tx_pkt_n += tx_packets;
2712 	tx_q->txq_stats.tx_clean++;
2713 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
2714 
2715 	priv->xstats.tx_errors += tx_errors;
2716 
2717 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2718 
2719 	/* Combine decisions from TX clean and XSK TX */
2720 	return max(count, xmits);
2721 }
2722 
2723 /**
2724  * stmmac_tx_err - to manage the tx error
2725  * @priv: driver private structure
2726  * @chan: channel index
2727  * Description: it cleans the descriptors and restarts the transmission
2728  * in case of transmission errors.
2729  */
2730 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2731 {
2732 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2733 
2734 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2735 
2736 	stmmac_stop_tx_dma(priv, chan);
2737 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2738 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2739 	stmmac_reset_tx_queue(priv, chan);
2740 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2741 			    tx_q->dma_tx_phy, chan);
2742 	stmmac_start_tx_dma(priv, chan);
2743 
2744 	priv->xstats.tx_errors++;
2745 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2746 }
2747 
2748 /**
2749  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2750  *  @priv: driver private structure
2751  *  @txmode: TX operating mode
2752  *  @rxmode: RX operating mode
2753  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-and-Forward
2756  *  mode.
2757  */
2758 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2759 					  u32 rxmode, u32 chan)
2760 {
2761 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2762 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2763 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2764 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2765 	int rxfifosz = priv->plat->rx_fifo_size;
2766 	int txfifosz = priv->plat->tx_fifo_size;
2767 
2768 	if (rxfifosz == 0)
2769 		rxfifosz = priv->dma_cap.rx_fifo_size;
2770 	if (txfifosz == 0)
2771 		txfifosz = priv->dma_cap.tx_fifo_size;
2772 
2773 	/* Adjust for real per queue fifo size */
2774 	rxfifosz /= rx_channels_count;
2775 	txfifosz /= tx_channels_count;
2776 
2777 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2778 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2779 }
2780 
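/* Check the safety feature interrupt status; on an error report it
 * triggers the global error handling and returns true.
 */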
2781 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2782 {
2783 	int ret;
2784 
2785 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2786 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2787 	if (ret && (ret != -EINVAL)) {
2788 		stmmac_global_err(priv);
2789 		return true;
2790 	}
2791 
2792 	return false;
2793 }
2794 
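/* Read the per-channel DMA interrupt status and, when RX and/or TX work is
 * pending, mask the corresponding DMA interrupt and schedule the matching
 * NAPI context (rx, tx, or the combined rxtx one for AF_XDP zero-copy
 * queues).
 */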
2795 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2796 {
2797 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2798 						 &priv->xstats, chan, dir);
2799 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2800 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2801 	struct stmmac_channel *ch = &priv->channel[chan];
2802 	struct napi_struct *rx_napi;
2803 	struct napi_struct *tx_napi;
2804 	unsigned long flags;
2805 
2806 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2807 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2808 
2809 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2810 		if (napi_schedule_prep(rx_napi)) {
2811 			spin_lock_irqsave(&ch->lock, flags);
2812 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2813 			spin_unlock_irqrestore(&ch->lock, flags);
2814 			__napi_schedule(rx_napi);
2815 		}
2816 	}
2817 
2818 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2819 		if (napi_schedule_prep(tx_napi)) {
2820 			spin_lock_irqsave(&ch->lock, flags);
2821 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2822 			spin_unlock_irqrestore(&ch->lock, flags);
2823 			__napi_schedule(tx_napi);
2824 		}
2825 	}
2826 
2827 	return status;
2828 }
2829 
2830 /**
2831  * stmmac_dma_interrupt - DMA ISR
2832  * @priv: driver private structure
2833  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac DMA routine and schedules the poll method in case
 * some work can be done.
2836  */
2837 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2838 {
2839 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2840 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2841 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2842 				tx_channel_count : rx_channel_count;
2843 	u32 chan;
2844 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2845 
2846 	/* Make sure we never check beyond our status buffer. */
2847 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2848 		channels_to_check = ARRAY_SIZE(status);
2849 
2850 	for (chan = 0; chan < channels_to_check; chan++)
2851 		status[chan] = stmmac_napi_check(priv, chan,
2852 						 DMA_DIR_RXTX);
2853 
2854 	for (chan = 0; chan < tx_channel_count; chan++) {
2855 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2856 			/* Try to bump up the dma threshold on this failure */
2857 			stmmac_bump_dma_threshold(priv, chan);
2858 		} else if (unlikely(status[chan] == tx_hard_error)) {
2859 			stmmac_tx_err(priv, chan);
2860 		}
2861 	}
2862 }
2863 
2864 /**
 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq since the counters are managed in SW.
2868  */
2869 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2870 {
2871 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2872 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2873 
2874 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2875 
2876 	if (priv->dma_cap.rmon) {
2877 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2878 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2879 	} else
2880 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2881 }
2882 
2883 /**
2884  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2885  * @priv: driver private structure
2886  * Description:
 *  newer GMAC chip generations have a register that indicates the
 *  presence of optional features/functions.
 *  This can also be used to override the value passed through the
 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2891  */
2892 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2893 {
2894 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2895 }
2896 
2897 /**
2898  * stmmac_check_ether_addr - check if the MAC addr is valid
2899  * @priv: driver private structure
2900  * Description:
 * it verifies that the MAC address is valid; if it is not, the address is
 * read from the HW or, failing that, a random MAC address is generated
2903  */
2904 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2905 {
2906 	u8 addr[ETH_ALEN];
2907 
2908 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2909 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2910 		if (is_valid_ether_addr(addr))
2911 			eth_hw_addr_set(priv->dev, addr);
2912 		else
2913 			eth_hw_addr_random(priv->dev);
2914 		dev_info(priv->device, "device MAC address %pM\n",
2915 			 priv->dev->dev_addr);
2916 	}
2917 }
2918 
2919 /**
2920  * stmmac_init_dma_engine - DMA init.
2921  * @priv: driver private structure
2922  * Description:
 * It initializes the DMA by invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
2926  */
2927 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2928 {
2929 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2930 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2931 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2932 	struct stmmac_rx_queue *rx_q;
2933 	struct stmmac_tx_queue *tx_q;
2934 	u32 chan = 0;
2935 	int atds = 0;
2936 	int ret = 0;
2937 
2938 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2939 		dev_err(priv->device, "Invalid DMA configuration\n");
2940 		return -EINVAL;
2941 	}
2942 
2943 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2944 		atds = 1;
2945 
2946 	ret = stmmac_reset(priv, priv->ioaddr);
2947 	if (ret) {
2948 		dev_err(priv->device, "Failed to reset the dma\n");
2949 		return ret;
2950 	}
2951 
2952 	/* DMA Configuration */
2953 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2954 
2955 	if (priv->plat->axi)
2956 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2957 
2958 	/* DMA CSR Channel configuration */
2959 	for (chan = 0; chan < dma_csr_ch; chan++) {
2960 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2961 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2962 	}
2963 
2964 	/* DMA RX Channel Configuration */
2965 	for (chan = 0; chan < rx_channels_count; chan++) {
2966 		rx_q = &priv->dma_conf.rx_queue[chan];
2967 
2968 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2969 				    rx_q->dma_rx_phy, chan);
2970 
2971 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2972 				     (rx_q->buf_alloc_num *
2973 				      sizeof(struct dma_desc));
2974 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2975 				       rx_q->rx_tail_addr, chan);
2976 	}
2977 
2978 	/* DMA TX Channel Configuration */
2979 	for (chan = 0; chan < tx_channels_count; chan++) {
2980 		tx_q = &priv->dma_conf.tx_queue[chan];
2981 
2982 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2983 				    tx_q->dma_tx_phy, chan);
2984 
2985 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2986 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2987 				       tx_q->tx_tail_addr, chan);
2988 	}
2989 
2990 	return ret;
2991 }
2992 
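/* (Re)arm the TX coalescing timer of the given queue */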
2993 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2994 {
2995 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2996 
2997 	hrtimer_start(&tx_q->txtimer,
2998 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2999 		      HRTIMER_MODE_REL);
3000 }
3001 
3002 /**
3003  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the hrtimer embedded in the TX queue
 * Description:
 * This is the timer handler that schedules the TX NAPI, which in turn
 * runs stmmac_tx_clean.
3007  */
3008 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3009 {
3010 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3011 	struct stmmac_priv *priv = tx_q->priv_data;
3012 	struct stmmac_channel *ch;
3013 	struct napi_struct *napi;
3014 
3015 	ch = &priv->channel[tx_q->queue_index];
3016 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3017 
3018 	if (likely(napi_schedule_prep(napi))) {
3019 		unsigned long flags;
3020 
3021 		spin_lock_irqsave(&ch->lock, flags);
3022 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3023 		spin_unlock_irqrestore(&ch->lock, flags);
3024 		__napi_schedule(napi);
3025 	}
3026 
3027 	return HRTIMER_NORESTART;
3028 }
3029 
3030 /**
3031  * stmmac_init_coalesce - init mitigation options.
3032  * @priv: driver private structure
3033  * Description:
3034  * This inits the coalesce parameters: i.e. timer rate,
3035  * timer handler and default threshold used for enabling the
3036  * interrupt on completion bit.
3037  */
3038 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3039 {
3040 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3041 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3042 	u32 chan;
3043 
3044 	for (chan = 0; chan < tx_channel_count; chan++) {
3045 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3046 
3047 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3048 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3049 
3050 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3051 		tx_q->txtimer.function = stmmac_tx_timer;
3052 	}
3053 
3054 	for (chan = 0; chan < rx_channel_count; chan++)
3055 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3056 }
3057 
3058 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3059 {
3060 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3061 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3062 	u32 chan;
3063 
3064 	/* set TX ring length */
3065 	for (chan = 0; chan < tx_channels_count; chan++)
3066 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3067 				       (priv->dma_conf.dma_tx_size - 1), chan);
3068 
3069 	/* set RX ring length */
3070 	for (chan = 0; chan < rx_channels_count; chan++)
3071 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3072 				       (priv->dma_conf.dma_rx_size - 1), chan);
3073 }
3074 
3075 /**
3076  *  stmmac_set_tx_queue_weight - Set TX queue weight
3077  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
3079  */
3080 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3081 {
3082 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3083 	u32 weight;
3084 	u32 queue;
3085 
3086 	for (queue = 0; queue < tx_queues_count; queue++) {
3087 		weight = priv->plat->tx_queues_cfg[queue].weight;
3088 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3089 	}
3090 }
3091 
3092 /**
3093  *  stmmac_configure_cbs - Configure CBS in TX queue
3094  *  @priv: driver private structure
3095  *  Description: It is used for configuring CBS in AVB TX queues
3096  */
3097 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3098 {
3099 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3100 	u32 mode_to_use;
3101 	u32 queue;
3102 
3103 	/* queue 0 is reserved for legacy traffic */
3104 	for (queue = 1; queue < tx_queues_count; queue++) {
3105 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3106 		if (mode_to_use == MTL_QUEUE_DCB)
3107 			continue;
3108 
3109 		stmmac_config_cbs(priv, priv->hw,
3110 				priv->plat->tx_queues_cfg[queue].send_slope,
3111 				priv->plat->tx_queues_cfg[queue].idle_slope,
3112 				priv->plat->tx_queues_cfg[queue].high_credit,
3113 				priv->plat->tx_queues_cfg[queue].low_credit,
3114 				queue);
3115 	}
3116 }
3117 
3118 /**
3119  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3120  *  @priv: driver private structure
3121  *  Description: It is used for mapping RX queues to RX dma channels
3122  */
3123 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3124 {
3125 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3126 	u32 queue;
3127 	u32 chan;
3128 
3129 	for (queue = 0; queue < rx_queues_count; queue++) {
3130 		chan = priv->plat->rx_queues_cfg[queue].chan;
3131 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3132 	}
3133 }
3134 
3135 /**
3136  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3137  *  @priv: driver private structure
3138  *  Description: It is used for configuring the RX Queue Priority
3139  */
3140 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3141 {
3142 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3143 	u32 queue;
3144 	u32 prio;
3145 
3146 	for (queue = 0; queue < rx_queues_count; queue++) {
3147 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3148 			continue;
3149 
3150 		prio = priv->plat->rx_queues_cfg[queue].prio;
3151 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3152 	}
3153 }
3154 
3155 /**
3156  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3157  *  @priv: driver private structure
3158  *  Description: It is used for configuring the TX Queue Priority
3159  */
3160 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3161 {
3162 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3163 	u32 queue;
3164 	u32 prio;
3165 
3166 	for (queue = 0; queue < tx_queues_count; queue++) {
3167 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3168 			continue;
3169 
3170 		prio = priv->plat->tx_queues_cfg[queue].prio;
3171 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3172 	}
3173 }
3174 
3175 /**
3176  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3177  *  @priv: driver private structure
3178  *  Description: It is used for configuring the RX queue routing
3179  */
3180 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3181 {
3182 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3183 	u32 queue;
3184 	u8 packet;
3185 
3186 	for (queue = 0; queue < rx_queues_count; queue++) {
3187 		/* no specific packet type routing specified for the queue */
3188 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3189 			continue;
3190 
3191 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3192 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3193 	}
3194 }
3195 
3196 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3197 {
3198 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3199 		priv->rss.enable = false;
3200 		return;
3201 	}
3202 
3203 	if (priv->dev->features & NETIF_F_RXHASH)
3204 		priv->rss.enable = true;
3205 	else
3206 		priv->rss.enable = false;
3207 
3208 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3209 			     priv->plat->rx_queues_to_use);
3210 }
3211 
3212 /**
3213  *  stmmac_mtl_configuration - Configure MTL
3214  *  @priv: driver private structure
 *  Description: It is used for configuring MTL
3216  */
3217 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3218 {
3219 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3220 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3221 
3222 	if (tx_queues_count > 1)
3223 		stmmac_set_tx_queue_weight(priv);
3224 
3225 	/* Configure MTL RX algorithms */
3226 	if (rx_queues_count > 1)
3227 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3228 				priv->plat->rx_sched_algorithm);
3229 
3230 	/* Configure MTL TX algorithms */
3231 	if (tx_queues_count > 1)
3232 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3233 				priv->plat->tx_sched_algorithm);
3234 
3235 	/* Configure CBS in AVB TX queues */
3236 	if (tx_queues_count > 1)
3237 		stmmac_configure_cbs(priv);
3238 
3239 	/* Map RX MTL to DMA channels */
3240 	stmmac_rx_queue_dma_chan_map(priv);
3241 
3242 	/* Enable MAC RX Queues */
3243 	stmmac_mac_enable_rx_queues(priv);
3244 
3245 	/* Set RX priorities */
3246 	if (rx_queues_count > 1)
3247 		stmmac_mac_config_rx_queues_prio(priv);
3248 
3249 	/* Set TX priorities */
3250 	if (tx_queues_count > 1)
3251 		stmmac_mac_config_tx_queues_prio(priv);
3252 
3253 	/* Set RX routing */
3254 	if (rx_queues_count > 1)
3255 		stmmac_mac_config_rx_queues_routing(priv);
3256 
3257 	/* Receive Side Scaling */
3258 	if (rx_queues_count > 1)
3259 		stmmac_mac_config_rss(priv);
3260 }
3261 
3262 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3263 {
3264 	if (priv->dma_cap.asp) {
3265 		netdev_info(priv->dev, "Enabling Safety Features\n");
3266 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3267 					  priv->plat->safety_feat_cfg);
3268 	} else {
3269 		netdev_info(priv->dev, "No Safety Features support found\n");
3270 	}
3271 }
3272 
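/* Create the single-threaded workqueue used by the FPE (Frame Preemption)
 * handshake task.
 */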
3273 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3274 {
3275 	char *name;
3276 
3277 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3278 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3279 
3280 	name = priv->wq_name;
3281 	sprintf(name, "%s-fpe", priv->dev->name);
3282 
3283 	priv->fpe_wq = create_singlethread_workqueue(name);
3284 	if (!priv->fpe_wq) {
3285 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3286 
3287 		return -ENOMEM;
3288 	}
3289 	netdev_info(priv->dev, "FPE workqueue start");
3290 
3291 	return 0;
3292 }
3293 
3294 /**
 * stmmac_hw_setup - setup the MAC in a usable state.
 *  @dev : pointer to the device structure.
 *  @ptp_register: register PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state: the DMA
 *  engine is reset, the core registers are configured (e.g. AXI,
 *  checksum features, timers) and the DMA is ready to start receiving
 *  and transmitting.
 *  Return value:
 *  0 on success and an appropriate negative errno on failure.
3306  */
3307 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3308 {
3309 	struct stmmac_priv *priv = netdev_priv(dev);
3310 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3311 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3312 	bool sph_en;
3313 	u32 chan;
3314 	int ret;
3315 
3316 	/* DMA initialization and SW reset */
3317 	ret = stmmac_init_dma_engine(priv);
3318 	if (ret < 0) {
3319 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3320 			   __func__);
3321 		return ret;
3322 	}
3323 
3324 	/* Copy the MAC addr into the HW  */
3325 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3326 
3327 	/* PS and related bits will be programmed according to the speed */
3328 	if (priv->hw->pcs) {
3329 		int speed = priv->plat->mac_port_sel_speed;
3330 
3331 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3332 		    (speed == SPEED_1000)) {
3333 			priv->hw->ps = speed;
3334 		} else {
3335 			dev_warn(priv->device, "invalid port speed\n");
3336 			priv->hw->ps = 0;
3337 		}
3338 	}
3339 
3340 	/* Initialize the MAC Core */
3341 	stmmac_core_init(priv, priv->hw, dev);
3342 
	/* Initialize MTL */
3344 	stmmac_mtl_configuration(priv);
3345 
3346 	/* Initialize Safety Features */
3347 	stmmac_safety_feat_configuration(priv);
3348 
3349 	ret = stmmac_rx_ipc(priv, priv->hw);
3350 	if (!ret) {
3351 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3352 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3353 		priv->hw->rx_csum = 0;
3354 	}
3355 
3356 	/* Enable the MAC Rx/Tx */
3357 	stmmac_mac_set(priv, priv->ioaddr, true);
3358 
3359 	/* Set the HW DMA mode and the COE */
3360 	stmmac_dma_operation_mode(priv);
3361 
3362 	stmmac_mmc_setup(priv);
3363 
3364 	if (ptp_register) {
3365 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3366 		if (ret < 0)
3367 			netdev_warn(priv->dev,
3368 				    "failed to enable PTP reference clock: %pe\n",
3369 				    ERR_PTR(ret));
3370 	}
3371 
3372 	ret = stmmac_init_ptp(priv);
3373 	if (ret == -EOPNOTSUPP)
3374 		netdev_info(priv->dev, "PTP not supported by HW\n");
3375 	else if (ret)
3376 		netdev_warn(priv->dev, "PTP init failed\n");
3377 	else if (ptp_register)
3378 		stmmac_ptp_register(priv);
3379 
3380 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3381 
3382 	/* Convert the timer from msec to usec */
3383 	if (!priv->tx_lpi_timer)
3384 		priv->tx_lpi_timer = eee_timer * 1000;
3385 
3386 	if (priv->use_riwt) {
3387 		u32 queue;
3388 
3389 		for (queue = 0; queue < rx_cnt; queue++) {
3390 			if (!priv->rx_riwt[queue])
3391 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3392 
3393 			stmmac_rx_watchdog(priv, priv->ioaddr,
3394 					   priv->rx_riwt[queue], queue);
3395 		}
3396 	}
3397 
3398 	if (priv->hw->pcs)
3399 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3400 
3401 	/* set TX and RX rings length */
3402 	stmmac_set_rings_length(priv);
3403 
3404 	/* Enable TSO */
3405 	if (priv->tso) {
3406 		for (chan = 0; chan < tx_cnt; chan++) {
3407 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3408 
3409 			/* TSO and TBS cannot co-exist */
3410 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3411 				continue;
3412 
3413 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3414 		}
3415 	}
3416 
3417 	/* Enable Split Header */
3418 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3419 	for (chan = 0; chan < rx_cnt; chan++)
3420 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3423 	/* VLAN Tag Insertion */
3424 	if (priv->dma_cap.vlins)
3425 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3426 
3427 	/* TBS */
3428 	for (chan = 0; chan < tx_cnt; chan++) {
3429 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3430 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3431 
3432 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3433 	}
3434 
3435 	/* Configure real RX and TX queues */
3436 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3437 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3438 
3439 	/* Start the ball rolling... */
3440 	stmmac_start_all_dma(priv);
3441 
3442 	if (priv->dma_cap.fpesel) {
3443 		stmmac_fpe_start_wq(priv);
3444 
3445 		if (priv->plat->fpe_cfg->enable)
3446 			stmmac_fpe_handshake(priv, true);
3447 	}
3448 
3449 	return 0;
3450 }
3451 
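/* Undo the HW setup: currently this only disables the PTP reference clock
 * enabled in stmmac_hw_setup().
 */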
3452 static void stmmac_hw_teardown(struct net_device *dev)
3453 {
3454 	struct stmmac_priv *priv = netdev_priv(dev);
3455 
3456 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3457 }
3458 
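/* Free the IRQs requested so far: @irq_err tells how far the request
 * sequence got, and @irq_idx bounds the per-queue vectors to release.
 */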
3459 static void stmmac_free_irq(struct net_device *dev,
3460 			    enum request_irq_err irq_err, int irq_idx)
3461 {
3462 	struct stmmac_priv *priv = netdev_priv(dev);
3463 	int j;
3464 
3465 	switch (irq_err) {
3466 	case REQ_IRQ_ERR_ALL:
3467 		irq_idx = priv->plat->tx_queues_to_use;
3468 		fallthrough;
3469 	case REQ_IRQ_ERR_TX:
3470 		for (j = irq_idx - 1; j >= 0; j--) {
3471 			if (priv->tx_irq[j] > 0) {
3472 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3473 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3474 			}
3475 		}
3476 		irq_idx = priv->plat->rx_queues_to_use;
3477 		fallthrough;
3478 	case REQ_IRQ_ERR_RX:
3479 		for (j = irq_idx - 1; j >= 0; j--) {
3480 			if (priv->rx_irq[j] > 0) {
3481 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3482 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3483 			}
3484 		}
3485 
3486 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3487 			free_irq(priv->sfty_ue_irq, dev);
3488 		fallthrough;
3489 	case REQ_IRQ_ERR_SFTY_UE:
3490 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3491 			free_irq(priv->sfty_ce_irq, dev);
3492 		fallthrough;
3493 	case REQ_IRQ_ERR_SFTY_CE:
3494 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3495 			free_irq(priv->lpi_irq, dev);
3496 		fallthrough;
3497 	case REQ_IRQ_ERR_LPI:
3498 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3499 			free_irq(priv->wol_irq, dev);
3500 		fallthrough;
3501 	case REQ_IRQ_ERR_WOL:
3502 		free_irq(dev->irq, dev);
3503 		fallthrough;
3504 	case REQ_IRQ_ERR_MAC:
3505 	case REQ_IRQ_ERR_NO:
		/* If the MAC IRQ request failed, there are no more IRQs to free */
3507 		break;
3508 	}
3509 }
3510 
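/* Request one MSI vector for the MAC plus the optional WoL, LPI and safety
 * lines, and one vector per Rx/Tx queue. Each queue vector gets a CPU
 * affinity hint spread across the online CPUs.
 */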
3511 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3512 {
3513 	struct stmmac_priv *priv = netdev_priv(dev);
3514 	enum request_irq_err irq_err;
3515 	cpumask_t cpu_mask;
3516 	int irq_idx = 0;
3517 	char *int_name;
3518 	int ret;
3519 	int i;
3520 
3521 	/* For common interrupt */
3522 	int_name = priv->int_name_mac;
3523 	sprintf(int_name, "%s:%s", dev->name, "mac");
3524 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3525 			  0, int_name, dev);
3526 	if (unlikely(ret < 0)) {
3527 		netdev_err(priv->dev,
3528 			   "%s: alloc mac MSI %d (error: %d)\n",
3529 			   __func__, dev->irq, ret);
3530 		irq_err = REQ_IRQ_ERR_MAC;
3531 		goto irq_error;
3532 	}
3533 
	/* Request the Wake IRQ in case a separate line
	 * is used for WoL
	 */
3537 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3538 		int_name = priv->int_name_wol;
3539 		sprintf(int_name, "%s:%s", dev->name, "wol");
3540 		ret = request_irq(priv->wol_irq,
3541 				  stmmac_mac_interrupt,
3542 				  0, int_name, dev);
3543 		if (unlikely(ret < 0)) {
3544 			netdev_err(priv->dev,
3545 				   "%s: alloc wol MSI %d (error: %d)\n",
3546 				   __func__, priv->wol_irq, ret);
3547 			irq_err = REQ_IRQ_ERR_WOL;
3548 			goto irq_error;
3549 		}
3550 	}
3551 
	/* Request the LPI IRQ in case a separate line
	 * is used for LPI
	 */
3555 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3556 		int_name = priv->int_name_lpi;
3557 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3558 		ret = request_irq(priv->lpi_irq,
3559 				  stmmac_mac_interrupt,
3560 				  0, int_name, dev);
3561 		if (unlikely(ret < 0)) {
3562 			netdev_err(priv->dev,
3563 				   "%s: alloc lpi MSI %d (error: %d)\n",
3564 				   __func__, priv->lpi_irq, ret);
3565 			irq_err = REQ_IRQ_ERR_LPI;
3566 			goto irq_error;
3567 		}
3568 	}
3569 
	/* Request the Safety Feature Correctable Error line in
	 * case a separate line is used
	 */
3573 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3574 		int_name = priv->int_name_sfty_ce;
3575 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3576 		ret = request_irq(priv->sfty_ce_irq,
3577 				  stmmac_safety_interrupt,
3578 				  0, int_name, dev);
3579 		if (unlikely(ret < 0)) {
3580 			netdev_err(priv->dev,
3581 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3582 				   __func__, priv->sfty_ce_irq, ret);
3583 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3584 			goto irq_error;
3585 		}
3586 	}
3587 
	/* Request the Safety Feature Uncorrectable Error line in
	 * case a separate line is used
	 */
3591 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3592 		int_name = priv->int_name_sfty_ue;
3593 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3594 		ret = request_irq(priv->sfty_ue_irq,
3595 				  stmmac_safety_interrupt,
3596 				  0, int_name, dev);
3597 		if (unlikely(ret < 0)) {
3598 			netdev_err(priv->dev,
3599 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3600 				   __func__, priv->sfty_ue_irq, ret);
3601 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3602 			goto irq_error;
3603 		}
3604 	}
3605 
3606 	/* Request Rx MSI irq */
3607 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3608 		if (i >= MTL_MAX_RX_QUEUES)
3609 			break;
3610 		if (priv->rx_irq[i] == 0)
3611 			continue;
3612 
3613 		int_name = priv->int_name_rx_irq[i];
3614 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3615 		ret = request_irq(priv->rx_irq[i],
3616 				  stmmac_msi_intr_rx,
3617 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3618 		if (unlikely(ret < 0)) {
3619 			netdev_err(priv->dev,
3620 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3621 				   __func__, i, priv->rx_irq[i], ret);
3622 			irq_err = REQ_IRQ_ERR_RX;
3623 			irq_idx = i;
3624 			goto irq_error;
3625 		}
3626 		cpumask_clear(&cpu_mask);
3627 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3628 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3629 	}
3630 
3631 	/* Request Tx MSI irq */
3632 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3633 		if (i >= MTL_MAX_TX_QUEUES)
3634 			break;
3635 		if (priv->tx_irq[i] == 0)
3636 			continue;
3637 
3638 		int_name = priv->int_name_tx_irq[i];
3639 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3640 		ret = request_irq(priv->tx_irq[i],
3641 				  stmmac_msi_intr_tx,
3642 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3643 		if (unlikely(ret < 0)) {
3644 			netdev_err(priv->dev,
3645 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3646 				   __func__, i, priv->tx_irq[i], ret);
3647 			irq_err = REQ_IRQ_ERR_TX;
3648 			irq_idx = i;
3649 			goto irq_error;
3650 		}
3651 		cpumask_clear(&cpu_mask);
3652 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3653 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3654 	}
3655 
3656 	return 0;
3657 
3658 irq_error:
3659 	stmmac_free_irq(dev, irq_err, irq_idx);
3660 	return ret;
3661 }
3662 
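/* Single/shared IRQ mode: the MAC interrupt is requested as a shared line,
 * and the WoL/LPI lines are only requested when they differ from dev->irq.
 */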
3663 static int stmmac_request_irq_single(struct net_device *dev)
3664 {
3665 	struct stmmac_priv *priv = netdev_priv(dev);
3666 	enum request_irq_err irq_err;
3667 	int ret;
3668 
3669 	ret = request_irq(dev->irq, stmmac_interrupt,
3670 			  IRQF_SHARED, dev->name, dev);
3671 	if (unlikely(ret < 0)) {
3672 		netdev_err(priv->dev,
3673 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3674 			   __func__, dev->irq, ret);
3675 		irq_err = REQ_IRQ_ERR_MAC;
3676 		goto irq_error;
3677 	}
3678 
	/* Request the Wake IRQ in case a separate line
	 * is used for WoL
	 */
3682 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3683 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3684 				  IRQF_SHARED, dev->name, dev);
3685 		if (unlikely(ret < 0)) {
3686 			netdev_err(priv->dev,
3687 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3688 				   __func__, priv->wol_irq, ret);
3689 			irq_err = REQ_IRQ_ERR_WOL;
3690 			goto irq_error;
3691 		}
3692 	}
3693 
	/* Request the LPI IRQ in case a separate line is used for LPI */
3695 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3696 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3697 				  IRQF_SHARED, dev->name, dev);
3698 		if (unlikely(ret < 0)) {
3699 			netdev_err(priv->dev,
3700 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3701 				   __func__, priv->lpi_irq, ret);
3702 			irq_err = REQ_IRQ_ERR_LPI;
3703 			goto irq_error;
3704 		}
3705 	}
3706 
3707 	return 0;
3708 
3709 irq_error:
3710 	stmmac_free_irq(dev, irq_err, 0);
3711 	return ret;
3712 }
3713 
3714 static int stmmac_request_irq(struct net_device *dev)
3715 {
3716 	struct stmmac_priv *priv = netdev_priv(dev);
3717 	int ret;
3718 
3719 	/* Request the IRQ lines */
3720 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3721 		ret = stmmac_request_irq_multi_msi(dev);
3722 	else
3723 		ret = stmmac_request_irq_single(dev);
3724 
3725 	return ret;
3726 }
3727 
3728 /**
3729  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3730  *  @priv: driver private structure
3731  *  @mtu: MTU to setup the dma queue and buf with
3732  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3733  *  Allocate the Tx/Rx DMA queue and init them.
3734  *  Return value:
3735  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3736  */
3737 static struct stmmac_dma_conf *
3738 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3739 {
3740 	struct stmmac_dma_conf *dma_conf;
3741 	int chan, bfsize, ret;
3742 
3743 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3744 	if (!dma_conf) {
3745 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3746 			   __func__);
3747 		return ERR_PTR(-ENOMEM);
3748 	}
3749 
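	/* Select the RX DMA buffer size: prefer the 16KiB setting when
	 * stmmac_set_16kib_bfsize() allows it, otherwise derive it from the MTU.
	 */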
3750 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3751 	if (bfsize < 0)
3752 		bfsize = 0;
3753 
3754 	if (bfsize < BUF_SIZE_16KiB)
3755 		bfsize = stmmac_set_bfsize(mtu, 0);
3756 
3757 	dma_conf->dma_buf_sz = bfsize;
	/* Choose the Tx/Rx ring sizes from the ones already defined in
	 * the priv struct, if any.
	 */
3761 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3762 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3763 
3764 	if (!dma_conf->dma_tx_size)
3765 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3766 	if (!dma_conf->dma_rx_size)
3767 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3768 
3769 	/* Earlier check for TBS */
3770 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3771 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3772 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3773 
3774 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3775 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3776 	}
3777 
3778 	ret = alloc_dma_desc_resources(priv, dma_conf);
3779 	if (ret < 0) {
3780 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3781 			   __func__);
3782 		goto alloc_error;
3783 	}
3784 
3785 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3786 	if (ret < 0) {
3787 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3788 			   __func__);
3789 		goto init_error;
3790 	}
3791 
3792 	return dma_conf;
3793 
3794 init_error:
3795 	free_dma_desc_resources(priv, dma_conf);
3796 alloc_error:
3797 	kfree(dma_conf);
3798 	return ERR_PTR(ret);
3799 }
3800 
3801 /**
3802  *  __stmmac_open - open entry point of the driver
3803  *  @dev : pointer to the device structure.
3804  *  @dma_conf :  structure to take the dma data
3805  *  Description:
3806  *  This function is the open entry point of the driver.
3807  *  Return value:
3808  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3809  *  file on failure.
3810  */
3811 static int __stmmac_open(struct net_device *dev,
3812 			 struct stmmac_dma_conf *dma_conf)
3813 {
3814 	struct stmmac_priv *priv = netdev_priv(dev);
3815 	int mode = priv->plat->phy_interface;
3816 	u32 chan;
3817 	int ret;
3818 
3819 	ret = pm_runtime_resume_and_get(priv->device);
3820 	if (ret < 0)
3821 		return ret;
3822 
3823 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3824 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3825 	    (!priv->hw->xpcs ||
3826 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3827 	    !priv->hw->lynx_pcs) {
3828 		ret = stmmac_init_phy(dev);
3829 		if (ret) {
3830 			netdev_err(priv->dev,
3831 				   "%s: Cannot attach to PHY (error: %d)\n",
3832 				   __func__, ret);
3833 			goto init_phy_error;
3834 		}
3835 	}
3836 
3837 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3838 
3839 	buf_sz = dma_conf->dma_buf_sz;
3840 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3841 
3842 	stmmac_reset_queues_param(priv);
3843 
3844 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3845 	    priv->plat->serdes_powerup) {
3846 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3847 		if (ret < 0) {
3848 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3849 				   __func__);
3850 			goto init_error;
3851 		}
3852 	}
3853 
3854 	ret = stmmac_hw_setup(dev, true);
3855 	if (ret < 0) {
3856 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3857 		goto init_error;
3858 	}
3859 
3860 	stmmac_init_coalesce(priv);
3861 
3862 	phylink_start(priv->phylink);
3863 	/* We may have called phylink_speed_down before */
3864 	phylink_speed_up(priv->phylink);
3865 
3866 	ret = stmmac_request_irq(dev);
3867 	if (ret)
3868 		goto irq_error;
3869 
3870 	stmmac_enable_all_queues(priv);
3871 	netif_tx_start_all_queues(priv->dev);
3872 	stmmac_enable_all_dma_irq(priv);
3873 
3874 	return 0;
3875 
3876 irq_error:
3877 	phylink_stop(priv->phylink);
3878 
3879 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3880 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3881 
3882 	stmmac_hw_teardown(dev);
3883 init_error:
3884 	phylink_disconnect_phy(priv->phylink);
3885 init_phy_error:
3886 	pm_runtime_put(priv->device);
3887 	return ret;
3888 }
3889 
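/* Open entry point: build a DMA configuration sized for the current MTU,
 * hand it to __stmmac_open() (which copies it into priv->dma_conf) and then
 * free the temporary copy.
 */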
3890 static int stmmac_open(struct net_device *dev)
3891 {
3892 	struct stmmac_priv *priv = netdev_priv(dev);
3893 	struct stmmac_dma_conf *dma_conf;
3894 	int ret;
3895 
3896 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3897 	if (IS_ERR(dma_conf))
3898 		return PTR_ERR(dma_conf);
3899 
3900 	ret = __stmmac_open(dev, dma_conf);
3901 	if (ret)
3902 		free_dma_desc_resources(priv, dma_conf);
3903 
3904 	kfree(dma_conf);
3905 	return ret;
3906 }
3907 
3908 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3909 {
3910 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3911 
3912 	if (priv->fpe_wq)
3913 		destroy_workqueue(priv->fpe_wq);
3914 
3915 	netdev_info(priv->dev, "FPE workqueue stop");
3916 }
3917 
3918 /**
3919  *  stmmac_release - close entry point of the driver
3920  *  @dev : device pointer.
3921  *  Description:
3922  *  This is the stop entry point of the driver.
3923  */
3924 static int stmmac_release(struct net_device *dev)
3925 {
3926 	struct stmmac_priv *priv = netdev_priv(dev);
3927 	u32 chan;
3928 
3929 	if (device_may_wakeup(priv->device))
3930 		phylink_speed_down(priv->phylink, false);
3931 	/* Stop and disconnect the PHY */
3932 	phylink_stop(priv->phylink);
3933 	phylink_disconnect_phy(priv->phylink);
3934 
3935 	stmmac_disable_all_queues(priv);
3936 
3937 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3938 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3939 
3940 	netif_tx_disable(dev);
3941 
3942 	/* Free the IRQ lines */
3943 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3944 
3945 	if (priv->eee_enabled) {
3946 		priv->tx_path_in_lpi_mode = false;
3947 		del_timer_sync(&priv->eee_ctrl_timer);
3948 	}
3949 
3950 	/* Stop TX/RX DMA and clear the descriptors */
3951 	stmmac_stop_all_dma(priv);
3952 
3953 	/* Release and free the Rx/Tx resources */
3954 	free_dma_desc_resources(priv, &priv->dma_conf);
3955 
3956 	/* Disable the MAC Rx/Tx */
3957 	stmmac_mac_set(priv, priv->ioaddr, false);
3958 
3959 	/* Powerdown Serdes if there is */
3960 	if (priv->plat->serdes_powerdown)
3961 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3962 
3963 	netif_carrier_off(dev);
3964 
3965 	stmmac_release_ptp(priv);
3966 
3967 	pm_runtime_put(priv->device);
3968 
3969 	if (priv->dma_cap.fpesel)
3970 		stmmac_fpe_stop_wq(priv);
3971 
3972 	return 0;
3973 }
3974 
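/* If the HW supports VLAN insertion (dma_cap.vlins) and the skb carries a
 * VLAN tag, program the tag into a TX descriptor. Returns true when a
 * descriptor was consumed and cur_tx advanced.
 */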
3975 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3976 			       struct stmmac_tx_queue *tx_q)
3977 {
3978 	u16 tag = 0x0, inner_tag = 0x0;
3979 	u32 inner_type = 0x0;
3980 	struct dma_desc *p;
3981 
3982 	if (!priv->dma_cap.vlins)
3983 		return false;
3984 	if (!skb_vlan_tag_present(skb))
3985 		return false;
3986 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3987 		inner_tag = skb_vlan_tag_get(skb);
3988 		inner_type = STMMAC_VLAN_INSERT;
3989 	}
3990 
3991 	tag = skb_vlan_tag_get(skb);
3992 
3993 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3994 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3995 	else
3996 		p = &tx_q->dma_tx[tx_q->cur_tx];
3997 
3998 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3999 		return false;
4000 
4001 	stmmac_set_tx_owner(priv, p);
4002 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4003 	return true;
4004 }
4005 
4006 /**
4007  *  stmmac_tso_allocator - close entry point of the driver
4008  *  @priv: driver private structure
4009  *  @des: buffer start address
4010  *  @total_len: total length to fill in descriptors
4011  *  @last_segment: condition for the last descriptor
4012  *  @queue: TX queue index
4013  *  Description:
4014  *  This function fills descriptor and request new descriptors according to
4015  *  buffer length to fill
4016  */
4017 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4018 				 int total_len, bool last_segment, u32 queue)
4019 {
4020 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4021 	struct dma_desc *desc;
4022 	u32 buff_size;
4023 	int tmp_len;
4024 
4025 	tmp_len = total_len;
4026 
4027 	while (tmp_len > 0) {
4028 		dma_addr_t curr_addr;
4029 
4030 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4031 						priv->dma_conf.dma_tx_size);
4032 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4033 
4034 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4035 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4036 		else
4037 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4038 
4039 		curr_addr = des + (total_len - tmp_len);
4040 		if (priv->dma_cap.addr64 <= 32)
4041 			desc->des0 = cpu_to_le32(curr_addr);
4042 		else
4043 			stmmac_set_desc_addr(priv, desc, curr_addr);
4044 
4045 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4046 			    TSO_MAX_BUFF_SIZE : tmp_len;
4047 
4048 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4049 				0, 1,
4050 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4051 				0, 0);
4052 
4053 		tmp_len -= TSO_MAX_BUFF_SIZE;
4054 	}
4055 }
4056 
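/* Make the prepared TX descriptors visible to the hardware and move the
 * queue's tail pointer so the DMA engine starts fetching them.
 */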
4057 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4058 {
4059 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4060 	int desc_size;
4061 
4062 	if (likely(priv->extend_desc))
4063 		desc_size = sizeof(struct dma_extended_desc);
4064 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4065 		desc_size = sizeof(struct dma_edesc);
4066 	else
4067 		desc_size = sizeof(struct dma_desc);
4068 
	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and then a barrier is needed to make sure that
	 * everything is coherent before granting the DMA engine.
	 */
4073 	wmb();
4074 
4075 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4076 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4077 }
4078 
4079 /**
4080  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4081  *  @skb : the socket buffer
4082  *  @dev : device pointer
4083  *  Description: this is the transmit function that is called on TSO frames
4084  *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming in case of TSO frames:
4086  *
4087  *  First Descriptor
4088  *   --------
4089  *   | DES0 |---> buffer1 = L2/L3/L4 header
4090  *   | DES1 |---> TCP Payload (can continue on next descr...)
4091  *   | DES2 |---> buffer 1 and 2 len
4092  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4093  *   --------
4094  *	|
4095  *     ...
4096  *	|
4097  *   --------
4098  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4099  *   | DES1 | --|
4100  *   | DES2 | --> buffer 1 and 2 len
4101  *   | DES3 |
4102  *   --------
4103  *
 * The MSS is fixed while TSO is enabled, so the TDES3 context field only
 * needs to be reprogrammed when the MSS value changes.
4105  */
4106 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4107 {
4108 	struct dma_desc *desc, *first, *mss_desc = NULL;
4109 	struct stmmac_priv *priv = netdev_priv(dev);
4110 	int nfrags = skb_shinfo(skb)->nr_frags;
4111 	u32 queue = skb_get_queue_mapping(skb);
4112 	unsigned int first_entry, tx_packets;
4113 	int tmp_pay_len = 0, first_tx;
4114 	struct stmmac_tx_queue *tx_q;
4115 	bool has_vlan, set_ic;
4116 	u8 proto_hdr_len, hdr;
4117 	unsigned long flags;
4118 	u32 pay_len, mss;
4119 	dma_addr_t des;
4120 	int i;
4121 
4122 	tx_q = &priv->dma_conf.tx_queue[queue];
4123 	first_tx = tx_q->cur_tx;
4124 
4125 	/* Compute header lengths */
4126 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4127 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4128 		hdr = sizeof(struct udphdr);
4129 	} else {
4130 		proto_hdr_len = skb_tcp_all_headers(skb);
4131 		hdr = tcp_hdrlen(skb);
4132 	}
4133 
	/* Descriptor availability based on the threshold should be safe enough */
4135 	if (unlikely(stmmac_tx_avail(priv, queue) <
4136 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4137 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4138 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4139 								queue));
4140 			/* This is a hard error, log it. */
4141 			netdev_err(priv->dev,
4142 				   "%s: Tx Ring full when queue awake\n",
4143 				   __func__);
4144 		}
4145 		return NETDEV_TX_BUSY;
4146 	}
4147 
4148 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4149 
4150 	mss = skb_shinfo(skb)->gso_size;
4151 
4152 	/* set new MSS value if needed */
4153 	if (mss != tx_q->mss) {
4154 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4155 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4156 		else
4157 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4158 
4159 		stmmac_set_mss(priv, mss_desc, mss);
4160 		tx_q->mss = mss;
4161 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4162 						priv->dma_conf.dma_tx_size);
4163 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4164 	}
4165 
4166 	if (netif_msg_tx_queued(priv)) {
4167 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4168 			__func__, hdr, proto_hdr_len, pay_len, mss);
4169 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4170 			skb->data_len);
4171 	}
4172 
4173 	/* Check if VLAN can be inserted by HW */
4174 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4175 
4176 	first_entry = tx_q->cur_tx;
4177 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4178 
4179 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4180 		desc = &tx_q->dma_entx[first_entry].basic;
4181 	else
4182 		desc = &tx_q->dma_tx[first_entry];
4183 	first = desc;
4184 
4185 	if (has_vlan)
4186 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4187 
4188 	/* first descriptor: fill Headers on Buf1 */
4189 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4190 			     DMA_TO_DEVICE);
4191 	if (dma_mapping_error(priv->device, des))
4192 		goto dma_map_err;
4193 
4194 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4195 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4196 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4197 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4198 
4199 	if (priv->dma_cap.addr64 <= 32) {
4200 		first->des0 = cpu_to_le32(des);
4201 
4202 		/* Fill start of payload in buff2 of first descriptor */
4203 		if (pay_len)
4204 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4205 
4206 		/* If needed take extra descriptors to fill the remaining payload */
4207 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4208 	} else {
4209 		stmmac_set_desc_addr(priv, first, des);
4210 		tmp_pay_len = pay_len;
4211 		des += proto_hdr_len;
4212 		pay_len = 0;
4213 	}
4214 
4215 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4216 
4217 	/* Prepare fragments */
4218 	for (i = 0; i < nfrags; i++) {
4219 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4220 
4221 		des = skb_frag_dma_map(priv->device, frag, 0,
4222 				       skb_frag_size(frag),
4223 				       DMA_TO_DEVICE);
4224 		if (dma_mapping_error(priv->device, des))
4225 			goto dma_map_err;
4226 
4227 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4228 				     (i == nfrags - 1), queue);
4229 
4230 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4231 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4232 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4233 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4234 	}
4235 
4236 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4237 
4238 	/* Only the last descriptor gets to point to the skb. */
4239 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4240 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4241 
4242 	/* Manage tx mitigation */
4243 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4244 	tx_q->tx_count_frames += tx_packets;
4245 
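	/* Decide whether this frame requests a TX completion interrupt (IC
	 * bit): always when HW timestamping is on, otherwise only once every
	 * tx_coal_frames packets so completions stay coalesced.
	 */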
4246 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4247 		set_ic = true;
4248 	else if (!priv->tx_coal_frames[queue])
4249 		set_ic = false;
4250 	else if (tx_packets > priv->tx_coal_frames[queue])
4251 		set_ic = true;
4252 	else if ((tx_q->tx_count_frames %
4253 		  priv->tx_coal_frames[queue]) < tx_packets)
4254 		set_ic = true;
4255 	else
4256 		set_ic = false;
4257 
4258 	if (set_ic) {
4259 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4260 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4261 		else
4262 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4263 
4264 		tx_q->tx_count_frames = 0;
4265 		stmmac_set_tx_ic(priv, desc);
4266 	}
4267 
4268 	/* We've used all descriptors we need for this skb, however,
4269 	 * advance cur_tx so that it references a fresh descriptor.
4270 	 * ndo_start_xmit will fill this descriptor the next time it's
4271 	 * called and stmmac_tx_clean may clean up to this descriptor.
4272 	 */
4273 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4274 
4275 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4276 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4277 			  __func__);
4278 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4279 	}
4280 
4281 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4282 	tx_q->txq_stats.tx_bytes += skb->len;
4283 	tx_q->txq_stats.tx_tso_frames++;
4284 	tx_q->txq_stats.tx_tso_nfrags += nfrags;
4285 	if (set_ic)
4286 		tx_q->txq_stats.tx_set_ic_bit++;
4287 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4288 
4289 	if (priv->sarc_type)
4290 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4291 
4292 	skb_tx_timestamp(skb);
4293 
4294 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4295 		     priv->hwts_tx_en)) {
4296 		/* declare that device is doing timestamping */
4297 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4298 		stmmac_enable_tx_timestamp(priv, first);
4299 	}
4300 
4301 	/* Complete the first descriptor before granting the DMA */
4302 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4303 			proto_hdr_len,
4304 			pay_len,
4305 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4306 			hdr / 4, (skb->len - proto_hdr_len));
4307 
4308 	/* If context desc is used to change MSS */
4309 	if (mss_desc) {
4310 		/* Make sure that first descriptor has been completely
4311 		 * written, including its own bit. This is because MSS is
4312 		 * actually before first descriptor, so we need to make
4313 		 * sure that MSS's own bit is the last thing written.
4314 		 */
4315 		dma_wmb();
4316 		stmmac_set_tx_owner(priv, mss_desc);
4317 	}
4318 
4319 	if (netif_msg_pktdata(priv)) {
4320 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4321 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4322 			tx_q->cur_tx, first, nfrags);
4323 		pr_info(">>> frame to be transmitted: ");
4324 		print_pkt(skb->data, skb_headlen(skb));
4325 	}
4326 
4327 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4328 
4329 	stmmac_flush_tx_descriptors(priv, queue);
4330 	stmmac_tx_timer_arm(priv, queue);
4331 
4332 	return NETDEV_TX_OK;
4333 
4334 dma_map_err:
4335 	dev_err(priv->device, "Tx dma map failed\n");
4336 	dev_kfree_skb(skb);
4337 	priv->xstats.tx_dropped++;
4338 	return NETDEV_TX_OK;
4339 }
4340 
4341 /**
4342  *  stmmac_xmit - Tx entry point of the driver
4343  *  @skb : the socket buffer
4344  *  @dev : device pointer
4345  *  Description : this is the tx entry point of the driver.
4346  *  It programs the chain or the ring and supports oversized frames
4347  *  and SG feature.
4348  */
4349 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4350 {
4351 	unsigned int first_entry, tx_packets, enh_desc;
4352 	struct stmmac_priv *priv = netdev_priv(dev);
4353 	unsigned int nopaged_len = skb_headlen(skb);
4354 	int i, csum_insertion = 0, is_jumbo = 0;
4355 	u32 queue = skb_get_queue_mapping(skb);
4356 	int nfrags = skb_shinfo(skb)->nr_frags;
4357 	int gso = skb_shinfo(skb)->gso_type;
4358 	struct dma_edesc *tbs_desc = NULL;
4359 	struct dma_desc *desc, *first;
4360 	struct stmmac_tx_queue *tx_q;
4361 	bool has_vlan, set_ic;
4362 	int entry, first_tx;
4363 	unsigned long flags;
4364 	dma_addr_t des;
4365 
4366 	tx_q = &priv->dma_conf.tx_queue[queue];
4367 	first_tx = tx_q->cur_tx;
4368 
4369 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4370 		stmmac_disable_eee_mode(priv);
4371 
4372 	/* Manage oversized TCP frames for GMAC4 device */
4373 	if (skb_is_gso(skb) && priv->tso) {
4374 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4375 			return stmmac_tso_xmit(skb, dev);
4376 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4377 			return stmmac_tso_xmit(skb, dev);
4378 	}
4379 
4380 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4381 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4382 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4383 								queue));
4384 			/* This is a hard error, log it. */
4385 			netdev_err(priv->dev,
4386 				   "%s: Tx Ring full when queue awake\n",
4387 				   __func__);
4388 		}
4389 		return NETDEV_TX_BUSY;
4390 	}
4391 
4392 	/* Check if VLAN can be inserted by HW */
4393 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4394 
4395 	entry = tx_q->cur_tx;
4396 	first_entry = entry;
4397 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4398 
4399 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4400 
4401 	if (likely(priv->extend_desc))
4402 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4403 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4404 		desc = &tx_q->dma_entx[entry].basic;
4405 	else
4406 		desc = tx_q->dma_tx + entry;
4407 
4408 	first = desc;
4409 
4410 	if (has_vlan)
4411 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4412 
4413 	enh_desc = priv->plat->enh_desc;
4414 	/* To program the descriptors according to the size of the frame */
4415 	if (enh_desc)
4416 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4417 
4418 	if (unlikely(is_jumbo)) {
4419 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4420 		if (unlikely(entry < 0) && (entry != -EINVAL))
4421 			goto dma_map_err;
4422 	}
4423 
4424 	for (i = 0; i < nfrags; i++) {
4425 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4426 		int len = skb_frag_size(frag);
4427 		bool last_segment = (i == (nfrags - 1));
4428 
4429 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4430 		WARN_ON(tx_q->tx_skbuff[entry]);
4431 
4432 		if (likely(priv->extend_desc))
4433 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4434 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4435 			desc = &tx_q->dma_entx[entry].basic;
4436 		else
4437 			desc = tx_q->dma_tx + entry;
4438 
4439 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4440 				       DMA_TO_DEVICE);
4441 		if (dma_mapping_error(priv->device, des))
4442 			goto dma_map_err; /* should reuse desc w/o issues */
4443 
4444 		tx_q->tx_skbuff_dma[entry].buf = des;
4445 
4446 		stmmac_set_desc_addr(priv, desc, des);
4447 
4448 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4449 		tx_q->tx_skbuff_dma[entry].len = len;
4450 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4451 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4452 
4453 		/* Prepare the descriptor and set the own bit too */
4454 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4455 				priv->mode, 1, last_segment, skb->len);
4456 	}
4457 
4458 	/* Only the last descriptor gets to point to the skb. */
4459 	tx_q->tx_skbuff[entry] = skb;
4460 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4461 
	/* According to the coalesce parameter, the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
	 * element in case of no SG.
	 */
4467 	tx_packets = (entry + 1) - first_tx;
4468 	tx_q->tx_count_frames += tx_packets;
4469 
4470 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4471 		set_ic = true;
4472 	else if (!priv->tx_coal_frames[queue])
4473 		set_ic = false;
4474 	else if (tx_packets > priv->tx_coal_frames[queue])
4475 		set_ic = true;
4476 	else if ((tx_q->tx_count_frames %
4477 		  priv->tx_coal_frames[queue]) < tx_packets)
4478 		set_ic = true;
4479 	else
4480 		set_ic = false;
4481 
4482 	if (set_ic) {
4483 		if (likely(priv->extend_desc))
4484 			desc = &tx_q->dma_etx[entry].basic;
4485 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4486 			desc = &tx_q->dma_entx[entry].basic;
4487 		else
4488 			desc = &tx_q->dma_tx[entry];
4489 
4490 		tx_q->tx_count_frames = 0;
4491 		stmmac_set_tx_ic(priv, desc);
4492 	}
4493 
4494 	/* We've used all descriptors we need for this skb, however,
4495 	 * advance cur_tx so that it references a fresh descriptor.
4496 	 * ndo_start_xmit will fill this descriptor the next time it's
4497 	 * called and stmmac_tx_clean may clean up to this descriptor.
4498 	 */
4499 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4500 	tx_q->cur_tx = entry;
4501 
4502 	if (netif_msg_pktdata(priv)) {
4503 		netdev_dbg(priv->dev,
4504 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4505 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4506 			   entry, first, nfrags);
4507 
4508 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4509 		print_pkt(skb->data, skb->len);
4510 	}
4511 
4512 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4513 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4514 			  __func__);
4515 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4516 	}
4517 
4518 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4519 	tx_q->txq_stats.tx_bytes += skb->len;
4520 	if (set_ic)
4521 		tx_q->txq_stats.tx_set_ic_bit++;
4522 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4523 
4524 	if (priv->sarc_type)
4525 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4526 
4527 	skb_tx_timestamp(skb);
4528 
4529 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4530 	 * problems because all the descriptors are actually ready to be
4531 	 * passed to the DMA engine.
4532 	 */
4533 	if (likely(!is_jumbo)) {
4534 		bool last_segment = (nfrags == 0);
4535 
4536 		des = dma_map_single(priv->device, skb->data,
4537 				     nopaged_len, DMA_TO_DEVICE);
4538 		if (dma_mapping_error(priv->device, des))
4539 			goto dma_map_err;
4540 
4541 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4542 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4543 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4544 
4545 		stmmac_set_desc_addr(priv, first, des);
4546 
4547 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4548 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4549 
4550 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4551 			     priv->hwts_tx_en)) {
4552 			/* declare that device is doing timestamping */
4553 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4554 			stmmac_enable_tx_timestamp(priv, first);
4555 		}
4556 
4557 		/* Prepare the first descriptor setting the OWN bit too */
4558 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4559 				csum_insertion, priv->mode, 0, last_segment,
4560 				skb->len);
4561 	}
4562 
4563 	if (tx_q->tbs & STMMAC_TBS_EN) {
4564 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4565 
4566 		tbs_desc = &tx_q->dma_entx[first_entry];
4567 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4568 	}
4569 
4570 	stmmac_set_tx_owner(priv, first);
4571 
4572 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4573 
4574 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4575 
4576 	stmmac_flush_tx_descriptors(priv, queue);
4577 	stmmac_tx_timer_arm(priv, queue);
4578 
4579 	return NETDEV_TX_OK;
4580 
4581 dma_map_err:
4582 	netdev_err(priv->dev, "Tx DMA map failed\n");
4583 	dev_kfree_skb(skb);
4584 	priv->xstats.tx_dropped++;
4585 	return NETDEV_TX_OK;
4586 }
4587 
4588 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4589 {
4590 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4591 	__be16 vlan_proto = veth->h_vlan_proto;
4592 	u16 vlanid;
4593 
4594 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4595 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4596 	    (vlan_proto == htons(ETH_P_8021AD) &&
4597 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4598 		/* pop the vlan tag */
4599 		vlanid = ntohs(veth->h_vlan_TCI);
4600 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4601 		skb_pull(skb, VLAN_HLEN);
4602 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4603 	}
4604 }
4605 
4606 /**
4607  * stmmac_rx_refill - refill used skb preallocated buffers
4608  * @priv: driver private structure
4609  * @queue: RX queue index
4610  * Description : this is to reallocate the skb for the reception process
4611  * that is based on zero-copy.
4612  */
4613 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4614 {
4615 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4616 	int dirty = stmmac_rx_dirty(priv, queue);
4617 	unsigned int entry = rx_q->dirty_rx;
4618 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4619 
4620 	if (priv->dma_cap.host_dma_width <= 32)
4621 		gfp |= GFP_DMA32;
4622 
4623 	while (dirty-- > 0) {
4624 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4625 		struct dma_desc *p;
4626 		bool use_rx_wd;
4627 
4628 		if (priv->extend_desc)
4629 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4630 		else
4631 			p = rx_q->dma_rx + entry;
4632 
4633 		if (!buf->page) {
4634 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4635 			if (!buf->page)
4636 				break;
4637 		}
4638 
4639 		if (priv->sph && !buf->sec_page) {
4640 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4641 			if (!buf->sec_page)
4642 				break;
4643 
4644 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4645 		}
4646 
4647 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4648 
4649 		stmmac_set_desc_addr(priv, p, buf->addr);
4650 		if (priv->sph)
4651 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4652 		else
4653 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4654 		stmmac_refill_desc3(priv, rx_q, p);
4655 
4656 		rx_q->rx_count_frames++;
4657 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4658 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4659 			rx_q->rx_count_frames = 0;
4660 
4661 		use_rx_wd = !priv->rx_coal_frames[queue];
4662 		use_rx_wd |= rx_q->rx_count_frames > 0;
4663 		if (!priv->use_riwt)
4664 			use_rx_wd = false;
4665 
4666 		dma_wmb();
4667 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4668 
4669 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4670 	}
4671 	rx_q->dirty_rx = entry;
4672 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4673 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4674 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4675 }
4676 
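/* Compute how many bytes of the incoming frame landed in buffer 1 of this
 * descriptor, accounting for split header (SPH) and frames spanning several
 * descriptors.
 */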
4677 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4678 				       struct dma_desc *p,
4679 				       int status, unsigned int len)
4680 {
4681 	unsigned int plen = 0, hlen = 0;
4682 	int coe = priv->hw->rx_csum;
4683 
	/* Not the first descriptor: with split header, buffer 1 is always empty */
4685 	if (priv->sph && len)
4686 		return 0;
4687 
4688 	/* First descriptor, get split header length */
4689 	stmmac_get_rx_header_len(priv, p, &hlen);
4690 	if (priv->sph && hlen) {
4691 		priv->xstats.rx_split_hdr_pkt_n++;
4692 		return hlen;
4693 	}
4694 
4695 	/* First descriptor, not last descriptor and not split header */
4696 	if (status & rx_not_ls)
4697 		return priv->dma_conf.dma_buf_sz;
4698 
4699 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4700 
4701 	/* First descriptor and last descriptor and not split header */
4702 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4703 }
4704 
4705 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4706 				       struct dma_desc *p,
4707 				       int status, unsigned int len)
4708 {
4709 	int coe = priv->hw->rx_csum;
4710 	unsigned int plen = 0;
4711 
	/* Without split header, buffer 2 is not used */
4713 	if (!priv->sph)
4714 		return 0;
4715 
4716 	/* Not last descriptor */
4717 	if (status & rx_not_ls)
4718 		return priv->dma_conf.dma_buf_sz;
4719 
4720 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4721 
4722 	/* Last descriptor */
4723 	return plen - len;
4724 }
4725 
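/* Queue one XDP frame on a TX queue. @dma_map selects between ndo_xdp_xmit
 * frames, which need a fresh DMA mapping, and XDP_TX frames recycled from
 * the RX page pool, which are already mapped and only need a sync.
 */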
4726 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4727 				struct xdp_frame *xdpf, bool dma_map)
4728 {
4729 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4730 	unsigned int entry = tx_q->cur_tx;
4731 	struct dma_desc *tx_desc;
4732 	dma_addr_t dma_addr;
4733 	bool set_ic;
4734 
4735 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4736 		return STMMAC_XDP_CONSUMED;
4737 
4738 	if (likely(priv->extend_desc))
4739 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4740 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4741 		tx_desc = &tx_q->dma_entx[entry].basic;
4742 	else
4743 		tx_desc = tx_q->dma_tx + entry;
4744 
4745 	if (dma_map) {
4746 		dma_addr = dma_map_single(priv->device, xdpf->data,
4747 					  xdpf->len, DMA_TO_DEVICE);
4748 		if (dma_mapping_error(priv->device, dma_addr))
4749 			return STMMAC_XDP_CONSUMED;
4750 
4751 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4752 	} else {
4753 		struct page *page = virt_to_page(xdpf->data);
4754 
4755 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4756 			   xdpf->headroom;
4757 		dma_sync_single_for_device(priv->device, dma_addr,
4758 					   xdpf->len, DMA_BIDIRECTIONAL);
4759 
4760 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4761 	}
4762 
4763 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4764 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4765 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4766 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4767 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4768 
4769 	tx_q->xdpf[entry] = xdpf;
4770 
4771 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4772 
4773 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4774 			       true, priv->mode, true, true,
4775 			       xdpf->len);
4776 
4777 	tx_q->tx_count_frames++;
4778 
4779 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4780 		set_ic = true;
4781 	else
4782 		set_ic = false;
4783 
4784 	if (set_ic) {
4785 		unsigned long flags;
4786 		tx_q->tx_count_frames = 0;
4787 		stmmac_set_tx_ic(priv, tx_desc);
4788 		flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
4789 		tx_q->txq_stats.tx_set_ic_bit++;
4790 		u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
4791 	}
4792 
4793 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4794 
4795 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4796 	tx_q->cur_tx = entry;
4797 
4798 	return STMMAC_XDP_TX;
4799 }
4800 
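/* Map the current CPU to a TX queue index, wrapping around when there are
 * fewer TX queues than CPUs.
 */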
4801 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4802 				   int cpu)
4803 {
4804 	int index = cpu;
4805 
4806 	if (unlikely(index < 0))
4807 		index = 0;
4808 
4809 	while (index >= priv->plat->tx_queues_to_use)
4810 		index -= priv->plat->tx_queues_to_use;
4811 
4812 	return index;
4813 }
4814 
4815 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4816 				struct xdp_buff *xdp)
4817 {
4818 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4819 	int cpu = smp_processor_id();
4820 	struct netdev_queue *nq;
4821 	int queue;
4822 	int res;
4823 
4824 	if (unlikely(!xdpf))
4825 		return STMMAC_XDP_CONSUMED;
4826 
4827 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4828 	nq = netdev_get_tx_queue(priv->dev, queue);
4829 
4830 	__netif_tx_lock(nq, cpu);
4831 	/* Avoids TX time-out as we are sharing with slow path */
4832 	txq_trans_cond_update(nq);
4833 
4834 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4835 	if (res == STMMAC_XDP_TX)
4836 		stmmac_flush_tx_descriptors(priv, queue);
4837 
4838 	__netif_tx_unlock(nq);
4839 
4840 	return res;
4841 }
4842 
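/* Run the attached XDP program on a buffer and translate its verdict into
 * the driver's STMMAC_XDP_* result codes.
 */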
4843 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4844 				 struct bpf_prog *prog,
4845 				 struct xdp_buff *xdp)
4846 {
4847 	u32 act;
4848 	int res;
4849 
4850 	act = bpf_prog_run_xdp(prog, xdp);
4851 	switch (act) {
4852 	case XDP_PASS:
4853 		res = STMMAC_XDP_PASS;
4854 		break;
4855 	case XDP_TX:
4856 		res = stmmac_xdp_xmit_back(priv, xdp);
4857 		break;
4858 	case XDP_REDIRECT:
4859 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4860 			res = STMMAC_XDP_CONSUMED;
4861 		else
4862 			res = STMMAC_XDP_REDIRECT;
4863 		break;
4864 	default:
4865 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4866 		fallthrough;
4867 	case XDP_ABORTED:
4868 		trace_xdp_exception(priv->dev, prog, act);
4869 		fallthrough;
4870 	case XDP_DROP:
4871 		res = STMMAC_XDP_CONSUMED;
4872 		break;
4873 	}
4874 
4875 	return res;
4876 }
4877 
4878 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4879 					   struct xdp_buff *xdp)
4880 {
4881 	struct bpf_prog *prog;
4882 	int res;
4883 
4884 	prog = READ_ONCE(priv->xdp_prog);
4885 	if (!prog) {
4886 		res = STMMAC_XDP_PASS;
4887 		goto out;
4888 	}
4889 
4890 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4891 out:
4892 	return ERR_PTR(-res);
4893 }
4894 
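/* After an RX batch: arm the TX timer if any XDP_TX frames were queued and
 * flush pending xdp_do_redirect() work.
 */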
4895 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4896 				   int xdp_status)
4897 {
4898 	int cpu = smp_processor_id();
4899 	int queue;
4900 
4901 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4902 
4903 	if (xdp_status & STMMAC_XDP_TX)
4904 		stmmac_tx_timer_arm(priv, queue);
4905 
4906 	if (xdp_status & STMMAC_XDP_REDIRECT)
4907 		xdp_do_flush();
4908 }
4909 
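/* Copy an XSK (zero-copy) buffer into a freshly allocated skb so the frame
 * can be passed up the regular network stack (XDP_PASS case).
 */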
4910 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4911 					       struct xdp_buff *xdp)
4912 {
4913 	unsigned int metasize = xdp->data - xdp->data_meta;
4914 	unsigned int datasize = xdp->data_end - xdp->data;
4915 	struct sk_buff *skb;
4916 
4917 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4918 			       xdp->data_end - xdp->data_hard_start,
4919 			       GFP_ATOMIC | __GFP_NOWARN);
4920 	if (unlikely(!skb))
4921 		return NULL;
4922 
4923 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4924 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4925 	if (metasize)
4926 		skb_metadata_set(skb, metasize);
4927 
4928 	return skb;
4929 }
4930 
4931 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4932 				   struct dma_desc *p, struct dma_desc *np,
4933 				   struct xdp_buff *xdp)
4934 {
4935 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4936 	struct stmmac_channel *ch = &priv->channel[queue];
4937 	unsigned int len = xdp->data_end - xdp->data;
4938 	enum pkt_hash_types hash_type;
4939 	int coe = priv->hw->rx_csum;
4940 	unsigned long flags;
4941 	struct sk_buff *skb;
4942 	u32 hash;
4943 
4944 	skb = stmmac_construct_skb_zc(ch, xdp);
4945 	if (!skb) {
4946 		priv->xstats.rx_dropped++;
4947 		return;
4948 	}
4949 
4950 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4951 	stmmac_rx_vlan(priv->dev, skb);
4952 	skb->protocol = eth_type_trans(skb, priv->dev);
4953 
4954 	if (unlikely(!coe))
4955 		skb_checksum_none_assert(skb);
4956 	else
4957 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4958 
4959 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4960 		skb_set_hash(skb, hash, hash_type);
4961 
4962 	skb_record_rx_queue(skb, queue);
4963 	napi_gro_receive(&ch->rxtx_napi, skb);
4964 
4965 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
4966 	rx_q->rxq_stats.rx_pkt_n++;
4967 	rx_q->rxq_stats.rx_bytes += len;
4968 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
4969 }
4970 
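/* Refill the RX ring with buffers taken from the XSK pool. Returns false if
 * the pool ran out of buffers before the budget was consumed.
 */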
4971 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4972 {
4973 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4974 	unsigned int entry = rx_q->dirty_rx;
4975 	struct dma_desc *rx_desc = NULL;
4976 	bool ret = true;
4977 
4978 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4979 
4980 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4981 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4982 		dma_addr_t dma_addr;
4983 		bool use_rx_wd;
4984 
4985 		if (!buf->xdp) {
4986 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4987 			if (!buf->xdp) {
4988 				ret = false;
4989 				break;
4990 			}
4991 		}
4992 
4993 		if (priv->extend_desc)
4994 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4995 		else
4996 			rx_desc = rx_q->dma_rx + entry;
4997 
4998 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4999 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5000 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5001 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5002 
5003 		rx_q->rx_count_frames++;
5004 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5005 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5006 			rx_q->rx_count_frames = 0;
5007 
5008 		use_rx_wd = !priv->rx_coal_frames[queue];
5009 		use_rx_wd |= rx_q->rx_count_frames > 0;
5010 		if (!priv->use_riwt)
5011 			use_rx_wd = false;
5012 
5013 		dma_wmb();
5014 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5015 
5016 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5017 	}
5018 
5019 	if (rx_desc) {
5020 		rx_q->dirty_rx = entry;
5021 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5022 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5023 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5024 	}
5025 
5026 	return ret;
5027 }
5028 
5029 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5030 {
	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
	 * represents the incoming packet, whereas the cb field in the same
	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
	 * is laid out on top of the xdp and cb fields of struct xdp_buff_xsk.
	 */
5036 	return (struct stmmac_xdp_buff *)xdp;
5037 }
5038 
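/* stmmac_rx_zc - AF_XDP zero-copy receive path: each received frame maps 1:1
 * to an XSK buffer, is run through the XDP program and then passed up as an
 * skb, transmitted, redirected or dropped according to the verdict.
 */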
5039 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5040 {
5041 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5042 	unsigned int count = 0, error = 0, len = 0;
5043 	int dirty = stmmac_rx_dirty(priv, queue);
5044 	unsigned int next_entry = rx_q->cur_rx;
5045 	u32 rx_errors = 0, rx_dropped = 0;
5046 	unsigned int desc_size;
5047 	struct bpf_prog *prog;
5048 	bool failure = false;
5049 	unsigned long flags;
5050 	int xdp_status = 0;
5051 	int status = 0;
5052 
5053 	if (netif_msg_rx_status(priv)) {
5054 		void *rx_head;
5055 
5056 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5057 		if (priv->extend_desc) {
5058 			rx_head = (void *)rx_q->dma_erx;
5059 			desc_size = sizeof(struct dma_extended_desc);
5060 		} else {
5061 			rx_head = (void *)rx_q->dma_rx;
5062 			desc_size = sizeof(struct dma_desc);
5063 		}
5064 
5065 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5066 				    rx_q->dma_rx_phy, desc_size);
5067 	}
5068 	while (count < limit) {
5069 		struct stmmac_rx_buffer *buf;
5070 		struct stmmac_xdp_buff *ctx;
5071 		unsigned int buf1_len = 0;
5072 		struct dma_desc *np, *p;
5073 		int entry;
5074 		int res;
5075 
5076 		if (!count && rx_q->state_saved) {
5077 			error = rx_q->state.error;
5078 			len = rx_q->state.len;
5079 		} else {
5080 			rx_q->state_saved = false;
5081 			error = 0;
5082 			len = 0;
5083 		}
5084 
5085 		if (count >= limit)
5086 			break;
5087 
5088 read_again:
5089 		buf1_len = 0;
5090 		entry = next_entry;
5091 		buf = &rx_q->buf_pool[entry];
5092 
5093 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5094 			failure = failure ||
5095 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5096 			dirty = 0;
5097 		}
5098 
5099 		if (priv->extend_desc)
5100 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5101 		else
5102 			p = rx_q->dma_rx + entry;
5103 
5104 		/* read the status of the incoming frame */
5105 		status = stmmac_rx_status(priv, &priv->xstats, p);
5106 		/* check if managed by the DMA otherwise go ahead */
5107 		if (unlikely(status & dma_own))
5108 			break;
5109 
5110 		/* Prefetch the next RX descriptor */
5111 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5112 						priv->dma_conf.dma_rx_size);
5113 		next_entry = rx_q->cur_rx;
5114 
5115 		if (priv->extend_desc)
5116 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5117 		else
5118 			np = rx_q->dma_rx + next_entry;
5119 
5120 		prefetch(np);
5121 
5122 		/* Ensure a valid XSK buffer before proceed */
5123 		if (!buf->xdp)
5124 			break;
5125 
5126 		if (priv->extend_desc)
5127 			stmmac_rx_extended_status(priv, &priv->xstats,
5128 						  rx_q->dma_erx + entry);
5129 		if (unlikely(status == discard_frame)) {
5130 			xsk_buff_free(buf->xdp);
5131 			buf->xdp = NULL;
5132 			dirty++;
5133 			error = 1;
5134 			if (!priv->hwts_rx_en)
5135 				rx_errors++;
5136 		}
5137 
5138 		if (unlikely(error && (status & rx_not_ls)))
5139 			goto read_again;
5140 		if (unlikely(error)) {
5141 			count++;
5142 			continue;
5143 		}
5144 
		/* The XSK pool expects RX frames mapped 1:1 to XSK buffers */
5146 		if (likely(status & rx_not_ls)) {
5147 			xsk_buff_free(buf->xdp);
5148 			buf->xdp = NULL;
5149 			dirty++;
5150 			count++;
5151 			goto read_again;
5152 		}
5153 
5154 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5155 		ctx->priv = priv;
5156 		ctx->desc = p;
5157 		ctx->ndesc = np;
5158 
		/* XDP ZC frames only support primary buffers for now */
5160 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5161 		len += buf1_len;
5162 
5163 		/* ACS is disabled; strip manually. */
5164 		if (likely(!(status & rx_not_ls))) {
5165 			buf1_len -= ETH_FCS_LEN;
5166 			len -= ETH_FCS_LEN;
5167 		}
5168 
5169 		/* RX buffer is good and fit into a XSK pool buffer */
5170 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5171 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5172 
5173 		prog = READ_ONCE(priv->xdp_prog);
5174 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5175 
5176 		switch (res) {
5177 		case STMMAC_XDP_PASS:
5178 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5179 			xsk_buff_free(buf->xdp);
5180 			break;
5181 		case STMMAC_XDP_CONSUMED:
5182 			xsk_buff_free(buf->xdp);
5183 			rx_dropped++;
5184 			break;
5185 		case STMMAC_XDP_TX:
5186 		case STMMAC_XDP_REDIRECT:
5187 			xdp_status |= res;
5188 			break;
5189 		}
5190 
5191 		buf->xdp = NULL;
5192 		dirty++;
5193 		count++;
5194 	}
5195 
5196 	if (status & rx_not_ls) {
5197 		rx_q->state_saved = true;
5198 		rx_q->state.error = error;
5199 		rx_q->state.len = len;
5200 	}
5201 
5202 	stmmac_finalize_xdp_rx(priv, xdp_status);
5203 
5204 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5205 	rx_q->rxq_stats.rx_pkt_n += count;
5206 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5207 
5208 	priv->xstats.rx_dropped += rx_dropped;
5209 	priv->xstats.rx_errors += rx_errors;
5210 
5211 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5212 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5213 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5214 		else
5215 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5216 
5217 		return (int)count;
5218 	}
5219 
5220 	return failure ? limit : (int)count;
5221 }
5222 
5223 /**
5224  * stmmac_rx - manage the receive process
5225  * @priv: driver private structure
5226  * @limit: napi bugget
5227  * @queue: RX queue index.
5228  * Description :  this the function called by the napi poll method.
5229  * It gets all the frames inside the ring.
5230  */
5231 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5232 {
5233 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5234 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5235 	struct stmmac_channel *ch = &priv->channel[queue];
5236 	unsigned int count = 0, error = 0, len = 0;
5237 	int status = 0, coe = priv->hw->rx_csum;
5238 	unsigned int next_entry = rx_q->cur_rx;
5239 	enum dma_data_direction dma_dir;
5240 	unsigned int desc_size;
5241 	struct sk_buff *skb = NULL;
5242 	struct stmmac_xdp_buff ctx;
5243 	unsigned long flags;
5244 	int xdp_status = 0;
5245 	int buf_sz;
5246 
5247 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5248 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
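	/* Round the DMA buffer size up to a whole number of pages; this is
	 * the frame size handed to xdp_init_buff() below.
	 */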
5249 
5250 	if (netif_msg_rx_status(priv)) {
5251 		void *rx_head;
5252 
5253 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5254 		if (priv->extend_desc) {
5255 			rx_head = (void *)rx_q->dma_erx;
5256 			desc_size = sizeof(struct dma_extended_desc);
5257 		} else {
5258 			rx_head = (void *)rx_q->dma_rx;
5259 			desc_size = sizeof(struct dma_desc);
5260 		}
5261 
5262 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5263 				    rx_q->dma_rx_phy, desc_size);
5264 	}
5265 	while (count < limit) {
5266 		unsigned int buf1_len = 0, buf2_len = 0;
5267 		enum pkt_hash_types hash_type;
5268 		struct stmmac_rx_buffer *buf;
5269 		struct dma_desc *np, *p;
5270 		int entry;
5271 		u32 hash;
5272 
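		/* If the previous NAPI run stopped in the middle of a frame
		 * (rx_not_ls), resume from the saved state; otherwise start
		 * from a clean one.
		 */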
5273 		if (!count && rx_q->state_saved) {
5274 			skb = rx_q->state.skb;
5275 			error = rx_q->state.error;
5276 			len = rx_q->state.len;
5277 		} else {
5278 			rx_q->state_saved = false;
5279 			skb = NULL;
5280 			error = 0;
5281 			len = 0;
5282 		}
5283 
5284 		if (count >= limit)
5285 			break;
5286 
5287 read_again:
5288 		buf1_len = 0;
5289 		buf2_len = 0;
5290 		entry = next_entry;
5291 		buf = &rx_q->buf_pool[entry];
5292 
5293 		if (priv->extend_desc)
5294 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5295 		else
5296 			p = rx_q->dma_rx + entry;
5297 
5298 		/* read the status of the incoming frame */
5299 		status = stmmac_rx_status(priv, &priv->xstats, p);
5300 		/* check if managed by the DMA otherwise go ahead */
5301 		if (unlikely(status & dma_own))
5302 			break;
5303 
5304 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5305 						priv->dma_conf.dma_rx_size);
5306 		next_entry = rx_q->cur_rx;
5307 
5308 		if (priv->extend_desc)
5309 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5310 		else
5311 			np = rx_q->dma_rx + next_entry;
5312 
5313 		prefetch(np);
5314 
5315 		if (priv->extend_desc)
5316 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5317 		if (unlikely(status == discard_frame)) {
5318 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5319 			buf->page = NULL;
5320 			error = 1;
5321 			if (!priv->hwts_rx_en)
5322 				rx_errors++;
5323 		}
5324 
5325 		if (unlikely(error && (status & rx_not_ls)))
5326 			goto read_again;
5327 		if (unlikely(error)) {
5328 			dev_kfree_skb(skb);
5329 			skb = NULL;
5330 			count++;
5331 			continue;
5332 		}
5333 
5334 		/* Buffer is good. Go on. */
5335 
5336 		prefetch(page_address(buf->page) + buf->page_offset);
5337 		if (buf->sec_page)
5338 			prefetch(page_address(buf->sec_page));
5339 
5340 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5341 		len += buf1_len;
5342 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5343 		len += buf2_len;
5344 
5345 		/* ACS is disabled; strip manually. */
5346 		if (likely(!(status & rx_not_ls))) {
5347 			if (buf2_len) {
5348 				buf2_len -= ETH_FCS_LEN;
5349 				len -= ETH_FCS_LEN;
5350 			} else if (buf1_len) {
5351 				buf1_len -= ETH_FCS_LEN;
5352 				len -= ETH_FCS_LEN;
5353 			}
5354 		}
5355 
5356 		if (!skb) {
5357 			unsigned int pre_len, sync_len;
5358 
5359 			dma_sync_single_for_cpu(priv->device, buf->addr,
5360 						buf1_len, dma_dir);
5361 
5362 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5363 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5364 					 buf->page_offset, buf1_len, true);
5365 
5366 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5367 				  buf->page_offset;
5368 
5369 			ctx.priv = priv;
5370 			ctx.desc = p;
5371 			ctx.ndesc = np;
5372 
5373 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
			/* Due to xdp_adjust_tail: the DMA sync for_device
			 * must cover the maximum length the CPU touched.
			 */
5377 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5378 				   buf->page_offset;
5379 			sync_len = max(sync_len, pre_len);
5380 
			/* For verdicts other than XDP_PASS */
5382 			if (IS_ERR(skb)) {
5383 				unsigned int xdp_res = -PTR_ERR(skb);
5384 
5385 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5386 					page_pool_put_page(rx_q->page_pool,
5387 							   virt_to_head_page(ctx.xdp.data),
5388 							   sync_len, true);
5389 					buf->page = NULL;
5390 					rx_dropped++;
5391 
					/* Clear skb: it only carried the
					 * XDP verdict encoded as an error
					 * pointer, not a real buffer.
					 */
5395 					skb = NULL;
5396 
5397 					if (unlikely((status & rx_not_ls)))
5398 						goto read_again;
5399 
5400 					count++;
5401 					continue;
5402 				} else if (xdp_res & (STMMAC_XDP_TX |
5403 						      STMMAC_XDP_REDIRECT)) {
5404 					xdp_status |= xdp_res;
5405 					buf->page = NULL;
5406 					skb = NULL;
5407 					count++;
5408 					continue;
5409 				}
5410 			}
5411 		}
5412 
5413 		if (!skb) {
5414 			/* XDP program may expand or reduce tail */
5415 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5416 
5417 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5418 			if (!skb) {
5419 				rx_dropped++;
5420 				count++;
5421 				goto drain_data;
5422 			}
5423 
5424 			/* XDP program may adjust header */
5425 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5426 			skb_put(skb, buf1_len);
5427 
5428 			/* Data payload copied into SKB, page ready for recycle */
5429 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5430 			buf->page = NULL;
5431 		} else if (buf1_len) {
5432 			dma_sync_single_for_cpu(priv->device, buf->addr,
5433 						buf1_len, dma_dir);
5434 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5435 					buf->page, buf->page_offset, buf1_len,
5436 					priv->dma_conf.dma_buf_sz);
5437 
5438 			/* Data payload appended into SKB */
5439 			skb_mark_for_recycle(skb);
5440 			buf->page = NULL;
5441 		}
5442 
5443 		if (buf2_len) {
5444 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5445 						buf2_len, dma_dir);
5446 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5447 					buf->sec_page, 0, buf2_len,
5448 					priv->dma_conf.dma_buf_sz);
5449 
5450 			/* Data payload appended into SKB */
5451 			skb_mark_for_recycle(skb);
5452 			buf->sec_page = NULL;
5453 		}
5454 
5455 drain_data:
5456 		if (likely(status & rx_not_ls))
5457 			goto read_again;
5458 		if (!skb)
5459 			continue;
5460 
5461 		/* Got entire packet into SKB. Finish it. */
5462 
5463 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5464 		stmmac_rx_vlan(priv->dev, skb);
5465 		skb->protocol = eth_type_trans(skb, priv->dev);
5466 
5467 		if (unlikely(!coe))
5468 			skb_checksum_none_assert(skb);
5469 		else
5470 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5471 
5472 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5473 			skb_set_hash(skb, hash, hash_type);
5474 
5475 		skb_record_rx_queue(skb, queue);
5476 		napi_gro_receive(&ch->rx_napi, skb);
5477 		skb = NULL;
5478 
5479 		rx_packets++;
5480 		rx_bytes += len;
5481 		count++;
5482 	}
5483 
5484 	if (status & rx_not_ls || skb) {
5485 		rx_q->state_saved = true;
5486 		rx_q->state.skb = skb;
5487 		rx_q->state.error = error;
5488 		rx_q->state.len = len;
5489 	}
5490 
5491 	stmmac_finalize_xdp_rx(priv, xdp_status);
5492 
5493 	stmmac_rx_refill(priv, queue);
5494 
5495 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5496 	rx_q->rxq_stats.rx_packets += rx_packets;
5497 	rx_q->rxq_stats.rx_bytes += rx_bytes;
5498 	rx_q->rxq_stats.rx_pkt_n += count;
5499 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5500 
5501 	priv->xstats.rx_dropped += rx_dropped;
5502 	priv->xstats.rx_errors += rx_errors;
5503 
5504 	return count;
5505 }
5506 
5507 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5508 {
5509 	struct stmmac_channel *ch =
5510 		container_of(napi, struct stmmac_channel, rx_napi);
5511 	struct stmmac_priv *priv = ch->priv_data;
5512 	struct stmmac_rx_queue *rx_q;
5513 	u32 chan = ch->index;
5514 	unsigned long flags;
5515 	int work_done;
5516 
5517 	rx_q = &priv->dma_conf.rx_queue[chan];
5518 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5519 	rx_q->rxq_stats.napi_poll++;
5520 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5521 
5522 	work_done = stmmac_rx(priv, budget, chan);
5523 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5524 		unsigned long flags;
5525 
5526 		spin_lock_irqsave(&ch->lock, flags);
5527 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5528 		spin_unlock_irqrestore(&ch->lock, flags);
5529 	}
5530 
5531 	return work_done;
5532 }
5533 
5534 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5535 {
5536 	struct stmmac_channel *ch =
5537 		container_of(napi, struct stmmac_channel, tx_napi);
5538 	struct stmmac_priv *priv = ch->priv_data;
5539 	struct stmmac_tx_queue *tx_q;
5540 	u32 chan = ch->index;
5541 	unsigned long flags;
5542 	int work_done;
5543 
5544 	tx_q = &priv->dma_conf.tx_queue[chan];
5545 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
5546 	tx_q->txq_stats.napi_poll++;
5547 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
5548 
5549 	work_done = stmmac_tx_clean(priv, budget, chan);
5550 	work_done = min(work_done, budget);
5551 
5552 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5553 		unsigned long flags;
5554 
5555 		spin_lock_irqsave(&ch->lock, flags);
5556 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5557 		spin_unlock_irqrestore(&ch->lock, flags);
5558 	}
5559 
5560 	return work_done;
5561 }
5562 
5563 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5564 {
5565 	struct stmmac_channel *ch =
5566 		container_of(napi, struct stmmac_channel, rxtx_napi);
5567 	struct stmmac_priv *priv = ch->priv_data;
5568 	int rx_done, tx_done, rxtx_done;
5569 	struct stmmac_rx_queue *rx_q;
5570 	struct stmmac_tx_queue *tx_q;
5571 	u32 chan = ch->index;
5572 	unsigned long flags;
5573 
5574 	rx_q = &priv->dma_conf.rx_queue[chan];
5575 	flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
5576 	rx_q->rxq_stats.napi_poll++;
5577 	u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
5578 
5579 	tx_q = &priv->dma_conf.tx_queue[chan];
5580 	flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
5581 	tx_q->txq_stats.napi_poll++;
5582 	u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
5583 
5584 	tx_done = stmmac_tx_clean(priv, budget, chan);
5585 	tx_done = min(tx_done, budget);
5586 
5587 	rx_done = stmmac_rx_zc(priv, budget, chan);
5588 
5589 	rxtx_done = max(tx_done, rx_done);
5590 
	/* If either TX or RX work is not complete, return budget
	 * and keep polling
5593 	 */
5594 	if (rxtx_done >= budget)
5595 		return budget;
5596 
5597 	/* all work done, exit the polling mode */
5598 	if (napi_complete_done(napi, rxtx_done)) {
5599 		unsigned long flags;
5600 
5601 		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work are complete,
		 * so re-enable both RX & TX IRQs.
5604 		 */
5605 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5606 		spin_unlock_irqrestore(&ch->lock, flags);
5607 	}
5608 
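	/* NAPI may already have been completed above, so never report the
	 * full budget back to the core.
	 */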
5609 	return min(rxtx_done, budget - 1);
5610 }
5611 
5612 /**
5613  *  stmmac_tx_timeout
5614  *  @dev : Pointer to net device structure
5615  *  @txqueue: the index of the hanging transmit queue
5616  *  Description: this function is called when a packet transmission fails to
5617  *   complete within a reasonable time. The driver will mark the error in the
5618  *   netdev structure and arrange for the device to be reset to a sane state
5619  *   in order to transmit a new packet.
5620  */
5621 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5622 {
5623 	struct stmmac_priv *priv = netdev_priv(dev);
5624 
5625 	stmmac_global_err(priv);
5626 }
5627 
5628 /**
5629  *  stmmac_set_rx_mode - entry point for multicast addressing
5630  *  @dev : pointer to the device structure
5631  *  Description:
5632  *  This function is a driver entry point which gets called by the kernel
5633  *  whenever multicast addresses must be enabled/disabled.
5634  *  Return value:
5635  *  void.
5636  */
5637 static void stmmac_set_rx_mode(struct net_device *dev)
5638 {
5639 	struct stmmac_priv *priv = netdev_priv(dev);
5640 
5641 	stmmac_set_filter(priv, priv->hw, dev);
5642 }
5643 
5644 /**
5645  *  stmmac_change_mtu - entry point to change MTU size for the device.
5646  *  @dev : device pointer.
5647  *  @new_mtu : the new MTU size for the device.
5648  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5649  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5650  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5651  *  Return value:
5652  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5653  *  file on failure.
5654  */
5655 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5656 {
5657 	struct stmmac_priv *priv = netdev_priv(dev);
5658 	int txfifosz = priv->plat->tx_fifo_size;
5659 	struct stmmac_dma_conf *dma_conf;
5660 	const int mtu = new_mtu;
5661 	int ret;
5662 
5663 	if (txfifosz == 0)
5664 		txfifosz = priv->dma_cap.tx_fifo_size;
5665 
5666 	txfifosz /= priv->plat->tx_queues_to_use;
5667 
5668 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5669 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5670 		return -EINVAL;
5671 	}
5672 
5673 	new_mtu = STMMAC_ALIGN(new_mtu);
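	/* The aligned MTU is only used for the FIFO/maximum-size check below;
	 * the interface keeps the MTU actually requested by the user.
	 */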
5674 
	/* Reject if the per-queue TX FIFO is too small for the new MTU or
	 * the MTU exceeds the 16 KiB buffer limit.
	 */
5676 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5677 		return -EINVAL;
5678 
5679 	if (netif_running(dev)) {
5680 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5681 		/* Try to allocate the new DMA conf with the new mtu */
5682 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5683 		if (IS_ERR(dma_conf)) {
5684 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5685 				   mtu);
5686 			return PTR_ERR(dma_conf);
5687 		}
5688 
5689 		stmmac_release(dev);
5690 
5691 		ret = __stmmac_open(dev, dma_conf);
5692 		if (ret) {
5693 			free_dma_desc_resources(priv, dma_conf);
5694 			kfree(dma_conf);
5695 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5696 			return ret;
5697 		}
5698 
5699 		kfree(dma_conf);
5700 
5701 		stmmac_set_rx_mode(dev);
5702 	}
5703 
5704 	dev->mtu = mtu;
5705 	netdev_update_features(dev);
5706 
5707 	return 0;
5708 }
5709 
5710 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5711 					     netdev_features_t features)
5712 {
5713 	struct stmmac_priv *priv = netdev_priv(dev);
5714 
5715 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5716 		features &= ~NETIF_F_RXCSUM;
5717 
5718 	if (!priv->plat->tx_coe)
5719 		features &= ~NETIF_F_CSUM_MASK;
5720 
5721 	/* Some GMAC devices have a bugged Jumbo frame support that
5722 	 * needs to have the Tx COE disabled for oversized frames
5723 	 * (due to limited buffer sizes). In this case we disable
5724 	 * the TX csum insertion in the TDES and not use SF.
5725 	 */
5726 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5727 		features &= ~NETIF_F_CSUM_MASK;
5728 
5729 	/* Disable tso if asked by ethtool */
5730 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5731 		if (features & NETIF_F_TSO)
5732 			priv->tso = true;
5733 		else
5734 			priv->tso = false;
5735 	}
5736 
5737 	return features;
5738 }
5739 
5740 static int stmmac_set_features(struct net_device *netdev,
5741 			       netdev_features_t features)
5742 {
5743 	struct stmmac_priv *priv = netdev_priv(netdev);
5744 
	/* Keep the COE type only if RX checksum offload is enabled */
5746 	if (features & NETIF_F_RXCSUM)
5747 		priv->hw->rx_csum = priv->plat->rx_coe;
5748 	else
5749 		priv->hw->rx_csum = 0;
5750 	/* No check needed because rx_coe has been set before and it will be
5751 	 * fixed in case of issue.
5752 	 */
5753 	stmmac_rx_ipc(priv, priv->hw);
5754 
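	/* Split Header depends on RX checksum offload, so re-evaluate it on
	 * every RX channel after the rx_csum change above.
	 */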
5755 	if (priv->sph_cap) {
5756 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5757 		u32 chan;
5758 
5759 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5760 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5761 	}
5762 
5763 	return 0;
5764 }
5765 
5766 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5767 {
5768 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5769 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5770 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5771 	bool *hs_enable = &fpe_cfg->hs_enable;
5772 
5773 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5774 		return;
5775 
5776 	/* If LP has sent verify mPacket, LP is FPE capable */
5777 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5778 		if (*lp_state < FPE_STATE_CAPABLE)
5779 			*lp_state = FPE_STATE_CAPABLE;
5780 
		/* If the user has requested FPE enable, respond quickly */
5782 		if (*hs_enable)
5783 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5784 						MPACKET_RESPONSE);
5785 	}
5786 
5787 	/* If Local has sent verify mPacket, Local is FPE capable */
5788 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5789 		if (*lo_state < FPE_STATE_CAPABLE)
5790 			*lo_state = FPE_STATE_CAPABLE;
5791 	}
5792 
5793 	/* If LP has sent response mPacket, LP is entering FPE ON */
5794 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5795 		*lp_state = FPE_STATE_ENTERING_ON;
5796 
5797 	/* If Local has sent response mPacket, Local is entering FPE ON */
5798 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5799 		*lo_state = FPE_STATE_ENTERING_ON;
5800 
5801 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5802 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5803 	    priv->fpe_wq) {
5804 		queue_work(priv->fpe_wq, &priv->fpe_task);
5805 	}
5806 }
5807 
5808 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5809 {
5810 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5811 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5812 	u32 queues_count;
5813 	u32 queue;
5814 	bool xmac;
5815 
5816 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5817 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5818 
5819 	if (priv->irq_wake)
5820 		pm_wakeup_event(priv->device, 0);
5821 
5822 	if (priv->dma_cap.estsel)
5823 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5824 				      &priv->xstats, tx_cnt);
5825 
5826 	if (priv->dma_cap.fpesel) {
5827 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5828 						   priv->dev);
5829 
5830 		stmmac_fpe_event_status(priv, status);
5831 	}
5832 
5833 	/* To handle GMAC own interrupts */
5834 	if ((priv->plat->has_gmac) || xmac) {
5835 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5836 
5837 		if (unlikely(status)) {
5838 			/* For LPI we need to save the tx status */
5839 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5840 				priv->tx_path_in_lpi_mode = true;
5841 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5842 				priv->tx_path_in_lpi_mode = false;
5843 		}
5844 
5845 		for (queue = 0; queue < queues_count; queue++) {
5846 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5847 							    queue);
5848 		}
5849 
5850 		/* PCS link status */
5851 		if (priv->hw->pcs &&
5852 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5853 			if (priv->xstats.pcs_link)
5854 				netif_carrier_on(priv->dev);
5855 			else
5856 				netif_carrier_off(priv->dev);
5857 		}
5858 
5859 		stmmac_timestamp_interrupt(priv, priv);
5860 	}
5861 }
5862 
5863 /**
5864  *  stmmac_interrupt - main ISR
5865  *  @irq: interrupt number.
5866  *  @dev_id: to pass the net device pointer.
5867  *  Description: this is the main driver interrupt service routine.
5868  *  It can call:
5869  *  o DMA service routine (to manage incoming frame reception and transmission
5870  *    status)
5871  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5872  *    interrupts.
5873  */
5874 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5875 {
5876 	struct net_device *dev = (struct net_device *)dev_id;
5877 	struct stmmac_priv *priv = netdev_priv(dev);
5878 
5879 	/* Check if adapter is up */
5880 	if (test_bit(STMMAC_DOWN, &priv->state))
5881 		return IRQ_HANDLED;
5882 
5883 	/* Check if a fatal error happened */
5884 	if (stmmac_safety_feat_interrupt(priv))
5885 		return IRQ_HANDLED;
5886 
5887 	/* To handle Common interrupts */
5888 	stmmac_common_interrupt(priv);
5889 
5890 	/* To handle DMA interrupts */
5891 	stmmac_dma_interrupt(priv);
5892 
5893 	return IRQ_HANDLED;
5894 }
5895 
5896 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5897 {
5898 	struct net_device *dev = (struct net_device *)dev_id;
5899 	struct stmmac_priv *priv = netdev_priv(dev);
5900 
5901 	if (unlikely(!dev)) {
5902 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5903 		return IRQ_NONE;
5904 	}
5905 
5906 	/* Check if adapter is up */
5907 	if (test_bit(STMMAC_DOWN, &priv->state))
5908 		return IRQ_HANDLED;
5909 
5910 	/* To handle Common interrupts */
5911 	stmmac_common_interrupt(priv);
5912 
5913 	return IRQ_HANDLED;
5914 }
5915 
5916 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5917 {
5918 	struct net_device *dev = (struct net_device *)dev_id;
5919 	struct stmmac_priv *priv = netdev_priv(dev);
5920 
5921 	if (unlikely(!dev)) {
5922 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5923 		return IRQ_NONE;
5924 	}
5925 
5926 	/* Check if adapter is up */
5927 	if (test_bit(STMMAC_DOWN, &priv->state))
5928 		return IRQ_HANDLED;
5929 
5930 	/* Check if a fatal error happened */
5931 	stmmac_safety_feat_interrupt(priv);
5932 
5933 	return IRQ_HANDLED;
5934 }
5935 
5936 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5937 {
5938 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5939 	struct stmmac_dma_conf *dma_conf;
5940 	int chan = tx_q->queue_index;
5941 	struct stmmac_priv *priv;
5942 	int status;
5943 
5944 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5945 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5946 
5947 	if (unlikely(!data)) {
5948 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5949 		return IRQ_NONE;
5950 	}
5951 
5952 	/* Check if adapter is up */
5953 	if (test_bit(STMMAC_DOWN, &priv->state))
5954 		return IRQ_HANDLED;
5955 
5956 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5957 
5958 	if (unlikely(status & tx_hard_error_bump_tc)) {
5959 		/* Try to bump up the dma threshold on this failure */
5960 		stmmac_bump_dma_threshold(priv, chan);
5961 	} else if (unlikely(status == tx_hard_error)) {
5962 		stmmac_tx_err(priv, chan);
5963 	}
5964 
5965 	return IRQ_HANDLED;
5966 }
5967 
5968 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5969 {
5970 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5971 	struct stmmac_dma_conf *dma_conf;
5972 	int chan = rx_q->queue_index;
5973 	struct stmmac_priv *priv;
5974 
5975 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5976 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5977 
5978 	if (unlikely(!data)) {
5979 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5980 		return IRQ_NONE;
5981 	}
5982 
5983 	/* Check if adapter is up */
5984 	if (test_bit(STMMAC_DOWN, &priv->state))
5985 		return IRQ_HANDLED;
5986 
5987 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
5988 
5989 	return IRQ_HANDLED;
5990 }
5991 
5992 #ifdef CONFIG_NET_POLL_CONTROLLER
5993 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5994  * to allow network I/O with interrupts disabled.
5995  */
5996 static void stmmac_poll_controller(struct net_device *dev)
5997 {
5998 	struct stmmac_priv *priv = netdev_priv(dev);
5999 	int i;
6000 
6001 	/* If adapter is down, do nothing */
6002 	if (test_bit(STMMAC_DOWN, &priv->state))
6003 		return;
6004 
6005 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
6006 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
6007 			stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
6008 
6009 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
6010 			stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
6011 	} else {
6012 		disable_irq(dev->irq);
6013 		stmmac_interrupt(dev->irq, dev);
6014 		enable_irq(dev->irq);
6015 	}
6016 }
6017 #endif
6018 
6019 /**
6020  *  stmmac_ioctl - Entry point for the Ioctl
6021  *  @dev: Device pointer.
 *  @rq: An IOCTL-specific structure that can contain a pointer to
6023  *  a proprietary structure used to pass information to the driver.
6024  *  @cmd: IOCTL command
6025  *  Description:
6026  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6027  */
6028 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6029 {
	struct stmmac_priv *priv = netdev_priv(dev);
6031 	int ret = -EOPNOTSUPP;
6032 
6033 	if (!netif_running(dev))
6034 		return -EINVAL;
6035 
6036 	switch (cmd) {
6037 	case SIOCGMIIPHY:
6038 	case SIOCGMIIREG:
6039 	case SIOCSMIIREG:
6040 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6041 		break;
6042 	case SIOCSHWTSTAMP:
6043 		ret = stmmac_hwtstamp_set(dev, rq);
6044 		break;
6045 	case SIOCGHWTSTAMP:
6046 		ret = stmmac_hwtstamp_get(dev, rq);
6047 		break;
6048 	default:
6049 		break;
6050 	}
6051 
6052 	return ret;
6053 }
6054 
6055 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6056 				    void *cb_priv)
6057 {
6058 	struct stmmac_priv *priv = cb_priv;
6059 	int ret = -EOPNOTSUPP;
6060 
6061 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6062 		return ret;
6063 
6064 	__stmmac_disable_all_queues(priv);
6065 
6066 	switch (type) {
6067 	case TC_SETUP_CLSU32:
6068 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6069 		break;
6070 	case TC_SETUP_CLSFLOWER:
6071 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6072 		break;
6073 	default:
6074 		break;
6075 	}
6076 
6077 	stmmac_enable_all_queues(priv);
6078 	return ret;
6079 }
6080 
6081 static LIST_HEAD(stmmac_block_cb_list);
6082 
6083 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6084 			   void *type_data)
6085 {
6086 	struct stmmac_priv *priv = netdev_priv(ndev);
6087 
6088 	switch (type) {
6089 	case TC_QUERY_CAPS:
6090 		return stmmac_tc_query_caps(priv, priv, type_data);
6091 	case TC_SETUP_BLOCK:
6092 		return flow_block_cb_setup_simple(type_data,
6093 						  &stmmac_block_cb_list,
6094 						  stmmac_setup_tc_block_cb,
6095 						  priv, priv, true);
6096 	case TC_SETUP_QDISC_CBS:
6097 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6098 	case TC_SETUP_QDISC_TAPRIO:
6099 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6100 	case TC_SETUP_QDISC_ETF:
6101 		return stmmac_tc_setup_etf(priv, priv, type_data);
6102 	default:
6103 		return -EOPNOTSUPP;
6104 	}
6105 }
6106 
6107 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6108 			       struct net_device *sb_dev)
6109 {
6110 	int gso = skb_shinfo(skb)->gso_type;
6111 
6112 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6113 		/*
		 * There is no way to determine the number of TSO/USO
		 * capable queues. Always use queue 0, because if
		 * TSO/USO is supported then at least this one will be
		 * capable.
6118 		 */
6119 		return 0;
6120 	}
6121 
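	/* For everything else, fall back to the default TX queue selection,
	 * folded onto the number of TX queues currently in use.
	 */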
6122 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6123 }
6124 
6125 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6126 {
6127 	struct stmmac_priv *priv = netdev_priv(ndev);
6128 	int ret = 0;
6129 
6130 	ret = pm_runtime_resume_and_get(priv->device);
6131 	if (ret < 0)
6132 		return ret;
6133 
6134 	ret = eth_mac_addr(ndev, addr);
6135 	if (ret)
6136 		goto set_mac_error;
6137 
6138 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6139 
6140 set_mac_error:
6141 	pm_runtime_put(priv->device);
6142 
6143 	return ret;
6144 }
6145 
6146 #ifdef CONFIG_DEBUG_FS
6147 static struct dentry *stmmac_fs_dir;
6148 
6149 static void sysfs_display_ring(void *head, int size, int extend_desc,
6150 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6151 {
6152 	int i;
6153 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6154 	struct dma_desc *p = (struct dma_desc *)head;
6155 	dma_addr_t dma_addr;
6156 
6157 	for (i = 0; i < size; i++) {
6158 		if (extend_desc) {
6159 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6160 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6161 				   i, &dma_addr,
6162 				   le32_to_cpu(ep->basic.des0),
6163 				   le32_to_cpu(ep->basic.des1),
6164 				   le32_to_cpu(ep->basic.des2),
6165 				   le32_to_cpu(ep->basic.des3));
6166 			ep++;
6167 		} else {
6168 			dma_addr = dma_phy_addr + i * sizeof(*p);
6169 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6170 				   i, &dma_addr,
6171 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6172 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6173 			p++;
6174 		}
6175 		seq_printf(seq, "\n");
6176 	}
6177 }
6178 
6179 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6180 {
6181 	struct net_device *dev = seq->private;
6182 	struct stmmac_priv *priv = netdev_priv(dev);
6183 	u32 rx_count = priv->plat->rx_queues_to_use;
6184 	u32 tx_count = priv->plat->tx_queues_to_use;
6185 	u32 queue;
6186 
6187 	if ((dev->flags & IFF_UP) == 0)
6188 		return 0;
6189 
6190 	for (queue = 0; queue < rx_count; queue++) {
6191 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6192 
6193 		seq_printf(seq, "RX Queue %d:\n", queue);
6194 
6195 		if (priv->extend_desc) {
6196 			seq_printf(seq, "Extended descriptor ring:\n");
6197 			sysfs_display_ring((void *)rx_q->dma_erx,
6198 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6199 		} else {
6200 			seq_printf(seq, "Descriptor ring:\n");
6201 			sysfs_display_ring((void *)rx_q->dma_rx,
6202 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6203 		}
6204 	}
6205 
6206 	for (queue = 0; queue < tx_count; queue++) {
6207 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6208 
6209 		seq_printf(seq, "TX Queue %d:\n", queue);
6210 
6211 		if (priv->extend_desc) {
6212 			seq_printf(seq, "Extended descriptor ring:\n");
6213 			sysfs_display_ring((void *)tx_q->dma_etx,
6214 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6215 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6216 			seq_printf(seq, "Descriptor ring:\n");
6217 			sysfs_display_ring((void *)tx_q->dma_tx,
6218 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6219 		}
6220 	}
6221 
6222 	return 0;
6223 }
6224 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6225 
6226 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6227 {
6228 	static const char * const dwxgmac_timestamp_source[] = {
6229 		"None",
6230 		"Internal",
6231 		"External",
6232 		"Both",
6233 	};
6234 	static const char * const dwxgmac_safety_feature_desc[] = {
6235 		"No",
6236 		"All Safety Features with ECC and Parity",
6237 		"All Safety Features without ECC or Parity",
6238 		"All Safety Features with Parity Only",
6239 		"ECC Only",
6240 		"UNDEFINED",
6241 		"UNDEFINED",
6242 		"UNDEFINED",
6243 	};
6244 	struct net_device *dev = seq->private;
6245 	struct stmmac_priv *priv = netdev_priv(dev);
6246 
6247 	if (!priv->hw_cap_support) {
6248 		seq_printf(seq, "DMA HW features not supported\n");
6249 		return 0;
6250 	}
6251 
6252 	seq_printf(seq, "==============================\n");
6253 	seq_printf(seq, "\tDMA HW features\n");
6254 	seq_printf(seq, "==============================\n");
6255 
6256 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6257 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6258 	seq_printf(seq, "\t1000 Mbps: %s\n",
6259 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6260 	seq_printf(seq, "\tHalf duplex: %s\n",
6261 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6262 	if (priv->plat->has_xgmac) {
6263 		seq_printf(seq,
6264 			   "\tNumber of Additional MAC address registers: %d\n",
6265 			   priv->dma_cap.multi_addr);
6266 	} else {
6267 		seq_printf(seq, "\tHash Filter: %s\n",
6268 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6269 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6270 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6271 	}
6272 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6273 		   (priv->dma_cap.pcs) ? "Y" : "N");
6274 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6275 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6276 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6277 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6278 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6279 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6280 	seq_printf(seq, "\tRMON module: %s\n",
6281 		   (priv->dma_cap.rmon) ? "Y" : "N");
6282 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6283 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6284 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6285 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6286 	if (priv->plat->has_xgmac)
6287 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6288 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6289 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6290 		   (priv->dma_cap.eee) ? "Y" : "N");
6291 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6292 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6293 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6294 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6295 	    priv->plat->has_xgmac) {
6296 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6297 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6298 	} else {
6299 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6300 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6301 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6302 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6303 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6304 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6305 	}
6306 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6307 		   priv->dma_cap.number_rx_channel);
6308 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6309 		   priv->dma_cap.number_tx_channel);
6310 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6311 		   priv->dma_cap.number_rx_queues);
6312 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6313 		   priv->dma_cap.number_tx_queues);
6314 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6315 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6316 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6317 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6318 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6319 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6320 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6321 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6322 		   priv->dma_cap.pps_out_num);
6323 	seq_printf(seq, "\tSafety Features: %s\n",
6324 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6325 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6326 		   priv->dma_cap.frpsel ? "Y" : "N");
6327 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6328 		   priv->dma_cap.host_dma_width);
6329 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6330 		   priv->dma_cap.rssen ? "Y" : "N");
6331 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6332 		   priv->dma_cap.vlhash ? "Y" : "N");
6333 	seq_printf(seq, "\tSplit Header: %s\n",
6334 		   priv->dma_cap.sphen ? "Y" : "N");
6335 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6336 		   priv->dma_cap.vlins ? "Y" : "N");
6337 	seq_printf(seq, "\tDouble VLAN: %s\n",
6338 		   priv->dma_cap.dvlan ? "Y" : "N");
6339 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6340 		   priv->dma_cap.l3l4fnum);
6341 	seq_printf(seq, "\tARP Offloading: %s\n",
6342 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6343 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6344 		   priv->dma_cap.estsel ? "Y" : "N");
6345 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6346 		   priv->dma_cap.fpesel ? "Y" : "N");
6347 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6348 		   priv->dma_cap.tbssel ? "Y" : "N");
6349 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6350 		   priv->dma_cap.tbs_ch_num);
6351 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6352 		   priv->dma_cap.sgfsel ? "Y" : "N");
6353 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6354 		   BIT(priv->dma_cap.ttsfd) >> 1);
6355 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6356 		   priv->dma_cap.numtc);
6357 	seq_printf(seq, "\tDCB Feature: %s\n",
6358 		   priv->dma_cap.dcben ? "Y" : "N");
6359 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6360 		   priv->dma_cap.advthword ? "Y" : "N");
6361 	seq_printf(seq, "\tPTP Offload: %s\n",
6362 		   priv->dma_cap.ptoen ? "Y" : "N");
6363 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6364 		   priv->dma_cap.osten ? "Y" : "N");
6365 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6366 		   priv->dma_cap.pfcen ? "Y" : "N");
6367 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6368 		   BIT(priv->dma_cap.frpes) << 6);
6369 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6370 		   BIT(priv->dma_cap.frpbs) << 6);
6371 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6372 		   priv->dma_cap.frppipe_num);
6373 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6374 		   priv->dma_cap.nrvf_num ?
6375 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6376 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6377 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6378 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6379 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6380 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6381 		   priv->dma_cap.cbtisel ? "Y" : "N");
6382 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6383 		   priv->dma_cap.aux_snapshot_n);
6384 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6385 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6386 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6387 		   priv->dma_cap.edma ? "Y" : "N");
6388 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6389 		   priv->dma_cap.ediffc ? "Y" : "N");
6390 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6391 		   priv->dma_cap.vxn ? "Y" : "N");
6392 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6393 		   priv->dma_cap.dbgmem ? "Y" : "N");
6394 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6395 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6396 	return 0;
6397 }
6398 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6399 
6400 /* Use network device events to rename debugfs file entries.
6401  */
6402 static int stmmac_device_event(struct notifier_block *unused,
6403 			       unsigned long event, void *ptr)
6404 {
6405 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6406 	struct stmmac_priv *priv = netdev_priv(dev);
6407 
6408 	if (dev->netdev_ops != &stmmac_netdev_ops)
6409 		goto done;
6410 
6411 	switch (event) {
6412 	case NETDEV_CHANGENAME:
6413 		if (priv->dbgfs_dir)
6414 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6415 							 priv->dbgfs_dir,
6416 							 stmmac_fs_dir,
6417 							 dev->name);
6418 		break;
6419 	}
6420 done:
6421 	return NOTIFY_DONE;
6422 }
6423 
6424 static struct notifier_block stmmac_notifier = {
6425 	.notifier_call = stmmac_device_event,
6426 };
6427 
6428 static void stmmac_init_fs(struct net_device *dev)
6429 {
6430 	struct stmmac_priv *priv = netdev_priv(dev);
6431 
6432 	rtnl_lock();
6433 
6434 	/* Create per netdev entries */
6435 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6436 
6437 	/* Entry to report DMA RX/TX rings */
6438 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6439 			    &stmmac_rings_status_fops);
6440 
6441 	/* Entry to report the DMA HW features */
6442 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6443 			    &stmmac_dma_cap_fops);
6444 
6445 	rtnl_unlock();
6446 }
6447 
6448 static void stmmac_exit_fs(struct net_device *dev)
6449 {
6450 	struct stmmac_priv *priv = netdev_priv(dev);
6451 
6452 	debugfs_remove_recursive(priv->dbgfs_dir);
6453 }
6454 #endif /* CONFIG_DEBUG_FS */
6455 
6456 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6457 {
6458 	unsigned char *data = (unsigned char *)&vid_le;
6459 	unsigned char data_byte = 0;
6460 	u32 crc = ~0x0;
6461 	u32 temp = 0;
6462 	int i, bits;
6463 
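	/* Bit-serial CRC-32 (reflected polynomial 0xedb88320) computed over
	 * the 12 valid VID bits, LSB first.
	 */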
6464 	bits = get_bitmask_order(VLAN_VID_MASK);
6465 	for (i = 0; i < bits; i++) {
6466 		if ((i % 8) == 0)
6467 			data_byte = data[i / 8];
6468 
6469 		temp = ((crc & 1) ^ data_byte) & 1;
6470 		crc >>= 1;
6471 		data_byte >>= 1;
6472 
6473 		if (temp)
6474 			crc ^= 0xedb88320;
6475 	}
6476 
6477 	return crc;
6478 }
6479 
6480 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6481 {
6482 	u32 crc, hash = 0;
6483 	__le16 pmatch = 0;
6484 	int count = 0;
6485 	u16 vid = 0;
6486 
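	/* Build the 16-bit hash bitmap: each active VID sets the bit indexed
	 * by the top four bits of its bit-reversed CRC-32.
	 */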
6487 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6488 		__le16 vid_le = cpu_to_le16(vid);
6489 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6490 		hash |= (1 << crc);
6491 		count++;
6492 	}
6493 
6494 	if (!priv->dma_cap.vlhash) {
6495 		if (count > 2) /* VID = 0 always passes filter */
6496 			return -EOPNOTSUPP;
6497 
6498 		pmatch = cpu_to_le16(vid);
6499 		hash = 0;
6500 	}
6501 
6502 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6503 }
6504 
6505 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6506 {
6507 	struct stmmac_priv *priv = netdev_priv(ndev);
6508 	bool is_double = false;
6509 	int ret;
6510 
6511 	ret = pm_runtime_resume_and_get(priv->device);
6512 	if (ret < 0)
6513 		return ret;
6514 
6515 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6516 		is_double = true;
6517 
6518 	set_bit(vid, priv->active_vlans);
6519 	ret = stmmac_vlan_update(priv, is_double);
6520 	if (ret) {
6521 		clear_bit(vid, priv->active_vlans);
6522 		goto err_pm_put;
6523 	}
6524 
6525 	if (priv->hw->num_vlan) {
6526 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6527 		if (ret)
6528 			goto err_pm_put;
6529 	}
6530 err_pm_put:
6531 	pm_runtime_put(priv->device);
6532 
6533 	return ret;
6534 }
6535 
6536 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6537 {
6538 	struct stmmac_priv *priv = netdev_priv(ndev);
6539 	bool is_double = false;
6540 	int ret;
6541 
6542 	ret = pm_runtime_resume_and_get(priv->device);
6543 	if (ret < 0)
6544 		return ret;
6545 
6546 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6547 		is_double = true;
6548 
6549 	clear_bit(vid, priv->active_vlans);
6550 
6551 	if (priv->hw->num_vlan) {
6552 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6553 		if (ret)
6554 			goto del_vlan_error;
6555 	}
6556 
6557 	ret = stmmac_vlan_update(priv, is_double);
6558 
6559 del_vlan_error:
6560 	pm_runtime_put(priv->device);
6561 
6562 	return ret;
6563 }
6564 
6565 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6566 {
6567 	struct stmmac_priv *priv = netdev_priv(dev);
6568 
6569 	switch (bpf->command) {
6570 	case XDP_SETUP_PROG:
6571 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6572 	case XDP_SETUP_XSK_POOL:
6573 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6574 					     bpf->xsk.queue_id);
6575 	default:
6576 		return -EOPNOTSUPP;
6577 	}
6578 }
6579 
6580 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6581 			   struct xdp_frame **frames, u32 flags)
6582 {
6583 	struct stmmac_priv *priv = netdev_priv(dev);
6584 	int cpu = smp_processor_id();
6585 	struct netdev_queue *nq;
6586 	int i, nxmit = 0;
6587 	int queue;
6588 
6589 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6590 		return -ENETDOWN;
6591 
6592 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6593 		return -EINVAL;
6594 
6595 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6596 	nq = netdev_get_tx_queue(priv->dev, queue);
6597 
6598 	__netif_tx_lock(nq, cpu);
6599 	/* Avoids TX time-out as we are sharing with slow path */
6600 	txq_trans_cond_update(nq);
6601 
6602 	for (i = 0; i < num_frames; i++) {
6603 		int res;
6604 
6605 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6606 		if (res == STMMAC_XDP_CONSUMED)
6607 			break;
6608 
6609 		nxmit++;
6610 	}
6611 
6612 	if (flags & XDP_XMIT_FLUSH) {
6613 		stmmac_flush_tx_descriptors(priv, queue);
6614 		stmmac_tx_timer_arm(priv, queue);
6615 	}
6616 
6617 	__netif_tx_unlock(nq);
6618 
6619 	return nxmit;
6620 }
6621 
6622 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6623 {
6624 	struct stmmac_channel *ch = &priv->channel[queue];
6625 	unsigned long flags;
6626 
6627 	spin_lock_irqsave(&ch->lock, flags);
6628 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6629 	spin_unlock_irqrestore(&ch->lock, flags);
6630 
6631 	stmmac_stop_rx_dma(priv, queue);
6632 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6633 }
6634 
6635 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6636 {
6637 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6638 	struct stmmac_channel *ch = &priv->channel[queue];
6639 	unsigned long flags;
6640 	u32 buf_size;
6641 	int ret;
6642 
6643 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6644 	if (ret) {
6645 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6646 		return;
6647 	}
6648 
6649 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6650 	if (ret) {
6651 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6652 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6653 		return;
6654 	}
6655 
6656 	stmmac_reset_rx_queue(priv, queue);
6657 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6658 
6659 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6660 			    rx_q->dma_rx_phy, rx_q->queue_index);
6661 
6662 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6663 			     sizeof(struct dma_desc));
6664 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6665 			       rx_q->rx_tail_addr, rx_q->queue_index);
6666 
6667 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6668 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6669 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6670 				      buf_size,
6671 				      rx_q->queue_index);
6672 	} else {
6673 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6674 				      priv->dma_conf.dma_buf_sz,
6675 				      rx_q->queue_index);
6676 	}
6677 
6678 	stmmac_start_rx_dma(priv, queue);
6679 
6680 	spin_lock_irqsave(&ch->lock, flags);
6681 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6682 	spin_unlock_irqrestore(&ch->lock, flags);
6683 }
6684 
6685 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6686 {
6687 	struct stmmac_channel *ch = &priv->channel[queue];
6688 	unsigned long flags;
6689 
6690 	spin_lock_irqsave(&ch->lock, flags);
6691 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6692 	spin_unlock_irqrestore(&ch->lock, flags);
6693 
6694 	stmmac_stop_tx_dma(priv, queue);
6695 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6696 }
6697 
6698 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6699 {
6700 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6701 	struct stmmac_channel *ch = &priv->channel[queue];
6702 	unsigned long flags;
6703 	int ret;
6704 
6705 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6706 	if (ret) {
6707 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6708 		return;
6709 	}
6710 
6711 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6712 	if (ret) {
6713 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6714 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6715 		return;
6716 	}
6717 
6718 	stmmac_reset_tx_queue(priv, queue);
6719 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6720 
6721 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6722 			    tx_q->dma_tx_phy, tx_q->queue_index);
6723 
6724 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6725 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6726 
6727 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6728 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6729 			       tx_q->tx_tail_addr, tx_q->queue_index);
6730 
6731 	stmmac_start_tx_dma(priv, queue);
6732 
6733 	spin_lock_irqsave(&ch->lock, flags);
6734 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6735 	spin_unlock_irqrestore(&ch->lock, flags);
6736 }
6737 
6738 void stmmac_xdp_release(struct net_device *dev)
6739 {
6740 	struct stmmac_priv *priv = netdev_priv(dev);
6741 	u32 chan;
6742 
6743 	/* Ensure tx function is not running */
6744 	netif_tx_disable(dev);
6745 
6746 	/* Disable NAPI process */
6747 	stmmac_disable_all_queues(priv);
6748 
6749 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6750 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6751 
6752 	/* Free the IRQ lines */
6753 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6754 
6755 	/* Stop TX/RX DMA channels */
6756 	stmmac_stop_all_dma(priv);
6757 
6758 	/* Release and free the Rx/Tx resources */
6759 	free_dma_desc_resources(priv, &priv->dma_conf);
6760 
6761 	/* Disable the MAC Rx/Tx */
6762 	stmmac_mac_set(priv, priv->ioaddr, false);
6763 
6764 	/* set trans_start so we don't get spurious
6765 	 * watchdogs during reset
6766 	 */
6767 	netif_trans_update(dev);
6768 	netif_carrier_off(dev);
6769 }
6770 
6771 int stmmac_xdp_open(struct net_device *dev)
6772 {
6773 	struct stmmac_priv *priv = netdev_priv(dev);
6774 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6775 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6776 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6777 	struct stmmac_rx_queue *rx_q;
6778 	struct stmmac_tx_queue *tx_q;
6779 	u32 buf_size;
6780 	bool sph_en;
6781 	u32 chan;
6782 	int ret;
6783 
6784 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6785 	if (ret < 0) {
6786 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6787 			   __func__);
6788 		goto dma_desc_error;
6789 	}
6790 
6791 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6792 	if (ret < 0) {
6793 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6794 			   __func__);
6795 		goto init_error;
6796 	}
6797 
6798 	stmmac_reset_queues_param(priv);
6799 
6800 	/* DMA CSR Channel configuration */
6801 	for (chan = 0; chan < dma_csr_ch; chan++) {
6802 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6803 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6804 	}
6805 
6806 	/* Adjust Split header */
6807 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6808 
6809 	/* DMA RX Channel Configuration */
6810 	for (chan = 0; chan < rx_cnt; chan++) {
6811 		rx_q = &priv->dma_conf.rx_queue[chan];
6812 
6813 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6814 				    rx_q->dma_rx_phy, chan);
6815 
6816 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6817 				     (rx_q->buf_alloc_num *
6818 				      sizeof(struct dma_desc));
6819 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6820 				       rx_q->rx_tail_addr, chan);
6821 
6822 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6823 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6824 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6825 					      buf_size,
6826 					      rx_q->queue_index);
6827 		} else {
6828 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6829 					      priv->dma_conf.dma_buf_sz,
6830 					      rx_q->queue_index);
6831 		}
6832 
6833 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6834 	}
6835 
6836 	/* DMA TX Channel Configuration */
6837 	for (chan = 0; chan < tx_cnt; chan++) {
6838 		tx_q = &priv->dma_conf.tx_queue[chan];
6839 
6840 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6841 				    tx_q->dma_tx_phy, chan);
6842 
6843 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6844 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6845 				       tx_q->tx_tail_addr, chan);
6846 
6847 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6848 		tx_q->txtimer.function = stmmac_tx_timer;
6849 	}
6850 
6851 	/* Enable the MAC Rx/Tx */
6852 	stmmac_mac_set(priv, priv->ioaddr, true);
6853 
6854 	/* Start Rx & Tx DMA Channels */
6855 	stmmac_start_all_dma(priv);
6856 
6857 	ret = stmmac_request_irq(dev);
6858 	if (ret)
6859 		goto irq_error;
6860 
	/* Enable NAPI process */
6862 	stmmac_enable_all_queues(priv);
6863 	netif_carrier_on(dev);
6864 	netif_tx_start_all_queues(dev);
6865 	stmmac_enable_all_dma_irq(priv);
6866 
6867 	return 0;
6868 
6869 irq_error:
6870 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6871 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6872 
6873 	stmmac_hw_teardown(dev);
6874 init_error:
6875 	free_dma_desc_resources(priv, &priv->dma_conf);
6876 dma_desc_error:
6877 	return ret;
6878 }
6879 
6880 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6881 {
6882 	struct stmmac_priv *priv = netdev_priv(dev);
6883 	struct stmmac_rx_queue *rx_q;
6884 	struct stmmac_tx_queue *tx_q;
6885 	struct stmmac_channel *ch;
6886 
6887 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6888 	    !netif_carrier_ok(priv->dev))
6889 		return -ENETDOWN;
6890 
6891 	if (!stmmac_xdp_is_enabled(priv))
6892 		return -EINVAL;
6893 
6894 	if (queue >= priv->plat->rx_queues_to_use ||
6895 	    queue >= priv->plat->tx_queues_to_use)
6896 		return -EINVAL;
6897 
6898 	rx_q = &priv->dma_conf.rx_queue[queue];
6899 	tx_q = &priv->dma_conf.tx_queue[queue];
6900 	ch = &priv->channel[queue];
6901 
6902 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6903 		return -EINVAL;
6904 
6905 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have a per-DMA channel SW interrupt,
		 * so we schedule NAPI straight away.
6908 		 */
6909 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6910 			__napi_schedule(&ch->rxtx_napi);
6911 	}
6912 
6913 	return 0;
6914 }
6915 
6916 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6917 {
6918 	struct stmmac_priv *priv = netdev_priv(dev);
6919 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6920 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6921 	unsigned int start;
6922 	int q;
6923 
6924 	for (q = 0; q < tx_cnt; q++) {
6925 		struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
6926 		u64 tx_packets;
6927 		u64 tx_bytes;
6928 
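		/* Snapshot the per-queue counters, retrying if a writer
		 * updated them concurrently.
		 */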
6929 		do {
6930 			start = u64_stats_fetch_begin(&txq_stats->syncp);
6931 			tx_packets = txq_stats->tx_packets;
6932 			tx_bytes   = txq_stats->tx_bytes;
6933 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6934 
6935 		stats->tx_packets += tx_packets;
6936 		stats->tx_bytes += tx_bytes;
6937 	}
6938 
6939 	for (q = 0; q < rx_cnt; q++) {
6940 		struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
6941 		u64 rx_packets;
6942 		u64 rx_bytes;
6943 
6944 		do {
6945 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
6946 			rx_packets = rxq_stats->rx_packets;
6947 			rx_bytes   = rxq_stats->rx_bytes;
6948 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6949 
6950 		stats->rx_packets += rx_packets;
6951 		stats->rx_bytes += rx_bytes;
6952 	}
6953 
6954 	stats->rx_dropped = priv->xstats.rx_dropped;
6955 	stats->rx_errors = priv->xstats.rx_errors;
6956 	stats->tx_dropped = priv->xstats.tx_dropped;
6957 	stats->tx_errors = priv->xstats.tx_errors;
6958 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6959 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6960 	stats->rx_length_errors = priv->xstats.rx_length;
6961 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6962 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6963 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6964 }
6965 
6966 static const struct net_device_ops stmmac_netdev_ops = {
6967 	.ndo_open = stmmac_open,
6968 	.ndo_start_xmit = stmmac_xmit,
6969 	.ndo_stop = stmmac_release,
6970 	.ndo_change_mtu = stmmac_change_mtu,
6971 	.ndo_fix_features = stmmac_fix_features,
6972 	.ndo_set_features = stmmac_set_features,
6973 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6974 	.ndo_tx_timeout = stmmac_tx_timeout,
6975 	.ndo_eth_ioctl = stmmac_ioctl,
6976 	.ndo_get_stats64 = stmmac_get_stats64,
6977 	.ndo_setup_tc = stmmac_setup_tc,
6978 	.ndo_select_queue = stmmac_select_queue,
6979 #ifdef CONFIG_NET_POLL_CONTROLLER
6980 	.ndo_poll_controller = stmmac_poll_controller,
6981 #endif
6982 	.ndo_set_mac_address = stmmac_set_mac_address,
6983 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6984 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6985 	.ndo_bpf = stmmac_bpf,
6986 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6987 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6988 };
6989 
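/* Recover the adapter by closing and re-opening the interface under the
 * RTNL lock. This runs from the service task when a reset has been
 * requested (e.g. after a TX timeout) and the interface is not already
 * going down.
 */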
6990 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6991 {
6992 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6993 		return;
6994 	if (test_bit(STMMAC_DOWN, &priv->state))
6995 		return;
6996 
6997 	netdev_err(priv->dev, "Reset adapter.\n");
6998 
6999 	rtnl_lock();
7000 	netif_trans_update(priv->dev);
7001 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7002 		usleep_range(1000, 2000);
7003 
7004 	set_bit(STMMAC_DOWN, &priv->state);
7005 	dev_close(priv->dev);
7006 	dev_open(priv->dev, NULL);
7007 	clear_bit(STMMAC_DOWN, &priv->state);
7008 	clear_bit(STMMAC_RESETING, &priv->state);
7009 	rtnl_unlock();
7010 }
7011 
7012 static void stmmac_service_task(struct work_struct *work)
7013 {
7014 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7015 			service_task);
7016 
7017 	stmmac_reset_subtask(priv);
7018 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7019 }
7020 
7021 /**
7022  *  stmmac_hw_init - Init the MAC device
7023  *  @priv: driver private structure
7024  *  Description: this function configures the MAC device according to
7025  *  platform parameters or the HW capability register. It prepares the
7026  *  driver to use either ring or chain mode and to use either enhanced or
7027  *  normal descriptors.
7028  */
7029 static int stmmac_hw_init(struct stmmac_priv *priv)
7030 {
7031 	int ret;
7032 
7033 	/* dwmac-sun8i only works in chain mode */
7034 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7035 		chain_mode = 1;
7036 	priv->chain_mode = chain_mode;
7037 
7038 	/* Initialize HW Interface */
7039 	ret = stmmac_hwif_init(priv);
7040 	if (ret)
7041 		return ret;
7042 
7043 	/* Get the HW capability register (available on GMAC cores newer than 3.50a) */
7044 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7045 	if (priv->hw_cap_support) {
7046 		dev_info(priv->device, "DMA HW capability register supported\n");
7047 
7048 		/* Some GMAC/DMA configuration fields passed in through the
7049 		 * platform data (e.g. enh_desc, tx_coe) can be overridden
7050 		 * here with the values from the HW capability register,
7051 		 * if supported.
7052 		 */
7053 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7054 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7055 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7056 		priv->hw->pmt = priv->plat->pmt;
7057 		if (priv->dma_cap.hash_tb_sz) {
7058 			priv->hw->multicast_filter_bins =
7059 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7060 			priv->hw->mcast_bits_log2 =
7061 					ilog2(priv->hw->multicast_filter_bins);
7062 		}
7063 
7064 		/* TXCOE doesn't work in thresh DMA mode */
7065 		if (priv->plat->force_thresh_dma_mode)
7066 			priv->plat->tx_coe = 0;
7067 		else
7068 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7069 
7070 		/* In case of GMAC4 rx_coe is from HW cap register. */
7071 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7072 
7073 		if (priv->dma_cap.rx_coe_type2)
7074 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7075 		else if (priv->dma_cap.rx_coe_type1)
7076 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7077 
7078 	} else {
7079 		dev_info(priv->device, "No HW DMA feature register supported\n");
7080 	}
7081 
7082 	if (priv->plat->rx_coe) {
7083 		priv->hw->rx_csum = priv->plat->rx_coe;
7084 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7085 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7086 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7087 	}
7088 	if (priv->plat->tx_coe)
7089 		dev_info(priv->device, "TX Checksum insertion supported\n");
7090 
7091 	if (priv->plat->pmt) {
7092 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7093 		device_set_wakeup_capable(priv->device, 1);
7094 	}
7095 
7096 	if (priv->dma_cap.tsoen)
7097 		dev_info(priv->device, "TSO supported\n");
7098 
7099 	priv->hw->vlan_fail_q_en =
7100 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7101 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7102 
7103 	/* Run HW quirks, if any */
7104 	if (priv->hwif_quirks) {
7105 		ret = priv->hwif_quirks(priv);
7106 		if (ret)
7107 			return ret;
7108 	}
7109 
7110 	/* The Rx Watchdog is available on cores newer than 3.40.
7111 	 * In some cases, for example on buggy HW, this feature
7112 	 * has to be disabled; this can be done by setting the
7113 	 * riwt_off field in the platform data.
7114 	 */
7115 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7116 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7117 		priv->use_riwt = 1;
7118 		dev_info(priv->device,
7119 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7120 	}
7121 
7122 	return 0;
7123 }
7124 
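/* Register the NAPI contexts for each channel: an RX and a TX context for
 * the regular datapath, plus a combined RX/TX context (rxtx_napi) used for
 * the XDP/AF_XDP zero-copy path on channels that have both an RX and a TX
 * queue.
 */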
7125 static void stmmac_napi_add(struct net_device *dev)
7126 {
7127 	struct stmmac_priv *priv = netdev_priv(dev);
7128 	u32 queue, maxq;
7129 
7130 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7131 
7132 	for (queue = 0; queue < maxq; queue++) {
7133 		struct stmmac_channel *ch = &priv->channel[queue];
7134 
7135 		ch->priv_data = priv;
7136 		ch->index = queue;
7137 		spin_lock_init(&ch->lock);
7138 
7139 		if (queue < priv->plat->rx_queues_to_use) {
7140 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7141 		}
7142 		if (queue < priv->plat->tx_queues_to_use) {
7143 			netif_napi_add_tx(dev, &ch->tx_napi,
7144 					  stmmac_napi_poll_tx);
7145 		}
7146 		if (queue < priv->plat->rx_queues_to_use &&
7147 		    queue < priv->plat->tx_queues_to_use) {
7148 			netif_napi_add(dev, &ch->rxtx_napi,
7149 				       stmmac_napi_poll_rxtx);
7150 		}
7151 	}
7152 }
7153 
7154 static void stmmac_napi_del(struct net_device *dev)
7155 {
7156 	struct stmmac_priv *priv = netdev_priv(dev);
7157 	u32 queue, maxq;
7158 
7159 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7160 
7161 	for (queue = 0; queue < maxq; queue++) {
7162 		struct stmmac_channel *ch = &priv->channel[queue];
7163 
7164 		if (queue < priv->plat->rx_queues_to_use)
7165 			netif_napi_del(&ch->rx_napi);
7166 		if (queue < priv->plat->tx_queues_to_use)
7167 			netif_napi_del(&ch->tx_napi);
7168 		if (queue < priv->plat->rx_queues_to_use &&
7169 		    queue < priv->plat->tx_queues_to_use) {
7170 			netif_napi_del(&ch->rxtx_napi);
7171 		}
7172 	}
7173 }
7174 
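/* Change the number of active RX/TX queues (e.g. via the ethtool channels
 * interface). The interface is torn down if running, the NAPI contexts are
 * re-created for the new queue counts, the default RSS indirection table is
 * regenerated unless the user has configured one, and the interface is then
 * re-opened.
 */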
7175 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7176 {
7177 	struct stmmac_priv *priv = netdev_priv(dev);
7178 	int ret = 0, i;
7179 
7180 	if (netif_running(dev))
7181 		stmmac_release(dev);
7182 
7183 	stmmac_napi_del(dev);
7184 
7185 	priv->plat->rx_queues_to_use = rx_cnt;
7186 	priv->plat->tx_queues_to_use = tx_cnt;
7187 	if (!netif_is_rxfh_configured(dev))
7188 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7189 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7190 									rx_cnt);
7191 
7192 	stmmac_napi_add(dev);
7193 
7194 	if (netif_running(dev))
7195 		ret = stmmac_open(dev);
7196 
7197 	return ret;
7198 }
7199 
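/* Change the RX/TX descriptor ring sizes (e.g. via ethtool ring parameters),
 * restarting the interface if it was running so that the DMA rings are
 * re-allocated with the new sizes.
 */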
7200 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7201 {
7202 	struct stmmac_priv *priv = netdev_priv(dev);
7203 	int ret = 0;
7204 
7205 	if (netif_running(dev))
7206 		stmmac_release(dev);
7207 
7208 	priv->dma_conf.dma_rx_size = rx_size;
7209 	priv->dma_conf.dma_tx_size = tx_size;
7210 
7211 	if (netif_running(dev))
7212 		ret = stmmac_open(dev);
7213 
7214 	return ret;
7215 }
7216 
7217 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
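/* FPE link-partner handshake task: while the handshake is enabled, keep
 * sending verify mPackets (up to 20 attempts, 500 ms apart) until both the
 * local and the link-partner FPE state machines report ENTERING_ON, then
 * program the hardware and mark both sides as ON.
 */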
7218 static void stmmac_fpe_lp_task(struct work_struct *work)
7219 {
7220 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7221 						fpe_task);
7222 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7223 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7224 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7225 	bool *hs_enable = &fpe_cfg->hs_enable;
7226 	bool *enable = &fpe_cfg->enable;
7227 	int retries = 20;
7228 
7229 	while (retries-- > 0) {
7230 		/* Bail out immediately if FPE handshake is OFF */
7231 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7232 			break;
7233 
7234 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7235 		    *lp_state == FPE_STATE_ENTERING_ON) {
7236 			stmmac_fpe_configure(priv, priv->ioaddr,
7237 					     priv->plat->tx_queues_to_use,
7238 					     priv->plat->rx_queues_to_use,
7239 					     *enable);
7240 
7241 			netdev_info(priv->dev, "configured FPE\n");
7242 
7243 			*lo_state = FPE_STATE_ON;
7244 			*lp_state = FPE_STATE_ON;
7245 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7246 			break;
7247 		}
7248 
7249 		if ((*lo_state == FPE_STATE_CAPABLE ||
7250 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7251 		     *lp_state != FPE_STATE_ON) {
7252 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7253 				    *lo_state, *lp_state);
7254 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7255 						MPACKET_VERIFY);
7256 		}
7257 		/* Sleep then retry */
7258 		msleep(500);
7259 	}
7260 
7261 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7262 }
7263 
7264 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7265 {
7266 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7267 		if (enable) {
7268 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7269 						MPACKET_VERIFY);
7270 		} else {
7271 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7272 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7273 		}
7274 
7275 		priv->plat->fpe_cfg->hs_enable = enable;
7276 	}
7277 }
7278 
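/* XDP metadata hook: report the hardware RX timestamp for the current
 * descriptor, adjusted by the platform CDC error correction, or -ENODATA
 * if RX timestamping is disabled or no timestamp is available.
 */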
7279 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7280 {
7281 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7282 	struct dma_desc *desc_contains_ts = ctx->desc;
7283 	struct stmmac_priv *priv = ctx->priv;
7284 	struct dma_desc *ndesc = ctx->ndesc;
7285 	struct dma_desc *desc = ctx->desc;
7286 	u64 ns = 0;
7287 
7288 	if (!priv->hwts_rx_en)
7289 		return -ENODATA;
7290 
7291 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7292 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7293 		desc_contains_ts = ndesc;
7294 
7295 	/* Check if timestamp is available */
7296 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7297 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7298 		ns -= priv->plat->cdc_error_adj;
7299 		*timestamp = ns_to_ktime(ns);
7300 		return 0;
7301 	}
7302 
7303 	return -ENODATA;
7304 }
7305 
7306 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7307 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7308 };
7309 
7310 /**
7311  * stmmac_dvr_probe
7312  * @device: device pointer
7313  * @plat_dat: platform data pointer
7314  * @res: stmmac resource pointer
7315  * Description: this is the main probe function, used to allocate the
7316  * net_device via alloc_etherdev and to set up the driver private structure.
7317  * Return: 0 on success, otherwise a negative errno.
7319  */
7320 int stmmac_dvr_probe(struct device *device,
7321 		     struct plat_stmmacenet_data *plat_dat,
7322 		     struct stmmac_resources *res)
7323 {
7324 	struct net_device *ndev = NULL;
7325 	struct stmmac_priv *priv;
7326 	u32 rxq;
7327 	int i, ret = 0;
7328 
7329 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7330 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7331 	if (!ndev)
7332 		return -ENOMEM;
7333 
7334 	SET_NETDEV_DEV(ndev, device);
7335 
7336 	priv = netdev_priv(ndev);
7337 	priv->device = device;
7338 	priv->dev = ndev;
7339 
7340 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7341 		u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
7342 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7343 		u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
7344 
7345 	stmmac_set_ethtool_ops(ndev);
7346 	priv->pause = pause;
7347 	priv->plat = plat_dat;
7348 	priv->ioaddr = res->addr;
7349 	priv->dev->base_addr = (unsigned long)res->addr;
7350 	priv->plat->dma_cfg->multi_msi_en =
7351 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7352 
7353 	priv->dev->irq = res->irq;
7354 	priv->wol_irq = res->wol_irq;
7355 	priv->lpi_irq = res->lpi_irq;
7356 	priv->sfty_ce_irq = res->sfty_ce_irq;
7357 	priv->sfty_ue_irq = res->sfty_ue_irq;
7358 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7359 		priv->rx_irq[i] = res->rx_irq[i];
7360 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7361 		priv->tx_irq[i] = res->tx_irq[i];
7362 
7363 	if (!is_zero_ether_addr(res->mac))
7364 		eth_hw_addr_set(priv->dev, res->mac);
7365 
7366 	dev_set_drvdata(device, priv->dev);
7367 
7368 	/* Verify driver arguments */
7369 	stmmac_verify_args();
7370 
7371 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7372 	if (!priv->af_xdp_zc_qps)
7373 		return -ENOMEM;
7374 
7375 	/* Allocate workqueue */
7376 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7377 	if (!priv->wq) {
7378 		dev_err(priv->device, "failed to create workqueue\n");
7379 		ret = -ENOMEM;
7380 		goto error_wq_init;
7381 	}
7382 
7383 	INIT_WORK(&priv->service_task, stmmac_service_task);
7384 
7385 	/* Initialize Link Partner FPE workqueue */
7386 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7387 
7388 	/* Override with kernel parameters if supplied XXX CRS XXX
7389 	 * this needs to have multiple instances
7390 	 */
7391 	if ((phyaddr >= 0) && (phyaddr <= 31))
7392 		priv->plat->phy_addr = phyaddr;
7393 
7394 	if (priv->plat->stmmac_rst) {
7395 		ret = reset_control_assert(priv->plat->stmmac_rst);
7396 		reset_control_deassert(priv->plat->stmmac_rst);
7397 		/* Some reset controllers provide only a reset callback
7398 		 * instead of the assert + deassert callback pair.
7399 		 */
7400 		if (ret == -ENOTSUPP)
7401 			reset_control_reset(priv->plat->stmmac_rst);
7402 	}
7403 
7404 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7405 	if (ret == -ENOTSUPP)
7406 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7407 			ERR_PTR(ret));
7408 
7409 	/* Init MAC and get the capabilities */
7410 	ret = stmmac_hw_init(priv);
7411 	if (ret)
7412 		goto error_hw_init;
7413 
7414 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7415 	 */
7416 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7417 		priv->plat->dma_cfg->dche = false;
7418 
7419 	stmmac_check_ether_addr(priv);
7420 
7421 	ndev->netdev_ops = &stmmac_netdev_ops;
7422 
7423 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7424 
7425 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7426 			    NETIF_F_RXCSUM;
7427 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7428 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7429 
7430 	ret = stmmac_tc_init(priv, priv);
7431 	if (!ret)
7432 		ndev->hw_features |= NETIF_F_HW_TC;
7434 
7435 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7436 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7437 		if (priv->plat->has_gmac4)
7438 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7439 		priv->tso = true;
7440 		dev_info(priv->device, "TSO feature enabled\n");
7441 	}
7442 
7443 	if (priv->dma_cap.sphen &&
7444 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7445 		ndev->hw_features |= NETIF_F_GRO;
7446 		priv->sph_cap = true;
7447 		priv->sph = priv->sph_cap;
7448 		dev_info(priv->device, "SPH feature enabled\n");
7449 	}
7450 
7451 	/* Ideally our host DMA address width is the same as for the
7452 	 * device. However, it may differ and then we have to use our
7453 	 * host DMA width for allocation and the device DMA width for
7454 	 * register handling.
7455 	 */
7456 	if (priv->plat->host_dma_width)
7457 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7458 	else
7459 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7460 
7461 	if (priv->dma_cap.host_dma_width) {
7462 		ret = dma_set_mask_and_coherent(device,
7463 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7464 		if (!ret) {
7465 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7466 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7467 
7468 			/*
7469 			 * If more than 32 bits can be addressed, make sure to
7470 			 * enable enhanced addressing mode.
7471 			 */
7472 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7473 				priv->plat->dma_cfg->eame = true;
7474 		} else {
7475 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7476 			if (ret) {
7477 				dev_err(priv->device, "Failed to set DMA Mask\n");
7478 				goto error_hw_init;
7479 			}
7480 
7481 			priv->dma_cap.host_dma_width = 32;
7482 		}
7483 	}
7484 
7485 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7486 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7487 #ifdef STMMAC_VLAN_TAG_USED
7488 	/* Both mac100 and gmac support receive VLAN tag detection */
7489 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7490 	if (priv->dma_cap.vlhash) {
7491 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7492 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7493 	}
7494 	if (priv->dma_cap.vlins) {
7495 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7496 		if (priv->dma_cap.dvlan)
7497 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7498 	}
7499 #endif
7500 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7501 
7502 	priv->xstats.threshold = tc;
7503 
7504 	/* Initialize RSS */
7505 	rxq = priv->plat->rx_queues_to_use;
7506 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7507 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7508 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7509 
7510 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7511 		ndev->features |= NETIF_F_RXHASH;
7512 
7513 	ndev->vlan_features |= ndev->features;
7514 	/* TSO doesn't work on VLANs yet */
7515 	ndev->vlan_features &= ~NETIF_F_TSO;
7516 
7517 	/* MTU range: 46 - hw-specific max */
7518 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7519 	if (priv->plat->has_xgmac)
7520 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7521 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7522 		ndev->max_mtu = JUMBO_LEN;
7523 	else
7524 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7525 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7526 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7527 	 */
7528 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7529 	    (priv->plat->maxmtu >= ndev->min_mtu))
7530 		ndev->max_mtu = priv->plat->maxmtu;
7531 	else if (priv->plat->maxmtu < ndev->min_mtu)
7532 		dev_warn(priv->device,
7533 			 "%s: warning: maxmtu has an invalid value (%d)\n",
7534 			 __func__, priv->plat->maxmtu);
7535 
7536 	if (flow_ctrl)
7537 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7538 
7539 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7540 
7541 	/* Setup channels NAPI */
7542 	stmmac_napi_add(ndev);
7543 
7544 	mutex_init(&priv->lock);
7545 
7546 	/* If a specific clk_csr value is passed from the platform, the
7547 	 * CSR Clock Range selection cannot be changed at run-time and is
7548 	 * fixed. Otherwise, the driver tries to set the MDC clock
7549 	 * dynamically according to the actual CSR clock input.
7550 	 */
7552 	if (priv->plat->clk_csr >= 0)
7553 		priv->clk_csr = priv->plat->clk_csr;
7554 	else
7555 		stmmac_clk_csr_set(priv);
7556 
7557 	stmmac_check_pcs_mode(priv);
7558 
7559 	pm_runtime_get_noresume(device);
7560 	pm_runtime_set_active(device);
7561 	if (!pm_runtime_enabled(device))
7562 		pm_runtime_enable(device);
7563 
7564 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7565 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7566 		/* MDIO bus Registration */
7567 		ret = stmmac_mdio_register(ndev);
7568 		if (ret < 0) {
7569 			dev_err_probe(priv->device, ret,
7570 				      "%s: MDIO bus (id: %d) registration failed\n",
7571 				      __func__, priv->plat->bus_id);
7572 			goto error_mdio_register;
7573 		}
7574 	}
7575 
7576 	if (priv->plat->speed_mode_2500)
7577 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7578 
7579 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7580 		ret = stmmac_xpcs_setup(priv->mii);
7581 		if (ret)
7582 			goto error_xpcs_setup;
7583 	}
7584 
7585 	ret = stmmac_phy_setup(priv);
7586 	if (ret) {
7587 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7588 		goto error_phy_setup;
7589 	}
7590 
7591 	ret = register_netdev(ndev);
7592 	if (ret) {
7593 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7594 			__func__, ret);
7595 		goto error_netdev_register;
7596 	}
7597 
7598 #ifdef CONFIG_DEBUG_FS
7599 	stmmac_init_fs(ndev);
7600 #endif
7601 
7602 	if (priv->plat->dump_debug_regs)
7603 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7604 
7605 	/* Let pm_runtime_put() disable the clocks.
7606 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7607 	 */
7608 	pm_runtime_put(device);
7609 
7610 	return ret;
7611 
7612 error_netdev_register:
7613 	phylink_destroy(priv->phylink);
7614 error_xpcs_setup:
7615 error_phy_setup:
7616 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7617 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7618 		stmmac_mdio_unregister(ndev);
7619 error_mdio_register:
7620 	stmmac_napi_del(ndev);
7621 error_hw_init:
7622 	destroy_workqueue(priv->wq);
7623 error_wq_init:
7624 	bitmap_free(priv->af_xdp_zc_qps);
7625 
7626 	return ret;
7627 }
7628 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7629 
7630 /**
7631  * stmmac_dvr_remove
7632  * @dev: device pointer
7633  * Description: this function resets the TX/RX processes, disables the MAC
7634  * RX/TX, changes the link status and releases the DMA descriptor rings.
7635  */
7636 void stmmac_dvr_remove(struct device *dev)
7637 {
7638 	struct net_device *ndev = dev_get_drvdata(dev);
7639 	struct stmmac_priv *priv = netdev_priv(ndev);
7640 
7641 	netdev_info(priv->dev, "%s: removing driver", __func__);
7642 
7643 	pm_runtime_get_sync(dev);
7644 
7645 	stmmac_stop_all_dma(priv);
7646 	stmmac_mac_set(priv, priv->ioaddr, false);
7647 	netif_carrier_off(ndev);
7648 	unregister_netdev(ndev);
7649 
7650 #ifdef CONFIG_DEBUG_FS
7651 	stmmac_exit_fs(ndev);
7652 #endif
7653 	phylink_destroy(priv->phylink);
7654 	if (priv->plat->stmmac_rst)
7655 		reset_control_assert(priv->plat->stmmac_rst);
7656 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7657 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7658 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7659 		stmmac_mdio_unregister(ndev);
7660 	destroy_workqueue(priv->wq);
7661 	mutex_destroy(&priv->lock);
7662 	bitmap_free(priv->af_xdp_zc_qps);
7663 
7664 	pm_runtime_disable(dev);
7665 	pm_runtime_put_noidle(dev);
7666 }
7667 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7668 
7669 /**
7670  * stmmac_suspend - suspend callback
7671  * @dev: device pointer
7672  * Description: this function suspends the device; it is called by the
7673  * platform driver to stop the network queue, program the PMT register
7674  * (for WoL) and clean up and release the driver resources.
7675  */
7676 int stmmac_suspend(struct device *dev)
7677 {
7678 	struct net_device *ndev = dev_get_drvdata(dev);
7679 	struct stmmac_priv *priv = netdev_priv(ndev);
7680 	u32 chan;
7681 
7682 	if (!ndev || !netif_running(ndev))
7683 		return 0;
7684 
7685 	mutex_lock(&priv->lock);
7686 
7687 	netif_device_detach(ndev);
7688 
7689 	stmmac_disable_all_queues(priv);
7690 
7691 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7692 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7693 
7694 	if (priv->eee_enabled) {
7695 		priv->tx_path_in_lpi_mode = false;
7696 		del_timer_sync(&priv->eee_ctrl_timer);
7697 	}
7698 
7699 	/* Stop TX/RX DMA */
7700 	stmmac_stop_all_dma(priv);
7701 
7702 	if (priv->plat->serdes_powerdown)
7703 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7704 
7705 	/* Enable Power down mode by programming the PMT regs */
7706 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7707 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7708 		priv->irq_wake = 1;
7709 	} else {
7710 		stmmac_mac_set(priv, priv->ioaddr, false);
7711 		pinctrl_pm_select_sleep_state(priv->device);
7712 	}
7713 
7714 	mutex_unlock(&priv->lock);
7715 
7716 	rtnl_lock();
7717 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7718 		phylink_suspend(priv->phylink, true);
7719 	} else {
7720 		if (device_may_wakeup(priv->device))
7721 			phylink_speed_down(priv->phylink, false);
7722 		phylink_suspend(priv->phylink, false);
7723 	}
7724 	rtnl_unlock();
7725 
7726 	if (priv->dma_cap.fpesel) {
7727 		/* Disable FPE */
7728 		stmmac_fpe_configure(priv, priv->ioaddr,
7729 				     priv->plat->tx_queues_to_use,
7730 				     priv->plat->rx_queues_to_use, false);
7731 
7732 		stmmac_fpe_handshake(priv, false);
7733 		stmmac_fpe_stop_wq(priv);
7734 	}
7735 
7736 	priv->speed = SPEED_UNKNOWN;
7737 	return 0;
7738 }
7739 EXPORT_SYMBOL_GPL(stmmac_suspend);
7740 
7741 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7742 {
7743 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7744 
7745 	rx_q->cur_rx = 0;
7746 	rx_q->dirty_rx = 0;
7747 }
7748 
7749 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7750 {
7751 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7752 
7753 	tx_q->cur_tx = 0;
7754 	tx_q->dirty_tx = 0;
7755 	tx_q->mss = 0;
7756 
7757 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7758 }
7759 
7760 /**
7761  * stmmac_reset_queues_param - reset queue parameters
7762  * @priv: device pointer
7763  */
7764 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7765 {
7766 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7767 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7768 	u32 queue;
7769 
7770 	for (queue = 0; queue < rx_cnt; queue++)
7771 		stmmac_reset_rx_queue(priv, queue);
7772 
7773 	for (queue = 0; queue < tx_cnt; queue++)
7774 		stmmac_reset_tx_queue(priv, queue);
7775 }
7776 
7777 /**
7778  * stmmac_resume - resume callback
7779  * @dev: device pointer
7780  * Description: on resume, this function is invoked to set up the DMA and
7781  * the core in a usable state.
7782  */
7783 int stmmac_resume(struct device *dev)
7784 {
7785 	struct net_device *ndev = dev_get_drvdata(dev);
7786 	struct stmmac_priv *priv = netdev_priv(ndev);
7787 	int ret;
7788 
7789 	if (!netif_running(ndev))
7790 		return 0;
7791 
7792 	/* The Power Down bit in the PMT register is cleared automatically
7793 	 * as soon as a magic packet or a Wake-up frame is received.
7794 	 * Anyway, it's better to clear this bit manually because it can
7795 	 * cause problems when resuming from other devices
7796 	 * (e.g. a serial console).
7797 	 */
7798 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7799 		mutex_lock(&priv->lock);
7800 		stmmac_pmt(priv, priv->hw, 0);
7801 		mutex_unlock(&priv->lock);
7802 		priv->irq_wake = 0;
7803 	} else {
7804 		pinctrl_pm_select_default_state(priv->device);
7805 		/* reset the phy so that it's ready */
7806 		if (priv->mii)
7807 			stmmac_mdio_reset(priv->mii);
7808 	}
7809 
7810 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7811 	    priv->plat->serdes_powerup) {
7812 		ret = priv->plat->serdes_powerup(ndev,
7813 						 priv->plat->bsp_priv);
7814 
7815 		if (ret < 0)
7816 			return ret;
7817 	}
7818 
7819 	rtnl_lock();
7820 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7821 		phylink_resume(priv->phylink);
7822 	} else {
7823 		phylink_resume(priv->phylink);
7824 		if (device_may_wakeup(priv->device))
7825 			phylink_speed_up(priv->phylink);
7826 	}
7827 	rtnl_unlock();
7828 
7829 	rtnl_lock();
7830 	mutex_lock(&priv->lock);
7831 
7832 	stmmac_reset_queues_param(priv);
7833 
7834 	stmmac_free_tx_skbufs(priv);
7835 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7836 
7837 	stmmac_hw_setup(ndev, false);
7838 	stmmac_init_coalesce(priv);
7839 	stmmac_set_rx_mode(ndev);
7840 
7841 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7842 
7843 	stmmac_enable_all_queues(priv);
7844 	stmmac_enable_all_dma_irq(priv);
7845 
7846 	mutex_unlock(&priv->lock);
7847 	rtnl_unlock();
7848 
7849 	netif_device_attach(ndev);
7850 
7851 	return 0;
7852 }
7853 EXPORT_SYMBOL_GPL(stmmac_resume);
7854 
7855 #ifndef MODULE
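/* Parse the "stmmaceth=" kernel command line options when the driver is
 * built in. The accepted format is a comma-separated list of key:value
 * pairs matching the module parameters above, for example (illustrative
 * values only):
 *
 *   stmmaceth=debug:16,phyaddr:1,watchdog:5000,chain_mode:1
 */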
7856 static int __init stmmac_cmdline_opt(char *str)
7857 {
7858 	char *opt;
7859 
7860 	if (!str || !*str)
7861 		return 1;
7862 	while ((opt = strsep(&str, ",")) != NULL) {
7863 		if (!strncmp(opt, "debug:", 6)) {
7864 			if (kstrtoint(opt + 6, 0, &debug))
7865 				goto err;
7866 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7867 			if (kstrtoint(opt + 8, 0, &phyaddr))
7868 				goto err;
7869 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7870 			if (kstrtoint(opt + 7, 0, &buf_sz))
7871 				goto err;
7872 		} else if (!strncmp(opt, "tc:", 3)) {
7873 			if (kstrtoint(opt + 3, 0, &tc))
7874 				goto err;
7875 		} else if (!strncmp(opt, "watchdog:", 9)) {
7876 			if (kstrtoint(opt + 9, 0, &watchdog))
7877 				goto err;
7878 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7879 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7880 				goto err;
7881 		} else if (!strncmp(opt, "pause:", 6)) {
7882 			if (kstrtoint(opt + 6, 0, &pause))
7883 				goto err;
7884 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7885 			if (kstrtoint(opt + 10, 0, &eee_timer))
7886 				goto err;
7887 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7888 			if (kstrtoint(opt + 11, 0, &chain_mode))
7889 				goto err;
7890 		}
7891 	}
7892 	return 1;
7893 
7894 err:
7895 	pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
7896 	return 1;
7897 }
7898 
7899 __setup("stmmaceth=", stmmac_cmdline_opt);
7900 #endif /* MODULE */
7901 
7902 static int __init stmmac_init(void)
7903 {
7904 #ifdef CONFIG_DEBUG_FS
7905 	/* Create debugfs main directory if it doesn't exist yet */
7906 	if (!stmmac_fs_dir)
7907 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7908 	register_netdevice_notifier(&stmmac_notifier);
7909 #endif
7910 
7911 	return 0;
7912 }
7913 
7914 static void __exit stmmac_exit(void)
7915 {
7916 #ifdef CONFIG_DEBUG_FS
7917 	unregister_netdevice_notifier(&stmmac_notifier);
7918 	debugfs_remove_recursive(stmmac_fs_dir);
7919 #endif
7920 }
7921 
7922 module_init(stmmac_init)
7923 module_exit(stmmac_exit)
7924 
7925 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7926 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7927 MODULE_LICENSE("GPL");
7928