xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision de8c12110a130337c8e7e7b8250de0580e644dee)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52 
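/* Buffer sizes are aligned first to the cache line and then to a 16-byte
 * boundary; TSO payloads are split into chunks of at most 16 KiB - 1, the
 * largest size a single descriptor buffer can describe.
 */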
53 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
54 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
55 
56 /* Module parameters */
57 #define TX_TIMEO	5000
58 static int watchdog = TX_TIMEO;
59 module_param(watchdog, int, 0644);
60 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
61 
62 static int debug = -1;
63 module_param(debug, int, 0644);
64 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
65 
66 static int phyaddr = -1;
67 module_param(phyaddr, int, 0444);
68 MODULE_PARM_DESC(phyaddr, "Physical device address");
69 
70 #define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
71 #define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
72 
73 /* Limit to make sure XDP TX and slow path can coexist */
74 #define STMMAC_XSK_TX_BUDGET_MAX	256
75 #define STMMAC_TX_XSK_AVAIL		16
76 #define STMMAC_RX_FILL_BATCH		16
77 
78 #define STMMAC_XDP_PASS		0
79 #define STMMAC_XDP_CONSUMED	BIT(0)
80 #define STMMAC_XDP_TX		BIT(1)
81 #define STMMAC_XDP_REDIRECT	BIT(2)
82 
83 static int flow_ctrl = FLOW_AUTO;
84 module_param(flow_ctrl, int, 0644);
85 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
86 
87 static int pause = PAUSE_TIME;
88 module_param(pause, int, 0644);
89 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
90 
91 #define TC_DEFAULT 64
92 static int tc = TC_DEFAULT;
93 module_param(tc, int, 0644);
94 MODULE_PARM_DESC(tc, "DMA threshold control value");
95 
96 #define	DEFAULT_BUFSIZE	1536
97 static int buf_sz = DEFAULT_BUFSIZE;
98 module_param(buf_sz, int, 0644);
99 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
100 
101 #define	STMMAC_RX_COPYBREAK	256
102 
103 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
104 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
105 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
106 
107 #define STMMAC_DEFAULT_LPI_TIMER	1000
108 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
109 module_param(eee_timer, int, 0644);
110 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
111 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
112 
113 /* By default the driver uses ring mode to manage the TX and RX descriptors,
114  * but the user can force chain mode instead of ring mode.
115  */
116 static unsigned int chain_mode;
117 module_param(chain_mode, int, 0444);
118 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
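/* Example usage (illustrative only): when built as a module,
 *   modprobe stmmac chain_mode=1 buf_sz=4096
 * selects chain mode and a 4 KiB DMA buffer size; when built in, the same
 * options can be passed on the kernel command line, e.g. stmmac.chain_mode=1.
 */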
119 
120 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
121 /* For MSI interrupt handling */
122 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
123 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
124 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
125 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
126 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
127 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
128 
129 #ifdef CONFIG_DEBUG_FS
130 static const struct net_device_ops stmmac_netdev_ops;
131 static void stmmac_init_fs(struct net_device *dev);
132 static void stmmac_exit_fs(struct net_device *dev);
133 #endif
134 
135 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
136 
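/**
 * stmmac_bus_clks_config - enable or disable the interface bus clocks
 * @priv: driver private structure
 * @enabled: true to prepare/enable the clocks, false to disable/unprepare them
 * Description: central helper to gate stmmac_clk and pclk together with any
 * platform-specific clocks handled through the clks_config() callback.
 */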
137 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
138 {
139 	int ret = 0;
140 
141 	if (enabled) {
142 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
143 		if (ret)
144 			return ret;
145 		ret = clk_prepare_enable(priv->plat->pclk);
146 		if (ret) {
147 			clk_disable_unprepare(priv->plat->stmmac_clk);
148 			return ret;
149 		}
150 		if (priv->plat->clks_config) {
151 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
152 			if (ret) {
153 				clk_disable_unprepare(priv->plat->stmmac_clk);
154 				clk_disable_unprepare(priv->plat->pclk);
155 				return ret;
156 			}
157 		}
158 	} else {
159 		clk_disable_unprepare(priv->plat->stmmac_clk);
160 		clk_disable_unprepare(priv->plat->pclk);
161 		if (priv->plat->clks_config)
162 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 	}
164 
165 	return ret;
166 }
167 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
168 
169 /**
170  * stmmac_verify_args - verify the driver parameters.
171  * Description: it checks the driver parameters and sets a default in
172  * case of errors.
173  */
174 static void stmmac_verify_args(void)
175 {
176 	if (unlikely(watchdog < 0))
177 		watchdog = TX_TIMEO;
178 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
179 		buf_sz = DEFAULT_BUFSIZE;
180 	if (unlikely(flow_ctrl > 1))
181 		flow_ctrl = FLOW_AUTO;
182 	else if (likely(flow_ctrl < 0))
183 		flow_ctrl = FLOW_OFF;
184 	if (unlikely((pause < 0) || (pause > 0xffff)))
185 		pause = PAUSE_TIME;
186 	if (eee_timer < 0)
187 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
188 }
189 
190 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
193 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
194 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
195 	u32 queue;
196 
197 	for (queue = 0; queue < maxq; queue++) {
198 		struct stmmac_channel *ch = &priv->channel[queue];
199 
200 		if (stmmac_xdp_is_enabled(priv) &&
201 		    test_bit(queue, priv->af_xdp_zc_qps)) {
202 			napi_disable(&ch->rxtx_napi);
203 			continue;
204 		}
205 
206 		if (queue < rx_queues_cnt)
207 			napi_disable(&ch->rx_napi);
208 		if (queue < tx_queues_cnt)
209 			napi_disable(&ch->tx_napi);
210 	}
211 }
212 
213 /**
214  * stmmac_disable_all_queues - Disable all queues
215  * @priv: driver private structure
216  */
217 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
218 {
219 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
220 	struct stmmac_rx_queue *rx_q;
221 	u32 queue;
222 
223 	/* synchronize_rcu() needed for pending XDP buffers to drain */
224 	for (queue = 0; queue < rx_queues_cnt; queue++) {
225 		rx_q = &priv->rx_queue[queue];
226 		if (rx_q->xsk_pool) {
227 			synchronize_rcu();
228 			break;
229 		}
230 	}
231 
232 	__stmmac_disable_all_queues(priv);
233 }
234 
235 /**
236  * stmmac_enable_all_queues - Enable all queues
237  * @priv: driver private structure
238  */
239 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
240 {
241 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
242 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
243 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
244 	u32 queue;
245 
246 	for (queue = 0; queue < maxq; queue++) {
247 		struct stmmac_channel *ch = &priv->channel[queue];
248 
249 		if (stmmac_xdp_is_enabled(priv) &&
250 		    test_bit(queue, priv->af_xdp_zc_qps)) {
251 			napi_enable(&ch->rxtx_napi);
252 			continue;
253 		}
254 
255 		if (queue < rx_queues_cnt)
256 			napi_enable(&ch->rx_napi);
257 		if (queue < tx_queues_cnt)
258 			napi_enable(&ch->tx_napi);
259 	}
260 }
261 
262 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
263 {
264 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
265 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
266 		queue_work(priv->wq, &priv->service_task);
267 }
268 
269 static void stmmac_global_err(struct stmmac_priv *priv)
270 {
271 	netif_carrier_off(priv->dev);
272 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
273 	stmmac_service_event_schedule(priv);
274 }
275 
276 /**
277  * stmmac_clk_csr_set - dynamically set the MDC clock
278  * @priv: driver private structure
279  * Description: this is to dynamically set the MDC clock according to the csr
280  * clock input.
281  * Note:
282  *	If a specific clk_csr value is passed from the platform
283  *	this means that the CSR Clock Range selection cannot be
284  *	changed at run-time and it is fixed (as reported in the driver
285  *	documentation). Otherwise the driver will try to set the MDC
286  *	clock dynamically according to the actual clock input.
287  */
288 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
289 {
290 	u32 clk_rate;
291 
292 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
293 
294 	/* The platform-provided default clk_csr is assumed valid in all
295 	 * cases except the ones handled below.
296 	 * For rates higher than the IEEE 802.3 specified frequency we
297 	 * cannot estimate the proper divider because the frequency of
298 	 * clk_csr_i is not known. So we do not change the default
299 	 * divider.
300 	 */
301 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
302 		if (clk_rate < CSR_F_35M)
303 			priv->clk_csr = STMMAC_CSR_20_35M;
304 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
305 			priv->clk_csr = STMMAC_CSR_35_60M;
306 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
307 			priv->clk_csr = STMMAC_CSR_60_100M;
308 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
309 			priv->clk_csr = STMMAC_CSR_100_150M;
310 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
311 			priv->clk_csr = STMMAC_CSR_150_250M;
312 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
313 			priv->clk_csr = STMMAC_CSR_250_300M;
314 	}
315 
316 	if (priv->plat->has_sun8i) {
317 		if (clk_rate > 160000000)
318 			priv->clk_csr = 0x03;
319 		else if (clk_rate > 80000000)
320 			priv->clk_csr = 0x02;
321 		else if (clk_rate > 40000000)
322 			priv->clk_csr = 0x01;
323 		else
324 			priv->clk_csr = 0;
325 	}
326 
327 	if (priv->plat->has_xgmac) {
328 		if (clk_rate > 400000000)
329 			priv->clk_csr = 0x5;
330 		else if (clk_rate > 350000000)
331 			priv->clk_csr = 0x4;
332 		else if (clk_rate > 300000000)
333 			priv->clk_csr = 0x3;
334 		else if (clk_rate > 250000000)
335 			priv->clk_csr = 0x2;
336 		else if (clk_rate > 150000000)
337 			priv->clk_csr = 0x1;
338 		else
339 			priv->clk_csr = 0x0;
340 	}
341 }
342 
343 static void print_pkt(unsigned char *buf, int len)
344 {
345 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
346 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
347 }
348 
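/**
 * stmmac_tx_avail - get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: returns how many descriptors are still usable between cur_tx
 * (producer) and dirty_tx (consumer) in the circular ring; one slot is kept
 * unused so that a full ring can be distinguished from an empty one.
 */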
349 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
350 {
351 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
352 	u32 avail;
353 
354 	if (tx_q->dirty_tx > tx_q->cur_tx)
355 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
356 	else
357 		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
358 
359 	return avail;
360 }
361 
362 /**
363  * stmmac_rx_dirty - get the number of dirty RX descriptors to refill
364  * @priv: driver private structure
365  * @queue: RX queue index
366  */
367 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
368 {
369 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
370 	u32 dirty;
371 
372 	if (rx_q->dirty_rx <= rx_q->cur_rx)
373 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
374 	else
375 		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
376 
377 	return dirty;
378 }
379 
380 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
381 {
382 	int tx_lpi_timer;
383 
384 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
385 	priv->eee_sw_timer_en = en ? 0 : 1;
386 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
387 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
388 }
389 
390 /**
391  * stmmac_enable_eee_mode - check and enter LPI mode
392  * @priv: driver private structure
393  * Description: verify that all TX queues have completed their work and,
394  * if so, put the MAC transmitter in LPI mode when EEE is enabled.
395  */
396 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
397 {
398 	u32 tx_cnt = priv->plat->tx_queues_to_use;
399 	u32 queue;
400 
401 	/* check if all TX queues have the work finished */
402 	for (queue = 0; queue < tx_cnt; queue++) {
403 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
404 
405 		if (tx_q->dirty_tx != tx_q->cur_tx)
406 			return; /* still unfinished work */
407 	}
408 
409 	/* Check and enter in LPI mode */
410 	if (!priv->tx_path_in_lpi_mode)
411 		stmmac_set_eee_mode(priv, priv->hw,
412 				priv->plat->en_tx_lpi_clockgating);
413 }
414 
415 /**
416  * stmmac_disable_eee_mode - disable and exit from LPI mode
417  * @priv: driver private structure
418  * Description: this function exits LPI mode and disables EEE. It is
419  * called from the xmit path.
420  */
421 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
422 {
423 	if (!priv->eee_sw_timer_en) {
424 		stmmac_lpi_entry_timer_config(priv, 0);
425 		return;
426 	}
427 
428 	stmmac_reset_eee_mode(priv, priv->hw);
429 	del_timer_sync(&priv->eee_ctrl_timer);
430 	priv->tx_path_in_lpi_mode = false;
431 }
432 
433 /**
434  * stmmac_eee_ctrl_timer - EEE TX SW timer.
435  * @t:  timer_list struct containing private info
436  * Description:
437  *  if there is no data transfer and we are not already in LPI state,
438  *  then the MAC transmitter can be moved to LPI state.
439  */
440 static void stmmac_eee_ctrl_timer(struct timer_list *t)
441 {
442 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
443 
444 	stmmac_enable_eee_mode(priv);
445 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
446 }
447 
448 /**
449  * stmmac_eee_init - init EEE
450  * @priv: driver private structure
451  * Description:
452  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
453  *  can also manage EEE, this function enables the LPI state and starts
454  *  the related timer.
455  */
456 bool stmmac_eee_init(struct stmmac_priv *priv)
457 {
458 	int eee_tw_timer = priv->eee_tw_timer;
459 
460 	/* When using the PCS we cannot deal with the PHY registers at this
461 	 * stage, so we do not support extra features like EEE.
462 	 */
463 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
464 	    priv->hw->pcs == STMMAC_PCS_RTBI)
465 		return false;
466 
467 	/* Check if MAC core supports the EEE feature. */
468 	if (!priv->dma_cap.eee)
469 		return false;
470 
471 	mutex_lock(&priv->lock);
472 
473 	/* Check if it needs to be deactivated */
474 	if (!priv->eee_active) {
475 		if (priv->eee_enabled) {
476 			netdev_dbg(priv->dev, "disable EEE\n");
477 			stmmac_lpi_entry_timer_config(priv, 0);
478 			del_timer_sync(&priv->eee_ctrl_timer);
479 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
480 		}
481 		mutex_unlock(&priv->lock);
482 		return false;
483 	}
484 
485 	if (priv->eee_active && !priv->eee_enabled) {
486 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
487 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
488 				     eee_tw_timer);
489 	}
490 
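	/* GMAC4 cores can age the LPI entry timer in hardware as long as the
	 * requested timeout fits the LPI entry-timer register (STMMAC_ET_MAX);
	 * otherwise fall back to the software eee_ctrl_timer.
	 */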
491 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
492 		del_timer_sync(&priv->eee_ctrl_timer);
493 		priv->tx_path_in_lpi_mode = false;
494 		stmmac_lpi_entry_timer_config(priv, 1);
495 	} else {
496 		stmmac_lpi_entry_timer_config(priv, 0);
497 		mod_timer(&priv->eee_ctrl_timer,
498 			  STMMAC_LPI_T(priv->tx_lpi_timer));
499 	}
500 
501 	mutex_unlock(&priv->lock);
502 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
503 	return true;
504 }
505 
506 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
507  * @priv: driver private structure
508  * @p : descriptor pointer
509  * @skb : the socket buffer
510  * Description :
511  * This function reads the timestamp from the descriptor, performs some
512  * sanity checks and then passes it to the stack.
513  */
514 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
515 				   struct dma_desc *p, struct sk_buff *skb)
516 {
517 	struct skb_shared_hwtstamps shhwtstamp;
518 	bool found = false;
519 	s64 adjust = 0;
520 	u64 ns = 0;
521 
522 	if (!priv->hwts_tx_en)
523 		return;
524 
525 	/* exit if skb doesn't support hw tstamp */
526 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
527 		return;
528 
529 	/* check tx tstamp status */
530 	if (stmmac_get_tx_timestamp_status(priv, p)) {
531 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
532 		found = true;
533 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
534 		found = true;
535 	}
536 
537 	if (found) {
538 		/* Correct the clk domain crossing(CDC) error */
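		/* The clock-domain-crossing synchronizer adds a latency of
		 * roughly two PTP clock cycles, hence the
		 * 2 * (NSEC_PER_SEC / clk_ptp_rate) correction below.
		 */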
539 		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
540 			adjust += -(2 * (NSEC_PER_SEC /
541 					 priv->plat->clk_ptp_rate));
542 			ns += adjust;
543 		}
544 
545 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
546 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
547 
548 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
549 		/* pass tstamp to stack */
550 		skb_tstamp_tx(skb, &shhwtstamp);
551 	}
552 }
553 
554 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
555  * @priv: driver private structure
556  * @p : descriptor pointer
557  * @np : next descriptor pointer
558  * @skb : the socket buffer
559  * Description :
560  * This function will read received packet's timestamp from the descriptor
561  * and pass it to the stack. It also performs some sanity checks.
562  */
563 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
564 				   struct dma_desc *np, struct sk_buff *skb)
565 {
566 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
567 	struct dma_desc *desc = p;
568 	u64 adjust = 0;
569 	u64 ns = 0;
570 
571 	if (!priv->hwts_rx_en)
572 		return;
573 	/* For GMAC4, the valid timestamp is from CTX next desc. */
574 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
575 		desc = np;
576 
577 	/* Check if timestamp is available */
578 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
579 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
580 
581 		/* Correct the clk domain crossing(CDC) error */
582 		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
583 			adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
584 			ns -= adjust;
585 		}
586 
587 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
588 		shhwtstamp = skb_hwtstamps(skb);
589 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
590 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
591 	} else  {
592 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
593 	}
594 }
595 
596 /**
597  *  stmmac_hwtstamp_set - control hardware timestamping.
598  *  @dev: device pointer.
599  *  @ifr: An IOCTL-specific structure that can contain a pointer to
600  *  a proprietary structure used to pass information to the driver.
601  *  Description:
602  *  This function configures the MAC to enable/disable both outgoing (TX)
603  *  and incoming (RX) packet timestamping based on user input.
604  *  Return Value:
605  *  0 on success and an appropriate -ve integer on failure.
606  */
607 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
608 {
609 	struct stmmac_priv *priv = netdev_priv(dev);
610 	struct hwtstamp_config config;
611 	struct timespec64 now;
612 	u64 temp = 0;
613 	u32 ptp_v2 = 0;
614 	u32 tstamp_all = 0;
615 	u32 ptp_over_ipv4_udp = 0;
616 	u32 ptp_over_ipv6_udp = 0;
617 	u32 ptp_over_ethernet = 0;
618 	u32 snap_type_sel = 0;
619 	u32 ts_master_en = 0;
620 	u32 ts_event_en = 0;
621 	u32 sec_inc = 0;
622 	u32 value = 0;
623 	bool xmac;
624 
625 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
626 
627 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
628 		netdev_alert(priv->dev, "No support for HW time stamping\n");
629 		priv->hwts_tx_en = 0;
630 		priv->hwts_rx_en = 0;
631 
632 		return -EOPNOTSUPP;
633 	}
634 
635 	if (copy_from_user(&config, ifr->ifr_data,
636 			   sizeof(config)))
637 		return -EFAULT;
638 
639 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
640 		   __func__, config.flags, config.tx_type, config.rx_filter);
641 
642 	/* reserved for future extensions */
643 	if (config.flags)
644 		return -EINVAL;
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id != DWMAC_CORE_5_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
788 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
789 	else {
790 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
791 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
792 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
793 			 ts_master_en | snap_type_sel);
794 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
795 
796 		/* program Sub Second Increment reg */
797 		stmmac_config_sub_second_increment(priv,
798 				priv->ptpaddr, priv->plat->clk_ptp_rate,
799 				xmac, &sec_inc);
800 		temp = div_u64(1000000000ULL, sec_inc);
801 
802 		/* Store sub second increment and flags for later use */
803 		priv->sub_second_inc = sec_inc;
804 		priv->systime_flags = value;
805 
806 		/* Calculate the default addend value:
807 		 * the formula is:
808 		 * addend = (2^32)/freq_div_ratio;
809 		 * where freq_div_ratio = 1e9 ns / sec_inc
810 		 */
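		/* A worked example (illustrative only): with fine correction
		 * enabled the sub-second increment is programmed to two clock
		 * periods, so freq_div_ratio is roughly clk_ptp_rate / 2 and
		 * the default addend comes out near 2^31 (0x80000000); the
		 * frequency adjustment code then scales around this value.
		 */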
811 		temp = (u64)(temp << 32);
812 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
813 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
814 
815 		/* initialize system time */
816 		ktime_get_real_ts64(&now);
817 
818 		/* lower 32 bits of tv_sec are safe until y2106 */
819 		stmmac_init_systime(priv, priv->ptpaddr,
820 				(u32)now.tv_sec, now.tv_nsec);
821 	}
822 
823 	memcpy(&priv->tstamp_config, &config, sizeof(config));
824 
825 	return copy_to_user(ifr->ifr_data, &config,
826 			    sizeof(config)) ? -EFAULT : 0;
827 }
828 
829 /**
830  *  stmmac_hwtstamp_get - read hardware timestamping.
831  *  @dev: device pointer.
832  *  @ifr: An IOCTL-specific structure that can contain a pointer to
833  *  a proprietary structure used to pass information to the driver.
834  *  Description:
835  *  This function obtains the current hardware timestamping settings
836  *  as requested.
837  */
838 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
839 {
840 	struct stmmac_priv *priv = netdev_priv(dev);
841 	struct hwtstamp_config *config = &priv->tstamp_config;
842 
843 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
844 		return -EOPNOTSUPP;
845 
846 	return copy_to_user(ifr->ifr_data, config,
847 			    sizeof(*config)) ? -EFAULT : 0;
848 }
849 
850 /**
851  * stmmac_init_ptp - init PTP
852  * @priv: driver private structure
853  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
854  * This is done by looking at the HW cap. register.
855  * This function also registers the ptp driver.
856  */
857 static int stmmac_init_ptp(struct stmmac_priv *priv)
858 {
859 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
860 
861 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
862 		return -EOPNOTSUPP;
863 
864 	priv->adv_ts = 0;
865 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
866 	if (xmac && priv->dma_cap.atime_stamp)
867 		priv->adv_ts = 1;
868 	/* Dwmac 3.x core with extend_desc can support adv_ts */
869 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
870 		priv->adv_ts = 1;
871 
872 	if (priv->dma_cap.time_stamp)
873 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
874 
875 	if (priv->adv_ts)
876 		netdev_info(priv->dev,
877 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
878 
879 	priv->hwts_tx_en = 0;
880 	priv->hwts_rx_en = 0;
881 
882 	stmmac_ptp_register(priv);
883 
884 	return 0;
885 }
886 
887 static void stmmac_release_ptp(struct stmmac_priv *priv)
888 {
889 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
890 	stmmac_ptp_unregister(priv);
891 }
892 
893 /**
894  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
895  *  @priv: driver private structure
896  *  @duplex: duplex mode used when configuring flow control
897  *  Description: It is used for configuring the flow control in all queues
898  */
899 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
900 {
901 	u32 tx_cnt = priv->plat->tx_queues_to_use;
902 
903 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
904 			priv->pause, tx_cnt);
905 }
906 
907 static void stmmac_validate(struct phylink_config *config,
908 			    unsigned long *supported,
909 			    struct phylink_link_state *state)
910 {
911 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
912 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
913 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
914 	int tx_cnt = priv->plat->tx_queues_to_use;
915 	int max_speed = priv->plat->max_speed;
916 
917 	phylink_set(mac_supported, 10baseT_Half);
918 	phylink_set(mac_supported, 10baseT_Full);
919 	phylink_set(mac_supported, 100baseT_Half);
920 	phylink_set(mac_supported, 100baseT_Full);
921 	phylink_set(mac_supported, 1000baseT_Half);
922 	phylink_set(mac_supported, 1000baseT_Full);
923 	phylink_set(mac_supported, 1000baseKX_Full);
924 
925 	phylink_set(mac_supported, Autoneg);
926 	phylink_set(mac_supported, Pause);
927 	phylink_set(mac_supported, Asym_Pause);
928 	phylink_set_port_modes(mac_supported);
929 
930 	/* Cut down 1G if asked to */
931 	if ((max_speed > 0) && (max_speed < 1000)) {
932 		phylink_set(mask, 1000baseT_Full);
933 		phylink_set(mask, 1000baseX_Full);
934 	} else if (priv->plat->has_xgmac) {
935 		if (!max_speed || (max_speed >= 2500)) {
936 			phylink_set(mac_supported, 2500baseT_Full);
937 			phylink_set(mac_supported, 2500baseX_Full);
938 		}
939 		if (!max_speed || (max_speed >= 5000)) {
940 			phylink_set(mac_supported, 5000baseT_Full);
941 		}
942 		if (!max_speed || (max_speed >= 10000)) {
943 			phylink_set(mac_supported, 10000baseSR_Full);
944 			phylink_set(mac_supported, 10000baseLR_Full);
945 			phylink_set(mac_supported, 10000baseER_Full);
946 			phylink_set(mac_supported, 10000baseLRM_Full);
947 			phylink_set(mac_supported, 10000baseT_Full);
948 			phylink_set(mac_supported, 10000baseKX4_Full);
949 			phylink_set(mac_supported, 10000baseKR_Full);
950 		}
951 		if (!max_speed || (max_speed >= 25000)) {
952 			phylink_set(mac_supported, 25000baseCR_Full);
953 			phylink_set(mac_supported, 25000baseKR_Full);
954 			phylink_set(mac_supported, 25000baseSR_Full);
955 		}
956 		if (!max_speed || (max_speed >= 40000)) {
957 			phylink_set(mac_supported, 40000baseKR4_Full);
958 			phylink_set(mac_supported, 40000baseCR4_Full);
959 			phylink_set(mac_supported, 40000baseSR4_Full);
960 			phylink_set(mac_supported, 40000baseLR4_Full);
961 		}
962 		if (!max_speed || (max_speed >= 50000)) {
963 			phylink_set(mac_supported, 50000baseCR2_Full);
964 			phylink_set(mac_supported, 50000baseKR2_Full);
965 			phylink_set(mac_supported, 50000baseSR2_Full);
966 			phylink_set(mac_supported, 50000baseKR_Full);
967 			phylink_set(mac_supported, 50000baseSR_Full);
968 			phylink_set(mac_supported, 50000baseCR_Full);
969 			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
970 			phylink_set(mac_supported, 50000baseDR_Full);
971 		}
972 		if (!max_speed || (max_speed >= 100000)) {
973 			phylink_set(mac_supported, 100000baseKR4_Full);
974 			phylink_set(mac_supported, 100000baseSR4_Full);
975 			phylink_set(mac_supported, 100000baseCR4_Full);
976 			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
977 			phylink_set(mac_supported, 100000baseKR2_Full);
978 			phylink_set(mac_supported, 100000baseSR2_Full);
979 			phylink_set(mac_supported, 100000baseCR2_Full);
980 			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
981 			phylink_set(mac_supported, 100000baseDR2_Full);
982 		}
983 	}
984 
985 	/* Half-Duplex can only work with a single queue */
986 	if (tx_cnt > 1) {
987 		phylink_set(mask, 10baseT_Half);
988 		phylink_set(mask, 100baseT_Half);
989 		phylink_set(mask, 1000baseT_Half);
990 	}
991 
992 	linkmode_and(supported, supported, mac_supported);
993 	linkmode_andnot(supported, supported, mask);
994 
995 	linkmode_and(state->advertising, state->advertising, mac_supported);
996 	linkmode_andnot(state->advertising, state->advertising, mask);
997 
998 	/* If PCS is supported, check which modes it supports. */
999 	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
1000 }
1001 
1002 static void stmmac_mac_pcs_get_state(struct phylink_config *config,
1003 				     struct phylink_link_state *state)
1004 {
1005 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1006 
1007 	state->link = 0;
1008 	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
1009 }
1010 
1011 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
1012 			      const struct phylink_link_state *state)
1013 {
1014 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1015 
1016 	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
1017 }
1018 
1019 static void stmmac_mac_an_restart(struct phylink_config *config)
1020 {
1021 	/* Not Supported */
1022 }
1023 
1024 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
1025 {
1026 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
1027 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
1028 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
1029 	bool *hs_enable = &fpe_cfg->hs_enable;
1030 
1031 	if (is_up && *hs_enable) {
1032 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
1033 	} else {
1034 		*lo_state = FPE_EVENT_UNKNOWN;
1035 		*lp_state = FPE_EVENT_UNKNOWN;
1036 	}
1037 }
1038 
1039 static void stmmac_mac_link_down(struct phylink_config *config,
1040 				 unsigned int mode, phy_interface_t interface)
1041 {
1042 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1043 
1044 	stmmac_mac_set(priv, priv->ioaddr, false);
1045 	priv->eee_active = false;
1046 	priv->tx_lpi_enabled = false;
1047 	stmmac_eee_init(priv);
1048 	stmmac_set_eee_pls(priv, priv->hw, false);
1049 
1050 	if (priv->dma_cap.fpesel)
1051 		stmmac_fpe_link_state_handle(priv, false);
1052 }
1053 
1054 static void stmmac_mac_link_up(struct phylink_config *config,
1055 			       struct phy_device *phy,
1056 			       unsigned int mode, phy_interface_t interface,
1057 			       int speed, int duplex,
1058 			       bool tx_pause, bool rx_pause)
1059 {
1060 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1061 	u32 ctrl;
1062 
1063 	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
1064 
1065 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1066 	ctrl &= ~priv->hw->link.speed_mask;
1067 
1068 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1069 		switch (speed) {
1070 		case SPEED_10000:
1071 			ctrl |= priv->hw->link.xgmii.speed10000;
1072 			break;
1073 		case SPEED_5000:
1074 			ctrl |= priv->hw->link.xgmii.speed5000;
1075 			break;
1076 		case SPEED_2500:
1077 			ctrl |= priv->hw->link.xgmii.speed2500;
1078 			break;
1079 		default:
1080 			return;
1081 		}
1082 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1083 		switch (speed) {
1084 		case SPEED_100000:
1085 			ctrl |= priv->hw->link.xlgmii.speed100000;
1086 			break;
1087 		case SPEED_50000:
1088 			ctrl |= priv->hw->link.xlgmii.speed50000;
1089 			break;
1090 		case SPEED_40000:
1091 			ctrl |= priv->hw->link.xlgmii.speed40000;
1092 			break;
1093 		case SPEED_25000:
1094 			ctrl |= priv->hw->link.xlgmii.speed25000;
1095 			break;
1096 		case SPEED_10000:
1097 			ctrl |= priv->hw->link.xgmii.speed10000;
1098 			break;
1099 		case SPEED_2500:
1100 			ctrl |= priv->hw->link.speed2500;
1101 			break;
1102 		case SPEED_1000:
1103 			ctrl |= priv->hw->link.speed1000;
1104 			break;
1105 		default:
1106 			return;
1107 		}
1108 	} else {
1109 		switch (speed) {
1110 		case SPEED_2500:
1111 			ctrl |= priv->hw->link.speed2500;
1112 			break;
1113 		case SPEED_1000:
1114 			ctrl |= priv->hw->link.speed1000;
1115 			break;
1116 		case SPEED_100:
1117 			ctrl |= priv->hw->link.speed100;
1118 			break;
1119 		case SPEED_10:
1120 			ctrl |= priv->hw->link.speed10;
1121 			break;
1122 		default:
1123 			return;
1124 		}
1125 	}
1126 
1127 	priv->speed = speed;
1128 
1129 	if (priv->plat->fix_mac_speed)
1130 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1131 
1132 	if (!duplex)
1133 		ctrl &= ~priv->hw->link.duplex;
1134 	else
1135 		ctrl |= priv->hw->link.duplex;
1136 
1137 	/* Flow Control operation */
1138 	if (tx_pause && rx_pause)
1139 		stmmac_mac_flow_ctrl(priv, duplex);
1140 
1141 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1142 
1143 	stmmac_mac_set(priv, priv->ioaddr, true);
1144 	if (phy && priv->dma_cap.eee) {
1145 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
1146 		priv->eee_enabled = stmmac_eee_init(priv);
1147 		priv->tx_lpi_enabled = priv->eee_enabled;
1148 		stmmac_set_eee_pls(priv, priv->hw, true);
1149 	}
1150 
1151 	if (priv->dma_cap.fpesel)
1152 		stmmac_fpe_link_state_handle(priv, true);
1153 }
1154 
1155 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1156 	.validate = stmmac_validate,
1157 	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
1158 	.mac_config = stmmac_mac_config,
1159 	.mac_an_restart = stmmac_mac_an_restart,
1160 	.mac_link_down = stmmac_mac_link_down,
1161 	.mac_link_up = stmmac_mac_link_up,
1162 };
1163 
1164 /**
1165  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1166  * @priv: driver private structure
1167  * Description: this is to verify if the HW supports the PCS.
1168  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1169  * configured for the TBI, RTBI, or SGMII PHY interface.
1170  */
1171 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1172 {
1173 	int interface = priv->plat->interface;
1174 
1175 	if (priv->dma_cap.pcs) {
1176 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1177 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1178 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1179 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1180 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1181 			priv->hw->pcs = STMMAC_PCS_RGMII;
1182 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1183 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1184 			priv->hw->pcs = STMMAC_PCS_SGMII;
1185 		}
1186 	}
1187 }
1188 
1189 /**
1190  * stmmac_init_phy - PHY initialization
1191  * @dev: net device structure
1192  * Description: it initializes the driver's PHY state, and attaches the PHY
1193  * to the mac driver.
1194  *  Return value:
1195  *  0 on success
1196  */
1197 static int stmmac_init_phy(struct net_device *dev)
1198 {
1199 	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1200 	struct stmmac_priv *priv = netdev_priv(dev);
1201 	struct device_node *node;
1202 	int ret;
1203 
1204 	node = priv->plat->phylink_node;
1205 
1206 	if (node)
1207 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
1208 
1209 	/* Some DT bindings do not set up the PHY handle. Let's try to
1210 	 * parse it manually.
1211 	 */
1212 	if (!node || ret) {
1213 		int addr = priv->plat->phy_addr;
1214 		struct phy_device *phydev;
1215 
1216 		phydev = mdiobus_get_phy(priv->mii, addr);
1217 		if (!phydev) {
1218 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1219 			return -ENODEV;
1220 		}
1221 
1222 		ret = phylink_connect_phy(priv->phylink, phydev);
1223 	}
1224 
1225 	phylink_ethtool_get_wol(priv->phylink, &wol);
1226 	device_set_wakeup_capable(priv->device, !!wol.supported);
1227 
1228 	return ret;
1229 }
1230 
1231 static int stmmac_phy_setup(struct stmmac_priv *priv)
1232 {
1233 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1234 	int mode = priv->plat->phy_interface;
1235 	struct phylink *phylink;
1236 
1237 	priv->phylink_config.dev = &priv->dev->dev;
1238 	priv->phylink_config.type = PHYLINK_NETDEV;
1239 	priv->phylink_config.pcs_poll = true;
1240 	priv->phylink_config.ovr_an_inband =
1241 		priv->plat->mdio_bus_data->xpcs_an_inband;
1242 
1243 	if (!fwnode)
1244 		fwnode = dev_fwnode(priv->device);
1245 
1246 	phylink = phylink_create(&priv->phylink_config, fwnode,
1247 				 mode, &stmmac_phylink_mac_ops);
1248 	if (IS_ERR(phylink))
1249 		return PTR_ERR(phylink);
1250 
1251 	priv->phylink = phylink;
1252 	return 0;
1253 }
1254 
1255 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1256 {
1257 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1258 	unsigned int desc_size;
1259 	void *head_rx;
1260 	u32 queue;
1261 
1262 	/* Display RX rings */
1263 	for (queue = 0; queue < rx_cnt; queue++) {
1264 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1265 
1266 		pr_info("\tRX Queue %u rings\n", queue);
1267 
1268 		if (priv->extend_desc) {
1269 			head_rx = (void *)rx_q->dma_erx;
1270 			desc_size = sizeof(struct dma_extended_desc);
1271 		} else {
1272 			head_rx = (void *)rx_q->dma_rx;
1273 			desc_size = sizeof(struct dma_desc);
1274 		}
1275 
1276 		/* Display RX ring */
1277 		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1278 				    rx_q->dma_rx_phy, desc_size);
1279 	}
1280 }
1281 
1282 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1283 {
1284 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1285 	unsigned int desc_size;
1286 	void *head_tx;
1287 	u32 queue;
1288 
1289 	/* Display TX rings */
1290 	for (queue = 0; queue < tx_cnt; queue++) {
1291 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1292 
1293 		pr_info("\tTX Queue %d rings\n", queue);
1294 
1295 		if (priv->extend_desc) {
1296 			head_tx = (void *)tx_q->dma_etx;
1297 			desc_size = sizeof(struct dma_extended_desc);
1298 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1299 			head_tx = (void *)tx_q->dma_entx;
1300 			desc_size = sizeof(struct dma_edesc);
1301 		} else {
1302 			head_tx = (void *)tx_q->dma_tx;
1303 			desc_size = sizeof(struct dma_desc);
1304 		}
1305 
1306 		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1307 				    tx_q->dma_tx_phy, desc_size);
1308 	}
1309 }
1310 
1311 static void stmmac_display_rings(struct stmmac_priv *priv)
1312 {
1313 	/* Display RX ring */
1314 	stmmac_display_rx_rings(priv);
1315 
1316 	/* Display TX ring */
1317 	stmmac_display_tx_rings(priv);
1318 }
1319 
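/**
 * stmmac_set_bfsize - pick the DMA buffer size for a given MTU
 * @mtu: requested MTU
 * @bufsize: current buffer size
 * Description: rounds the MTU up to the next supported DMA buffer size
 * (2 KiB, 4 KiB, 8 KiB or 16 KiB); standard-sized frames use DEFAULT_BUFSIZE.
 */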
1320 static int stmmac_set_bfsize(int mtu, int bufsize)
1321 {
1322 	int ret = bufsize;
1323 
1324 	if (mtu >= BUF_SIZE_8KiB)
1325 		ret = BUF_SIZE_16KiB;
1326 	else if (mtu >= BUF_SIZE_4KiB)
1327 		ret = BUF_SIZE_8KiB;
1328 	else if (mtu >= BUF_SIZE_2KiB)
1329 		ret = BUF_SIZE_4KiB;
1330 	else if (mtu > DEFAULT_BUFSIZE)
1331 		ret = BUF_SIZE_2KiB;
1332 	else
1333 		ret = DEFAULT_BUFSIZE;
1334 
1335 	return ret;
1336 }
1337 
1338 /**
1339  * stmmac_clear_rx_descriptors - clear RX descriptors
1340  * @priv: driver private structure
1341  * @queue: RX queue index
1342  * Description: this function is called to clear the RX descriptors
1343  * for both basic and extended descriptor layouts.
1344  */
1345 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1346 {
1347 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1348 	int i;
1349 
1350 	/* Clear the RX descriptors */
1351 	for (i = 0; i < priv->dma_rx_size; i++)
1352 		if (priv->extend_desc)
1353 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1354 					priv->use_riwt, priv->mode,
1355 					(i == priv->dma_rx_size - 1),
1356 					priv->dma_buf_sz);
1357 		else
1358 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1359 					priv->use_riwt, priv->mode,
1360 					(i == priv->dma_rx_size - 1),
1361 					priv->dma_buf_sz);
1362 }
1363 
1364 /**
1365  * stmmac_clear_tx_descriptors - clear tx descriptors
1366  * @priv: driver private structure
1367  * @queue: TX queue index.
1368  * Description: this function is called to clear the TX descriptors
1369  * for both basic and extended descriptor layouts.
1370  */
1371 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1372 {
1373 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1374 	int i;
1375 
1376 	/* Clear the TX descriptors */
1377 	for (i = 0; i < priv->dma_tx_size; i++) {
1378 		int last = (i == (priv->dma_tx_size - 1));
1379 		struct dma_desc *p;
1380 
1381 		if (priv->extend_desc)
1382 			p = &tx_q->dma_etx[i].basic;
1383 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1384 			p = &tx_q->dma_entx[i].basic;
1385 		else
1386 			p = &tx_q->dma_tx[i];
1387 
1388 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1389 	}
1390 }
1391 
1392 /**
1393  * stmmac_clear_descriptors - clear descriptors
1394  * @priv: driver private structure
1395  * Description: this function is called to clear the TX and RX descriptors
1396  * for both basic and extended descriptor layouts.
1397  */
1398 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1399 {
1400 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1401 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1402 	u32 queue;
1403 
1404 	/* Clear the RX descriptors */
1405 	for (queue = 0; queue < rx_queue_cnt; queue++)
1406 		stmmac_clear_rx_descriptors(priv, queue);
1407 
1408 	/* Clear the TX descriptors */
1409 	for (queue = 0; queue < tx_queue_cnt; queue++)
1410 		stmmac_clear_tx_descriptors(priv, queue);
1411 }
1412 
1413 /**
1414  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1415  * @priv: driver private structure
1416  * @p: descriptor pointer
1417  * @i: descriptor index
1418  * @flags: gfp flag
1419  * @queue: RX queue index
1420  * Description: this function is called to allocate a receive buffer, perform
1421  * the DMA mapping and init the descriptor.
1422  */
1423 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1424 				  int i, gfp_t flags, u32 queue)
1425 {
1426 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1427 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1428 
1429 	if (!buf->page) {
1430 		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1431 		if (!buf->page)
1432 			return -ENOMEM;
1433 		buf->page_offset = stmmac_rx_offset(priv);
1434 	}
1435 
1436 	if (priv->sph && !buf->sec_page) {
1437 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1438 		if (!buf->sec_page)
1439 			return -ENOMEM;
1440 
1441 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1442 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1443 	} else {
1444 		buf->sec_page = NULL;
1445 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1446 	}
1447 
1448 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1449 
1450 	stmmac_set_desc_addr(priv, p, buf->addr);
1451 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1452 		stmmac_init_desc3(priv, p);
1453 
1454 	return 0;
1455 }
1456 
1457 /**
1458  * stmmac_free_rx_buffer - free RX dma buffers
1459  * @priv: private structure
1460  * @queue: RX queue index
1461  * @i: buffer index.
1462  */
1463 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1464 {
1465 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1466 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1467 
1468 	if (buf->page)
1469 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1470 	buf->page = NULL;
1471 
1472 	if (buf->sec_page)
1473 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1474 	buf->sec_page = NULL;
1475 }
1476 
1477 /**
1478  * stmmac_free_tx_buffer - free TX dma buffers
1479  * @priv: private structure
1480  * @queue: TX queue index
1481  * @i: buffer index.
1482  */
1483 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1484 {
1485 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1486 
1487 	if (tx_q->tx_skbuff_dma[i].buf &&
1488 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1489 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1490 			dma_unmap_page(priv->device,
1491 				       tx_q->tx_skbuff_dma[i].buf,
1492 				       tx_q->tx_skbuff_dma[i].len,
1493 				       DMA_TO_DEVICE);
1494 		else
1495 			dma_unmap_single(priv->device,
1496 					 tx_q->tx_skbuff_dma[i].buf,
1497 					 tx_q->tx_skbuff_dma[i].len,
1498 					 DMA_TO_DEVICE);
1499 	}
1500 
1501 	if (tx_q->xdpf[i] &&
1502 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1503 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1504 		xdp_return_frame(tx_q->xdpf[i]);
1505 		tx_q->xdpf[i] = NULL;
1506 	}
1507 
1508 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1509 		tx_q->xsk_frames_done++;
1510 
1511 	if (tx_q->tx_skbuff[i] &&
1512 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1513 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1514 		tx_q->tx_skbuff[i] = NULL;
1515 	}
1516 
1517 	tx_q->tx_skbuff_dma[i].buf = 0;
1518 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1519 }
1520 
1521 /**
1522  * dma_free_rx_skbufs - free RX dma buffers
1523  * @priv: private structure
1524  * @queue: RX queue index
1525  */
1526 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1527 {
1528 	int i;
1529 
1530 	for (i = 0; i < priv->dma_rx_size; i++)
1531 		stmmac_free_rx_buffer(priv, queue, i);
1532 }
1533 
1534 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
1535 				   gfp_t flags)
1536 {
1537 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1538 	int i;
1539 
1540 	for (i = 0; i < priv->dma_rx_size; i++) {
1541 		struct dma_desc *p;
1542 		int ret;
1543 
1544 		if (priv->extend_desc)
1545 			p = &((rx_q->dma_erx + i)->basic);
1546 		else
1547 			p = rx_q->dma_rx + i;
1548 
1549 		ret = stmmac_init_rx_buffers(priv, p, i, flags,
1550 					     queue);
1551 		if (ret)
1552 			return ret;
1553 
1554 		rx_q->buf_alloc_num++;
1555 	}
1556 
1557 	return 0;
1558 }
1559 
1560 /**
1561  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1562  * @priv: private structure
1563  * @queue: RX queue index
1564  */
1565 static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1566 {
1567 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1568 	int i;
1569 
1570 	for (i = 0; i < priv->dma_rx_size; i++) {
1571 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1572 
1573 		if (!buf->xdp)
1574 			continue;
1575 
1576 		xsk_buff_free(buf->xdp);
1577 		buf->xdp = NULL;
1578 	}
1579 }
1580 
1581 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1582 {
1583 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1584 	int i;
1585 
1586 	for (i = 0; i < priv->dma_rx_size; i++) {
1587 		struct stmmac_rx_buffer *buf;
1588 		dma_addr_t dma_addr;
1589 		struct dma_desc *p;
1590 
1591 		if (priv->extend_desc)
1592 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1593 		else
1594 			p = rx_q->dma_rx + i;
1595 
1596 		buf = &rx_q->buf_pool[i];
1597 
1598 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1599 		if (!buf->xdp)
1600 			return -ENOMEM;
1601 
1602 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1603 		stmmac_set_desc_addr(priv, p, dma_addr);
1604 		rx_q->buf_alloc_num++;
1605 	}
1606 
1607 	return 0;
1608 }
1609 
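/* Return the XSK buffer pool bound to @queue, or NULL when XDP is disabled
 * or the queue is not set up for AF_XDP zero-copy.
 */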
1610 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1611 {
1612 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1613 		return NULL;
1614 
1615 	return xsk_get_pool_from_qid(priv->dev, queue);
1616 }
1617 
1618 /**
1619  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1620  * @priv: driver private structure
1621  * @queue: RX queue index
1622  * @flags: gfp flag.
1623  * Description: this function initializes the DMA RX descriptors
1624  * and allocates the socket buffers. It supports the chained and ring
1625  * modes.
1626  */
1627 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
1628 {
1629 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1630 	int ret;
1631 
1632 	netif_dbg(priv, probe, priv->dev,
1633 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1634 		  (u32)rx_q->dma_rx_phy);
1635 
1636 	stmmac_clear_rx_descriptors(priv, queue);
1637 
1638 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1639 
1640 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1641 
1642 	if (rx_q->xsk_pool) {
1643 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1644 						   MEM_TYPE_XSK_BUFF_POOL,
1645 						   NULL));
1646 		netdev_info(priv->dev,
1647 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1648 			    rx_q->queue_index);
1649 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1650 	} else {
1651 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1652 						   MEM_TYPE_PAGE_POOL,
1653 						   rx_q->page_pool));
1654 		netdev_info(priv->dev,
1655 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1656 			    rx_q->queue_index);
1657 	}
1658 
1659 	if (rx_q->xsk_pool) {
1660 		/* RX XDP ZC buffer pool may not be populated, e.g.
1661 		 * xdpsock TX-only.
1662 		 */
1663 		stmmac_alloc_rx_buffers_zc(priv, queue);
1664 	} else {
1665 		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
1666 		if (ret < 0)
1667 			return -ENOMEM;
1668 	}
1669 
1670 	rx_q->cur_rx = 0;
1671 	rx_q->dirty_rx = 0;
1672 
1673 	/* Setup the chained descriptor addresses */
1674 	if (priv->mode == STMMAC_CHAIN_MODE) {
1675 		if (priv->extend_desc)
1676 			stmmac_mode_init(priv, rx_q->dma_erx,
1677 					 rx_q->dma_rx_phy,
1678 					 priv->dma_rx_size, 1);
1679 		else
1680 			stmmac_mode_init(priv, rx_q->dma_rx,
1681 					 rx_q->dma_rx_phy,
1682 					 priv->dma_rx_size, 0);
1683 	}
1684 
1685 	return 0;
1686 }
1687 
1688 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1689 {
1690 	struct stmmac_priv *priv = netdev_priv(dev);
1691 	u32 rx_count = priv->plat->rx_queues_to_use;
1692 	u32 queue;
1693 	int ret;
1694 
1695 	/* RX INITIALIZATION */
1696 	netif_dbg(priv, probe, priv->dev,
1697 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1698 
1699 	for (queue = 0; queue < rx_count; queue++) {
1700 		ret = __init_dma_rx_desc_rings(priv, queue, flags);
1701 		if (ret)
1702 			goto err_init_rx_buffers;
1703 	}
1704 
1705 	return 0;
1706 
1707 err_init_rx_buffers:
1708 	while (queue >= 0) {
1709 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1710 
1711 		if (rx_q->xsk_pool)
1712 			dma_free_rx_xskbufs(priv, queue);
1713 		else
1714 			dma_free_rx_skbufs(priv, queue);
1715 
1716 		rx_q->buf_alloc_num = 0;
1717 		rx_q->xsk_pool = NULL;
1718 
1719 		if (queue == 0)
1720 			break;
1721 
1722 		queue--;
1723 	}
1724 
1725 	return ret;
1726 }
1727 
1728 /**
1729  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1730  * @priv: driver private structure
1731  * @queue : TX queue index
1732  * Description: this function initializes the DMA TX descriptors
1733  * and the related driver bookkeeping. It supports the chained and ring
1734  * modes.
1735  */
1736 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
1737 {
1738 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1739 	int i;
1740 
1741 	netif_dbg(priv, probe, priv->dev,
1742 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1743 		  (u32)tx_q->dma_tx_phy);
1744 
1745 	/* Setup the chained descriptor addresses */
1746 	if (priv->mode == STMMAC_CHAIN_MODE) {
1747 		if (priv->extend_desc)
1748 			stmmac_mode_init(priv, tx_q->dma_etx,
1749 					 tx_q->dma_tx_phy,
1750 					 priv->dma_tx_size, 1);
1751 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1752 			stmmac_mode_init(priv, tx_q->dma_tx,
1753 					 tx_q->dma_tx_phy,
1754 					 priv->dma_tx_size, 0);
1755 	}
1756 
1757 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1758 
1759 	for (i = 0; i < priv->dma_tx_size; i++) {
1760 		struct dma_desc *p;
1761 
1762 		if (priv->extend_desc)
1763 			p = &((tx_q->dma_etx + i)->basic);
1764 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1765 			p = &((tx_q->dma_entx + i)->basic);
1766 		else
1767 			p = tx_q->dma_tx + i;
1768 
1769 		stmmac_clear_desc(priv, p);
1770 
1771 		tx_q->tx_skbuff_dma[i].buf = 0;
1772 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1773 		tx_q->tx_skbuff_dma[i].len = 0;
1774 		tx_q->tx_skbuff_dma[i].last_segment = false;
1775 		tx_q->tx_skbuff[i] = NULL;
1776 	}
1777 
1778 	tx_q->dirty_tx = 0;
1779 	tx_q->cur_tx = 0;
1780 	tx_q->mss = 0;
1781 
1782 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1783 
1784 	return 0;
1785 }
1786 
1787 static int init_dma_tx_desc_rings(struct net_device *dev)
1788 {
1789 	struct stmmac_priv *priv = netdev_priv(dev);
1790 	u32 tx_queue_cnt;
1791 	u32 queue;
1792 
1793 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1794 
1795 	for (queue = 0; queue < tx_queue_cnt; queue++)
1796 		__init_dma_tx_desc_rings(priv, queue);
1797 
1798 	return 0;
1799 }
1800 
1801 /**
1802  * init_dma_desc_rings - init the RX/TX descriptor rings
1803  * @dev: net device structure
1804  * @flags: gfp flag.
1805  * Description: this function initializes the DMA RX/TX descriptors
1806  * and allocates the socket buffers. It supports the chained and ring
1807  * modes.
1808  */
1809 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1810 {
1811 	struct stmmac_priv *priv = netdev_priv(dev);
1812 	int ret;
1813 
1814 	ret = init_dma_rx_desc_rings(dev, flags);
1815 	if (ret)
1816 		return ret;
1817 
1818 	ret = init_dma_tx_desc_rings(dev);
1819 
1820 	stmmac_clear_descriptors(priv);
1821 
1822 	if (netif_msg_hw(priv))
1823 		stmmac_display_rings(priv);
1824 
1825 	return ret;
1826 }
1827 
1828 /**
1829  * dma_free_tx_skbufs - free TX dma buffers
1830  * @priv: private structure
1831  * @queue: TX queue index
1832  */
1833 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1834 {
1835 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1836 	int i;
1837 
1838 	tx_q->xsk_frames_done = 0;
1839 
1840 	for (i = 0; i < priv->dma_tx_size; i++)
1841 		stmmac_free_tx_buffer(priv, queue, i);
1842 
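	/* If freeing the ring completed any outstanding XSK TX frames,
	 * report them to the pool before dropping the pool reference.
	 */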
1843 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1844 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1845 		tx_q->xsk_frames_done = 0;
1846 		tx_q->xsk_pool = NULL;
1847 	}
1848 }
1849 
1850 /**
1851  * stmmac_free_tx_skbufs - free TX skb buffers
1852  * @priv: private structure
1853  */
1854 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1855 {
1856 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1857 	u32 queue;
1858 
1859 	for (queue = 0; queue < tx_queue_cnt; queue++)
1860 		dma_free_tx_skbufs(priv, queue);
1861 }
1862 
1863 /**
1864  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1865  * @priv: private structure
1866  * @queue: RX queue index
1867  */
1868 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1869 {
1870 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1871 
1872 	/* Release the DMA RX socket buffers */
1873 	if (rx_q->xsk_pool)
1874 		dma_free_rx_xskbufs(priv, queue);
1875 	else
1876 		dma_free_rx_skbufs(priv, queue);
1877 
1878 	rx_q->buf_alloc_num = 0;
1879 	rx_q->xsk_pool = NULL;
1880 
1881 	/* Free DMA regions of consistent memory previously allocated */
1882 	if (!priv->extend_desc)
1883 		dma_free_coherent(priv->device, priv->dma_rx_size *
1884 				  sizeof(struct dma_desc),
1885 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1886 	else
1887 		dma_free_coherent(priv->device, priv->dma_rx_size *
1888 				  sizeof(struct dma_extended_desc),
1889 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1890 
1891 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1892 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1893 
1894 	kfree(rx_q->buf_pool);
1895 	if (rx_q->page_pool)
1896 		page_pool_destroy(rx_q->page_pool);
1897 }
1898 
1899 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1900 {
1901 	u32 rx_count = priv->plat->rx_queues_to_use;
1902 	u32 queue;
1903 
1904 	/* Free RX queue resources */
1905 	for (queue = 0; queue < rx_count; queue++)
1906 		__free_dma_rx_desc_resources(priv, queue);
1907 }
1908 
1909 /**
1910  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1911  * @priv: private structure
1912  * @queue: TX queue index
1913  */
1914 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1915 {
1916 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1917 	size_t size;
1918 	void *addr;
1919 
1920 	/* Release the DMA TX socket buffers */
1921 	dma_free_tx_skbufs(priv, queue);
1922 
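	/* Pick the descriptor size and ring base matching the layout in
	 * use: extended, enhanced (TBS) or basic descriptors.
	 */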
1923 	if (priv->extend_desc) {
1924 		size = sizeof(struct dma_extended_desc);
1925 		addr = tx_q->dma_etx;
1926 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1927 		size = sizeof(struct dma_edesc);
1928 		addr = tx_q->dma_entx;
1929 	} else {
1930 		size = sizeof(struct dma_desc);
1931 		addr = tx_q->dma_tx;
1932 	}
1933 
1934 	size *= priv->dma_tx_size;
1935 
1936 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1937 
1938 	kfree(tx_q->tx_skbuff_dma);
1939 	kfree(tx_q->tx_skbuff);
1940 }
1941 
1942 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1943 {
1944 	u32 tx_count = priv->plat->tx_queues_to_use;
1945 	u32 queue;
1946 
1947 	/* Free TX queue resources */
1948 	for (queue = 0; queue < tx_count; queue++)
1949 		__free_dma_tx_desc_resources(priv, queue);
1950 }
1951 
1952 /**
1953  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1954  * @priv: private structure
1955  * @queue: RX queue index
1956  * Description: according to which descriptor can be used (extended or basic)
1957  * this function allocates the per-queue RX resources: the page pool used to
1958  * pre-allocate RX buffers (to allow a zero-copy mechanism), the buffer
1959  * bookkeeping array and the descriptor ring memory.
1960  */
1961 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1962 {
1963 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1964 	struct stmmac_channel *ch = &priv->channel[queue];
1965 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
1966 	struct page_pool_params pp_params = { 0 };
1967 	unsigned int num_pages;
1968 	unsigned int napi_id;
1969 	int ret;
1970 
1971 	rx_q->queue_index = queue;
1972 	rx_q->priv_data = priv;
1973 
1974 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1975 	pp_params.pool_size = priv->dma_rx_size;
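	/* Derive the page pool page order from the configured DMA buffer size */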
1976 	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1977 	pp_params.order = ilog2(num_pages);
1978 	pp_params.nid = dev_to_node(priv->device);
1979 	pp_params.dev = priv->device;
1980 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
1981 	pp_params.offset = stmmac_rx_offset(priv);
1982 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
1983 
1984 	rx_q->page_pool = page_pool_create(&pp_params);
1985 	if (IS_ERR(rx_q->page_pool)) {
1986 		ret = PTR_ERR(rx_q->page_pool);
1987 		rx_q->page_pool = NULL;
1988 		return ret;
1989 	}
1990 
1991 	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1992 				 sizeof(*rx_q->buf_pool),
1993 				 GFP_KERNEL);
1994 	if (!rx_q->buf_pool)
1995 		return -ENOMEM;
1996 
1997 	if (priv->extend_desc) {
1998 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
1999 						   priv->dma_rx_size *
2000 						   sizeof(struct dma_extended_desc),
2001 						   &rx_q->dma_rx_phy,
2002 						   GFP_KERNEL);
2003 		if (!rx_q->dma_erx)
2004 			return -ENOMEM;
2005 
2006 	} else {
2007 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2008 						  priv->dma_rx_size *
2009 						  sizeof(struct dma_desc),
2010 						  &rx_q->dma_rx_phy,
2011 						  GFP_KERNEL);
2012 		if (!rx_q->dma_rx)
2013 			return -ENOMEM;
2014 	}
2015 
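	/* Queues running in AF_XDP zero-copy mode are serviced by the
	 * combined rx/tx NAPI, so register the XDP RxQ info against that
	 * NAPI instance.
	 */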
2016 	if (stmmac_xdp_is_enabled(priv) &&
2017 	    test_bit(queue, priv->af_xdp_zc_qps))
2018 		napi_id = ch->rxtx_napi.napi_id;
2019 	else
2020 		napi_id = ch->rx_napi.napi_id;
2021 
2022 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2023 			       rx_q->queue_index,
2024 			       napi_id);
2025 	if (ret) {
2026 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2027 		return -EINVAL;
2028 	}
2029 
2030 	return 0;
2031 }
2032 
2033 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2034 {
2035 	u32 rx_count = priv->plat->rx_queues_to_use;
2036 	u32 queue;
2037 	int ret;
2038 
2039 	/* RX queues buffers and DMA */
2040 	for (queue = 0; queue < rx_count; queue++) {
2041 		ret = __alloc_dma_rx_desc_resources(priv, queue);
2042 		if (ret)
2043 			goto err_dma;
2044 	}
2045 
2046 	return 0;
2047 
2048 err_dma:
2049 	free_dma_rx_desc_resources(priv);
2050 
2051 	return ret;
2052 }
2053 
2054 /**
2055  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2056  * @priv: private structure
2057  * @queue: TX queue index
2058  * Description: according to which descriptor can be used (extended or basic)
2059  * this function allocates the per-queue TX resources: the skbuff
2060  * bookkeeping arrays and the coherent memory backing the descriptor
2061  * ring.
2062  */
2063 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
2064 {
2065 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2066 	size_t size;
2067 	void *addr;
2068 
2069 	tx_q->queue_index = queue;
2070 	tx_q->priv_data = priv;
2071 
2072 	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2073 				      sizeof(*tx_q->tx_skbuff_dma),
2074 				      GFP_KERNEL);
2075 	if (!tx_q->tx_skbuff_dma)
2076 		return -ENOMEM;
2077 
2078 	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2079 				  sizeof(struct sk_buff *),
2080 				  GFP_KERNEL);
2081 	if (!tx_q->tx_skbuff)
2082 		return -ENOMEM;
2083 
2084 	if (priv->extend_desc)
2085 		size = sizeof(struct dma_extended_desc);
2086 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2087 		size = sizeof(struct dma_edesc);
2088 	else
2089 		size = sizeof(struct dma_desc);
2090 
2091 	size *= priv->dma_tx_size;
2092 
2093 	addr = dma_alloc_coherent(priv->device, size,
2094 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2095 	if (!addr)
2096 		return -ENOMEM;
2097 
2098 	if (priv->extend_desc)
2099 		tx_q->dma_etx = addr;
2100 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2101 		tx_q->dma_entx = addr;
2102 	else
2103 		tx_q->dma_tx = addr;
2104 
2105 	return 0;
2106 }
2107 
2108 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2109 {
2110 	u32 tx_count = priv->plat->tx_queues_to_use;
2111 	u32 queue;
2112 	int ret;
2113 
2114 	/* TX queues buffers and DMA */
2115 	for (queue = 0; queue < tx_count; queue++) {
2116 		ret = __alloc_dma_tx_desc_resources(priv, queue);
2117 		if (ret)
2118 			goto err_dma;
2119 	}
2120 
2121 	return 0;
2122 
2123 err_dma:
2124 	free_dma_tx_desc_resources(priv);
2125 	return ret;
2126 }
2127 
2128 /**
2129  * alloc_dma_desc_resources - alloc TX/RX resources.
2130  * @priv: private structure
2131  * Description: according to which descriptor can be used (extended or basic)
2132  * this function allocates the resources for the TX and RX paths. For
2133  * reception, for example, it pre-allocates the RX buffers in order to
2134  * allow a zero-copy mechanism.
2135  */
2136 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2137 {
2138 	/* RX Allocation */
2139 	int ret = alloc_dma_rx_desc_resources(priv);
2140 
2141 	if (ret)
2142 		return ret;
2143 
2144 	ret = alloc_dma_tx_desc_resources(priv);
2145 
2146 	return ret;
2147 }
2148 
2149 /**
2150  * free_dma_desc_resources - free dma desc resources
2151  * @priv: private structure
2152  */
2153 static void free_dma_desc_resources(struct stmmac_priv *priv)
2154 {
2155 	/* Release the DMA TX socket buffers */
2156 	free_dma_tx_desc_resources(priv);
2157 
2158 	/* Release the DMA RX socket buffers later
2159 	 * to ensure all pending XDP_TX buffers are returned.
2160 	 */
2161 	free_dma_rx_desc_resources(priv);
2162 }
2163 
2164 /**
2165  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2166  *  @priv: driver private structure
2167  *  Description: It is used for enabling the rx queues in the MAC
2168  */
2169 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2170 {
2171 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2172 	int queue;
2173 	u8 mode;
2174 
2175 	for (queue = 0; queue < rx_queues_count; queue++) {
2176 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2177 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2178 	}
2179 }
2180 
2181 /**
2182  * stmmac_start_rx_dma - start RX DMA channel
2183  * @priv: driver private structure
2184  * @chan: RX channel index
2185  * Description:
2186  * This starts an RX DMA channel
2187  */
2188 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2189 {
2190 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2191 	stmmac_start_rx(priv, priv->ioaddr, chan);
2192 }
2193 
2194 /**
2195  * stmmac_start_tx_dma - start TX DMA channel
2196  * @priv: driver private structure
2197  * @chan: TX channel index
2198  * Description:
2199  * This starts a TX DMA channel
2200  */
2201 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2202 {
2203 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2204 	stmmac_start_tx(priv, priv->ioaddr, chan);
2205 }
2206 
2207 /**
2208  * stmmac_stop_rx_dma - stop RX DMA channel
2209  * @priv: driver private structure
2210  * @chan: RX channel index
2211  * Description:
2212  * This stops an RX DMA channel
2213  */
2214 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2215 {
2216 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2217 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2218 }
2219 
2220 /**
2221  * stmmac_stop_tx_dma - stop TX DMA channel
2222  * @priv: driver private structure
2223  * @chan: TX channel index
2224  * Description:
2225  * This stops a TX DMA channel
2226  */
2227 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2228 {
2229 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2230 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2231 }
2232 
2233 /**
2234  * stmmac_start_all_dma - start all RX and TX DMA channels
2235  * @priv: driver private structure
2236  * Description:
2237  * This starts all the RX and TX DMA channels
2238  */
2239 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2240 {
2241 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2242 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2243 	u32 chan = 0;
2244 
2245 	for (chan = 0; chan < rx_channels_count; chan++)
2246 		stmmac_start_rx_dma(priv, chan);
2247 
2248 	for (chan = 0; chan < tx_channels_count; chan++)
2249 		stmmac_start_tx_dma(priv, chan);
2250 }
2251 
2252 /**
2253  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2254  * @priv: driver private structure
2255  * Description:
2256  * This stops the RX and TX DMA channels
2257  */
2258 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2259 {
2260 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2261 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2262 	u32 chan = 0;
2263 
2264 	for (chan = 0; chan < rx_channels_count; chan++)
2265 		stmmac_stop_rx_dma(priv, chan);
2266 
2267 	for (chan = 0; chan < tx_channels_count; chan++)
2268 		stmmac_stop_tx_dma(priv, chan);
2269 }
2270 
2271 /**
2272  *  stmmac_dma_operation_mode - HW DMA operation mode
2273  *  @priv: driver private structure
2274  *  Description: it is used for configuring the DMA operation mode register in
2275  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2276  */
2277 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2278 {
2279 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2280 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2281 	int rxfifosz = priv->plat->rx_fifo_size;
2282 	int txfifosz = priv->plat->tx_fifo_size;
2283 	u32 txmode = 0;
2284 	u32 rxmode = 0;
2285 	u32 chan = 0;
2286 	u8 qmode = 0;
2287 
2288 	if (rxfifosz == 0)
2289 		rxfifosz = priv->dma_cap.rx_fifo_size;
2290 	if (txfifosz == 0)
2291 		txfifosz = priv->dma_cap.tx_fifo_size;
2292 
2293 	/* Adjust for real per queue fifo size */
2294 	rxfifosz /= rx_channels_count;
2295 	txfifosz /= tx_channels_count;
2296 
2297 	if (priv->plat->force_thresh_dma_mode) {
2298 		txmode = tc;
2299 		rxmode = tc;
2300 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2301 		/*
2302 		 * In case of GMAC, SF mode can be enabled
2303 		 * to perform the TX COE in HW. This depends on:
2304 		 * 1) TX COE being actually supported
2305 		 * 2) there being no buggy Jumbo frame support
2306 		 *    that requires not inserting the csum in the TDES.
2307 		 */
2308 		txmode = SF_DMA_MODE;
2309 		rxmode = SF_DMA_MODE;
2310 		priv->xstats.threshold = SF_DMA_MODE;
2311 	} else {
2312 		txmode = tc;
2313 		rxmode = SF_DMA_MODE;
2314 	}
2315 
2316 	/* configure all channels */
2317 	for (chan = 0; chan < rx_channels_count; chan++) {
2318 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2319 		u32 buf_size;
2320 
2321 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2322 
2323 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2324 				rxfifosz, qmode);
2325 
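		/* For AF_XDP zero-copy queues, program the DMA buffer size
		 * advertised by the XSK pool; otherwise use the default
		 * driver buffer size.
		 */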
2326 		if (rx_q->xsk_pool) {
2327 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2328 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2329 					      buf_size,
2330 					      chan);
2331 		} else {
2332 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2333 					      priv->dma_buf_sz,
2334 					      chan);
2335 		}
2336 	}
2337 
2338 	for (chan = 0; chan < tx_channels_count; chan++) {
2339 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2340 
2341 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2342 				txfifosz, qmode);
2343 	}
2344 }
2345 
2346 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2347 {
2348 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2349 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2350 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2351 	unsigned int entry = tx_q->cur_tx;
2352 	struct dma_desc *tx_desc = NULL;
2353 	struct xdp_desc xdp_desc;
2354 	bool work_done = true;
2355 
2356 	/* Avoids TX time-out as we are sharing with slow path */
2357 	nq->trans_start = jiffies;
2358 
2359 	budget = min(budget, stmmac_tx_avail(priv, queue));
2360 
2361 	while (budget-- > 0) {
2362 		dma_addr_t dma_addr;
2363 		bool set_ic;
2364 
2365 		/* We are sharing with the slow path and stop XSK TX desc submission when
2366 		 * the available TX ring space drops below the threshold.
2367 		 */
2368 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2369 		    !netif_carrier_ok(priv->dev)) {
2370 			work_done = false;
2371 			break;
2372 		}
2373 
2374 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2375 			break;
2376 
2377 		if (likely(priv->extend_desc))
2378 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2379 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2380 			tx_desc = &tx_q->dma_entx[entry].basic;
2381 		else
2382 			tx_desc = tx_q->dma_tx + entry;
2383 
2384 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2385 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2386 
2387 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2388 
2389 		/* To return the XDP buffer to the XSK pool, we simply call
2390 		 * xsk_tx_completed(), so we don't need to fill up
2391 		 * 'buf' and 'xdpf'.
2392 		 */
2393 		tx_q->tx_skbuff_dma[entry].buf = 0;
2394 		tx_q->xdpf[entry] = NULL;
2395 
2396 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2397 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2398 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2399 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2400 
2401 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2402 
2403 		tx_q->tx_count_frames++;
2404 
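		/* Only request a TX completion interrupt every
		 * tx_coal_frames descriptors to limit the interrupt rate.
		 */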
2405 		if (!priv->tx_coal_frames[queue])
2406 			set_ic = false;
2407 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2408 			set_ic = true;
2409 		else
2410 			set_ic = false;
2411 
2412 		if (set_ic) {
2413 			tx_q->tx_count_frames = 0;
2414 			stmmac_set_tx_ic(priv, tx_desc);
2415 			priv->xstats.tx_set_ic_bit++;
2416 		}
2417 
2418 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2419 				       true, priv->mode, true, true,
2420 				       xdp_desc.len);
2421 
2422 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2423 
2424 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2425 		entry = tx_q->cur_tx;
2426 	}
2427 
2428 	if (tx_desc) {
2429 		stmmac_flush_tx_descriptors(priv, queue);
2430 		xsk_tx_release(pool);
2431 	}
2432 
2433 	/* Return true only if both conditions are met:
2434 	 *  a) TX budget is still available
2435 	 *  b) work_done == true, i.e. the XSK TX desc peek ran empty (no more
2436 	 *     pending XSK TX frames for transmission)
2437 	 */
2438 	return !!budget && work_done;
2439 }
2440 
2441 /**
2442  * stmmac_tx_clean - to manage the transmission completion
2443  * @priv: driver private structure
2444  * @budget: napi budget limiting this function's packet handling
2445  * @queue: TX queue index
2446  * Description: it reclaims the transmit resources after transmission completes.
2447  */
2448 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2449 {
2450 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2451 	unsigned int bytes_compl = 0, pkts_compl = 0;
2452 	unsigned int entry, xmits = 0, count = 0;
2453 
2454 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2455 
2456 	priv->xstats.tx_clean++;
2457 
2458 	tx_q->xsk_frames_done = 0;
2459 
2460 	entry = tx_q->dirty_tx;
2461 
2462 	/* Try to clean all TX complete frame in 1 shot */
2463 	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
2464 		struct xdp_frame *xdpf;
2465 		struct sk_buff *skb;
2466 		struct dma_desc *p;
2467 		int status;
2468 
2469 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2470 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2471 			xdpf = tx_q->xdpf[entry];
2472 			skb = NULL;
2473 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2474 			xdpf = NULL;
2475 			skb = tx_q->tx_skbuff[entry];
2476 		} else {
2477 			xdpf = NULL;
2478 			skb = NULL;
2479 		}
2480 
2481 		if (priv->extend_desc)
2482 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2483 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2484 			p = &tx_q->dma_entx[entry].basic;
2485 		else
2486 			p = tx_q->dma_tx + entry;
2487 
2488 		status = stmmac_tx_status(priv, &priv->dev->stats,
2489 				&priv->xstats, p, priv->ioaddr);
2490 		/* Check if the descriptor is owned by the DMA */
2491 		if (unlikely(status & tx_dma_own))
2492 			break;
2493 
2494 		count++;
2495 
2496 		/* Make sure descriptor fields are read after reading
2497 		 * the own bit.
2498 		 */
2499 		dma_rmb();
2500 
2501 		/* Just consider the last segment and ...*/
2502 		if (likely(!(status & tx_not_ls))) {
2503 			/* ... verify the status error condition */
2504 			if (unlikely(status & tx_err)) {
2505 				priv->dev->stats.tx_errors++;
2506 			} else {
2507 				priv->dev->stats.tx_packets++;
2508 				priv->xstats.tx_pkt_n++;
2509 			}
2510 			if (skb)
2511 				stmmac_get_tx_hwtstamp(priv, p, skb);
2512 		}
2513 
2514 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2515 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2516 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2517 				dma_unmap_page(priv->device,
2518 					       tx_q->tx_skbuff_dma[entry].buf,
2519 					       tx_q->tx_skbuff_dma[entry].len,
2520 					       DMA_TO_DEVICE);
2521 			else
2522 				dma_unmap_single(priv->device,
2523 						 tx_q->tx_skbuff_dma[entry].buf,
2524 						 tx_q->tx_skbuff_dma[entry].len,
2525 						 DMA_TO_DEVICE);
2526 			tx_q->tx_skbuff_dma[entry].buf = 0;
2527 			tx_q->tx_skbuff_dma[entry].len = 0;
2528 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2529 		}
2530 
2531 		stmmac_clean_desc3(priv, tx_q, p);
2532 
2533 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2534 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2535 
2536 		if (xdpf &&
2537 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2538 			xdp_return_frame_rx_napi(xdpf);
2539 			tx_q->xdpf[entry] = NULL;
2540 		}
2541 
2542 		if (xdpf &&
2543 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2544 			xdp_return_frame(xdpf);
2545 			tx_q->xdpf[entry] = NULL;
2546 		}
2547 
2548 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2549 			tx_q->xsk_frames_done++;
2550 
2551 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2552 			if (likely(skb)) {
2553 				pkts_compl++;
2554 				bytes_compl += skb->len;
2555 				dev_consume_skb_any(skb);
2556 				tx_q->tx_skbuff[entry] = NULL;
2557 			}
2558 		}
2559 
2560 		stmmac_release_tx_desc(priv, p, priv->mode);
2561 
2562 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2563 	}
2564 	tx_q->dirty_tx = entry;
2565 
2566 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2567 				  pkts_compl, bytes_compl);
2568 
2569 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2570 								queue))) &&
2571 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2572 
2573 		netif_dbg(priv, tx_done, priv->dev,
2574 			  "%s: restart transmit\n", __func__);
2575 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2576 	}
2577 
2578 	if (tx_q->xsk_pool) {
2579 		bool work_done;
2580 
2581 		if (tx_q->xsk_frames_done)
2582 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2583 
2584 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2585 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2586 
2587 		/* For XSK TX, we try to send as many as possible.
2588 		 * If XSK work done (XSK TX desc empty and budget still
2589 		 * available), return "budget - 1" to reenable TX IRQ.
2590 		 * Else, return "budget" to make NAPI continue polling.
2591 		 */
2592 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2593 					       STMMAC_XSK_TX_BUDGET_MAX);
2594 		if (work_done)
2595 			xmits = budget - 1;
2596 		else
2597 			xmits = budget;
2598 	}
2599 
2600 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2601 	    priv->eee_sw_timer_en) {
2602 		stmmac_enable_eee_mode(priv);
2603 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2604 	}
2605 
2606 	/* We still have pending packets, let's call for a new scheduling */
2607 	if (tx_q->dirty_tx != tx_q->cur_tx)
2608 		hrtimer_start(&tx_q->txtimer,
2609 			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2610 			      HRTIMER_MODE_REL);
2611 
2612 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2613 
2614 	/* Combine decisions from TX clean and XSK TX */
2615 	return max(count, xmits);
2616 }
2617 
2618 /**
2619  * stmmac_tx_err - to manage the tx error
2620  * @priv: driver private structure
2621  * @chan: channel index
2622  * Description: it cleans the descriptors and restarts the transmission
2623  * in case of transmission errors.
2624  */
2625 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2626 {
2627 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2628 
2629 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2630 
2631 	stmmac_stop_tx_dma(priv, chan);
2632 	dma_free_tx_skbufs(priv, chan);
2633 	stmmac_clear_tx_descriptors(priv, chan);
2634 	tx_q->dirty_tx = 0;
2635 	tx_q->cur_tx = 0;
2636 	tx_q->mss = 0;
2637 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2638 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2639 			    tx_q->dma_tx_phy, chan);
2640 	stmmac_start_tx_dma(priv, chan);
2641 
2642 	priv->dev->stats.tx_errors++;
2643 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2644 }
2645 
2646 /**
2647  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2648  *  @priv: driver private structure
2649  *  @txmode: TX operating mode
2650  *  @rxmode: RX operating mode
2651  *  @chan: channel index
2652  *  Description: it is used for configuring the DMA operation mode at
2653  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2654  *  mode.
2655  */
2656 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2657 					  u32 rxmode, u32 chan)
2658 {
2659 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2660 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2661 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2662 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2663 	int rxfifosz = priv->plat->rx_fifo_size;
2664 	int txfifosz = priv->plat->tx_fifo_size;
2665 
2666 	if (rxfifosz == 0)
2667 		rxfifosz = priv->dma_cap.rx_fifo_size;
2668 	if (txfifosz == 0)
2669 		txfifosz = priv->dma_cap.tx_fifo_size;
2670 
2671 	/* Adjust for real per queue fifo size */
2672 	rxfifosz /= rx_channels_count;
2673 	txfifosz /= tx_channels_count;
2674 
2675 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2676 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2677 }
2678 
2679 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2680 {
2681 	int ret;
2682 
2683 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2684 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2685 	if (ret && (ret != -EINVAL)) {
2686 		stmmac_global_err(priv);
2687 		return true;
2688 	}
2689 
2690 	return false;
2691 }
2692 
2693 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2694 {
2695 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2696 						 &priv->xstats, chan, dir);
2697 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2698 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2699 	struct stmmac_channel *ch = &priv->channel[chan];
2700 	struct napi_struct *rx_napi;
2701 	struct napi_struct *tx_napi;
2702 	unsigned long flags;
2703 
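	/* Queues backed by an XSK pool are polled by the combined
	 * rx/tx NAPI instead of the separate rx and tx NAPIs.
	 */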
2704 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2705 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2706 
2707 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2708 		if (napi_schedule_prep(rx_napi)) {
2709 			spin_lock_irqsave(&ch->lock, flags);
2710 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2711 			spin_unlock_irqrestore(&ch->lock, flags);
2712 			__napi_schedule(rx_napi);
2713 		}
2714 	}
2715 
2716 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2717 		if (napi_schedule_prep(tx_napi)) {
2718 			spin_lock_irqsave(&ch->lock, flags);
2719 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2720 			spin_unlock_irqrestore(&ch->lock, flags);
2721 			__napi_schedule(tx_napi);
2722 		}
2723 	}
2724 
2725 	return status;
2726 }
2727 
2728 /**
2729  * stmmac_dma_interrupt - DMA ISR
2730  * @priv: driver private structure
2731  * Description: this is the DMA ISR. It is called by the main ISR.
2732  * It calls the dwmac dma routine and schedules the poll method in case
2733  * some work can be done.
2734  */
2735 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2736 {
2737 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2738 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2739 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2740 				tx_channel_count : rx_channel_count;
2741 	u32 chan;
2742 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2743 
2744 	/* Make sure we never check beyond our status buffer. */
2745 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2746 		channels_to_check = ARRAY_SIZE(status);
2747 
2748 	for (chan = 0; chan < channels_to_check; chan++)
2749 		status[chan] = stmmac_napi_check(priv, chan,
2750 						 DMA_DIR_RXTX);
2751 
2752 	for (chan = 0; chan < tx_channel_count; chan++) {
2753 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2754 			/* Try to bump up the dma threshold on this failure */
2755 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2756 			    (tc <= 256)) {
2757 				tc += 64;
2758 				if (priv->plat->force_thresh_dma_mode)
2759 					stmmac_set_dma_operation_mode(priv,
2760 								      tc,
2761 								      tc,
2762 								      chan);
2763 				else
2764 					stmmac_set_dma_operation_mode(priv,
2765 								    tc,
2766 								    SF_DMA_MODE,
2767 								    chan);
2768 				priv->xstats.threshold = tc;
2769 			}
2770 		} else if (unlikely(status[chan] == tx_hard_error)) {
2771 			stmmac_tx_err(priv, chan);
2772 		}
2773 	}
2774 }
2775 
2776 /**
2777  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2778  * @priv: driver private structure
2779  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2780  */
2781 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2782 {
2783 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2784 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2785 
2786 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2787 
2788 	if (priv->dma_cap.rmon) {
2789 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2790 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2791 	} else
2792 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2793 }
2794 
2795 /**
2796  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2797  * @priv: driver private structure
2798  * Description:
2799  *  new GMAC chip generations have a new register to indicate the
2800  *  presence of optional features/functions.
2801  *  This can also be used to override the values passed through the
2802  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2803  */
2804 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2805 {
2806 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2807 }
2808 
2809 /**
2810  * stmmac_check_ether_addr - check if the MAC addr is valid
2811  * @priv: driver private structure
2812  * Description:
2813  * it verifies that the MAC address is valid; in case it is not, it
2814  * generates a random MAC address.
2815  */
2816 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2817 {
2818 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2819 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2820 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2821 			eth_hw_addr_random(priv->dev);
2822 		dev_info(priv->device, "device MAC address %pM\n",
2823 			 priv->dev->dev_addr);
2824 	}
2825 }
2826 
2827 /**
2828  * stmmac_init_dma_engine - DMA init.
2829  * @priv: driver private structure
2830  * Description:
2831  * It inits the DMA invoking the specific MAC/GMAC callback.
2832  * Some DMA parameters can be passed from the platform;
2833  * if they are not passed, a default is kept for the MAC or GMAC.
2834  */
2835 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2836 {
2837 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2838 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2839 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2840 	struct stmmac_rx_queue *rx_q;
2841 	struct stmmac_tx_queue *tx_q;
2842 	u32 chan = 0;
2843 	int atds = 0;
2844 	int ret = 0;
2845 
2846 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2847 		dev_err(priv->device, "Invalid DMA configuration\n");
2848 		return -EINVAL;
2849 	}
2850 
2851 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2852 		atds = 1;
2853 
2854 	ret = stmmac_reset(priv, priv->ioaddr);
2855 	if (ret) {
2856 		dev_err(priv->device, "Failed to reset the dma\n");
2857 		return ret;
2858 	}
2859 
2860 	/* DMA Configuration */
2861 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2862 
2863 	if (priv->plat->axi)
2864 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2865 
2866 	/* DMA CSR Channel configuration */
2867 	for (chan = 0; chan < dma_csr_ch; chan++)
2868 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2869 
2870 	/* DMA RX Channel Configuration */
2871 	for (chan = 0; chan < rx_channels_count; chan++) {
2872 		rx_q = &priv->rx_queue[chan];
2873 
2874 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2875 				    rx_q->dma_rx_phy, chan);
2876 
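		/* Point the RX tail at the descriptor following the last
		 * buffer that has been allocated for this queue.
		 */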
2877 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2878 				     (rx_q->buf_alloc_num *
2879 				      sizeof(struct dma_desc));
2880 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2881 				       rx_q->rx_tail_addr, chan);
2882 	}
2883 
2884 	/* DMA TX Channel Configuration */
2885 	for (chan = 0; chan < tx_channels_count; chan++) {
2886 		tx_q = &priv->tx_queue[chan];
2887 
2888 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2889 				    tx_q->dma_tx_phy, chan);
2890 
2891 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2892 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2893 				       tx_q->tx_tail_addr, chan);
2894 	}
2895 
2896 	return ret;
2897 }
2898 
2899 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2900 {
2901 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2902 
2903 	hrtimer_start(&tx_q->txtimer,
2904 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2905 		      HRTIMER_MODE_REL);
2906 }
2907 
2908 /**
2909  * stmmac_tx_timer - mitigation sw timer for tx.
2910  * @t: pointer to the hrtimer embedded in the TX queue
2911  * Description:
2912  * This is the timer handler used to schedule the deferred TX cleanup (stmmac_tx_clean).
2913  */
2914 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2915 {
2916 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2917 	struct stmmac_priv *priv = tx_q->priv_data;
2918 	struct stmmac_channel *ch;
2919 	struct napi_struct *napi;
2920 
2921 	ch = &priv->channel[tx_q->queue_index];
2922 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2923 
2924 	if (likely(napi_schedule_prep(napi))) {
2925 		unsigned long flags;
2926 
2927 		spin_lock_irqsave(&ch->lock, flags);
2928 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2929 		spin_unlock_irqrestore(&ch->lock, flags);
2930 		__napi_schedule(napi);
2931 	}
2932 
2933 	return HRTIMER_NORESTART;
2934 }
2935 
2936 /**
2937  * stmmac_init_coalesce - init mitigation options.
2938  * @priv: driver private structure
2939  * Description:
2940  * This inits the coalesce parameters: i.e. timer rate,
2941  * timer handler and default threshold used for enabling the
2942  * interrupt on completion bit.
2943  */
2944 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2945 {
2946 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2947 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2948 	u32 chan;
2949 
2950 	for (chan = 0; chan < tx_channel_count; chan++) {
2951 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2952 
2953 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2954 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2955 
2956 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2957 		tx_q->txtimer.function = stmmac_tx_timer;
2958 	}
2959 
2960 	for (chan = 0; chan < rx_channel_count; chan++)
2961 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
2962 }
2963 
2964 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2965 {
2966 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2967 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2968 	u32 chan;
2969 
2970 	/* set TX ring length */
2971 	for (chan = 0; chan < tx_channels_count; chan++)
2972 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2973 				       (priv->dma_tx_size - 1), chan);
2974 
2975 	/* set RX ring length */
2976 	for (chan = 0; chan < rx_channels_count; chan++)
2977 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2978 				       (priv->dma_rx_size - 1), chan);
2979 }
2980 
2981 /**
2982  *  stmmac_set_tx_queue_weight - Set TX queue weight
2983  *  @priv: driver private structure
2984  *  Description: It is used for setting the TX queue weights
2985  */
2986 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2987 {
2988 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2989 	u32 weight;
2990 	u32 queue;
2991 
2992 	for (queue = 0; queue < tx_queues_count; queue++) {
2993 		weight = priv->plat->tx_queues_cfg[queue].weight;
2994 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2995 	}
2996 }
2997 
2998 /**
2999  *  stmmac_configure_cbs - Configure CBS in TX queue
3000  *  @priv: driver private structure
3001  *  Description: It is used for configuring CBS in AVB TX queues
3002  */
3003 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3004 {
3005 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3006 	u32 mode_to_use;
3007 	u32 queue;
3008 
3009 	/* queue 0 is reserved for legacy traffic */
3010 	for (queue = 1; queue < tx_queues_count; queue++) {
3011 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3012 		if (mode_to_use == MTL_QUEUE_DCB)
3013 			continue;
3014 
3015 		stmmac_config_cbs(priv, priv->hw,
3016 				priv->plat->tx_queues_cfg[queue].send_slope,
3017 				priv->plat->tx_queues_cfg[queue].idle_slope,
3018 				priv->plat->tx_queues_cfg[queue].high_credit,
3019 				priv->plat->tx_queues_cfg[queue].low_credit,
3020 				queue);
3021 	}
3022 }
3023 
3024 /**
3025  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3026  *  @priv: driver private structure
3027  *  Description: It is used for mapping RX queues to RX dma channels
3028  */
3029 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3030 {
3031 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3032 	u32 queue;
3033 	u32 chan;
3034 
3035 	for (queue = 0; queue < rx_queues_count; queue++) {
3036 		chan = priv->plat->rx_queues_cfg[queue].chan;
3037 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3038 	}
3039 }
3040 
3041 /**
3042  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3043  *  @priv: driver private structure
3044  *  Description: It is used for configuring the RX Queue Priority
3045  */
3046 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3047 {
3048 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3049 	u32 queue;
3050 	u32 prio;
3051 
3052 	for (queue = 0; queue < rx_queues_count; queue++) {
3053 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3054 			continue;
3055 
3056 		prio = priv->plat->rx_queues_cfg[queue].prio;
3057 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3058 	}
3059 }
3060 
3061 /**
3062  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3063  *  @priv: driver private structure
3064  *  Description: It is used for configuring the TX Queue Priority
3065  */
3066 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3067 {
3068 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3069 	u32 queue;
3070 	u32 prio;
3071 
3072 	for (queue = 0; queue < tx_queues_count; queue++) {
3073 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3074 			continue;
3075 
3076 		prio = priv->plat->tx_queues_cfg[queue].prio;
3077 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3078 	}
3079 }
3080 
3081 /**
3082  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3083  *  @priv: driver private structure
3084  *  Description: It is used for configuring the RX queue routing
3085  */
3086 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3087 {
3088 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3089 	u32 queue;
3090 	u8 packet;
3091 
3092 	for (queue = 0; queue < rx_queues_count; queue++) {
3093 		/* no specific packet type routing specified for the queue */
3094 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3095 			continue;
3096 
3097 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3098 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3099 	}
3100 }
3101 
3102 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3103 {
3104 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3105 		priv->rss.enable = false;
3106 		return;
3107 	}
3108 
3109 	if (priv->dev->features & NETIF_F_RXHASH)
3110 		priv->rss.enable = true;
3111 	else
3112 		priv->rss.enable = false;
3113 
3114 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3115 			     priv->plat->rx_queues_to_use);
3116 }
3117 
3118 /**
3119  *  stmmac_mtl_configuration - Configure MTL
3120  *  @priv: driver private structure
3121  *  Description: It is used for configuring the MTL
3122  */
3123 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3124 {
3125 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3126 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3127 
3128 	if (tx_queues_count > 1)
3129 		stmmac_set_tx_queue_weight(priv);
3130 
3131 	/* Configure MTL RX algorithms */
3132 	if (rx_queues_count > 1)
3133 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3134 				priv->plat->rx_sched_algorithm);
3135 
3136 	/* Configure MTL TX algorithms */
3137 	if (tx_queues_count > 1)
3138 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3139 				priv->plat->tx_sched_algorithm);
3140 
3141 	/* Configure CBS in AVB TX queues */
3142 	if (tx_queues_count > 1)
3143 		stmmac_configure_cbs(priv);
3144 
3145 	/* Map RX MTL to DMA channels */
3146 	stmmac_rx_queue_dma_chan_map(priv);
3147 
3148 	/* Enable MAC RX Queues */
3149 	stmmac_mac_enable_rx_queues(priv);
3150 
3151 	/* Set RX priorities */
3152 	if (rx_queues_count > 1)
3153 		stmmac_mac_config_rx_queues_prio(priv);
3154 
3155 	/* Set TX priorities */
3156 	if (tx_queues_count > 1)
3157 		stmmac_mac_config_tx_queues_prio(priv);
3158 
3159 	/* Set RX routing */
3160 	if (rx_queues_count > 1)
3161 		stmmac_mac_config_rx_queues_routing(priv);
3162 
3163 	/* Receive Side Scaling */
3164 	if (rx_queues_count > 1)
3165 		stmmac_mac_config_rss(priv);
3166 }
3167 
3168 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3169 {
3170 	if (priv->dma_cap.asp) {
3171 		netdev_info(priv->dev, "Enabling Safety Features\n");
3172 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
3173 	} else {
3174 		netdev_info(priv->dev, "No Safety Features support found\n");
3175 	}
3176 }
3177 
3178 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3179 {
3180 	char *name;
3181 
3182 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3183 
3184 	name = priv->wq_name;
3185 	sprintf(name, "%s-fpe", priv->dev->name);
3186 
3187 	priv->fpe_wq = create_singlethread_workqueue(name);
3188 	if (!priv->fpe_wq) {
3189 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3190 
3191 		return -ENOMEM;
3192 	}
3193 	netdev_info(priv->dev, "FPE workqueue start\n");
3194 
3195 	return 0;
3196 }
3197 
3198 /**
3199  * stmmac_hw_setup - setup mac in a usable state.
3200  *  @dev: pointer to the device structure.
3201  *  @init_ptp: initialize PTP if set
3202  *  Description:
3203  *  this is the main function to setup the HW in a usable state because the
3204  *  dma engine is reset, the core registers are configured (e.g. AXI,
3205  *  Checksum features, timers). The DMA is ready to start receiving and
3206  *  transmitting.
3207  *  Return value:
3208  *  0 on success and an appropriate negative error code, as defined in
3209  *  errno.h, on failure.
3210  */
3211 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3212 {
3213 	struct stmmac_priv *priv = netdev_priv(dev);
3214 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3215 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3216 	bool sph_en;
3217 	u32 chan;
3218 	int ret;
3219 
3220 	/* DMA initialization and SW reset */
3221 	ret = stmmac_init_dma_engine(priv);
3222 	if (ret < 0) {
3223 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3224 			   __func__);
3225 		return ret;
3226 	}
3227 
3228 	/* Copy the MAC addr into the HW  */
3229 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3230 
3231 	/* PS and related bits will be programmed according to the speed */
3232 	if (priv->hw->pcs) {
3233 		int speed = priv->plat->mac_port_sel_speed;
3234 
3235 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3236 		    (speed == SPEED_1000)) {
3237 			priv->hw->ps = speed;
3238 		} else {
3239 			dev_warn(priv->device, "invalid port speed\n");
3240 			priv->hw->ps = 0;
3241 		}
3242 	}
3243 
3244 	/* Initialize the MAC Core */
3245 	stmmac_core_init(priv, priv->hw, dev);
3246 
3247 	/* Initialize MTL */
3248 	stmmac_mtl_configuration(priv);
3249 
3250 	/* Initialize Safety Features */
3251 	stmmac_safety_feat_configuration(priv);
3252 
3253 	ret = stmmac_rx_ipc(priv, priv->hw);
3254 	if (!ret) {
3255 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3256 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3257 		priv->hw->rx_csum = 0;
3258 	}
3259 
3260 	/* Enable the MAC Rx/Tx */
3261 	stmmac_mac_set(priv, priv->ioaddr, true);
3262 
3263 	/* Set the HW DMA mode and the COE */
3264 	stmmac_dma_operation_mode(priv);
3265 
3266 	stmmac_mmc_setup(priv);
3267 
3268 	if (init_ptp) {
3269 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3270 		if (ret < 0)
3271 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
3272 
3273 		ret = stmmac_init_ptp(priv);
3274 		if (ret == -EOPNOTSUPP)
3275 			netdev_warn(priv->dev, "PTP not supported by HW\n");
3276 		else if (ret)
3277 			netdev_warn(priv->dev, "PTP init failed\n");
3278 	}
3279 
3280 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3281 
3282 	/* Convert the timer from msec to usec */
3283 	if (!priv->tx_lpi_timer)
3284 		priv->tx_lpi_timer = eee_timer * 1000;
3285 
3286 	if (priv->use_riwt) {
3287 		u32 queue;
3288 
3289 		for (queue = 0; queue < rx_cnt; queue++) {
3290 			if (!priv->rx_riwt[queue])
3291 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3292 
3293 			stmmac_rx_watchdog(priv, priv->ioaddr,
3294 					   priv->rx_riwt[queue], queue);
3295 		}
3296 	}
3297 
3298 	if (priv->hw->pcs)
3299 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3300 
3301 	/* set TX and RX rings length */
3302 	stmmac_set_rings_length(priv);
3303 
3304 	/* Enable TSO */
3305 	if (priv->tso) {
3306 		for (chan = 0; chan < tx_cnt; chan++)
3307 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3308 	}
3309 
3310 	/* Enable Split Header */
3311 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3312 	for (chan = 0; chan < rx_cnt; chan++)
3313 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3314 
3316 	/* VLAN Tag Insertion */
3317 	if (priv->dma_cap.vlins)
3318 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3319 
3320 	/* TBS */
3321 	for (chan = 0; chan < tx_cnt; chan++) {
3322 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3323 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3324 
3325 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3326 	}
3327 
3328 	/* Configure real RX and TX queues */
3329 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3330 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3331 
3332 	/* Start the ball rolling... */
3333 	stmmac_start_all_dma(priv);
3334 
3335 	if (priv->dma_cap.fpesel) {
3336 		stmmac_fpe_start_wq(priv);
3337 
3338 		if (priv->plat->fpe_cfg->enable)
3339 			stmmac_fpe_handshake(priv, true);
3340 	}
3341 
3342 	return 0;
3343 }
3344 
3345 static void stmmac_hw_teardown(struct net_device *dev)
3346 {
3347 	struct stmmac_priv *priv = netdev_priv(dev);
3348 
3349 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3350 }
3351 
3352 static void stmmac_free_irq(struct net_device *dev,
3353 			    enum request_irq_err irq_err, int irq_idx)
3354 {
3355 	struct stmmac_priv *priv = netdev_priv(dev);
3356 	int j;
3357 
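	/* Free from the stage that failed backwards: each case releases
	 * its own IRQs and falls through to the previously requested ones.
	 */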
3358 	switch (irq_err) {
3359 	case REQ_IRQ_ERR_ALL:
3360 		irq_idx = priv->plat->tx_queues_to_use;
3361 		fallthrough;
3362 	case REQ_IRQ_ERR_TX:
3363 		for (j = irq_idx - 1; j >= 0; j--) {
3364 			if (priv->tx_irq[j] > 0) {
3365 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3366 				free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
3367 			}
3368 		}
3369 		irq_idx = priv->plat->rx_queues_to_use;
3370 		fallthrough;
3371 	case REQ_IRQ_ERR_RX:
3372 		for (j = irq_idx - 1; j >= 0; j--) {
3373 			if (priv->rx_irq[j] > 0) {
3374 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3375 				free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
3376 			}
3377 		}
3378 
3379 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3380 			free_irq(priv->sfty_ue_irq, dev);
3381 		fallthrough;
3382 	case REQ_IRQ_ERR_SFTY_UE:
3383 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3384 			free_irq(priv->sfty_ce_irq, dev);
3385 		fallthrough;
3386 	case REQ_IRQ_ERR_SFTY_CE:
3387 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3388 			free_irq(priv->lpi_irq, dev);
3389 		fallthrough;
3390 	case REQ_IRQ_ERR_LPI:
3391 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3392 			free_irq(priv->wol_irq, dev);
3393 		fallthrough;
3394 	case REQ_IRQ_ERR_WOL:
3395 		free_irq(dev->irq, dev);
3396 		fallthrough;
3397 	case REQ_IRQ_ERR_MAC:
3398 	case REQ_IRQ_ERR_NO:
3399 		/* If MAC IRQ request error, no more IRQ to free */
3400 		break;
3401 	}
3402 }
3403 
3404 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3405 {
3406 	enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
3407 	struct stmmac_priv *priv = netdev_priv(dev);
3408 	cpumask_t cpu_mask;
3409 	int irq_idx = 0;
3410 	char *int_name;
3411 	int ret;
3412 	int i;
3413 
3414 	/* For common interrupt */
3415 	int_name = priv->int_name_mac;
3416 	sprintf(int_name, "%s:%s", dev->name, "mac");
3417 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3418 			  0, int_name, dev);
3419 	if (unlikely(ret < 0)) {
3420 		netdev_err(priv->dev,
3421 			   "%s: alloc mac MSI %d (error: %d)\n",
3422 			   __func__, dev->irq, ret);
3423 		irq_err = REQ_IRQ_ERR_MAC;
3424 		goto irq_error;
3425 	}
3426 
3427 	/* Request the Wake IRQ in case another line
3428 	 * is used for WoL
3429 	 */
3430 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3431 		int_name = priv->int_name_wol;
3432 		sprintf(int_name, "%s:%s", dev->name, "wol");
3433 		ret = request_irq(priv->wol_irq,
3434 				  stmmac_mac_interrupt,
3435 				  0, int_name, dev);
3436 		if (unlikely(ret < 0)) {
3437 			netdev_err(priv->dev,
3438 				   "%s: alloc wol MSI %d (error: %d)\n",
3439 				   __func__, priv->wol_irq, ret);
3440 			irq_err = REQ_IRQ_ERR_WOL;
3441 			goto irq_error;
3442 		}
3443 	}
3444 
3445 	/* Request the LPI IRQ in case another line
3446 	 * is used for LPI
3447 	 */
3448 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3449 		int_name = priv->int_name_lpi;
3450 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3451 		ret = request_irq(priv->lpi_irq,
3452 				  stmmac_mac_interrupt,
3453 				  0, int_name, dev);
3454 		if (unlikely(ret < 0)) {
3455 			netdev_err(priv->dev,
3456 				   "%s: alloc lpi MSI %d (error: %d)\n",
3457 				   __func__, priv->lpi_irq, ret);
3458 			irq_err = REQ_IRQ_ERR_LPI;
3459 			goto irq_error;
3460 		}
3461 	}
3462 
3463 	/* Request the Safety Feature Correctable Error line in
3464 	 * case another line is used
3465 	 */
3466 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3467 		int_name = priv->int_name_sfty_ce;
3468 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3469 		ret = request_irq(priv->sfty_ce_irq,
3470 				  stmmac_safety_interrupt,
3471 				  0, int_name, dev);
3472 		if (unlikely(ret < 0)) {
3473 			netdev_err(priv->dev,
3474 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3475 				   __func__, priv->sfty_ce_irq, ret);
3476 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3477 			goto irq_error;
3478 		}
3479 	}
3480 
3481 	/* Request the Safety Feature Uncorrectable Error line in
3482 	 * case another line is used
3483 	 */
3484 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3485 		int_name = priv->int_name_sfty_ue;
3486 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3487 		ret = request_irq(priv->sfty_ue_irq,
3488 				  stmmac_safety_interrupt,
3489 				  0, int_name, dev);
3490 		if (unlikely(ret < 0)) {
3491 			netdev_err(priv->dev,
3492 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3493 				   __func__, priv->sfty_ue_irq, ret);
3494 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3495 			goto irq_error;
3496 		}
3497 	}
3498 
3499 	/* Request Rx MSI irq */
3500 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3501 		if (priv->rx_irq[i] == 0)
3502 			continue;
3503 
3504 		int_name = priv->int_name_rx_irq[i];
3505 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3506 		ret = request_irq(priv->rx_irq[i],
3507 				  stmmac_msi_intr_rx,
3508 				  0, int_name, &priv->rx_queue[i]);
3509 		if (unlikely(ret < 0)) {
3510 			netdev_err(priv->dev,
3511 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3512 				   __func__, i, priv->rx_irq[i], ret);
3513 			irq_err = REQ_IRQ_ERR_RX;
3514 			irq_idx = i;
3515 			goto irq_error;
3516 		}
3517 		cpumask_clear(&cpu_mask);
3518 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3519 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3520 	}
3521 
3522 	/* Request Tx MSI irq */
3523 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3524 		if (priv->tx_irq[i] == 0)
3525 			continue;
3526 
3527 		int_name = priv->int_name_tx_irq[i];
3528 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3529 		ret = request_irq(priv->tx_irq[i],
3530 				  stmmac_msi_intr_tx,
3531 				  0, int_name, &priv->tx_queue[i]);
3532 		if (unlikely(ret < 0)) {
3533 			netdev_err(priv->dev,
3534 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3535 				   __func__, i, priv->tx_irq[i], ret);
3536 			irq_err = REQ_IRQ_ERR_TX;
3537 			irq_idx = i;
3538 			goto irq_error;
3539 		}
3540 		cpumask_clear(&cpu_mask);
3541 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3542 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3543 	}
3544 
3545 	return 0;
3546 
3547 irq_error:
3548 	stmmac_free_irq(dev, irq_err, irq_idx);
3549 	return ret;
3550 }
3551 
3552 static int stmmac_request_irq_single(struct net_device *dev)
3553 {
3554 	enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
3555 	struct stmmac_priv *priv = netdev_priv(dev);
3556 	int ret;
3557 
3558 	ret = request_irq(dev->irq, stmmac_interrupt,
3559 			  IRQF_SHARED, dev->name, dev);
3560 	if (unlikely(ret < 0)) {
3561 		netdev_err(priv->dev,
3562 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3563 			   __func__, dev->irq, ret);
3564 		irq_err = REQ_IRQ_ERR_MAC;
3565 		return ret;
3566 	}
3567 
3568 	/* Request the Wake IRQ in case another line
3569 	 * is used for WoL
3570 	 */
3571 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3572 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3573 				  IRQF_SHARED, dev->name, dev);
3574 		if (unlikely(ret < 0)) {
3575 			netdev_err(priv->dev,
3576 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3577 				   __func__, priv->wol_irq, ret);
3578 			irq_err = REQ_IRQ_ERR_WOL;
3579 			return ret;
3580 		}
3581 	}
3582 
3583 	/* Request the LPI IRQ in case another line is used for LPI */
3584 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3585 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3586 				  IRQF_SHARED, dev->name, dev);
3587 		if (unlikely(ret < 0)) {
3588 			netdev_err(priv->dev,
3589 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3590 				   __func__, priv->lpi_irq, ret);
3591 			irq_err = REQ_IRQ_ERR_LPI;
3592 			goto irq_error;
3593 		}
3594 	}
3595 
3596 	return 0;
3597 
3598 irq_error:
3599 	stmmac_free_irq(dev, irq_err, 0);
3600 	return ret;
3601 }
3602 
3603 static int stmmac_request_irq(struct net_device *dev)
3604 {
3605 	struct stmmac_priv *priv = netdev_priv(dev);
3606 	int ret;
3607 
3608 	/* Request the IRQ lines */
3609 	if (priv->plat->multi_msi_en)
3610 		ret = stmmac_request_irq_multi_msi(dev);
3611 	else
3612 		ret = stmmac_request_irq_single(dev);
3613 
3614 	return ret;
3615 }
3616 
3617 /**
3618  *  stmmac_open - open entry point of the driver
3619  *  @dev : pointer to the device structure.
3620  *  Description:
3621  *  This function is the open entry point of the driver.
3622  *  Return value:
3623  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3624  *  file on failure.
3625  */
3626 int stmmac_open(struct net_device *dev)
3627 {
3628 	struct stmmac_priv *priv = netdev_priv(dev);
3629 	int bfsize = 0;
3630 	u32 chan;
3631 	int ret;
3632 
3633 	ret = pm_runtime_get_sync(priv->device);
3634 	if (ret < 0) {
3635 		pm_runtime_put_noidle(priv->device);
3636 		return ret;
3637 	}
3638 
3639 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3640 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3641 	    priv->hw->xpcs_args.an_mode != DW_AN_C73) {
3642 		ret = stmmac_init_phy(dev);
3643 		if (ret) {
3644 			netdev_err(priv->dev,
3645 				   "%s: Cannot attach to PHY (error: %d)\n",
3646 				   __func__, ret);
3647 			goto init_phy_error;
3648 		}
3649 	}
3650 
3651 	/* Extra statistics */
3652 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3653 	priv->xstats.threshold = tc;
3654 
3655 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3656 	if (bfsize < 0)
3657 		bfsize = 0;
3658 
3659 	if (bfsize < BUF_SIZE_16KiB)
3660 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3661 
3662 	priv->dma_buf_sz = bfsize;
3663 	buf_sz = bfsize;
3664 
3665 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3666 
3667 	if (!priv->dma_tx_size)
3668 		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3669 	if (!priv->dma_rx_size)
3670 		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3671 
3672 	/* Check for TBS early: TBS-capable queues need enhanced descriptors later */
3673 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3674 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3675 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3676 
3677 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3678 		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
3679 			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
3680 	}
3681 
3682 	ret = alloc_dma_desc_resources(priv);
3683 	if (ret < 0) {
3684 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3685 			   __func__);
3686 		goto dma_desc_error;
3687 	}
3688 
3689 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
3690 	if (ret < 0) {
3691 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3692 			   __func__);
3693 		goto init_error;
3694 	}
3695 
3696 	ret = stmmac_hw_setup(dev, true);
3697 	if (ret < 0) {
3698 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3699 		goto init_error;
3700 	}
3701 
3702 	stmmac_init_coalesce(priv);
3703 
3704 	phylink_start(priv->phylink);
3705 	/* We may have called phylink_speed_down before */
3706 	phylink_speed_up(priv->phylink);
3707 
3708 	ret = stmmac_request_irq(dev);
3709 	if (ret)
3710 		goto irq_error;
3711 
3712 	stmmac_enable_all_queues(priv);
3713 	netif_tx_start_all_queues(priv->dev);
3714 
3715 	return 0;
3716 
3717 irq_error:
3718 	phylink_stop(priv->phylink);
3719 
3720 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3721 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3722 
3723 	stmmac_hw_teardown(dev);
3724 init_error:
3725 	free_dma_desc_resources(priv);
3726 dma_desc_error:
3727 	phylink_disconnect_phy(priv->phylink);
3728 init_phy_error:
3729 	pm_runtime_put(priv->device);
3730 	return ret;
3731 }
3732 
3733 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3734 {
3735 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3736 
3737 	if (priv->fpe_wq)
3738 		destroy_workqueue(priv->fpe_wq);
3739 
3740 	netdev_info(priv->dev, "FPE workqueue stop");
3741 }
3742 
3743 /**
3744  *  stmmac_release - close entry point of the driver
3745  *  @dev : device pointer.
3746  *  Description:
3747  *  This is the stop entry point of the driver.
3748  */
3749 int stmmac_release(struct net_device *dev)
3750 {
3751 	struct stmmac_priv *priv = netdev_priv(dev);
3752 	u32 chan;
3753 
3754 	if (device_may_wakeup(priv->device))
3755 		phylink_speed_down(priv->phylink, false);
3756 	/* Stop and disconnect the PHY */
3757 	phylink_stop(priv->phylink);
3758 	phylink_disconnect_phy(priv->phylink);
3759 
3760 	stmmac_disable_all_queues(priv);
3761 
3762 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3763 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3764 
3765 	/* Free the IRQ lines */
3766 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3767 
3768 	if (priv->eee_enabled) {
3769 		priv->tx_path_in_lpi_mode = false;
3770 		del_timer_sync(&priv->eee_ctrl_timer);
3771 	}
3772 
3773 	/* Stop TX/RX DMA and clear the descriptors */
3774 	stmmac_stop_all_dma(priv);
3775 
3776 	/* Release and free the Rx/Tx resources */
3777 	free_dma_desc_resources(priv);
3778 
3779 	/* Disable the MAC Rx/Tx */
3780 	stmmac_mac_set(priv, priv->ioaddr, false);
3781 
3782 	netif_carrier_off(dev);
3783 
3784 	stmmac_release_ptp(priv);
3785 
3786 	pm_runtime_put(priv->device);
3787 
3788 	if (priv->dma_cap.fpesel)
3789 		stmmac_fpe_stop_wq(priv);
3790 
3791 	return 0;
3792 }
3793 
3794 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3795 			       struct stmmac_tx_queue *tx_q)
3796 {
3797 	u16 tag = 0x0, inner_tag = 0x0;
3798 	u32 inner_type = 0x0;
3799 	struct dma_desc *p;
3800 
3801 	if (!priv->dma_cap.vlins)
3802 		return false;
3803 	if (!skb_vlan_tag_present(skb))
3804 		return false;
3805 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3806 		inner_tag = skb_vlan_tag_get(skb);
3807 		inner_type = STMMAC_VLAN_INSERT;
3808 	}
3809 
3810 	tag = skb_vlan_tag_get(skb);
3811 
3812 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3813 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3814 	else
3815 		p = &tx_q->dma_tx[tx_q->cur_tx];
3816 
3817 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3818 		return false;
3819 
3820 	stmmac_set_tx_owner(priv, p);
3821 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3822 	return true;
3823 }
3824 
3825 /**
3826  *  stmmac_tso_allocator - fill TSO descriptors for a buffer
3827  *  @priv: driver private structure
3828  *  @des: buffer start address
3829  *  @total_len: total length to fill in descriptors
3830  *  @last_segment: condition for the last descriptor
3831  *  @queue: TX queue index
3832  *  Description:
3833  *  This function fills the descriptors and requests new descriptors
3834  *  according to the buffer length to fill
3835  */
3836 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3837 				 int total_len, bool last_segment, u32 queue)
3838 {
3839 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3840 	struct dma_desc *desc;
3841 	u32 buff_size;
3842 	int tmp_len;
3843 
3844 	tmp_len = total_len;
3845 
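	/* Each descriptor can carry at most TSO_MAX_BUFF_SIZE bytes, so walk
	 * the payload in chunks and map each chunk to a fresh descriptor.
	 */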
3846 	while (tmp_len > 0) {
3847 		dma_addr_t curr_addr;
3848 
3849 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3850 						priv->dma_tx_size);
3851 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3852 
3853 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3854 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3855 		else
3856 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3857 
3858 		curr_addr = des + (total_len - tmp_len);
3859 		if (priv->dma_cap.addr64 <= 32)
3860 			desc->des0 = cpu_to_le32(curr_addr);
3861 		else
3862 			stmmac_set_desc_addr(priv, desc, curr_addr);
3863 
3864 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3865 			    TSO_MAX_BUFF_SIZE : tmp_len;
3866 
3867 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3868 				0, 1,
3869 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3870 				0, 0);
3871 
3872 		tmp_len -= TSO_MAX_BUFF_SIZE;
3873 	}
3874 }
3875 
3876 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3877 {
3878 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3879 	int desc_size;
3880 
3881 	if (likely(priv->extend_desc))
3882 		desc_size = sizeof(struct dma_extended_desc);
3883 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3884 		desc_size = sizeof(struct dma_edesc);
3885 	else
3886 		desc_size = sizeof(struct dma_desc);
3887 
3888 	/* The own bit must be the last thing written when preparing the
3889 	 * descriptor, and a barrier is needed to make sure that
3890 	 * everything is coherent before granting control to the DMA engine.
3891 	 */
3892 	wmb();
3893 
3894 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3895 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3896 }
3897 
3898 /**
3899  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3900  *  @skb : the socket buffer
3901  *  @dev : device pointer
3902  *  Description: this is the transmit function that is called on TSO frames
3903  *  (support available on GMAC4 and newer chips).
3904  *  The diagram below shows the ring programming in case of TSO frames:
3905  *
3906  *  First Descriptor
3907  *   --------
3908  *   | DES0 |---> buffer1 = L2/L3/L4 header
3909  *   | DES1 |---> TCP Payload (can continue on next descr...)
3910  *   | DES2 |---> buffer 1 and 2 len
3911  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3912  *   --------
3913  *	|
3914  *     ...
3915  *	|
3916  *   --------
3917  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3918  *   | DES1 | --|
3919  *   | DES2 | --> buffer 1 and 2 len
3920  *   | DES3 |
3921  *   --------
3922  *
3923  * MSS is fixed while TSO is enabled, so the TDES3 context field does not need to be reprogrammed per frame.
3924  */
3925 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3926 {
3927 	struct dma_desc *desc, *first, *mss_desc = NULL;
3928 	struct stmmac_priv *priv = netdev_priv(dev);
3929 	int nfrags = skb_shinfo(skb)->nr_frags;
3930 	u32 queue = skb_get_queue_mapping(skb);
3931 	unsigned int first_entry, tx_packets;
3932 	int tmp_pay_len = 0, first_tx;
3933 	struct stmmac_tx_queue *tx_q;
3934 	bool has_vlan, set_ic;
3935 	u8 proto_hdr_len, hdr;
3936 	u32 pay_len, mss;
3937 	dma_addr_t des;
3938 	int i;
3939 
3940 	tx_q = &priv->tx_queue[queue];
3941 	first_tx = tx_q->cur_tx;
3942 
3943 	/* Compute header lengths */
3944 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3945 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3946 		hdr = sizeof(struct udphdr);
3947 	} else {
3948 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3949 		hdr = tcp_hdrlen(skb);
3950 	}
3951 
3952 	/* Descriptor availability based on the threshold should be safe enough */
3953 	if (unlikely(stmmac_tx_avail(priv, queue) <
3954 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3955 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3956 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3957 								queue));
3958 			/* This is a hard error, log it. */
3959 			netdev_err(priv->dev,
3960 				   "%s: Tx Ring full when queue awake\n",
3961 				   __func__);
3962 		}
3963 		return NETDEV_TX_BUSY;
3964 	}
3965 
3966 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3967 
3968 	mss = skb_shinfo(skb)->gso_size;
3969 
3970 	/* set new MSS value if needed */
3971 	if (mss != tx_q->mss) {
3972 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3973 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3974 		else
3975 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3976 
3977 		stmmac_set_mss(priv, mss_desc, mss);
3978 		tx_q->mss = mss;
3979 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3980 						priv->dma_tx_size);
3981 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3982 	}
3983 
3984 	if (netif_msg_tx_queued(priv)) {
3985 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3986 			__func__, hdr, proto_hdr_len, pay_len, mss);
3987 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3988 			skb->data_len);
3989 	}
3990 
3991 	/* Check if VLAN can be inserted by HW */
3992 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3993 
3994 	first_entry = tx_q->cur_tx;
3995 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3996 
3997 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3998 		desc = &tx_q->dma_entx[first_entry].basic;
3999 	else
4000 		desc = &tx_q->dma_tx[first_entry];
4001 	first = desc;
4002 
4003 	if (has_vlan)
4004 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4005 
4006 	/* first descriptor: fill Headers on Buf1 */
4007 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4008 			     DMA_TO_DEVICE);
4009 	if (dma_mapping_error(priv->device, des))
4010 		goto dma_map_err;
4011 
4012 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4013 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4014 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4015 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4016 
4017 	if (priv->dma_cap.addr64 <= 32) {
4018 		first->des0 = cpu_to_le32(des);
4019 
4020 		/* Fill start of payload in buff2 of first descriptor */
4021 		if (pay_len)
4022 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4023 
4024 		/* If needed take extra descriptors to fill the remaining payload */
4025 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4026 	} else {
4027 		stmmac_set_desc_addr(priv, first, des);
4028 		tmp_pay_len = pay_len;
4029 		des += proto_hdr_len;
4030 		pay_len = 0;
4031 	}
4032 
4033 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4034 
4035 	/* Prepare fragments */
4036 	for (i = 0; i < nfrags; i++) {
4037 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4038 
4039 		des = skb_frag_dma_map(priv->device, frag, 0,
4040 				       skb_frag_size(frag),
4041 				       DMA_TO_DEVICE);
4042 		if (dma_mapping_error(priv->device, des))
4043 			goto dma_map_err;
4044 
4045 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4046 				     (i == nfrags - 1), queue);
4047 
4048 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4049 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4050 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4051 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4052 	}
4053 
4054 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4055 
4056 	/* Only the last descriptor gets to point to the skb. */
4057 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4058 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4059 
4060 	/* Manage tx mitigation */
4061 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4062 	tx_q->tx_count_frames += tx_packets;
4063 
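	/* Request a Tx completion interrupt (IC bit) when a HW timestamp is
	 * pending or when this burst crosses the per-queue tx_coal_frames
	 * threshold; otherwise leave cleanup to the Tx coalesce timer.
	 */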
4064 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4065 		set_ic = true;
4066 	else if (!priv->tx_coal_frames[queue])
4067 		set_ic = false;
4068 	else if (tx_packets > priv->tx_coal_frames[queue])
4069 		set_ic = true;
4070 	else if ((tx_q->tx_count_frames %
4071 		  priv->tx_coal_frames[queue]) < tx_packets)
4072 		set_ic = true;
4073 	else
4074 		set_ic = false;
4075 
4076 	if (set_ic) {
4077 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4078 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4079 		else
4080 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4081 
4082 		tx_q->tx_count_frames = 0;
4083 		stmmac_set_tx_ic(priv, desc);
4084 		priv->xstats.tx_set_ic_bit++;
4085 	}
4086 
4087 	/* We've used all descriptors we need for this skb, however,
4088 	 * advance cur_tx so that it references a fresh descriptor.
4089 	 * ndo_start_xmit will fill this descriptor the next time it's
4090 	 * called and stmmac_tx_clean may clean up to this descriptor.
4091 	 */
4092 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4093 
4094 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4095 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4096 			  __func__);
4097 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4098 	}
4099 
4100 	dev->stats.tx_bytes += skb->len;
4101 	priv->xstats.tx_tso_frames++;
4102 	priv->xstats.tx_tso_nfrags += nfrags;
4103 
4104 	if (priv->sarc_type)
4105 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4106 
4107 	skb_tx_timestamp(skb);
4108 
4109 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4110 		     priv->hwts_tx_en)) {
4111 		/* declare that device is doing timestamping */
4112 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4113 		stmmac_enable_tx_timestamp(priv, first);
4114 	}
4115 
4116 	/* Complete the first descriptor before granting the DMA */
4117 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4118 			proto_hdr_len,
4119 			pay_len,
4120 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4121 			hdr / 4, (skb->len - proto_hdr_len));
4122 
4123 	/* If context desc is used to change MSS */
4124 	if (mss_desc) {
4125 		/* Make sure that the first descriptor has been completely
4126 		 * written, including its own bit. This is because the MSS
4127 		 * descriptor actually precedes the first descriptor, so its
4128 		 * own bit must be the last thing written.
4129 		 */
4130 		dma_wmb();
4131 		stmmac_set_tx_owner(priv, mss_desc);
4132 	}
4133 
4134 	if (netif_msg_pktdata(priv)) {
4135 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4136 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4137 			tx_q->cur_tx, first, nfrags);
4138 		pr_info(">>> frame to be transmitted: ");
4139 		print_pkt(skb->data, skb_headlen(skb));
4140 	}
4141 
4142 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4143 
4144 	stmmac_flush_tx_descriptors(priv, queue);
4145 	stmmac_tx_timer_arm(priv, queue);
4146 
4147 	return NETDEV_TX_OK;
4148 
4149 dma_map_err:
4150 	dev_err(priv->device, "Tx dma map failed\n");
4151 	dev_kfree_skb(skb);
4152 	priv->dev->stats.tx_dropped++;
4153 	return NETDEV_TX_OK;
4154 }
4155 
4156 /**
4157  *  stmmac_xmit - Tx entry point of the driver
4158  *  @skb : the socket buffer
4159  *  @dev : device pointer
4160  *  Description : this is the tx entry point of the driver.
4161  *  It programs the chain or the ring and supports oversized frames
4162  *  and SG feature.
4163  */
4164 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4165 {
4166 	unsigned int first_entry, tx_packets, enh_desc;
4167 	struct stmmac_priv *priv = netdev_priv(dev);
4168 	unsigned int nopaged_len = skb_headlen(skb);
4169 	int i, csum_insertion = 0, is_jumbo = 0;
4170 	u32 queue = skb_get_queue_mapping(skb);
4171 	int nfrags = skb_shinfo(skb)->nr_frags;
4172 	int gso = skb_shinfo(skb)->gso_type;
4173 	struct dma_edesc *tbs_desc = NULL;
4174 	struct dma_desc *desc, *first;
4175 	struct stmmac_tx_queue *tx_q;
4176 	bool has_vlan, set_ic;
4177 	int entry, first_tx;
4178 	dma_addr_t des;
4179 
4180 	tx_q = &priv->tx_queue[queue];
4181 	first_tx = tx_q->cur_tx;
4182 
4183 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4184 		stmmac_disable_eee_mode(priv);
4185 
4186 	/* Manage oversized TCP frames for GMAC4 device */
4187 	if (skb_is_gso(skb) && priv->tso) {
4188 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4189 			return stmmac_tso_xmit(skb, dev);
4190 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4191 			return stmmac_tso_xmit(skb, dev);
4192 	}
4193 
4194 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4195 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4196 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4197 								queue));
4198 			/* This is a hard error, log it. */
4199 			netdev_err(priv->dev,
4200 				   "%s: Tx Ring full when queue awake\n",
4201 				   __func__);
4202 		}
4203 		return NETDEV_TX_BUSY;
4204 	}
4205 
4206 	/* Check if VLAN can be inserted by HW */
4207 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4208 
4209 	entry = tx_q->cur_tx;
4210 	first_entry = entry;
4211 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4212 
4213 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4214 
4215 	if (likely(priv->extend_desc))
4216 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4217 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4218 		desc = &tx_q->dma_entx[entry].basic;
4219 	else
4220 		desc = tx_q->dma_tx + entry;
4221 
4222 	first = desc;
4223 
4224 	if (has_vlan)
4225 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4226 
4227 	enh_desc = priv->plat->enh_desc;
4228 	/* To program the descriptors according to the size of the frame */
4229 	if (enh_desc)
4230 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4231 
4232 	if (unlikely(is_jumbo)) {
4233 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4234 		if (unlikely(entry < 0) && (entry != -EINVAL))
4235 			goto dma_map_err;
4236 	}
4237 
4238 	for (i = 0; i < nfrags; i++) {
4239 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4240 		int len = skb_frag_size(frag);
4241 		bool last_segment = (i == (nfrags - 1));
4242 
4243 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4244 		WARN_ON(tx_q->tx_skbuff[entry]);
4245 
4246 		if (likely(priv->extend_desc))
4247 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4248 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4249 			desc = &tx_q->dma_entx[entry].basic;
4250 		else
4251 			desc = tx_q->dma_tx + entry;
4252 
4253 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4254 				       DMA_TO_DEVICE);
4255 		if (dma_mapping_error(priv->device, des))
4256 			goto dma_map_err; /* should reuse desc w/o issues */
4257 
4258 		tx_q->tx_skbuff_dma[entry].buf = des;
4259 
4260 		stmmac_set_desc_addr(priv, desc, des);
4261 
4262 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4263 		tx_q->tx_skbuff_dma[entry].len = len;
4264 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4265 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4266 
4267 		/* Prepare the descriptor and set the own bit too */
4268 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4269 				priv->mode, 1, last_segment, skb->len);
4270 	}
4271 
4272 	/* Only the last descriptor gets to point to the skb. */
4273 	tx_q->tx_skbuff[entry] = skb;
4274 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4275 
4276 	/* According to the coalesce parameter, the IC bit for the latest
4277 	 * segment is reset and the timer is re-started to clean the tx status.
4278 	 * This approach takes care of the fragments: desc is the first
4279 	 * element in case of no SG.
4280 	 */
4281 	tx_packets = (entry + 1) - first_tx;
4282 	tx_q->tx_count_frames += tx_packets;
4283 
4284 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4285 		set_ic = true;
4286 	else if (!priv->tx_coal_frames[queue])
4287 		set_ic = false;
4288 	else if (tx_packets > priv->tx_coal_frames[queue])
4289 		set_ic = true;
4290 	else if ((tx_q->tx_count_frames %
4291 		  priv->tx_coal_frames[queue]) < tx_packets)
4292 		set_ic = true;
4293 	else
4294 		set_ic = false;
4295 
4296 	if (set_ic) {
4297 		if (likely(priv->extend_desc))
4298 			desc = &tx_q->dma_etx[entry].basic;
4299 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4300 			desc = &tx_q->dma_entx[entry].basic;
4301 		else
4302 			desc = &tx_q->dma_tx[entry];
4303 
4304 		tx_q->tx_count_frames = 0;
4305 		stmmac_set_tx_ic(priv, desc);
4306 		priv->xstats.tx_set_ic_bit++;
4307 	}
4308 
4309 	/* We've used all descriptors we need for this skb, however,
4310 	 * advance cur_tx so that it references a fresh descriptor.
4311 	 * ndo_start_xmit will fill this descriptor the next time it's
4312 	 * called and stmmac_tx_clean may clean up to this descriptor.
4313 	 */
4314 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4315 	tx_q->cur_tx = entry;
4316 
4317 	if (netif_msg_pktdata(priv)) {
4318 		netdev_dbg(priv->dev,
4319 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4320 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4321 			   entry, first, nfrags);
4322 
4323 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4324 		print_pkt(skb->data, skb->len);
4325 	}
4326 
4327 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4328 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4329 			  __func__);
4330 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4331 	}
4332 
4333 	dev->stats.tx_bytes += skb->len;
4334 
4335 	if (priv->sarc_type)
4336 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4337 
4338 	skb_tx_timestamp(skb);
4339 
4340 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4341 	 * problems because all the descriptors are actually ready to be
4342 	 * passed to the DMA engine.
4343 	 */
4344 	if (likely(!is_jumbo)) {
4345 		bool last_segment = (nfrags == 0);
4346 
4347 		des = dma_map_single(priv->device, skb->data,
4348 				     nopaged_len, DMA_TO_DEVICE);
4349 		if (dma_mapping_error(priv->device, des))
4350 			goto dma_map_err;
4351 
4352 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4353 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4354 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4355 
4356 		stmmac_set_desc_addr(priv, first, des);
4357 
4358 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4359 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4360 
4361 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4362 			     priv->hwts_tx_en)) {
4363 			/* declare that device is doing timestamping */
4364 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4365 			stmmac_enable_tx_timestamp(priv, first);
4366 		}
4367 
4368 		/* Prepare the first descriptor setting the OWN bit too */
4369 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4370 				csum_insertion, priv->mode, 0, last_segment,
4371 				skb->len);
4372 	}
4373 
4374 	if (tx_q->tbs & STMMAC_TBS_EN) {
4375 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4376 
4377 		tbs_desc = &tx_q->dma_entx[first_entry];
4378 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4379 	}
4380 
4381 	stmmac_set_tx_owner(priv, first);
4382 
4383 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4384 
4385 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4386 
4387 	stmmac_flush_tx_descriptors(priv, queue);
4388 	stmmac_tx_timer_arm(priv, queue);
4389 
4390 	return NETDEV_TX_OK;
4391 
4392 dma_map_err:
4393 	netdev_err(priv->dev, "Tx DMA map failed\n");
4394 	dev_kfree_skb(skb);
4395 	priv->dev->stats.tx_dropped++;
4396 	return NETDEV_TX_OK;
4397 }
4398 
4399 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4400 {
4401 	struct vlan_ethhdr *veth;
4402 	__be16 vlan_proto;
4403 	u16 vlanid;
4404 
4405 	veth = (struct vlan_ethhdr *)skb->data;
4406 	vlan_proto = veth->h_vlan_proto;
4407 
4408 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4409 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4410 	    (vlan_proto == htons(ETH_P_8021AD) &&
4411 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4412 		/* pop the vlan tag */
4413 		vlanid = ntohs(veth->h_vlan_TCI);
4414 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4415 		skb_pull(skb, VLAN_HLEN);
4416 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4417 	}
4418 }
4419 
4420 /**
4421  * stmmac_rx_refill - refill used skb preallocated buffers
4422  * @priv: driver private structure
4423  * @queue: RX queue index
4424  * Description : this is to reallocate the skb for the reception process
4425  * that is based on zero-copy.
4426  */
4427 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4428 {
4429 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4430 	int dirty = stmmac_rx_dirty(priv, queue);
4431 	unsigned int entry = rx_q->dirty_rx;
4432 
4433 	while (dirty-- > 0) {
4434 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4435 		struct dma_desc *p;
4436 		bool use_rx_wd;
4437 
4438 		if (priv->extend_desc)
4439 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4440 		else
4441 			p = rx_q->dma_rx + entry;
4442 
4443 		if (!buf->page) {
4444 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
4445 			if (!buf->page)
4446 				break;
4447 		}
4448 
4449 		if (priv->sph && !buf->sec_page) {
4450 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
4451 			if (!buf->sec_page)
4452 				break;
4453 
4454 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4455 		}
4456 
4457 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4458 
4459 		stmmac_set_desc_addr(priv, p, buf->addr);
4460 		if (priv->sph)
4461 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4462 		else
4463 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4464 		stmmac_refill_desc3(priv, rx_q, p);
4465 
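		/* The RX watchdog bit is only meaningful when the RX interrupt
		 * watchdog timer (RIWT) is in use.
		 */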
4466 		rx_q->rx_count_frames++;
4467 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4468 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4469 			rx_q->rx_count_frames = 0;
4470 
4471 		use_rx_wd = !priv->rx_coal_frames[queue];
4472 		use_rx_wd |= rx_q->rx_count_frames > 0;
4473 		if (!priv->use_riwt)
4474 			use_rx_wd = false;
4475 
4476 		dma_wmb();
4477 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4478 
4479 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4480 	}
4481 	rx_q->dirty_rx = entry;
4482 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4483 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4484 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4485 }
4486 
4487 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4488 				       struct dma_desc *p,
4489 				       int status, unsigned int len)
4490 {
4491 	unsigned int plen = 0, hlen = 0;
4492 	int coe = priv->hw->rx_csum;
4493 
4494 	/* Not first descriptor, buffer is always zero */
4495 	if (priv->sph && len)
4496 		return 0;
4497 
4498 	/* First descriptor, get split header length */
4499 	stmmac_get_rx_header_len(priv, p, &hlen);
4500 	if (priv->sph && hlen) {
4501 		priv->xstats.rx_split_hdr_pkt_n++;
4502 		return hlen;
4503 	}
4504 
4505 	/* First descriptor, not last descriptor and not split header */
4506 	if (status & rx_not_ls)
4507 		return priv->dma_buf_sz;
4508 
4509 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4510 
4511 	/* First descriptor and last descriptor and not split header */
4512 	return min_t(unsigned int, priv->dma_buf_sz, plen);
4513 }
4514 
4515 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4516 				       struct dma_desc *p,
4517 				       int status, unsigned int len)
4518 {
4519 	int coe = priv->hw->rx_csum;
4520 	unsigned int plen = 0;
4521 
4522 	/* Not split header, buffer is not available */
4523 	if (!priv->sph)
4524 		return 0;
4525 
4526 	/* Not last descriptor */
4527 	if (status & rx_not_ls)
4528 		return priv->dma_buf_sz;
4529 
4530 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4531 
4532 	/* Last descriptor */
4533 	return plen - len;
4534 }
4535 
4536 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4537 				struct xdp_frame *xdpf, bool dma_map)
4538 {
4539 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4540 	unsigned int entry = tx_q->cur_tx;
4541 	struct dma_desc *tx_desc;
4542 	dma_addr_t dma_addr;
4543 	bool set_ic;
4544 
4545 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4546 		return STMMAC_XDP_CONSUMED;
4547 
4548 	if (likely(priv->extend_desc))
4549 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4550 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4551 		tx_desc = &tx_q->dma_entx[entry].basic;
4552 	else
4553 		tx_desc = tx_q->dma_tx + entry;
4554 
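	/* When dma_map is set the frame memory does not come from our RX
	 * page pool, so a fresh DMA mapping is created; for XDP_TX the page
	 * is already pool-mapped and only needs a device sync.
	 */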
4555 	if (dma_map) {
4556 		dma_addr = dma_map_single(priv->device, xdpf->data,
4557 					  xdpf->len, DMA_TO_DEVICE);
4558 		if (dma_mapping_error(priv->device, dma_addr))
4559 			return STMMAC_XDP_CONSUMED;
4560 
4561 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4562 	} else {
4563 		struct page *page = virt_to_page(xdpf->data);
4564 
4565 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4566 			   xdpf->headroom;
4567 		dma_sync_single_for_device(priv->device, dma_addr,
4568 					   xdpf->len, DMA_BIDIRECTIONAL);
4569 
4570 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4571 	}
4572 
4573 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4574 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4575 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4576 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4577 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4578 
4579 	tx_q->xdpf[entry] = xdpf;
4580 
4581 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4582 
4583 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4584 			       true, priv->mode, true, true,
4585 			       xdpf->len);
4586 
4587 	tx_q->tx_count_frames++;
4588 
4589 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4590 		set_ic = true;
4591 	else
4592 		set_ic = false;
4593 
4594 	if (set_ic) {
4595 		tx_q->tx_count_frames = 0;
4596 		stmmac_set_tx_ic(priv, tx_desc);
4597 		priv->xstats.tx_set_ic_bit++;
4598 	}
4599 
4600 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4601 
4602 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4603 	tx_q->cur_tx = entry;
4604 
4605 	return STMMAC_XDP_TX;
4606 }
4607 
4608 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4609 				   int cpu)
4610 {
4611 	int index = cpu;
4612 
4613 	if (unlikely(index < 0))
4614 		index = 0;
4615 
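	/* Reduce the CPU id modulo the number of TX queues in use so that
	 * XDP transmissions are spread across the available queues.
	 */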
4616 	while (index >= priv->plat->tx_queues_to_use)
4617 		index -= priv->plat->tx_queues_to_use;
4618 
4619 	return index;
4620 }
4621 
4622 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4623 				struct xdp_buff *xdp)
4624 {
4625 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4626 	int cpu = smp_processor_id();
4627 	struct netdev_queue *nq;
4628 	int queue;
4629 	int res;
4630 
4631 	if (unlikely(!xdpf))
4632 		return STMMAC_XDP_CONSUMED;
4633 
4634 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4635 	nq = netdev_get_tx_queue(priv->dev, queue);
4636 
4637 	__netif_tx_lock(nq, cpu);
4638 	/* Avoids TX time-out as we are sharing with slow path */
4639 	nq->trans_start = jiffies;
4640 
4641 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4642 	if (res == STMMAC_XDP_TX)
4643 		stmmac_flush_tx_descriptors(priv, queue);
4644 
4645 	__netif_tx_unlock(nq);
4646 
4647 	return res;
4648 }
4649 
4650 /* This function assumes rcu_read_lock() is held by the caller. */
4651 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4652 				 struct bpf_prog *prog,
4653 				 struct xdp_buff *xdp)
4654 {
4655 	u32 act;
4656 	int res;
4657 
4658 	act = bpf_prog_run_xdp(prog, xdp);
4659 	switch (act) {
4660 	case XDP_PASS:
4661 		res = STMMAC_XDP_PASS;
4662 		break;
4663 	case XDP_TX:
4664 		res = stmmac_xdp_xmit_back(priv, xdp);
4665 		break;
4666 	case XDP_REDIRECT:
4667 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4668 			res = STMMAC_XDP_CONSUMED;
4669 		else
4670 			res = STMMAC_XDP_REDIRECT;
4671 		break;
4672 	default:
4673 		bpf_warn_invalid_xdp_action(act);
4674 		fallthrough;
4675 	case XDP_ABORTED:
4676 		trace_xdp_exception(priv->dev, prog, act);
4677 		fallthrough;
4678 	case XDP_DROP:
4679 		res = STMMAC_XDP_CONSUMED;
4680 		break;
4681 	}
4682 
4683 	return res;
4684 }
4685 
4686 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4687 					   struct xdp_buff *xdp)
4688 {
4689 	struct bpf_prog *prog;
4690 	int res;
4691 
4692 	rcu_read_lock();
4693 
4694 	prog = READ_ONCE(priv->xdp_prog);
4695 	if (!prog) {
4696 		res = STMMAC_XDP_PASS;
4697 		goto unlock;
4698 	}
4699 
4700 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4701 unlock:
4702 	rcu_read_unlock();
4703 	return ERR_PTR(-res);
4704 }
4705 
4706 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4707 				   int xdp_status)
4708 {
4709 	int cpu = smp_processor_id();
4710 	int queue;
4711 
4712 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4713 
4714 	if (xdp_status & STMMAC_XDP_TX)
4715 		stmmac_tx_timer_arm(priv, queue);
4716 
4717 	if (xdp_status & STMMAC_XDP_REDIRECT)
4718 		xdp_do_flush();
4719 }
4720 
4721 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4722 					       struct xdp_buff *xdp)
4723 {
4724 	unsigned int metasize = xdp->data - xdp->data_meta;
4725 	unsigned int datasize = xdp->data_end - xdp->data;
4726 	struct sk_buff *skb;
4727 
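	/* Allocate a new skb and copy the frame out of the XSK buffer; the
	 * zero-copy buffer itself stays with the XSK pool and is released
	 * by the caller.
	 */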
4728 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4729 			       xdp->data_end - xdp->data_hard_start,
4730 			       GFP_ATOMIC | __GFP_NOWARN);
4731 	if (unlikely(!skb))
4732 		return NULL;
4733 
4734 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4735 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4736 	if (metasize)
4737 		skb_metadata_set(skb, metasize);
4738 
4739 	return skb;
4740 }
4741 
4742 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4743 				   struct dma_desc *p, struct dma_desc *np,
4744 				   struct xdp_buff *xdp)
4745 {
4746 	struct stmmac_channel *ch = &priv->channel[queue];
4747 	unsigned int len = xdp->data_end - xdp->data;
4748 	enum pkt_hash_types hash_type;
4749 	int coe = priv->hw->rx_csum;
4750 	struct sk_buff *skb;
4751 	u32 hash;
4752 
4753 	skb = stmmac_construct_skb_zc(ch, xdp);
4754 	if (!skb) {
4755 		priv->dev->stats.rx_dropped++;
4756 		return;
4757 	}
4758 
4759 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4760 	stmmac_rx_vlan(priv->dev, skb);
4761 	skb->protocol = eth_type_trans(skb, priv->dev);
4762 
4763 	if (unlikely(!coe))
4764 		skb_checksum_none_assert(skb);
4765 	else
4766 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4767 
4768 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4769 		skb_set_hash(skb, hash, hash_type);
4770 
4771 	skb_record_rx_queue(skb, queue);
4772 	napi_gro_receive(&ch->rxtx_napi, skb);
4773 
4774 	priv->dev->stats.rx_packets++;
4775 	priv->dev->stats.rx_bytes += len;
4776 }
4777 
4778 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4779 {
4780 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4781 	unsigned int entry = rx_q->dirty_rx;
4782 	struct dma_desc *rx_desc = NULL;
4783 	bool ret = true;
4784 
4785 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4786 
4787 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4788 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4789 		dma_addr_t dma_addr;
4790 		bool use_rx_wd;
4791 
4792 		if (!buf->xdp) {
4793 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4794 			if (!buf->xdp) {
4795 				ret = false;
4796 				break;
4797 			}
4798 		}
4799 
4800 		if (priv->extend_desc)
4801 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4802 		else
4803 			rx_desc = rx_q->dma_rx + entry;
4804 
4805 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4806 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4807 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4808 		stmmac_refill_desc3(priv, rx_q, rx_desc);
4809 
4810 		rx_q->rx_count_frames++;
4811 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4812 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4813 			rx_q->rx_count_frames = 0;
4814 
4815 		use_rx_wd = !priv->rx_coal_frames[queue];
4816 		use_rx_wd |= rx_q->rx_count_frames > 0;
4817 		if (!priv->use_riwt)
4818 			use_rx_wd = false;
4819 
4820 		dma_wmb();
4821 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4822 
4823 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4824 	}
4825 
4826 	if (rx_desc) {
4827 		rx_q->dirty_rx = entry;
4828 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4829 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
4830 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4831 	}
4832 
4833 	return ret;
4834 }
4835 
4836 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4837 {
4838 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4839 	unsigned int count = 0, error = 0, len = 0;
4840 	int dirty = stmmac_rx_dirty(priv, queue);
4841 	unsigned int next_entry = rx_q->cur_rx;
4842 	unsigned int desc_size;
4843 	struct bpf_prog *prog;
4844 	bool failure = false;
4845 	int xdp_status = 0;
4846 	int status = 0;
4847 
4848 	if (netif_msg_rx_status(priv)) {
4849 		void *rx_head;
4850 
4851 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4852 		if (priv->extend_desc) {
4853 			rx_head = (void *)rx_q->dma_erx;
4854 			desc_size = sizeof(struct dma_extended_desc);
4855 		} else {
4856 			rx_head = (void *)rx_q->dma_rx;
4857 			desc_size = sizeof(struct dma_desc);
4858 		}
4859 
4860 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4861 				    rx_q->dma_rx_phy, desc_size);
4862 	}
4863 	while (count < limit) {
4864 		struct stmmac_rx_buffer *buf;
4865 		unsigned int buf1_len = 0;
4866 		struct dma_desc *np, *p;
4867 		int entry;
4868 		int res;
4869 
4870 		if (!count && rx_q->state_saved) {
4871 			error = rx_q->state.error;
4872 			len = rx_q->state.len;
4873 		} else {
4874 			rx_q->state_saved = false;
4875 			error = 0;
4876 			len = 0;
4877 		}
4878 
4879 		if (count >= limit)
4880 			break;
4881 
4882 read_again:
4883 		buf1_len = 0;
4884 		entry = next_entry;
4885 		buf = &rx_q->buf_pool[entry];
4886 
4887 		if (dirty >= STMMAC_RX_FILL_BATCH) {
4888 			failure = failure ||
4889 				  !stmmac_rx_refill_zc(priv, queue, dirty);
4890 			dirty = 0;
4891 		}
4892 
4893 		if (priv->extend_desc)
4894 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4895 		else
4896 			p = rx_q->dma_rx + entry;
4897 
4898 		/* read the status of the incoming frame */
4899 		status = stmmac_rx_status(priv, &priv->dev->stats,
4900 					  &priv->xstats, p);
4901 		/* check if managed by the DMA otherwise go ahead */
4902 		if (unlikely(status & dma_own))
4903 			break;
4904 
4905 		/* Prefetch the next RX descriptor */
4906 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4907 						priv->dma_rx_size);
4908 		next_entry = rx_q->cur_rx;
4909 
4910 		if (priv->extend_desc)
4911 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4912 		else
4913 			np = rx_q->dma_rx + next_entry;
4914 
4915 		prefetch(np);
4916 
4917 		if (priv->extend_desc)
4918 			stmmac_rx_extended_status(priv, &priv->dev->stats,
4919 						  &priv->xstats,
4920 						  rx_q->dma_erx + entry);
4921 		if (unlikely(status == discard_frame)) {
4922 			xsk_buff_free(buf->xdp);
4923 			buf->xdp = NULL;
4924 			dirty++;
4925 			error = 1;
4926 			if (!priv->hwts_rx_en)
4927 				priv->dev->stats.rx_errors++;
4928 		}
4929 
4930 		if (unlikely(error && (status & rx_not_ls)))
4931 			goto read_again;
4932 		if (unlikely(error)) {
4933 			count++;
4934 			continue;
4935 		}
4936 
4937 		/* Ensure a valid XSK buffer before proceeding */
4938 		if (!buf->xdp)
4939 			break;
4940 
4941 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
4942 		if (likely(status & rx_not_ls)) {
4943 			xsk_buff_free(buf->xdp);
4944 			buf->xdp = NULL;
4945 			dirty++;
4946 			count++;
4947 			goto read_again;
4948 		}
4949 
4950 		/* XDP ZC frames only support primary buffers for now */
4951 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4952 		len += buf1_len;
4953 
4954 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4955 		 * Type frames (LLC/LLC-SNAP)
4956 		 *
4957 		 * llc_snap is never checked in GMAC >= 4, so this ACS
4958 		 * feature is always disabled and packets need to be
4959 		 * stripped manually.
4960 		 */
4961 		if (likely(!(status & rx_not_ls)) &&
4962 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4963 		     unlikely(status != llc_snap))) {
4964 			buf1_len -= ETH_FCS_LEN;
4965 			len -= ETH_FCS_LEN;
4966 		}
4967 
4968 		/* RX buffer is good and fits into an XSK pool buffer */
4969 		buf->xdp->data_end = buf->xdp->data + buf1_len;
4970 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
4971 
4972 		rcu_read_lock();
4973 		prog = READ_ONCE(priv->xdp_prog);
4974 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
4975 		rcu_read_unlock();
4976 
4977 		switch (res) {
4978 		case STMMAC_XDP_PASS:
4979 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
4980 			xsk_buff_free(buf->xdp);
4981 			break;
4982 		case STMMAC_XDP_CONSUMED:
4983 			xsk_buff_free(buf->xdp);
4984 			priv->dev->stats.rx_dropped++;
4985 			break;
4986 		case STMMAC_XDP_TX:
4987 		case STMMAC_XDP_REDIRECT:
4988 			xdp_status |= res;
4989 			break;
4990 		}
4991 
4992 		buf->xdp = NULL;
4993 		dirty++;
4994 		count++;
4995 	}
4996 
4997 	if (status & rx_not_ls) {
4998 		rx_q->state_saved = true;
4999 		rx_q->state.error = error;
5000 		rx_q->state.len = len;
5001 	}
5002 
5003 	stmmac_finalize_xdp_rx(priv, xdp_status);
5004 
5005 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5006 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5007 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5008 		else
5009 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5010 
5011 		return (int)count;
5012 	}
5013 
5014 	return failure ? limit : (int)count;
5015 }
5016 
5017 /**
5018  * stmmac_rx - manage the receive process
5019  * @priv: driver private structure
5020  * @limit: napi budget
5021  * @queue: RX queue index.
5022  * Description :  this is the function called by the napi poll method.
5023  * It gets all the frames inside the ring.
5024  */
5025 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5026 {
5027 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5028 	struct stmmac_channel *ch = &priv->channel[queue];
5029 	unsigned int count = 0, error = 0, len = 0;
5030 	int status = 0, coe = priv->hw->rx_csum;
5031 	unsigned int next_entry = rx_q->cur_rx;
5032 	enum dma_data_direction dma_dir;
5033 	unsigned int desc_size;
5034 	struct sk_buff *skb = NULL;
5035 	struct xdp_buff xdp;
5036 	int xdp_status = 0;
5037 	int buf_sz;
5038 
5039 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5040 	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5041 
5042 	if (netif_msg_rx_status(priv)) {
5043 		void *rx_head;
5044 
5045 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5046 		if (priv->extend_desc) {
5047 			rx_head = (void *)rx_q->dma_erx;
5048 			desc_size = sizeof(struct dma_extended_desc);
5049 		} else {
5050 			rx_head = (void *)rx_q->dma_rx;
5051 			desc_size = sizeof(struct dma_desc);
5052 		}
5053 
5054 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5055 				    rx_q->dma_rx_phy, desc_size);
5056 	}
5057 	while (count < limit) {
5058 		unsigned int buf1_len = 0, buf2_len = 0;
5059 		enum pkt_hash_types hash_type;
5060 		struct stmmac_rx_buffer *buf;
5061 		struct dma_desc *np, *p;
5062 		int entry;
5063 		u32 hash;
5064 
5065 		if (!count && rx_q->state_saved) {
5066 			skb = rx_q->state.skb;
5067 			error = rx_q->state.error;
5068 			len = rx_q->state.len;
5069 		} else {
5070 			rx_q->state_saved = false;
5071 			skb = NULL;
5072 			error = 0;
5073 			len = 0;
5074 		}
5075 
5076 		if (count >= limit)
5077 			break;
5078 
5079 read_again:
5080 		buf1_len = 0;
5081 		buf2_len = 0;
5082 		entry = next_entry;
5083 		buf = &rx_q->buf_pool[entry];
5084 
5085 		if (priv->extend_desc)
5086 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5087 		else
5088 			p = rx_q->dma_rx + entry;
5089 
5090 		/* read the status of the incoming frame */
5091 		status = stmmac_rx_status(priv, &priv->dev->stats,
5092 				&priv->xstats, p);
5093 		/* check if managed by the DMA otherwise go ahead */
5094 		if (unlikely(status & dma_own))
5095 			break;
5096 
5097 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5098 						priv->dma_rx_size);
5099 		next_entry = rx_q->cur_rx;
5100 
5101 		if (priv->extend_desc)
5102 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5103 		else
5104 			np = rx_q->dma_rx + next_entry;
5105 
5106 		prefetch(np);
5107 
5108 		if (priv->extend_desc)
5109 			stmmac_rx_extended_status(priv, &priv->dev->stats,
5110 					&priv->xstats, rx_q->dma_erx + entry);
5111 		if (unlikely(status == discard_frame)) {
5112 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5113 			buf->page = NULL;
5114 			error = 1;
5115 			if (!priv->hwts_rx_en)
5116 				priv->dev->stats.rx_errors++;
5117 		}
5118 
5119 		if (unlikely(error && (status & rx_not_ls)))
5120 			goto read_again;
5121 		if (unlikely(error)) {
5122 			dev_kfree_skb(skb);
5123 			skb = NULL;
5124 			count++;
5125 			continue;
5126 		}
5127 
5128 		/* Buffer is good. Go on. */
5129 
5130 		prefetch(page_address(buf->page));
5131 		if (buf->sec_page)
5132 			prefetch(page_address(buf->sec_page));
5133 
5134 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5135 		len += buf1_len;
5136 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5137 		len += buf2_len;
5138 
5139 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5140 		 * Type frames (LLC/LLC-SNAP)
5141 		 *
5142 		 * llc_snap is never checked in GMAC >= 4, so this ACS
5143 		 * feature is always disabled and packets need to be
5144 		 * stripped manually.
5145 		 */
5146 		if (likely(!(status & rx_not_ls)) &&
5147 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5148 		     unlikely(status != llc_snap))) {
5149 			if (buf2_len)
5150 				buf2_len -= ETH_FCS_LEN;
5151 			else
5152 				buf1_len -= ETH_FCS_LEN;
5153 
5154 			len -= ETH_FCS_LEN;
5155 		}
5156 
5157 		if (!skb) {
5158 			unsigned int pre_len, sync_len;
5159 
5160 			dma_sync_single_for_cpu(priv->device, buf->addr,
5161 						buf1_len, dma_dir);
5162 
5163 			xdp.data = page_address(buf->page) + buf->page_offset;
5164 			xdp.data_end = xdp.data + buf1_len;
5165 			xdp.data_hard_start = page_address(buf->page);
5166 			xdp_set_data_meta_invalid(&xdp);
5167 			xdp.frame_sz = buf_sz;
5168 			xdp.rxq = &rx_q->xdp_rxq;
5169 
5170 			pre_len = xdp.data_end - xdp.data_hard_start -
5171 				  buf->page_offset;
5172 			skb = stmmac_xdp_run_prog(priv, &xdp);
5173 			/* Due to xdp_adjust_tail: the DMA sync for_device
5174 			 * must cover the maximum length the CPU touched
5175 			 */
5176 			sync_len = xdp.data_end - xdp.data_hard_start -
5177 				   buf->page_offset;
5178 			sync_len = max(sync_len, pre_len);
5179 
5180 			/* For verdicts other than XDP_PASS */
5181 			if (IS_ERR(skb)) {
5182 				unsigned int xdp_res = -PTR_ERR(skb);
5183 
5184 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5185 					page_pool_put_page(rx_q->page_pool,
5186 							   virt_to_head_page(xdp.data),
5187 							   sync_len, true);
5188 					buf->page = NULL;
5189 					priv->dev->stats.rx_dropped++;
5190 
5191 					/* Clear skb, as it carried the XDP
5192 					 * verdict encoded as an error pointer.
5193 					 */
5194 					skb = NULL;
5195 
5196 					if (unlikely((status & rx_not_ls)))
5197 						goto read_again;
5198 
5199 					count++;
5200 					continue;
5201 				} else if (xdp_res & (STMMAC_XDP_TX |
5202 						      STMMAC_XDP_REDIRECT)) {
5203 					xdp_status |= xdp_res;
5204 					buf->page = NULL;
5205 					skb = NULL;
5206 					count++;
5207 					continue;
5208 				}
5209 			}
5210 		}
5211 
5212 		if (!skb) {
5213 			/* XDP program may expand or reduce tail */
5214 			buf1_len = xdp.data_end - xdp.data;
5215 
5216 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5217 			if (!skb) {
5218 				priv->dev->stats.rx_dropped++;
5219 				count++;
5220 				goto drain_data;
5221 			}
5222 
5223 			/* XDP program may adjust header */
5224 			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5225 			skb_put(skb, buf1_len);
5226 
5227 			/* Data payload copied into SKB, page ready for recycle */
5228 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5229 			buf->page = NULL;
5230 		} else if (buf1_len) {
5231 			dma_sync_single_for_cpu(priv->device, buf->addr,
5232 						buf1_len, dma_dir);
5233 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5234 					buf->page, buf->page_offset, buf1_len,
5235 					priv->dma_buf_sz);
5236 
5237 			/* Data payload appended into SKB */
5238 			page_pool_release_page(rx_q->page_pool, buf->page);
5239 			buf->page = NULL;
5240 		}
5241 
5242 		if (buf2_len) {
5243 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5244 						buf2_len, dma_dir);
5245 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5246 					buf->sec_page, 0, buf2_len,
5247 					priv->dma_buf_sz);
5248 
5249 			/* Data payload appended into SKB */
5250 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
5251 			buf->sec_page = NULL;
5252 		}
5253 
5254 drain_data:
5255 		if (likely(status & rx_not_ls))
5256 			goto read_again;
5257 		if (!skb)
5258 			continue;
5259 
5260 		/* Got entire packet into SKB. Finish it. */
5261 
5262 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5263 		stmmac_rx_vlan(priv->dev, skb);
5264 		skb->protocol = eth_type_trans(skb, priv->dev);
5265 
5266 		if (unlikely(!coe))
5267 			skb_checksum_none_assert(skb);
5268 		else
5269 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5270 
5271 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5272 			skb_set_hash(skb, hash, hash_type);
5273 
5274 		skb_record_rx_queue(skb, queue);
5275 		napi_gro_receive(&ch->rx_napi, skb);
5276 		skb = NULL;
5277 
5278 		priv->dev->stats.rx_packets++;
5279 		priv->dev->stats.rx_bytes += len;
5280 		count++;
5281 	}
5282 
5283 	if (status & rx_not_ls || skb) {
5284 		rx_q->state_saved = true;
5285 		rx_q->state.skb = skb;
5286 		rx_q->state.error = error;
5287 		rx_q->state.len = len;
5288 	}
5289 
5290 	stmmac_finalize_xdp_rx(priv, xdp_status);
5291 
5292 	stmmac_rx_refill(priv, queue);
5293 
5294 	priv->xstats.rx_pkt_n += count;
5295 
5296 	return count;
5297 }
5298 
5299 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5300 {
5301 	struct stmmac_channel *ch =
5302 		container_of(napi, struct stmmac_channel, rx_napi);
5303 	struct stmmac_priv *priv = ch->priv_data;
5304 	u32 chan = ch->index;
5305 	int work_done;
5306 
5307 	priv->xstats.napi_poll++;
5308 
5309 	work_done = stmmac_rx(priv, budget, chan);
5310 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5311 		unsigned long flags;
5312 
5313 		spin_lock_irqsave(&ch->lock, flags);
5314 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5315 		spin_unlock_irqrestore(&ch->lock, flags);
5316 	}
5317 
5318 	return work_done;
5319 }
5320 
5321 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5322 {
5323 	struct stmmac_channel *ch =
5324 		container_of(napi, struct stmmac_channel, tx_napi);
5325 	struct stmmac_priv *priv = ch->priv_data;
5326 	u32 chan = ch->index;
5327 	int work_done;
5328 
5329 	priv->xstats.napi_poll++;
5330 
5331 	work_done = stmmac_tx_clean(priv, budget, chan);
5332 	work_done = min(work_done, budget);
5333 
5334 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5335 		unsigned long flags;
5336 
5337 		spin_lock_irqsave(&ch->lock, flags);
5338 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5339 		spin_unlock_irqrestore(&ch->lock, flags);
5340 	}
5341 
5342 	return work_done;
5343 }
5344 
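/**
 * stmmac_napi_poll_rxtx - NAPI poll handler for a combined RX/TX channel
 * @napi: NAPI instance embedded in the channel structure
 * @budget: maximum number of packets this poll may process
 * Description: cleans the TX ring and services the zero-copy (XSK) RX ring.
 * If either direction exhausts the budget the poll keeps running, otherwise
 * NAPI is completed and both RX and TX DMA interrupts are re-enabled.
 */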
5345 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5346 {
5347 	struct stmmac_channel *ch =
5348 		container_of(napi, struct stmmac_channel, rxtx_napi);
5349 	struct stmmac_priv *priv = ch->priv_data;
5350 	int rx_done, tx_done;
5351 	u32 chan = ch->index;
5352 
5353 	priv->xstats.napi_poll++;
5354 
5355 	tx_done = stmmac_tx_clean(priv, budget, chan);
5356 	tx_done = min(tx_done, budget);
5357 
5358 	rx_done = stmmac_rx_zc(priv, budget, chan);
5359 
5360 	/* If either TX or RX work is not complete, return budget
5361 	 * and keep polling
5362 	 */
5363 	if (tx_done >= budget || rx_done >= budget)
5364 		return budget;
5365 
5366 	/* all work done, exit the polling mode */
5367 	if (napi_complete_done(napi, rx_done)) {
5368 		unsigned long flags;
5369 
5370 		spin_lock_irqsave(&ch->lock, flags);
5371 		/* Both RX and TX work are complete,
5372 		 * so enable both RX & TX IRQs.
5373 		 */
5374 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5375 		spin_unlock_irqrestore(&ch->lock, flags);
5376 	}
5377 
5378 	return min(rx_done, budget - 1);
5379 }
5380 
5381 /**
5382  *  stmmac_tx_timeout
5383  *  @dev : Pointer to net device structure
5384  *  @txqueue: the index of the hanging transmit queue
5385  *  Description: this function is called when a packet transmission fails to
5386  *   complete within a reasonable time. The driver will mark the error in the
5387  *   netdev structure and arrange for the device to be reset to a sane state
5388  *   in order to transmit a new packet.
5389  */
5390 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5391 {
5392 	struct stmmac_priv *priv = netdev_priv(dev);
5393 
5394 	stmmac_global_err(priv);
5395 }
5396 
5397 /**
5398  *  stmmac_set_rx_mode - entry point for multicast addressing
5399  *  @dev : pointer to the device structure
5400  *  Description:
5401  *  This function is a driver entry point which gets called by the kernel
5402  *  whenever multicast addresses must be enabled/disabled.
5403  *  Return value:
5404  *  void.
5405  */
5406 static void stmmac_set_rx_mode(struct net_device *dev)
5407 {
5408 	struct stmmac_priv *priv = netdev_priv(dev);
5409 
5410 	stmmac_set_filter(priv, priv->hw, dev);
5411 }
5412 
5413 /**
5414  *  stmmac_change_mtu - entry point to change MTU size for the device.
5415  *  @dev : device pointer.
5416  *  @new_mtu : the new MTU size for the device.
5417  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5418  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5419  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5420  *  Return value:
5421  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5422  *  file on failure.
5423  */
5424 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5425 {
5426 	struct stmmac_priv *priv = netdev_priv(dev);
5427 	int txfifosz = priv->plat->tx_fifo_size;
5428 	const int mtu = new_mtu;
5429 
5430 	if (txfifosz == 0)
5431 		txfifosz = priv->dma_cap.tx_fifo_size;
5432 
5433 	txfifosz /= priv->plat->tx_queues_to_use;
5434 
5435 	if (netif_running(dev)) {
5436 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
5437 		return -EBUSY;
5438 	}
5439 
5440 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5441 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5442 		return -EINVAL;
5443 	}
5444 
5445 	new_mtu = STMMAC_ALIGN(new_mtu);
5446 
5447 	/* If condition true, FIFO is too small or MTU too large */
5448 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5449 		return -EINVAL;
5450 
5451 	dev->mtu = mtu;
5452 
5453 	netdev_update_features(dev);
5454 
5455 	return 0;
5456 }
5457 
5458 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5459 					     netdev_features_t features)
5460 {
5461 	struct stmmac_priv *priv = netdev_priv(dev);
5462 
5463 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5464 		features &= ~NETIF_F_RXCSUM;
5465 
5466 	if (!priv->plat->tx_coe)
5467 		features &= ~NETIF_F_CSUM_MASK;
5468 
5469 	/* Some GMAC devices have a bugged Jumbo frame support that
5470 	 * needs to have the Tx COE disabled for oversized frames
5471 	 * (due to limited buffer sizes). In this case we disable
5472 	 * the TX csum insertion in the TDES and do not use SF.
5473 	 */
5474 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5475 		features &= ~NETIF_F_CSUM_MASK;
5476 
5477 	/* Disable tso if asked by ethtool */
5478 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5479 		if (features & NETIF_F_TSO)
5480 			priv->tso = true;
5481 		else
5482 			priv->tso = false;
5483 	}
5484 
5485 	return features;
5486 }
5487 
5488 static int stmmac_set_features(struct net_device *netdev,
5489 			       netdev_features_t features)
5490 {
5491 	struct stmmac_priv *priv = netdev_priv(netdev);
5492 	bool sph_en;
5493 	u32 chan;
5494 
5495 	/* Keep the COE Type if RX checksum offload is enabled */
5496 	if (features & NETIF_F_RXCSUM)
5497 		priv->hw->rx_csum = priv->plat->rx_coe;
5498 	else
5499 		priv->hw->rx_csum = 0;
5500 	/* No check needed because rx_coe has been set before and it will be
5501 	 * fixed in case of issue.
5502 	 */
5503 	stmmac_rx_ipc(priv, priv->hw);
5504 
5505 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5506 
5507 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5508 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5509 
5510 	return 0;
5511 }
5512 
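/**
 * stmmac_fpe_event_status - handle Frame Preemption (FPE) mPacket events
 * @priv: driver private structure
 * @status: FPE event bits reported by the hardware
 * Description: updates the local and link partner FPE states according to
 * the verify/response mPackets seen on the link and, while the handshake is
 * enabled, schedules the FPE workqueue task to complete it.
 */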
5513 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5514 {
5515 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5516 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5517 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5518 	bool *hs_enable = &fpe_cfg->hs_enable;
5519 
5520 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5521 		return;
5522 
5523 	/* If LP has sent verify mPacket, LP is FPE capable */
5524 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5525 		if (*lp_state < FPE_STATE_CAPABLE)
5526 			*lp_state = FPE_STATE_CAPABLE;
5527 
5528 		/* If the user has requested FPE enable, respond quickly */
5529 		if (*hs_enable)
5530 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5531 						MPACKET_RESPONSE);
5532 	}
5533 
5534 	/* If Local has sent verify mPacket, Local is FPE capable */
5535 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5536 		if (*lo_state < FPE_STATE_CAPABLE)
5537 			*lo_state = FPE_STATE_CAPABLE;
5538 	}
5539 
5540 	/* If LP has sent response mPacket, LP is entering FPE ON */
5541 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5542 		*lp_state = FPE_STATE_ENTERING_ON;
5543 
5544 	/* If Local has sent response mPacket, Local is entering FPE ON */
5545 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5546 		*lo_state = FPE_STATE_ENTERING_ON;
5547 
5548 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5549 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5550 	    priv->fpe_wq) {
5551 		queue_work(priv->fpe_wq, &priv->fpe_task);
5552 	}
5553 }
5554 
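/**
 * stmmac_common_interrupt - handle MAC-level (non-DMA) interrupt sources
 * @priv: driver private structure
 * Description: processes wake-up, EST, FPE, LPI entry/exit, per-queue MTL
 * status (e.g. RX overflow), PCS link and timestamp events reported by the
 * core.
 */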
5555 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5556 {
5557 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5558 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5559 	u32 queues_count;
5560 	u32 queue;
5561 	bool xmac;
5562 
5563 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5564 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5565 
5566 	if (priv->irq_wake)
5567 		pm_wakeup_event(priv->device, 0);
5568 
5569 	if (priv->dma_cap.estsel)
5570 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5571 				      &priv->xstats, tx_cnt);
5572 
5573 	if (priv->dma_cap.fpesel) {
5574 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5575 						   priv->dev);
5576 
5577 		stmmac_fpe_event_status(priv, status);
5578 	}
5579 
5580 	/* To handle GMAC own interrupts */
5581 	if ((priv->plat->has_gmac) || xmac) {
5582 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5583 		int mtl_status;
5584 
5585 		if (unlikely(status)) {
5586 			/* For LPI we need to save the tx status */
5587 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5588 				priv->tx_path_in_lpi_mode = true;
5589 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5590 				priv->tx_path_in_lpi_mode = false;
5591 		}
5592 
5593 		for (queue = 0; queue < queues_count; queue++) {
5594 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5595 
5596 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
5597 								queue);
5598 			if (mtl_status != -EINVAL)
5599 				status |= mtl_status;
5600 
5601 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
5602 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
5603 						       rx_q->rx_tail_addr,
5604 						       queue);
5605 		}
5606 
5607 		/* PCS link status */
5608 		if (priv->hw->pcs) {
5609 			if (priv->xstats.pcs_link)
5610 				netif_carrier_on(priv->dev);
5611 			else
5612 				netif_carrier_off(priv->dev);
5613 		}
5614 
5615 		stmmac_timestamp_interrupt(priv, priv);
5616 	}
5617 }
5618 
5619 /**
5620  *  stmmac_interrupt - main ISR
5621  *  @irq: interrupt number.
5622  *  @dev_id: to pass the net device pointer.
5623  *  Description: this is the main driver interrupt service routine.
5624  *  It can call:
5625  *  o DMA service routine (to manage incoming frame reception and transmission
5626  *    status)
5627  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5628  *    interrupts.
5629  */
5630 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5631 {
5632 	struct net_device *dev = (struct net_device *)dev_id;
5633 	struct stmmac_priv *priv = netdev_priv(dev);
5634 
5635 	/* Check if adapter is up */
5636 	if (test_bit(STMMAC_DOWN, &priv->state))
5637 		return IRQ_HANDLED;
5638 
5639 	/* Check if a fatal error happened */
5640 	if (stmmac_safety_feat_interrupt(priv))
5641 		return IRQ_HANDLED;
5642 
5643 	/* To handle Common interrupts */
5644 	stmmac_common_interrupt(priv);
5645 
5646 	/* To handle DMA interrupts */
5647 	stmmac_dma_interrupt(priv);
5648 
5649 	return IRQ_HANDLED;
5650 }
5651 
5652 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5653 {
5654 	struct net_device *dev = (struct net_device *)dev_id;
5655 	struct stmmac_priv *priv = netdev_priv(dev);
5656 
5657 	if (unlikely(!dev)) {
5658 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5659 		return IRQ_NONE;
5660 	}
5661 
5662 	/* Check if adapter is up */
5663 	if (test_bit(STMMAC_DOWN, &priv->state))
5664 		return IRQ_HANDLED;
5665 
5666 	/* To handle Common interrupts */
5667 	stmmac_common_interrupt(priv);
5668 
5669 	return IRQ_HANDLED;
5670 }
5671 
5672 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5673 {
5674 	struct net_device *dev = (struct net_device *)dev_id;
5675 	struct stmmac_priv *priv = netdev_priv(dev);
5676 
5677 	if (unlikely(!dev)) {
5678 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5679 		return IRQ_NONE;
5680 	}
5681 
5682 	/* Check if adapter is up */
5683 	if (test_bit(STMMAC_DOWN, &priv->state))
5684 		return IRQ_HANDLED;
5685 
5686 	/* Check if a fatal error happened */
5687 	stmmac_safety_feat_interrupt(priv);
5688 
5689 	return IRQ_HANDLED;
5690 }
5691 
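/**
 * stmmac_msi_intr_tx - TX interrupt handler for a dedicated MSI vector
 * @irq: interrupt number
 * @data: pointer to the TX queue bound to this vector
 * Description: schedules NAPI for the queue and, on DMA errors, either bumps
 * up the DMA threshold or restarts the TX path.
 */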
5692 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5693 {
5694 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5695 	int chan = tx_q->queue_index;
5696 	struct stmmac_priv *priv;
5697 	int status;
5698 
5699 	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5700 
5701 	if (unlikely(!data)) {
5702 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5703 		return IRQ_NONE;
5704 	}
5705 
5706 	/* Check if adapter is up */
5707 	if (test_bit(STMMAC_DOWN, &priv->state))
5708 		return IRQ_HANDLED;
5709 
5710 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5711 
5712 	if (unlikely(status & tx_hard_error_bump_tc)) {
5713 		/* Try to bump up the dma threshold on this failure */
5714 		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
5715 		    tc <= 256) {
5716 			tc += 64;
5717 			if (priv->plat->force_thresh_dma_mode)
5718 				stmmac_set_dma_operation_mode(priv,
5719 							      tc,
5720 							      tc,
5721 							      chan);
5722 			else
5723 				stmmac_set_dma_operation_mode(priv,
5724 							      tc,
5725 							      SF_DMA_MODE,
5726 							      chan);
5727 			priv->xstats.threshold = tc;
5728 		}
5729 	} else if (unlikely(status == tx_hard_error)) {
5730 		stmmac_tx_err(priv, chan);
5731 	}
5732 
5733 	return IRQ_HANDLED;
5734 }
5735 
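/**
 * stmmac_msi_intr_rx - RX interrupt handler for a dedicated MSI vector
 * @irq: interrupt number
 * @data: pointer to the RX queue bound to this vector
 * Description: schedules NAPI to service the RX ring of this queue.
 */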
5736 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5737 {
5738 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5739 	int chan = rx_q->queue_index;
5740 	struct stmmac_priv *priv;
5741 
5742 	priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5743 
5744 	if (unlikely(!data)) {
5745 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5746 		return IRQ_NONE;
5747 	}
5748 
5749 	/* Check if adapter is up */
5750 	if (test_bit(STMMAC_DOWN, &priv->state))
5751 		return IRQ_HANDLED;
5752 
5753 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
5754 
5755 	return IRQ_HANDLED;
5756 }
5757 
5758 #ifdef CONFIG_NET_POLL_CONTROLLER
5759 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5760  * to allow network I/O with interrupts disabled.
5761  */
5762 static void stmmac_poll_controller(struct net_device *dev)
5763 {
5764 	struct stmmac_priv *priv = netdev_priv(dev);
5765 	int i;
5766 
5767 	/* If adapter is down, do nothing */
5768 	if (test_bit(STMMAC_DOWN, &priv->state))
5769 		return;
5770 
5771 	if (priv->plat->multi_msi_en) {
5772 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5773 			stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5774 
5775 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5776 			stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5777 	} else {
5778 		disable_irq(dev->irq);
5779 		stmmac_interrupt(dev->irq, dev);
5780 		enable_irq(dev->irq);
5781 	}
5782 }
5783 #endif
5784 
5785 /**
5786  *  stmmac_ioctl - Entry point for the Ioctl
5787  *  @dev: Device pointer.
5788  *  @rq: An IOCTL-specific structure that can contain a pointer to
5789  *  a proprietary structure used to pass information to the driver.
5790  *  @cmd: IOCTL command
5791  *  Description:
5792  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5793  */
5794 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5795 {
5796 	struct stmmac_priv *priv = netdev_priv(dev);
5797 	int ret = -EOPNOTSUPP;
5798 
5799 	if (!netif_running(dev))
5800 		return -EINVAL;
5801 
5802 	switch (cmd) {
5803 	case SIOCGMIIPHY:
5804 	case SIOCGMIIREG:
5805 	case SIOCSMIIREG:
5806 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5807 		break;
5808 	case SIOCSHWTSTAMP:
5809 		ret = stmmac_hwtstamp_set(dev, rq);
5810 		break;
5811 	case SIOCGHWTSTAMP:
5812 		ret = stmmac_hwtstamp_get(dev, rq);
5813 		break;
5814 	default:
5815 		break;
5816 	}
5817 
5818 	return ret;
5819 }
5820 
5821 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5822 				    void *cb_priv)
5823 {
5824 	struct stmmac_priv *priv = cb_priv;
5825 	int ret = -EOPNOTSUPP;
5826 
5827 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5828 		return ret;
5829 
5830 	__stmmac_disable_all_queues(priv);
5831 
5832 	switch (type) {
5833 	case TC_SETUP_CLSU32:
5834 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5835 		break;
5836 	case TC_SETUP_CLSFLOWER:
5837 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
5838 		break;
5839 	default:
5840 		break;
5841 	}
5842 
5843 	stmmac_enable_all_queues(priv);
5844 	return ret;
5845 }
5846 
5847 static LIST_HEAD(stmmac_block_cb_list);
5848 
5849 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5850 			   void *type_data)
5851 {
5852 	struct stmmac_priv *priv = netdev_priv(ndev);
5853 
5854 	switch (type) {
5855 	case TC_SETUP_BLOCK:
5856 		return flow_block_cb_setup_simple(type_data,
5857 						  &stmmac_block_cb_list,
5858 						  stmmac_setup_tc_block_cb,
5859 						  priv, priv, true);
5860 	case TC_SETUP_QDISC_CBS:
5861 		return stmmac_tc_setup_cbs(priv, priv, type_data);
5862 	case TC_SETUP_QDISC_TAPRIO:
5863 		return stmmac_tc_setup_taprio(priv, priv, type_data);
5864 	case TC_SETUP_QDISC_ETF:
5865 		return stmmac_tc_setup_etf(priv, priv, type_data);
5866 	default:
5867 		return -EOPNOTSUPP;
5868 	}
5869 }
5870 
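/**
 * stmmac_select_queue - select the TX queue for a given skb
 * @dev: device pointer
 * @skb: buffer to transmit
 * @sb_dev: subordinate device, unused here
 * Description: maps TSO/USO traffic to queue 0 (the only queue guaranteed to
 * be TSO/USO capable) and falls back to the default mapping otherwise.
 */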
5871 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5872 			       struct net_device *sb_dev)
5873 {
5874 	int gso = skb_shinfo(skb)->gso_type;
5875 
5876 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
5877 		/*
5878 		 * There is no way to determine the number of TSO/USO
5879 		 * capable queues. Always use queue 0, because if
5880 		 * TSO/USO is supported then at least this one will
5881 		 * be capable.
5882 		 */
5883 		return 0;
5884 	}
5885 
5886 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
5887 }
5888 
5889 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5890 {
5891 	struct stmmac_priv *priv = netdev_priv(ndev);
5892 	int ret = 0;
5893 
5894 	ret = eth_mac_addr(ndev, addr);
5895 	if (ret)
5896 		return ret;
5897 
5898 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
5899 
5900 	return ret;
5901 }
5902 
5903 #ifdef CONFIG_DEBUG_FS
5904 static struct dentry *stmmac_fs_dir;
5905 
5906 static void sysfs_display_ring(void *head, int size, int extend_desc,
5907 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
5908 {
5909 	int i;
5910 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5911 	struct dma_desc *p = (struct dma_desc *)head;
5912 	dma_addr_t dma_addr;
5913 
5914 	for (i = 0; i < size; i++) {
5915 		if (extend_desc) {
5916 			dma_addr = dma_phy_addr + i * sizeof(*ep);
5917 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5918 				   i, &dma_addr,
5919 				   le32_to_cpu(ep->basic.des0),
5920 				   le32_to_cpu(ep->basic.des1),
5921 				   le32_to_cpu(ep->basic.des2),
5922 				   le32_to_cpu(ep->basic.des3));
5923 			ep++;
5924 		} else {
5925 			dma_addr = dma_phy_addr + i * sizeof(*p);
5926 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5927 				   i, &dma_addr,
5928 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5929 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5930 			p++;
5931 		}
5932 		seq_printf(seq, "\n");
5933 	}
5934 }
5935 
5936 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
5937 {
5938 	struct net_device *dev = seq->private;
5939 	struct stmmac_priv *priv = netdev_priv(dev);
5940 	u32 rx_count = priv->plat->rx_queues_to_use;
5941 	u32 tx_count = priv->plat->tx_queues_to_use;
5942 	u32 queue;
5943 
5944 	if ((dev->flags & IFF_UP) == 0)
5945 		return 0;
5946 
5947 	for (queue = 0; queue < rx_count; queue++) {
5948 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5949 
5950 		seq_printf(seq, "RX Queue %d:\n", queue);
5951 
5952 		if (priv->extend_desc) {
5953 			seq_printf(seq, "Extended descriptor ring:\n");
5954 			sysfs_display_ring((void *)rx_q->dma_erx,
5955 					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
5956 		} else {
5957 			seq_printf(seq, "Descriptor ring:\n");
5958 			sysfs_display_ring((void *)rx_q->dma_rx,
5959 					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
5960 		}
5961 	}
5962 
5963 	for (queue = 0; queue < tx_count; queue++) {
5964 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5965 
5966 		seq_printf(seq, "TX Queue %d:\n", queue);
5967 
5968 		if (priv->extend_desc) {
5969 			seq_printf(seq, "Extended descriptor ring:\n");
5970 			sysfs_display_ring((void *)tx_q->dma_etx,
5971 					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
5972 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
5973 			seq_printf(seq, "Descriptor ring:\n");
5974 			sysfs_display_ring((void *)tx_q->dma_tx,
5975 					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
5976 		}
5977 	}
5978 
5979 	return 0;
5980 }
5981 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
5982 
5983 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
5984 {
5985 	struct net_device *dev = seq->private;
5986 	struct stmmac_priv *priv = netdev_priv(dev);
5987 
5988 	if (!priv->hw_cap_support) {
5989 		seq_printf(seq, "DMA HW features not supported\n");
5990 		return 0;
5991 	}
5992 
5993 	seq_printf(seq, "==============================\n");
5994 	seq_printf(seq, "\tDMA HW features\n");
5995 	seq_printf(seq, "==============================\n");
5996 
5997 	seq_printf(seq, "\t10/100 Mbps: %s\n",
5998 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
5999 	seq_printf(seq, "\t1000 Mbps: %s\n",
6000 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6001 	seq_printf(seq, "\tHalf duplex: %s\n",
6002 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6003 	seq_printf(seq, "\tHash Filter: %s\n",
6004 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
6005 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6006 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
6007 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6008 		   (priv->dma_cap.pcs) ? "Y" : "N");
6009 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6010 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6011 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6012 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6013 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6014 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6015 	seq_printf(seq, "\tRMON module: %s\n",
6016 		   (priv->dma_cap.rmon) ? "Y" : "N");
6017 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6018 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6019 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6020 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6021 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6022 		   (priv->dma_cap.eee) ? "Y" : "N");
6023 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6024 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6025 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6026 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6027 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6028 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6029 	} else {
6030 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6031 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6032 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6033 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6034 	}
6035 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6036 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6037 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6038 		   priv->dma_cap.number_rx_channel);
6039 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6040 		   priv->dma_cap.number_tx_channel);
6041 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6042 		   priv->dma_cap.number_rx_queues);
6043 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6044 		   priv->dma_cap.number_tx_queues);
6045 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6046 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6047 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6048 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6049 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6050 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6051 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6052 		   priv->dma_cap.pps_out_num);
6053 	seq_printf(seq, "\tSafety Features: %s\n",
6054 		   priv->dma_cap.asp ? "Y" : "N");
6055 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6056 		   priv->dma_cap.frpsel ? "Y" : "N");
6057 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6058 		   priv->dma_cap.addr64);
6059 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6060 		   priv->dma_cap.rssen ? "Y" : "N");
6061 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6062 		   priv->dma_cap.vlhash ? "Y" : "N");
6063 	seq_printf(seq, "\tSplit Header: %s\n",
6064 		   priv->dma_cap.sphen ? "Y" : "N");
6065 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6066 		   priv->dma_cap.vlins ? "Y" : "N");
6067 	seq_printf(seq, "\tDouble VLAN: %s\n",
6068 		   priv->dma_cap.dvlan ? "Y" : "N");
6069 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6070 		   priv->dma_cap.l3l4fnum);
6071 	seq_printf(seq, "\tARP Offloading: %s\n",
6072 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6073 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6074 		   priv->dma_cap.estsel ? "Y" : "N");
6075 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6076 		   priv->dma_cap.fpesel ? "Y" : "N");
6077 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6078 		   priv->dma_cap.tbssel ? "Y" : "N");
6079 	return 0;
6080 }
6081 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6082 
6083 /* Use network device events to rename debugfs file entries.
6084  */
6085 static int stmmac_device_event(struct notifier_block *unused,
6086 			       unsigned long event, void *ptr)
6087 {
6088 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6089 	struct stmmac_priv *priv = netdev_priv(dev);
6090 
6091 	if (dev->netdev_ops != &stmmac_netdev_ops)
6092 		goto done;
6093 
6094 	switch (event) {
6095 	case NETDEV_CHANGENAME:
6096 		if (priv->dbgfs_dir)
6097 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6098 							 priv->dbgfs_dir,
6099 							 stmmac_fs_dir,
6100 							 dev->name);
6101 		break;
6102 	}
6103 done:
6104 	return NOTIFY_DONE;
6105 }
6106 
6107 static struct notifier_block stmmac_notifier = {
6108 	.notifier_call = stmmac_device_event,
6109 };
6110 
6111 static void stmmac_init_fs(struct net_device *dev)
6112 {
6113 	struct stmmac_priv *priv = netdev_priv(dev);
6114 
6115 	rtnl_lock();
6116 
6117 	/* Create per netdev entries */
6118 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6119 
6120 	/* Entry to report DMA RX/TX rings */
6121 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6122 			    &stmmac_rings_status_fops);
6123 
6124 	/* Entry to report the DMA HW features */
6125 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6126 			    &stmmac_dma_cap_fops);
6127 
6128 	rtnl_unlock();
6129 }
6130 
6131 static void stmmac_exit_fs(struct net_device *dev)
6132 {
6133 	struct stmmac_priv *priv = netdev_priv(dev);
6134 
6135 	debugfs_remove_recursive(priv->dbgfs_dir);
6136 }
6137 #endif /* CONFIG_DEBUG_FS */
6138 
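/**
 * stmmac_vid_crc32_le - CRC32 of a little-endian VLAN ID
 * @vid_le: VLAN ID in little-endian byte order
 * Description: bit-wise CRC32 (Ethernet polynomial) over the 12 valid VID
 * bits; the result is used to compute the VLAN hash filter bit for this VID.
 */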
6139 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6140 {
6141 	unsigned char *data = (unsigned char *)&vid_le;
6142 	unsigned char data_byte = 0;
6143 	u32 crc = ~0x0;
6144 	u32 temp = 0;
6145 	int i, bits;
6146 
6147 	bits = get_bitmask_order(VLAN_VID_MASK);
6148 	for (i = 0; i < bits; i++) {
6149 		if ((i % 8) == 0)
6150 			data_byte = data[i / 8];
6151 
6152 		temp = ((crc & 1) ^ data_byte) & 1;
6153 		crc >>= 1;
6154 		data_byte >>= 1;
6155 
6156 		if (temp)
6157 			crc ^= 0xedb88320;
6158 	}
6159 
6160 	return crc;
6161 }
6162 
6163 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6164 {
6165 	u32 crc, hash = 0;
6166 	__le16 pmatch = 0;
6167 	int count = 0;
6168 	u16 vid = 0;
6169 
6170 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6171 		__le16 vid_le = cpu_to_le16(vid);
6172 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6173 		hash |= (1 << crc);
6174 		count++;
6175 	}
6176 
6177 	if (!priv->dma_cap.vlhash) {
6178 		if (count > 2) /* VID = 0 always passes filter */
6179 			return -EOPNOTSUPP;
6180 
6181 		pmatch = cpu_to_le16(vid);
6182 		hash = 0;
6183 	}
6184 
6185 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6186 }
6187 
6188 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6189 {
6190 	struct stmmac_priv *priv = netdev_priv(ndev);
6191 	bool is_double = false;
6192 	int ret;
6193 
6194 	ret = pm_runtime_get_sync(priv->device);
6195 	if (ret < 0) {
6196 		pm_runtime_put_noidle(priv->device);
6197 		return ret;
6198 	}
6199 
6200 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6201 		is_double = true;
6202 
6203 	set_bit(vid, priv->active_vlans);
6204 	ret = stmmac_vlan_update(priv, is_double);
6205 	if (ret) {
6206 		clear_bit(vid, priv->active_vlans);
6207 		return ret;
6208 	}
6209 
6210 	if (priv->hw->num_vlan) {
6211 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6212 		if (ret)
6213 			return ret;
6214 	}
6215 
6216 	return 0;
6217 }
6218 
6219 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6220 {
6221 	struct stmmac_priv *priv = netdev_priv(ndev);
6222 	bool is_double = false;
6223 	int ret;
6224 
6225 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6226 		is_double = true;
6227 
6228 	clear_bit(vid, priv->active_vlans);
6229 
6230 	if (priv->hw->num_vlan) {
6231 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6232 		if (ret)
6233 			goto del_vlan_error;
6234 	}
6235 
6236 	ret = stmmac_vlan_update(priv, is_double);
6237 
6238 del_vlan_error:
6239 	pm_runtime_put(priv->device);
6240 
6241 	return ret;
6242 }
6243 
6244 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6245 {
6246 	struct stmmac_priv *priv = netdev_priv(dev);
6247 
6248 	switch (bpf->command) {
6249 	case XDP_SETUP_PROG:
6250 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6251 	case XDP_SETUP_XSK_POOL:
6252 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6253 					     bpf->xsk.queue_id);
6254 	default:
6255 		return -EOPNOTSUPP;
6256 	}
6257 }
6258 
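/**
 * stmmac_xdp_xmit - ndo_xdp_xmit callback, transmit a batch of XDP frames
 * @dev: device pointer
 * @num_frames: number of frames in @frames
 * @frames: XDP frames to transmit
 * @flags: XDP_XMIT_* flags
 * Description: picks a TX queue for the current CPU (shared with the slow
 * path), queues as many frames as possible and, when XDP_XMIT_FLUSH is set,
 * kicks the DMA and re-arms the TX timer. Returns the number of frames sent.
 */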
6259 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6260 			   struct xdp_frame **frames, u32 flags)
6261 {
6262 	struct stmmac_priv *priv = netdev_priv(dev);
6263 	int cpu = smp_processor_id();
6264 	struct netdev_queue *nq;
6265 	int i, nxmit = 0;
6266 	int queue;
6267 
6268 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6269 		return -ENETDOWN;
6270 
6271 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6272 		return -EINVAL;
6273 
6274 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6275 	nq = netdev_get_tx_queue(priv->dev, queue);
6276 
6277 	__netif_tx_lock(nq, cpu);
6278 	/* Avoids TX time-out as we are sharing with slow path */
6279 	nq->trans_start = jiffies;
6280 
6281 	for (i = 0; i < num_frames; i++) {
6282 		int res;
6283 
6284 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6285 		if (res == STMMAC_XDP_CONSUMED)
6286 			break;
6287 
6288 		nxmit++;
6289 	}
6290 
6291 	if (flags & XDP_XMIT_FLUSH) {
6292 		stmmac_flush_tx_descriptors(priv, queue);
6293 		stmmac_tx_timer_arm(priv, queue);
6294 	}
6295 
6296 	__netif_tx_unlock(nq);
6297 
6298 	return nxmit;
6299 }
6300 
6301 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6302 {
6303 	struct stmmac_channel *ch = &priv->channel[queue];
6304 	unsigned long flags;
6305 
6306 	spin_lock_irqsave(&ch->lock, flags);
6307 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6308 	spin_unlock_irqrestore(&ch->lock, flags);
6309 
6310 	stmmac_stop_rx_dma(priv, queue);
6311 	__free_dma_rx_desc_resources(priv, queue);
6312 }
6313 
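/**
 * stmmac_enable_rx_queue - allocate, initialize and start one RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: allocates and initializes the RX descriptor ring, programs
 * the DMA channel, tail pointer and buffer size (taking a bound XSK pool
 * into account), starts RX DMA and re-enables the RX DMA interrupt.
 */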
6314 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6315 {
6316 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6317 	struct stmmac_channel *ch = &priv->channel[queue];
6318 	unsigned long flags;
6319 	u32 buf_size;
6320 	int ret;
6321 
6322 	ret = __alloc_dma_rx_desc_resources(priv, queue);
6323 	if (ret) {
6324 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6325 		return;
6326 	}
6327 
6328 	ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
6329 	if (ret) {
6330 		__free_dma_rx_desc_resources(priv, queue);
6331 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6332 		return;
6333 	}
6334 
6335 	stmmac_clear_rx_descriptors(priv, queue);
6336 
6337 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6338 			    rx_q->dma_rx_phy, rx_q->queue_index);
6339 
6340 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6341 			     sizeof(struct dma_desc));
6342 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6343 			       rx_q->rx_tail_addr, rx_q->queue_index);
6344 
6345 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6346 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6347 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6348 				      buf_size,
6349 				      rx_q->queue_index);
6350 	} else {
6351 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6352 				      priv->dma_buf_sz,
6353 				      rx_q->queue_index);
6354 	}
6355 
6356 	stmmac_start_rx_dma(priv, queue);
6357 
6358 	spin_lock_irqsave(&ch->lock, flags);
6359 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6360 	spin_unlock_irqrestore(&ch->lock, flags);
6361 }
6362 
6363 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6364 {
6365 	struct stmmac_channel *ch = &priv->channel[queue];
6366 	unsigned long flags;
6367 
6368 	spin_lock_irqsave(&ch->lock, flags);
6369 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6370 	spin_unlock_irqrestore(&ch->lock, flags);
6371 
6372 	stmmac_stop_tx_dma(priv, queue);
6373 	__free_dma_tx_desc_resources(priv, queue);
6374 }
6375 
6376 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6377 {
6378 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6379 	struct stmmac_channel *ch = &priv->channel[queue];
6380 	unsigned long flags;
6381 	int ret;
6382 
6383 	ret = __alloc_dma_tx_desc_resources(priv, queue);
6384 	if (ret) {
6385 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6386 		return;
6387 	}
6388 
6389 	ret = __init_dma_tx_desc_rings(priv, queue);
6390 	if (ret) {
6391 		__free_dma_tx_desc_resources(priv, queue);
6392 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6393 		return;
6394 	}
6395 
6396 	stmmac_clear_tx_descriptors(priv, queue);
6397 
6398 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6399 			    tx_q->dma_tx_phy, tx_q->queue_index);
6400 
6401 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6402 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6403 
6404 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6405 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6406 			       tx_q->tx_tail_addr, tx_q->queue_index);
6407 
6408 	stmmac_start_tx_dma(priv, queue);
6409 
6410 	spin_lock_irqsave(&ch->lock, flags);
6411 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6412 	spin_unlock_irqrestore(&ch->lock, flags);
6413 }
6414 
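/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup callback for AF_XDP zero-copy
 * @dev: device pointer
 * @queue: queue index to wake
 * @flags: XDP_WAKEUP_* flags
 * Description: validates the queue and XSK pool state and schedules the
 * combined RX/TX NAPI for the channel, as there is no per-channel SW
 * interrupt to trigger the datapath from user context.
 */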
6415 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6416 {
6417 	struct stmmac_priv *priv = netdev_priv(dev);
6418 	struct stmmac_rx_queue *rx_q;
6419 	struct stmmac_tx_queue *tx_q;
6420 	struct stmmac_channel *ch;
6421 
6422 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6423 	    !netif_carrier_ok(priv->dev))
6424 		return -ENETDOWN;
6425 
6426 	if (!stmmac_xdp_is_enabled(priv))
6427 		return -ENXIO;
6428 
6429 	if (queue >= priv->plat->rx_queues_to_use ||
6430 	    queue >= priv->plat->tx_queues_to_use)
6431 		return -EINVAL;
6432 
6433 	rx_q = &priv->rx_queue[queue];
6434 	tx_q = &priv->tx_queue[queue];
6435 	ch = &priv->channel[queue];
6436 
6437 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6438 		return -ENXIO;
6439 
6440 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6441 		/* EQoS does not have per-DMA channel SW interrupt,
6442 		 * so we schedule the RX NAPI straight away.
6443 		 */
6444 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6445 			__napi_schedule(&ch->rxtx_napi);
6446 	}
6447 
6448 	return 0;
6449 }
6450 
6451 static const struct net_device_ops stmmac_netdev_ops = {
6452 	.ndo_open = stmmac_open,
6453 	.ndo_start_xmit = stmmac_xmit,
6454 	.ndo_stop = stmmac_release,
6455 	.ndo_change_mtu = stmmac_change_mtu,
6456 	.ndo_fix_features = stmmac_fix_features,
6457 	.ndo_set_features = stmmac_set_features,
6458 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6459 	.ndo_tx_timeout = stmmac_tx_timeout,
6460 	.ndo_do_ioctl = stmmac_ioctl,
6461 	.ndo_setup_tc = stmmac_setup_tc,
6462 	.ndo_select_queue = stmmac_select_queue,
6463 #ifdef CONFIG_NET_POLL_CONTROLLER
6464 	.ndo_poll_controller = stmmac_poll_controller,
6465 #endif
6466 	.ndo_set_mac_address = stmmac_set_mac_address,
6467 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6468 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6469 	.ndo_bpf = stmmac_bpf,
6470 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6471 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6472 };
6473 
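/**
 * stmmac_reset_subtask - restart the interface after a fatal error
 * @priv: driver private structure
 * Description: if a reset was requested and the interface is up, closes and
 * reopens the device under the RTNL lock to bring it back to a sane state.
 */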
6474 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6475 {
6476 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6477 		return;
6478 	if (test_bit(STMMAC_DOWN, &priv->state))
6479 		return;
6480 
6481 	netdev_err(priv->dev, "Reset adapter.\n");
6482 
6483 	rtnl_lock();
6484 	netif_trans_update(priv->dev);
6485 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6486 		usleep_range(1000, 2000);
6487 
6488 	set_bit(STMMAC_DOWN, &priv->state);
6489 	dev_close(priv->dev);
6490 	dev_open(priv->dev, NULL);
6491 	clear_bit(STMMAC_DOWN, &priv->state);
6492 	clear_bit(STMMAC_RESETING, &priv->state);
6493 	rtnl_unlock();
6494 }
6495 
6496 static void stmmac_service_task(struct work_struct *work)
6497 {
6498 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6499 			service_task);
6500 
6501 	stmmac_reset_subtask(priv);
6502 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6503 }
6504 
6505 /**
6506  *  stmmac_hw_init - Init the MAC device
6507  *  @priv: driver private structure
6508  *  Description: this function is to configure the MAC device according to
6509  *  some platform parameters or the HW capability register. It prepares the
6510  *  driver to use either ring or chain modes and to setup either enhanced or
6511  *  normal descriptors.
6512  */
6513 static int stmmac_hw_init(struct stmmac_priv *priv)
6514 {
6515 	int ret;
6516 
6517 	/* dwmac-sun8i only works in chain mode */
6518 	if (priv->plat->has_sun8i)
6519 		chain_mode = 1;
6520 	priv->chain_mode = chain_mode;
6521 
6522 	/* Initialize HW Interface */
6523 	ret = stmmac_hwif_init(priv);
6524 	if (ret)
6525 		return ret;
6526 
6527 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
6528 	priv->hw_cap_support = stmmac_get_hw_features(priv);
6529 	if (priv->hw_cap_support) {
6530 		dev_info(priv->device, "DMA HW capability register supported\n");
6531 
6532 		/* We can override some gmac/dma configuration fields
6533 		 * (e.g. enh_desc, tx_coe) that are passed through the
6534 		 * platform data with the values from the HW capability
6535 		 * register (if supported).
6536 		 */
6537 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
6538 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
6539 		priv->hw->pmt = priv->plat->pmt;
6540 		if (priv->dma_cap.hash_tb_sz) {
6541 			priv->hw->multicast_filter_bins =
6542 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
6543 			priv->hw->mcast_bits_log2 =
6544 					ilog2(priv->hw->multicast_filter_bins);
6545 		}
6546 
6547 		/* TXCOE doesn't work in thresh DMA mode */
6548 		if (priv->plat->force_thresh_dma_mode)
6549 			priv->plat->tx_coe = 0;
6550 		else
6551 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
6552 
6553 		/* In case of GMAC4 rx_coe is from HW cap register. */
6554 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
6555 
6556 		if (priv->dma_cap.rx_coe_type2)
6557 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6558 		else if (priv->dma_cap.rx_coe_type1)
6559 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6560 
6561 	} else {
6562 		dev_info(priv->device, "No HW DMA feature register supported\n");
6563 	}
6564 
6565 	if (priv->plat->rx_coe) {
6566 		priv->hw->rx_csum = priv->plat->rx_coe;
6567 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6568 		if (priv->synopsys_id < DWMAC_CORE_4_00)
6569 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6570 	}
6571 	if (priv->plat->tx_coe)
6572 		dev_info(priv->device, "TX Checksum insertion supported\n");
6573 
6574 	if (priv->plat->pmt) {
6575 		dev_info(priv->device, "Wake-Up On Lan supported\n");
6576 		device_set_wakeup_capable(priv->device, 1);
6577 	}
6578 
6579 	if (priv->dma_cap.tsoen)
6580 		dev_info(priv->device, "TSO supported\n");
6581 
6582 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6583 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6584 
6585 	/* Run HW quirks, if any */
6586 	if (priv->hwif_quirks) {
6587 		ret = priv->hwif_quirks(priv);
6588 		if (ret)
6589 			return ret;
6590 	}
6591 
6592 	/* Rx Watchdog is available in cores newer than 3.40.
6593 	 * In some cases, for example on bugged HW, this feature
6594 	 * has to be disabled; this can be done by passing the
6595 	 * riwt_off field from the platform.
6596 	 */
6597 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6598 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6599 		priv->use_riwt = 1;
6600 		dev_info(priv->device,
6601 			 "Enable RX Mitigation via HW Watchdog Timer\n");
6602 	}
6603 
6604 	return 0;
6605 }
6606 
6607 static void stmmac_napi_add(struct net_device *dev)
6608 {
6609 	struct stmmac_priv *priv = netdev_priv(dev);
6610 	u32 queue, maxq;
6611 
6612 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6613 
6614 	for (queue = 0; queue < maxq; queue++) {
6615 		struct stmmac_channel *ch = &priv->channel[queue];
6616 
6617 		ch->priv_data = priv;
6618 		ch->index = queue;
6619 		spin_lock_init(&ch->lock);
6620 
6621 		if (queue < priv->plat->rx_queues_to_use) {
6622 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6623 				       NAPI_POLL_WEIGHT);
6624 		}
6625 		if (queue < priv->plat->tx_queues_to_use) {
6626 			netif_tx_napi_add(dev, &ch->tx_napi,
6627 					  stmmac_napi_poll_tx,
6628 					  NAPI_POLL_WEIGHT);
6629 		}
6630 		if (queue < priv->plat->rx_queues_to_use &&
6631 		    queue < priv->plat->tx_queues_to_use) {
6632 			netif_napi_add(dev, &ch->rxtx_napi,
6633 				       stmmac_napi_poll_rxtx,
6634 				       NAPI_POLL_WEIGHT);
6635 		}
6636 	}
6637 }
6638 
6639 static void stmmac_napi_del(struct net_device *dev)
6640 {
6641 	struct stmmac_priv *priv = netdev_priv(dev);
6642 	u32 queue, maxq;
6643 
6644 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6645 
6646 	for (queue = 0; queue < maxq; queue++) {
6647 		struct stmmac_channel *ch = &priv->channel[queue];
6648 
6649 		if (queue < priv->plat->rx_queues_to_use)
6650 			netif_napi_del(&ch->rx_napi);
6651 		if (queue < priv->plat->tx_queues_to_use)
6652 			netif_napi_del(&ch->tx_napi);
6653 		if (queue < priv->plat->rx_queues_to_use &&
6654 		    queue < priv->plat->tx_queues_to_use) {
6655 			netif_napi_del(&ch->rxtx_napi);
6656 		}
6657 	}
6658 }
6659 
6660 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6661 {
6662 	struct stmmac_priv *priv = netdev_priv(dev);
6663 	int ret = 0;
6664 
6665 	if (netif_running(dev))
6666 		stmmac_release(dev);
6667 
6668 	stmmac_napi_del(dev);
6669 
6670 	priv->plat->rx_queues_to_use = rx_cnt;
6671 	priv->plat->tx_queues_to_use = tx_cnt;
6672 
6673 	stmmac_napi_add(dev);
6674 
6675 	if (netif_running(dev))
6676 		ret = stmmac_open(dev);
6677 
6678 	return ret;
6679 }
6680 
6681 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6682 {
6683 	struct stmmac_priv *priv = netdev_priv(dev);
6684 	int ret = 0;
6685 
6686 	if (netif_running(dev))
6687 		stmmac_release(dev);
6688 
6689 	priv->dma_rx_size = rx_size;
6690 	priv->dma_tx_size = tx_size;
6691 
6692 	if (netif_running(dev))
6693 		ret = stmmac_open(dev);
6694 
6695 	return ret;
6696 }
6697 
6698 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
6699 static void stmmac_fpe_lp_task(struct work_struct *work)
6700 {
6701 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6702 						fpe_task);
6703 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6704 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6705 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6706 	bool *hs_enable = &fpe_cfg->hs_enable;
6707 	bool *enable = &fpe_cfg->enable;
6708 	int retries = 20;
6709 
6710 	while (retries-- > 0) {
6711 		/* Bail out immediately if FPE handshake is OFF */
6712 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6713 			break;
6714 
6715 		if (*lo_state == FPE_STATE_ENTERING_ON &&
6716 		    *lp_state == FPE_STATE_ENTERING_ON) {
6717 			stmmac_fpe_configure(priv, priv->ioaddr,
6718 					     priv->plat->tx_queues_to_use,
6719 					     priv->plat->rx_queues_to_use,
6720 					     *enable);
6721 
6722 			netdev_info(priv->dev, "configured FPE\n");
6723 
6724 			*lo_state = FPE_STATE_ON;
6725 			*lp_state = FPE_STATE_ON;
6726 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
6727 			break;
6728 		}
6729 
6730 		if ((*lo_state == FPE_STATE_CAPABLE ||
6731 		     *lo_state == FPE_STATE_ENTERING_ON) &&
6732 		     *lp_state != FPE_STATE_ON) {
6733 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
6734 				    *lo_state, *lp_state);
6735 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6736 						MPACKET_VERIFY);
6737 		}
6738 		/* Sleep then retry */
6739 		msleep(500);
6740 	}
6741 
6742 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
6743 }
6744 
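/**
 * stmmac_fpe_handshake - start or stop the Frame Preemption handshake
 * @priv: driver private structure
 * @enable: true to start the verify handshake, false to abort it
 * Description: when enabling, sends a verify mPacket to the link partner;
 * when disabling, resets both local and link partner FPE states to OFF.
 */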
6745 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
6746 {
6747 	if (priv->plat->fpe_cfg->hs_enable != enable) {
6748 		if (enable) {
6749 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6750 						MPACKET_VERIFY);
6751 		} else {
6752 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
6753 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
6754 		}
6755 
6756 		priv->plat->fpe_cfg->hs_enable = enable;
6757 	}
6758 }
6759 
6760 /**
6761  * stmmac_dvr_probe
6762  * @device: device pointer
6763  * @plat_dat: platform data pointer
6764  * @res: stmmac resource pointer
6765  * Description: this is the main probe function used to
6766  * call alloc_etherdev and allocate the priv structure.
6767  * Return:
6768  * returns 0 on success, otherwise errno.
6769  */
6770 int stmmac_dvr_probe(struct device *device,
6771 		     struct plat_stmmacenet_data *plat_dat,
6772 		     struct stmmac_resources *res)
6773 {
6774 	struct net_device *ndev = NULL;
6775 	struct stmmac_priv *priv;
6776 	u32 rxq;
6777 	int i, ret = 0;
6778 
6779 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
6780 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
6781 	if (!ndev)
6782 		return -ENOMEM;
6783 
6784 	SET_NETDEV_DEV(ndev, device);
6785 
6786 	priv = netdev_priv(ndev);
6787 	priv->device = device;
6788 	priv->dev = ndev;
6789 
6790 	stmmac_set_ethtool_ops(ndev);
6791 	priv->pause = pause;
6792 	priv->plat = plat_dat;
6793 	priv->ioaddr = res->addr;
6794 	priv->dev->base_addr = (unsigned long)res->addr;
6795 	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
6796 
6797 	priv->dev->irq = res->irq;
6798 	priv->wol_irq = res->wol_irq;
6799 	priv->lpi_irq = res->lpi_irq;
6800 	priv->sfty_ce_irq = res->sfty_ce_irq;
6801 	priv->sfty_ue_irq = res->sfty_ue_irq;
6802 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
6803 		priv->rx_irq[i] = res->rx_irq[i];
6804 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
6805 		priv->tx_irq[i] = res->tx_irq[i];
6806 
6807 	if (!is_zero_ether_addr(res->mac))
6808 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
6809 
6810 	dev_set_drvdata(device, priv->dev);
6811 
6812 	/* Verify driver arguments */
6813 	stmmac_verify_args();
6814 
6815 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
6816 	if (!priv->af_xdp_zc_qps)
6817 		return -ENOMEM;
6818 
6819 	/* Allocate workqueue */
6820 	priv->wq = create_singlethread_workqueue("stmmac_wq");
6821 	if (!priv->wq) {
6822 		dev_err(priv->device, "failed to create workqueue\n");
6823 		return -ENOMEM;
6824 	}
6825 
6826 	INIT_WORK(&priv->service_task, stmmac_service_task);
6827 
6828 	/* Initialize Link Partner FPE workqueue */
6829 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
6830 
6831 	/* Override with kernel parameters if supplied. XXX CRS XXX:
6832 	 * this needs to have multiple instances.
6833 	 */
6834 	if ((phyaddr >= 0) && (phyaddr <= 31))
6835 		priv->plat->phy_addr = phyaddr;
6836 
6837 	if (priv->plat->stmmac_rst) {
6838 		ret = reset_control_assert(priv->plat->stmmac_rst);
6839 		reset_control_deassert(priv->plat->stmmac_rst);
6840 		/* Some reset controllers have only a reset callback instead of
6841 		 * an assert + deassert callback pair.
6842 		 */
6843 		if (ret == -ENOTSUPP)
6844 			reset_control_reset(priv->plat->stmmac_rst);
6845 	}
6846 
6847 	/* Init MAC and get the capabilities */
6848 	ret = stmmac_hw_init(priv);
6849 	if (ret)
6850 		goto error_hw_init;
6851 
6852 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
6853 	 */
6854 	if (priv->synopsys_id < DWMAC_CORE_5_20)
6855 		priv->plat->dma_cfg->dche = false;
6856 
6857 	stmmac_check_ether_addr(priv);
6858 
6859 	ndev->netdev_ops = &stmmac_netdev_ops;
6860 
6861 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6862 			    NETIF_F_RXCSUM;
6863 
6864 	ret = stmmac_tc_init(priv, priv);
6865 	if (!ret) {
6866 		ndev->hw_features |= NETIF_F_HW_TC;
6867 	}
6868 
6869 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
6870 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
6871 		if (priv->plat->has_gmac4)
6872 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
6873 		priv->tso = true;
6874 		dev_info(priv->device, "TSO feature enabled\n");
6875 	}
6876 
6877 	if (priv->dma_cap.sphen) {
6878 		ndev->hw_features |= NETIF_F_GRO;
6879 		priv->sph_cap = true;
6880 		priv->sph = priv->sph_cap;
6881 		dev_info(priv->device, "SPH feature enabled\n");
6882 	}
6883 
6884 	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
6885 	 * 32/40/64 bit widths, but some SoCs differ: e.g. the i.MX8MP supports
6886 	 * 34 bits, which maps to the 40 bit width in MAC_HW_Feature1[ADDR64].
6887 	 * So overwrite dma_cap.addr64 according to the real HW design.
6888 	 */
6889 	if (priv->plat->addr64)
6890 		priv->dma_cap.addr64 = priv->plat->addr64;
6891 
6892 	if (priv->dma_cap.addr64) {
6893 		ret = dma_set_mask_and_coherent(device,
6894 				DMA_BIT_MASK(priv->dma_cap.addr64));
6895 		if (!ret) {
6896 			dev_info(priv->device, "Using %d bits DMA width\n",
6897 				 priv->dma_cap.addr64);
6898 
6899 			/*
6900 			 * If more than 32 bits can be addressed, make sure to
6901 			 * enable enhanced addressing mode.
6902 			 */
6903 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
6904 				priv->plat->dma_cfg->eame = true;
6905 		} else {
6906 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
6907 			if (ret) {
6908 				dev_err(priv->device, "Failed to set DMA Mask\n");
6909 				goto error_hw_init;
6910 			}
6911 
6912 			priv->dma_cap.addr64 = 32;
6913 		}
6914 	}
6915 
6916 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
6917 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
6918 #ifdef STMMAC_VLAN_TAG_USED
6919 	/* Both mac100 and gmac support receive VLAN tag detection */
6920 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
6921 	if (priv->dma_cap.vlhash) {
6922 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6923 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
6924 	}
6925 	if (priv->dma_cap.vlins) {
6926 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
6927 		if (priv->dma_cap.dvlan)
6928 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
6929 	}
6930 #endif
6931 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
6932 
6933 	/* Initialize RSS */
6934 	rxq = priv->plat->rx_queues_to_use;
6935 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
6936 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
6937 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
6938 
6939 	if (priv->dma_cap.rssen && priv->plat->rss_en)
6940 		ndev->features |= NETIF_F_RXHASH;
6941 
6942 	/* MTU range: 46 - hw-specific max */
6943 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
6944 	if (priv->plat->has_xgmac)
6945 		ndev->max_mtu = XGMAC_JUMBO_LEN;
6946 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
6947 		ndev->max_mtu = JUMBO_LEN;
6948 	else
6949 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
6950 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
6951 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
6952 	 */
6953 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
6954 	    (priv->plat->maxmtu >= ndev->min_mtu))
6955 		ndev->max_mtu = priv->plat->maxmtu;
6956 	else if (priv->plat->maxmtu < ndev->min_mtu)
6957 		dev_warn(priv->device,
6958 			 "%s: warning: maxmtu having invalid value (%d)\n",
6959 			 __func__, priv->plat->maxmtu);
6960 
6961 	if (flow_ctrl)
6962 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
6963 
6964 	/* Setup channels NAPI */
6965 	stmmac_napi_add(ndev);
6966 
6967 	mutex_init(&priv->lock);
6968 
6969 	/* If a specific clk_csr value is passed from the platform,
6970 	 * the CSR Clock Range selection cannot be changed at run-time
6971 	 * and is fixed. Otherwise, the driver will try to set the MDC
6972 	 * clock dynamically according to the actual csr clock input.
6974 	 */
6975 	if (priv->plat->clk_csr >= 0)
6976 		priv->clk_csr = priv->plat->clk_csr;
6977 	else
6978 		stmmac_clk_csr_set(priv);
6979 
6980 	stmmac_check_pcs_mode(priv);
6981 
6982 	pm_runtime_get_noresume(device);
6983 	pm_runtime_set_active(device);
6984 	pm_runtime_enable(device);
6985 
6986 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
6987 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
6988 		/* MDIO bus Registration */
6989 		ret = stmmac_mdio_register(ndev);
6990 		if (ret < 0) {
6991 			dev_err(priv->device,
6992 				"%s: MDIO bus (id: %d) registration failed",
6993 				__func__, priv->plat->bus_id);
6994 			goto error_mdio_register;
6995 		}
6996 	}
6997 
6998 	ret = stmmac_phy_setup(priv);
6999 	if (ret) {
7000 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7001 		goto error_phy_setup;
7002 	}
7003 
7004 	ret = register_netdev(ndev);
7005 	if (ret) {
7006 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7007 			__func__, ret);
7008 		goto error_netdev_register;
7009 	}
7010 
7011 	if (priv->plat->serdes_powerup) {
7012 		ret = priv->plat->serdes_powerup(ndev,
7013 						 priv->plat->bsp_priv);
7014 
7015 		if (ret < 0)
7016 			goto error_serdes_powerup;
7017 	}
7018 
7019 #ifdef CONFIG_DEBUG_FS
7020 	stmmac_init_fs(ndev);
7021 #endif
7022 
7023 	/* Let pm_runtime_put() disable the clocks.
7024 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7025 	 */
7026 	pm_runtime_put(device);
7027 
7028 	return ret;
7029 
7030 error_serdes_powerup:
7031 	unregister_netdev(ndev);
7032 error_netdev_register:
7033 	phylink_destroy(priv->phylink);
7034 error_phy_setup:
7035 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7036 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7037 		stmmac_mdio_unregister(ndev);
7038 error_mdio_register:
7039 	stmmac_napi_del(ndev);
7040 error_hw_init:
7041 	destroy_workqueue(priv->wq);
7042 	stmmac_bus_clks_config(priv, false);
7043 	bitmap_free(priv->af_xdp_zc_qps);
7044 
7045 	return ret;
7046 }
7047 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7048 
7049 /**
7050  * stmmac_dvr_remove
7051  * @dev: device pointer
7052  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7053  * changes the link status and releases the DMA descriptor rings.
7054  */
7055 int stmmac_dvr_remove(struct device *dev)
7056 {
7057 	struct net_device *ndev = dev_get_drvdata(dev);
7058 	struct stmmac_priv *priv = netdev_priv(ndev);
7059 
7060 	netdev_info(priv->dev, "%s: removing driver", __func__);
7061 
7062 	stmmac_stop_all_dma(priv);
7063 	stmmac_mac_set(priv, priv->ioaddr, false);
7064 	netif_carrier_off(ndev);
7065 	unregister_netdev(ndev);
7066 
7067 	/* Serdes power down needs to happen after the VLAN filter is
7068 	 * deleted, which is triggered by unregister_netdev().
7069 	 */
7070 	if (priv->plat->serdes_powerdown)
7071 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7072 
7073 #ifdef CONFIG_DEBUG_FS
7074 	stmmac_exit_fs(ndev);
7075 #endif
7076 	phylink_destroy(priv->phylink);
7077 	if (priv->plat->stmmac_rst)
7078 		reset_control_assert(priv->plat->stmmac_rst);
7079 	pm_runtime_put(dev);
7080 	pm_runtime_disable(dev);
7081 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7082 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7083 		stmmac_mdio_unregister(ndev);
7084 	destroy_workqueue(priv->wq);
7085 	mutex_destroy(&priv->lock);
7086 	bitmap_free(priv->af_xdp_zc_qps);
7087 
7088 	return 0;
7089 }
7090 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
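/* As with stmmac_dvr_probe(), this is normally reached through the glue
 * driver's remove callback (e.g. stmmac_pltfr_remove() passing &pdev->dev).
 */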
7091 
7092 /**
7093  * stmmac_suspend - suspend callback
7094  * @dev: device pointer
7095  * Description: this is the function to suspend the device; it is called
7096  * by the platform driver to stop the network queues, program the PMT
7097  * register (for WoL) and clean up/release the driver resources.
7098  */
7099 int stmmac_suspend(struct device *dev)
7100 {
7101 	struct net_device *ndev = dev_get_drvdata(dev);
7102 	struct stmmac_priv *priv = netdev_priv(ndev);
7103 	u32 chan;
7104 	int ret;
7105 
7106 	if (!ndev || !netif_running(ndev))
7107 		return 0;
7108 
7109 	phylink_mac_change(priv->phylink, false);
7110 
7111 	mutex_lock(&priv->lock);
7112 
7113 	netif_device_detach(ndev);
7114 
7115 	stmmac_disable_all_queues(priv);
7116 
7117 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7118 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
7119 
7120 	if (priv->eee_enabled) {
7121 		priv->tx_path_in_lpi_mode = false;
7122 		del_timer_sync(&priv->eee_ctrl_timer);
7123 	}
7124 
7125 	/* Stop TX/RX DMA */
7126 	stmmac_stop_all_dma(priv);
7127 
7128 	if (priv->plat->serdes_powerdown)
7129 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7130 
7131 	/* Enable Power down mode by programming the PMT regs */
7132 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7133 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7134 		priv->irq_wake = 1;
7135 	} else {
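		/* No HW wake-up in use: stop the PHY/link and the MAC, and
		 * let runtime PM gate the clocks.
		 */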
7136 		mutex_unlock(&priv->lock);
7137 		rtnl_lock();
7138 		if (device_may_wakeup(priv->device))
7139 			phylink_speed_down(priv->phylink, false);
7140 		phylink_stop(priv->phylink);
7141 		rtnl_unlock();
7142 		mutex_lock(&priv->lock);
7143 
7144 		stmmac_mac_set(priv, priv->ioaddr, false);
7145 		pinctrl_pm_select_sleep_state(priv->device);
7146 		/* Disable the PTP ref clock (re-enabled on resume) */
7147 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
7148 		ret = pm_runtime_force_suspend(dev);
7149 		if (ret) {
7150 			mutex_unlock(&priv->lock);
7151 			return ret;
7152 		}
7153 	}
7154 
7155 	mutex_unlock(&priv->lock);
7156 
7157 	if (priv->dma_cap.fpesel) {
7158 		/* Disable FPE */
7159 		stmmac_fpe_configure(priv, priv->ioaddr,
7160 				     priv->plat->tx_queues_to_use,
7161 				     priv->plat->rx_queues_to_use, false);
7162 
7163 		stmmac_fpe_handshake(priv, false);
7164 	}
7165 
7166 	priv->speed = SPEED_UNKNOWN;
7167 	return 0;
7168 }
7169 EXPORT_SYMBOL_GPL(stmmac_suspend);
7170 
7171 /**
7172  * stmmac_reset_queues_param - reset queue parameters
7173  * @priv: device pointer
7174  */
7175 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7176 {
7177 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7178 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7179 	u32 queue;
7180 
7181 	for (queue = 0; queue < rx_cnt; queue++) {
7182 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
7183 
7184 		rx_q->cur_rx = 0;
7185 		rx_q->dirty_rx = 0;
7186 	}
7187 
7188 	for (queue = 0; queue < tx_cnt; queue++) {
7189 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7190 
7191 		tx_q->cur_tx = 0;
7192 		tx_q->dirty_tx = 0;
7193 		tx_q->mss = 0;
7194 
7195 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7196 	}
7197 }
7198 
7199 /**
7200  * stmmac_resume - resume callback
7201  * @dev: device pointer
7202  * Description: on resume this function is invoked to set up the DMA and CORE
7203  * in a usable state.
7204  */
7205 int stmmac_resume(struct device *dev)
7206 {
7207 	struct net_device *ndev = dev_get_drvdata(dev);
7208 	struct stmmac_priv *priv = netdev_priv(ndev);
7209 	int ret;
7210 
7211 	if (!netif_running(ndev))
7212 		return 0;
7213 
7214 	/* The Power Down bit in the PMT register is cleared
7215 	 * automatically as soon as a magic packet or a Wake-up frame
7216 	 * is received. Anyway, it's better to clear this bit
7217 	 * manually because it can cause problems while resuming
7218 	 * from other devices (e.g. serial console).
7219 	 */
7220 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7221 		mutex_lock(&priv->lock);
7222 		stmmac_pmt(priv, priv->hw, 0);
7223 		mutex_unlock(&priv->lock);
7224 		priv->irq_wake = 0;
7225 	} else {
7226 		pinctrl_pm_select_default_state(priv->device);
7227 		/* enable the clk previously disabled */
7228 		ret = pm_runtime_force_resume(dev);
7229 		if (ret)
7230 			return ret;
7231 		if (priv->plat->clk_ptp_ref)
7232 			clk_prepare_enable(priv->plat->clk_ptp_ref);
7233 		/* reset the phy so that it's ready */
7234 		if (priv->mii)
7235 			stmmac_mdio_reset(priv->mii);
7236 	}
7237 
7238 	if (priv->plat->serdes_powerup) {
7239 		ret = priv->plat->serdes_powerup(ndev,
7240 						 priv->plat->bsp_priv);
7241 
7242 		if (ret < 0)
7243 			return ret;
7244 	}
7245 
7246 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
7247 		rtnl_lock();
7248 		phylink_start(priv->phylink);
7249 		/* We may have called phylink_speed_down before */
7250 		phylink_speed_up(priv->phylink);
7251 		rtnl_unlock();
7252 	}
7253 
7254 	rtnl_lock();
7255 	mutex_lock(&priv->lock);
7256 
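	/* DMA and descriptor state does not survive suspend: reset the SW
	 * queue indices, drop stale TX buffers and rebuild the descriptor
	 * rings before re-running the HW setup below.
	 */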
7257 	stmmac_reset_queues_param(priv);
7258 
7259 	stmmac_free_tx_skbufs(priv);
7260 	stmmac_clear_descriptors(priv);
7261 
7262 	stmmac_hw_setup(ndev, false);
7263 	stmmac_init_coalesce(priv);
7264 	stmmac_set_rx_mode(ndev);
7265 
7266 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7267 
7268 	stmmac_enable_all_queues(priv);
7269 
7270 	mutex_unlock(&priv->lock);
7271 	rtnl_unlock();
7272 
7273 	phylink_mac_change(priv->phylink, true);
7274 
7275 	netif_device_attach(ndev);
7276 
7277 	return 0;
7278 }
7279 EXPORT_SYMBOL_GPL(stmmac_resume);
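/* A glue driver typically exposes these through dev_pm_ops, for example
 * (a sketch; the real definitions live in stmmac_platform.c/stmmac_pci.c):
 *
 *	SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
 *			  stmmac_pltfr_suspend, stmmac_pltfr_resume);
 *
 * where the suspend/resume wrappers end up calling stmmac_suspend() and
 * stmmac_resume() with the glue device's struct device.
 */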
7280 
7281 #ifndef MODULE
7282 static int __init stmmac_cmdline_opt(char *str)
7283 {
7284 	char *opt;
7285 
7286 	if (!str || !*str)
7287 		return -EINVAL;
7288 	while ((opt = strsep(&str, ",")) != NULL) {
7289 		if (!strncmp(opt, "debug:", 6)) {
7290 			if (kstrtoint(opt + 6, 0, &debug))
7291 				goto err;
7292 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7293 			if (kstrtoint(opt + 8, 0, &phyaddr))
7294 				goto err;
7295 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7296 			if (kstrtoint(opt + 7, 0, &buf_sz))
7297 				goto err;
7298 		} else if (!strncmp(opt, "tc:", 3)) {
7299 			if (kstrtoint(opt + 3, 0, &tc))
7300 				goto err;
7301 		} else if (!strncmp(opt, "watchdog:", 9)) {
7302 			if (kstrtoint(opt + 9, 0, &watchdog))
7303 				goto err;
7304 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7305 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7306 				goto err;
7307 		} else if (!strncmp(opt, "pause:", 6)) {
7308 			if (kstrtoint(opt + 6, 0, &pause))
7309 				goto err;
7310 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7311 			if (kstrtoint(opt + 10, 0, &eee_timer))
7312 				goto err;
7313 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7314 			if (kstrtoint(opt + 11, 0, &chain_mode))
7315 				goto err;
7316 		}
7317 	}
7318 	return 0;
7319 
7320 err:
7321 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7322 	return -EINVAL;
7323 }
7324 
7325 __setup("stmmaceth=", stmmac_cmdline_opt);
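/* Built-in (non-module) example using the keys parsed above; the values
 * are illustrative only:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,buf_sz:2048
 */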
7326 #endif /* MODULE */
7327 
7328 static int __init stmmac_init(void)
7329 {
7330 #ifdef CONFIG_DEBUG_FS
7331 	/* Create debugfs main directory if it doesn't exist yet */
7332 	if (!stmmac_fs_dir)
7333 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7334 	register_netdevice_notifier(&stmmac_notifier);
7335 #endif
7336 
7337 	return 0;
7338 }
7339 
7340 static void __exit stmmac_exit(void)
7341 {
7342 #ifdef CONFIG_DEBUG_FS
7343 	unregister_netdevice_notifier(&stmmac_notifier);
7344 	debugfs_remove_recursive(stmmac_fs_dir);
7345 #endif
7346 }
7347 
7348 module_init(stmmac_init)
7349 module_exit(stmmac_exit)
7350 
7351 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7352 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7353 MODULE_LICENSE("GPL");
7354