1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52 
53 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
54 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
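/* For example, with SMP_CACHE_BYTES == 64 (a common x86/arm64 value),
 * STMMAC_ALIGN(1522) == ALIGN(ALIGN(1522, 64), 16) == 1536: sizes are rounded
 * up to a cache-line multiple that is also at least 16-byte aligned for DMA.
 */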
55 
56 /* Module parameters */
57 #define TX_TIMEO	5000
58 static int watchdog = TX_TIMEO;
59 module_param(watchdog, int, 0644);
60 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
61 
62 static int debug = -1;
63 module_param(debug, int, 0644);
64 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
65 
66 static int phyaddr = -1;
67 module_param(phyaddr, int, 0444);
68 MODULE_PARM_DESC(phyaddr, "Physical device address");
69 
70 #define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
71 #define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
72 
73 /* Limit to make sure XDP TX and slow path can coexist */
74 #define STMMAC_XSK_TX_BUDGET_MAX	256
75 #define STMMAC_TX_XSK_AVAIL		16
76 #define STMMAC_RX_FILL_BATCH		16
77 
78 #define STMMAC_XDP_PASS		0
79 #define STMMAC_XDP_CONSUMED	BIT(0)
80 #define STMMAC_XDP_TX		BIT(1)
81 #define STMMAC_XDP_REDIRECT	BIT(2)
82 
83 static int flow_ctrl = FLOW_AUTO;
84 module_param(flow_ctrl, int, 0644);
85 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
86 
87 static int pause = PAUSE_TIME;
88 module_param(pause, int, 0644);
89 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
90 
91 #define TC_DEFAULT 64
92 static int tc = TC_DEFAULT;
93 module_param(tc, int, 0644);
94 MODULE_PARM_DESC(tc, "DMA threshold control value");
95 
96 #define	DEFAULT_BUFSIZE	1536
97 static int buf_sz = DEFAULT_BUFSIZE;
98 module_param(buf_sz, int, 0644);
99 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
100 
101 #define	STMMAC_RX_COPYBREAK	256
102 
103 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
104 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
105 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
106 
107 #define STMMAC_DEFAULT_LPI_TIMER	1000
108 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
109 module_param(eee_timer, int, 0644);
110 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
111 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
112 
113 /* By default the driver uses the ring mode to manage tx and rx descriptors,
114  * but the user can force the use of the chain mode instead of the ring.
115  */
116 static unsigned int chain_mode;
117 module_param(chain_mode, int, 0444);
118 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
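/* Usage sketch (assuming the core is built as the "stmmac" module): the
 * parameters above can be overridden at load time, e.g.
 *	modprobe stmmac chain_mode=1 buf_sz=4096
 * or via stmmac.chain_mode=1 on the kernel command line when built in.
 * Several of these are sanity-checked by stmmac_verify_args() below.
 */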
119 
120 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
121 /* For MSI interrupts handling */
122 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
123 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
124 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
125 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
126 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
127 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
128 
129 #ifdef CONFIG_DEBUG_FS
130 static const struct net_device_ops stmmac_netdev_ops;
131 static void stmmac_init_fs(struct net_device *dev);
132 static void stmmac_exit_fs(struct net_device *dev);
133 #endif
134 
135 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
136 
137 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
138 {
139 	int ret = 0;
140 
141 	if (enabled) {
142 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
143 		if (ret)
144 			return ret;
145 		ret = clk_prepare_enable(priv->plat->pclk);
146 		if (ret) {
147 			clk_disable_unprepare(priv->plat->stmmac_clk);
148 			return ret;
149 		}
150 		if (priv->plat->clks_config) {
151 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
152 			if (ret) {
153 				clk_disable_unprepare(priv->plat->stmmac_clk);
154 				clk_disable_unprepare(priv->plat->pclk);
155 				return ret;
156 			}
157 		}
158 	} else {
159 		clk_disable_unprepare(priv->plat->stmmac_clk);
160 		clk_disable_unprepare(priv->plat->pclk);
161 		if (priv->plat->clks_config)
162 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 	}
164 
165 	return ret;
166 }
167 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
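/* Calling convention sketch (an assumption based on the enable/disable
 * symmetry above, not a requirement stated elsewhere in this file): bring the
 * bus clocks up before touching MAC registers and drop them once the hardware
 * is quiescent, e.g.
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	// ... access priv->ioaddr registers ...
 *	stmmac_bus_clks_config(priv, false);
 */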
168 
169 /**
170  * stmmac_verify_args - verify the driver parameters.
171  * Description: it checks the driver parameters and sets a default in case of
172  * errors.
173  */
174 static void stmmac_verify_args(void)
175 {
176 	if (unlikely(watchdog < 0))
177 		watchdog = TX_TIMEO;
178 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
179 		buf_sz = DEFAULT_BUFSIZE;
180 	if (unlikely(flow_ctrl > 1))
181 		flow_ctrl = FLOW_AUTO;
182 	else if (likely(flow_ctrl < 0))
183 		flow_ctrl = FLOW_OFF;
184 	if (unlikely((pause < 0) || (pause > 0xffff)))
185 		pause = PAUSE_TIME;
186 	if (eee_timer < 0)
187 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
188 }
189 
190 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
193 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
194 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
195 	u32 queue;
196 
197 	for (queue = 0; queue < maxq; queue++) {
198 		struct stmmac_channel *ch = &priv->channel[queue];
199 
200 		if (stmmac_xdp_is_enabled(priv) &&
201 		    test_bit(queue, priv->af_xdp_zc_qps)) {
202 			napi_disable(&ch->rxtx_napi);
203 			continue;
204 		}
205 
206 		if (queue < rx_queues_cnt)
207 			napi_disable(&ch->rx_napi);
208 		if (queue < tx_queues_cnt)
209 			napi_disable(&ch->tx_napi);
210 	}
211 }
212 
213 /**
214  * stmmac_disable_all_queues - Disable all queues
215  * @priv: driver private structure
216  */
217 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
218 {
219 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
220 	struct stmmac_rx_queue *rx_q;
221 	u32 queue;
222 
223 	/* synchronize_rcu() needed for pending XDP buffers to drain */
224 	for (queue = 0; queue < rx_queues_cnt; queue++) {
225 		rx_q = &priv->rx_queue[queue];
226 		if (rx_q->xsk_pool) {
227 			synchronize_rcu();
228 			break;
229 		}
230 	}
231 
232 	__stmmac_disable_all_queues(priv);
233 }
234 
235 /**
236  * stmmac_enable_all_queues - Enable all queues
237  * @priv: driver private structure
238  */
239 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
240 {
241 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
242 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
243 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
244 	u32 queue;
245 
246 	for (queue = 0; queue < maxq; queue++) {
247 		struct stmmac_channel *ch = &priv->channel[queue];
248 
249 		if (stmmac_xdp_is_enabled(priv) &&
250 		    test_bit(queue, priv->af_xdp_zc_qps)) {
251 			napi_enable(&ch->rxtx_napi);
252 			continue;
253 		}
254 
255 		if (queue < rx_queues_cnt)
256 			napi_enable(&ch->rx_napi);
257 		if (queue < tx_queues_cnt)
258 			napi_enable(&ch->tx_napi);
259 	}
260 }
261 
262 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
263 {
264 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
265 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
266 		queue_work(priv->wq, &priv->service_task);
267 }
268 
269 static void stmmac_global_err(struct stmmac_priv *priv)
270 {
271 	netif_carrier_off(priv->dev);
272 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
273 	stmmac_service_event_schedule(priv);
274 }
275 
276 /**
277  * stmmac_clk_csr_set - dynamically set the MDC clock
278  * @priv: driver private structure
279  * Description: this is to dynamically set the MDC clock according to the csr
280  * clock input.
281  * Note:
282  *	If a specific clk_csr value is passed from the platform
283  *	this means that the CSR Clock Range selection cannot be
284  *	changed at run-time and it is fixed (as reported in the driver
285  *	documentation). Otherwise the driver will try to set the MDC
286  *	clock dynamically according to the actual clock input.
287  */
288 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
289 {
290 	u32 clk_rate;
291 
292 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
293 
294 	/* The platform-provided default clk_csr is assumed valid for
295 	 * all other cases except the ones handled below.
296 	 * For values higher than the IEEE 802.3 specified frequency
297 	 * we cannot estimate the proper divider, as the frequency of
298 	 * clk_csr_i is not known. So we do not change the default
299 	 * divider.
300 	 */
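	/* Worked example (divider values per the Synopsys databook, so treat
	 * them as informative only): a 75 MHz csr clock falls in the
	 * 60-100 MHz range, so STMMAC_CSR_60_100M yields MDC ~= 75 MHz / 42
	 * ~= 1.8 MHz, below the 2.5 MHz Clause 22 MDIO limit.
	 */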
301 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
302 		if (clk_rate < CSR_F_35M)
303 			priv->clk_csr = STMMAC_CSR_20_35M;
304 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
305 			priv->clk_csr = STMMAC_CSR_35_60M;
306 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
307 			priv->clk_csr = STMMAC_CSR_60_100M;
308 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
309 			priv->clk_csr = STMMAC_CSR_100_150M;
310 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
311 			priv->clk_csr = STMMAC_CSR_150_250M;
312 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
313 			priv->clk_csr = STMMAC_CSR_250_300M;
314 	}
315 
316 	if (priv->plat->has_sun8i) {
317 		if (clk_rate > 160000000)
318 			priv->clk_csr = 0x03;
319 		else if (clk_rate > 80000000)
320 			priv->clk_csr = 0x02;
321 		else if (clk_rate > 40000000)
322 			priv->clk_csr = 0x01;
323 		else
324 			priv->clk_csr = 0;
325 	}
326 
327 	if (priv->plat->has_xgmac) {
328 		if (clk_rate > 400000000)
329 			priv->clk_csr = 0x5;
330 		else if (clk_rate > 350000000)
331 			priv->clk_csr = 0x4;
332 		else if (clk_rate > 300000000)
333 			priv->clk_csr = 0x3;
334 		else if (clk_rate > 250000000)
335 			priv->clk_csr = 0x2;
336 		else if (clk_rate > 150000000)
337 			priv->clk_csr = 0x1;
338 		else
339 			priv->clk_csr = 0x0;
340 	}
341 }
342 
343 static void print_pkt(unsigned char *buf, int len)
344 {
345 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
346 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
347 }
348 
349 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
350 {
351 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
352 	u32 avail;
353 
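	/* Count free descriptors, keeping one slot unused so that a full ring
	 * can be distinguished from an empty one. E.g. with dma_tx_size = 512,
	 * cur_tx = 507 and dirty_tx = 2: 512 - 507 + 2 - 1 = 6 free entries.
	 */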
354 	if (tx_q->dirty_tx > tx_q->cur_tx)
355 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
356 	else
357 		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
358 
359 	return avail;
360 }
361 
362 /**
363  * stmmac_rx_dirty - Get RX queue dirty
364  * @priv: driver private structure
365  * @queue: RX queue index
366  */
367 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
368 {
369 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
370 	u32 dirty;
371 
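	/* Descriptors consumed by the hardware that still await a refill,
	 * e.g. cur_rx = 20 and dirty_rx = 12 gives 8 dirty entries.
	 */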
372 	if (rx_q->dirty_rx <= rx_q->cur_rx)
373 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
374 	else
375 		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
376 
377 	return dirty;
378 }
379 
380 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
381 {
382 	int tx_lpi_timer;
383 
384 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
385 	priv->eee_sw_timer_en = en ? 0 : 1;
386 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
387 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
388 }
389 
390 /**
391  * stmmac_enable_eee_mode - check and enter LPI mode
392  * @priv: driver private structure
393  * Description: this function checks that all TX queues are idle and, if so,
394  * enters LPI mode when EEE is enabled.
395  */
396 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
397 {
398 	u32 tx_cnt = priv->plat->tx_queues_to_use;
399 	u32 queue;
400 
401 	/* check if all TX queues have the work finished */
402 	for (queue = 0; queue < tx_cnt; queue++) {
403 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
404 
405 		if (tx_q->dirty_tx != tx_q->cur_tx)
406 			return; /* still unfinished work */
407 	}
408 
409 	/* Check and enter in LPI mode */
410 	if (!priv->tx_path_in_lpi_mode)
411 		stmmac_set_eee_mode(priv, priv->hw,
412 				priv->plat->en_tx_lpi_clockgating);
413 }
414 
415 /**
416  * stmmac_disable_eee_mode - disable and exit from LPI mode
417  * @priv: driver private structure
418  * Description: this function exits and disables EEE when the LPI state
419  * is active. It is called from the xmit path.
420  */
421 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
422 {
423 	if (!priv->eee_sw_timer_en) {
424 		stmmac_lpi_entry_timer_config(priv, 0);
425 		return;
426 	}
427 
428 	stmmac_reset_eee_mode(priv, priv->hw);
429 	del_timer_sync(&priv->eee_ctrl_timer);
430 	priv->tx_path_in_lpi_mode = false;
431 }
432 
433 /**
434  * stmmac_eee_ctrl_timer - EEE TX SW timer.
435  * @t:  timer_list struct containing private info
436  * Description:
437  *  if there is no data transfer and if we are not in LPI state,
438  *  then MAC Transmitter can be moved to LPI state.
439  */
440 static void stmmac_eee_ctrl_timer(struct timer_list *t)
441 {
442 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
443 
444 	stmmac_enable_eee_mode(priv);
445 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
446 }
447 
448 /**
449  * stmmac_eee_init - init EEE
450  * @priv: driver private structure
451  * Description:
452  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
453  *  can also manage EEE, this function enables the LPI state and starts the
454  *  related timer.
455  */
456 bool stmmac_eee_init(struct stmmac_priv *priv)
457 {
458 	int eee_tw_timer = priv->eee_tw_timer;
459 
460 	/* Using PCS we cannot deal with the PHY registers at this stage,
461 	 * so we do not support extra features like EEE.
462 	 */
463 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
464 	    priv->hw->pcs == STMMAC_PCS_RTBI)
465 		return false;
466 
467 	/* Check if MAC core supports the EEE feature. */
468 	if (!priv->dma_cap.eee)
469 		return false;
470 
471 	mutex_lock(&priv->lock);
472 
473 	/* Check if it needs to be deactivated */
474 	if (!priv->eee_active) {
475 		if (priv->eee_enabled) {
476 			netdev_dbg(priv->dev, "disable EEE\n");
477 			stmmac_lpi_entry_timer_config(priv, 0);
478 			del_timer_sync(&priv->eee_ctrl_timer);
479 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
480 			if (priv->hw->xpcs)
481 				xpcs_config_eee(priv->hw->xpcs,
482 						priv->plat->mult_fact_100ns,
483 						false);
484 		}
485 		mutex_unlock(&priv->lock);
486 		return false;
487 	}
488 
489 	if (priv->eee_active && !priv->eee_enabled) {
490 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
491 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
492 				     eee_tw_timer);
493 		if (priv->hw->xpcs)
494 			xpcs_config_eee(priv->hw->xpcs,
495 					priv->plat->mult_fact_100ns,
496 					true);
497 	}
498 
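	/* Prefer hardware-timed LPI entry when the core supports it (GMAC4
	 * with a tx_lpi_timer small enough for the HW entry-timer field);
	 * otherwise fall back to the software eee_ctrl_timer armed below.
	 */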
499 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
500 		del_timer_sync(&priv->eee_ctrl_timer);
501 		priv->tx_path_in_lpi_mode = false;
502 		stmmac_lpi_entry_timer_config(priv, 1);
503 	} else {
504 		stmmac_lpi_entry_timer_config(priv, 0);
505 		mod_timer(&priv->eee_ctrl_timer,
506 			  STMMAC_LPI_T(priv->tx_lpi_timer));
507 	}
508 
509 	mutex_unlock(&priv->lock);
510 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
511 	return true;
512 }
513 
514 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
515  * @priv: driver private structure
516  * @p : descriptor pointer
517  * @skb : the socket buffer
518  * Description :
519  * This function reads the timestamp from the descriptor, performs some
520  * sanity checks and passes it to the stack.
521  */
522 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
523 				   struct dma_desc *p, struct sk_buff *skb)
524 {
525 	struct skb_shared_hwtstamps shhwtstamp;
526 	bool found = false;
527 	s64 adjust = 0;
528 	u64 ns = 0;
529 
530 	if (!priv->hwts_tx_en)
531 		return;
532 
533 	/* exit if skb doesn't support hw tstamp */
534 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
535 		return;
536 
537 	/* check tx tstamp status */
538 	if (stmmac_get_tx_timestamp_status(priv, p)) {
539 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
540 		found = true;
541 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
542 		found = true;
543 	}
544 
545 	if (found) {
546 		/* Correct the clk domain crossing(CDC) error */
547 		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
548 			adjust += -(2 * (NSEC_PER_SEC /
549 					 priv->plat->clk_ptp_rate));
550 			ns += adjust;
551 		}
552 
553 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
554 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
555 
556 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
557 		/* pass tstamp to stack */
558 		skb_tstamp_tx(skb, &shhwtstamp);
559 	}
560 }
561 
562 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
563  * @priv: driver private structure
564  * @p : descriptor pointer
565  * @np : next descriptor pointer
566  * @skb : the socket buffer
567  * Description :
568  * This function reads the received packet's timestamp from the descriptor,
569  * passes it to the stack and also performs some sanity checks.
570  */
571 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
572 				   struct dma_desc *np, struct sk_buff *skb)
573 {
574 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
575 	struct dma_desc *desc = p;
576 	u64 adjust = 0;
577 	u64 ns = 0;
578 
579 	if (!priv->hwts_rx_en)
580 		return;
581 	/* For GMAC4, the valid timestamp is from CTX next desc. */
582 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
583 		desc = np;
584 
585 	/* Check if timestamp is available */
586 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
587 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
588 
589 		/* Correct the clk domain crossing(CDC) error */
590 		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
591 			adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
592 			ns -= adjust;
593 		}
594 
595 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
596 		shhwtstamp = skb_hwtstamps(skb);
597 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
598 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
599 	} else  {
600 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
601 	}
602 }
603 
604 /**
605  *  stmmac_hwtstamp_set - control hardware timestamping.
606  *  @dev: device pointer.
607  *  @ifr: An IOCTL-specific structure that can contain a pointer to
608  *  a proprietary structure used to pass information to the driver.
609  *  Description:
610  *  This function configures the MAC to enable/disable both outgoing(TX)
611  *  and incoming(RX) packets time stamping based on user input.
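 *  Example (user-space side, outside this driver): timestamping is normally
 *  requested through the SIOCSHWTSTAMP ioctl with a struct hwtstamp_config
 *  such as { .tx_type = HWTSTAMP_TX_ON,
 *	      .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT },
 *  which is what tools like ptp4l and hwstamp_ctl issue.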
612  *  Return Value:
613  *  0 on success and an appropriate -ve integer on failure.
614  */
615 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
616 {
617 	struct stmmac_priv *priv = netdev_priv(dev);
618 	struct hwtstamp_config config;
619 	struct timespec64 now;
620 	u64 temp = 0;
621 	u32 ptp_v2 = 0;
622 	u32 tstamp_all = 0;
623 	u32 ptp_over_ipv4_udp = 0;
624 	u32 ptp_over_ipv6_udp = 0;
625 	u32 ptp_over_ethernet = 0;
626 	u32 snap_type_sel = 0;
627 	u32 ts_master_en = 0;
628 	u32 ts_event_en = 0;
629 	u32 sec_inc = 0;
630 	u32 value = 0;
631 	bool xmac;
632 
633 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
634 
635 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
636 		netdev_alert(priv->dev, "No support for HW time stamping\n");
637 		priv->hwts_tx_en = 0;
638 		priv->hwts_rx_en = 0;
639 
640 		return -EOPNOTSUPP;
641 	}
642 
643 	if (copy_from_user(&config, ifr->ifr_data,
644 			   sizeof(config)))
645 		return -EFAULT;
646 
647 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
648 		   __func__, config.flags, config.tx_type, config.rx_filter);
649 
650 	/* reserved for future extensions */
651 	if (config.flags)
652 		return -EINVAL;
653 
654 	if (config.tx_type != HWTSTAMP_TX_OFF &&
655 	    config.tx_type != HWTSTAMP_TX_ON)
656 		return -ERANGE;
657 
658 	if (priv->adv_ts) {
659 		switch (config.rx_filter) {
660 		case HWTSTAMP_FILTER_NONE:
661 			/* time stamp no incoming packet at all */
662 			config.rx_filter = HWTSTAMP_FILTER_NONE;
663 			break;
664 
665 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
666 			/* PTP v1, UDP, any kind of event packet */
667 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
668 			/* 'xmac' hardware can support Sync, Pdelay_Req and
669 			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
670 			 * This leaves Delay_Req timestamps out.
671 			 * Enable all events *and* general purpose message
672 			 * timestamping
673 			 */
674 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
675 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
676 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
677 			break;
678 
679 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
680 			/* PTP v1, UDP, Sync packet */
681 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
682 			/* take time stamp for SYNC messages only */
683 			ts_event_en = PTP_TCR_TSEVNTENA;
684 
685 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
686 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
687 			break;
688 
689 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
690 			/* PTP v1, UDP, Delay_req packet */
691 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
692 			/* take time stamp for Delay_Req messages only */
693 			ts_master_en = PTP_TCR_TSMSTRENA;
694 			ts_event_en = PTP_TCR_TSEVNTENA;
695 
696 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
697 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
698 			break;
699 
700 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
701 			/* PTP v2, UDP, any kind of event packet */
702 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
703 			ptp_v2 = PTP_TCR_TSVER2ENA;
704 			/* take time stamp for all event messages */
705 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
706 
707 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
708 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
709 			break;
710 
711 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
712 			/* PTP v2, UDP, Sync packet */
713 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
714 			ptp_v2 = PTP_TCR_TSVER2ENA;
715 			/* take time stamp for SYNC messages only */
716 			ts_event_en = PTP_TCR_TSEVNTENA;
717 
718 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
719 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
720 			break;
721 
722 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
723 			/* PTP v2, UDP, Delay_req packet */
724 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
725 			ptp_v2 = PTP_TCR_TSVER2ENA;
726 			/* take time stamp for Delay_Req messages only */
727 			ts_master_en = PTP_TCR_TSMSTRENA;
728 			ts_event_en = PTP_TCR_TSEVNTENA;
729 
730 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
731 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
732 			break;
733 
734 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
735 			/* PTP v2/802.AS1 any layer, any kind of event packet */
736 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
737 			ptp_v2 = PTP_TCR_TSVER2ENA;
738 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
739 			if (priv->synopsys_id != DWMAC_CORE_5_10)
740 				ts_event_en = PTP_TCR_TSEVNTENA;
741 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
742 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
743 			ptp_over_ethernet = PTP_TCR_TSIPENA;
744 			break;
745 
746 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
747 			/* PTP v2/802.AS1, any layer, Sync packet */
748 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
749 			ptp_v2 = PTP_TCR_TSVER2ENA;
750 			/* take time stamp for SYNC messages only */
751 			ts_event_en = PTP_TCR_TSEVNTENA;
752 
753 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
754 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
755 			ptp_over_ethernet = PTP_TCR_TSIPENA;
756 			break;
757 
758 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
759 			/* PTP v2/802.AS1, any layer, Delay_req packet */
760 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
761 			ptp_v2 = PTP_TCR_TSVER2ENA;
762 			/* take time stamp for Delay_Req messages only */
763 			ts_master_en = PTP_TCR_TSMSTRENA;
764 			ts_event_en = PTP_TCR_TSEVNTENA;
765 
766 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
767 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
768 			ptp_over_ethernet = PTP_TCR_TSIPENA;
769 			break;
770 
771 		case HWTSTAMP_FILTER_NTP_ALL:
772 		case HWTSTAMP_FILTER_ALL:
773 			/* time stamp any incoming packet */
774 			config.rx_filter = HWTSTAMP_FILTER_ALL;
775 			tstamp_all = PTP_TCR_TSENALL;
776 			break;
777 
778 		default:
779 			return -ERANGE;
780 		}
781 	} else {
782 		switch (config.rx_filter) {
783 		case HWTSTAMP_FILTER_NONE:
784 			config.rx_filter = HWTSTAMP_FILTER_NONE;
785 			break;
786 		default:
787 			/* PTP v1, UDP, any kind of event packet */
788 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
789 			break;
790 		}
791 	}
792 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
793 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
794 
795 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
796 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
797 	else {
798 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
799 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
800 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
801 			 ts_master_en | snap_type_sel);
802 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
803 
804 		/* program Sub Second Increment reg */
805 		stmmac_config_sub_second_increment(priv,
806 				priv->ptpaddr, priv->plat->clk_ptp_rate,
807 				xmac, &sec_inc);
808 		temp = div_u64(1000000000ULL, sec_inc);
809 
810 		/* Store sub second increment and flags for later use */
811 		priv->sub_second_inc = sec_inc;
812 		priv->systime_flags = value;
813 
814 		/* Calculate the default addend value:
815 		 * formula is:
816 		 * addend = (2^32)/freq_div_ratio;
817 		 * where freq_div_ratio = 1e9 ns / sec_inc
818 		 */
819 		temp = (u64)(temp << 32);
820 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
821 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
822 
823 		/* initialize system time */
824 		ktime_get_real_ts64(&now);
825 
826 		/* lower 32 bits of tv_sec are safe until y2106 */
827 		stmmac_init_systime(priv, priv->ptpaddr,
828 				(u32)now.tv_sec, now.tv_nsec);
829 	}
830 
831 	memcpy(&priv->tstamp_config, &config, sizeof(config));
832 
833 	return copy_to_user(ifr->ifr_data, &config,
834 			    sizeof(config)) ? -EFAULT : 0;
835 }
836 
837 /**
838  *  stmmac_hwtstamp_get - read hardware timestamping.
839  *  @dev: device pointer.
840  *  @ifr: An IOCTL-specific structure that can contain a pointer to
841  *  a proprietary structure used to pass information to the driver.
842  *  Description:
843  *  This function obtains the current hardware timestamping settings
844  *  as requested.
845  */
846 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
847 {
848 	struct stmmac_priv *priv = netdev_priv(dev);
849 	struct hwtstamp_config *config = &priv->tstamp_config;
850 
851 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
852 		return -EOPNOTSUPP;
853 
854 	return copy_to_user(ifr->ifr_data, config,
855 			    sizeof(*config)) ? -EFAULT : 0;
856 }
857 
858 /**
859  * stmmac_init_ptp - init PTP
860  * @priv: driver private structure
861  * Description: this is to verify if the HW supports PTPv1 or PTPv2.
862  * This is done by looking at the HW cap. register.
863  * This function also registers the ptp driver.
864  */
865 static int stmmac_init_ptp(struct stmmac_priv *priv)
866 {
867 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
868 
869 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
870 		return -EOPNOTSUPP;
871 
872 	priv->adv_ts = 0;
873 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
874 	if (xmac && priv->dma_cap.atime_stamp)
875 		priv->adv_ts = 1;
876 	/* Dwmac 3.x core with extend_desc can support adv_ts */
877 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
878 		priv->adv_ts = 1;
879 
880 	if (priv->dma_cap.time_stamp)
881 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
882 
883 	if (priv->adv_ts)
884 		netdev_info(priv->dev,
885 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
886 
887 	priv->hwts_tx_en = 0;
888 	priv->hwts_rx_en = 0;
889 
890 	stmmac_ptp_register(priv);
891 
892 	return 0;
893 }
894 
895 static void stmmac_release_ptp(struct stmmac_priv *priv)
896 {
897 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
898 	stmmac_ptp_unregister(priv);
899 }
900 
901 /**
902  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
903  *  @priv: driver private structure
904  *  @duplex: duplex passed to the next function
905  *  Description: It is used for configuring the flow control in all queues
906  */
907 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
908 {
909 	u32 tx_cnt = priv->plat->tx_queues_to_use;
910 
911 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
912 			priv->pause, tx_cnt);
913 }
914 
915 static void stmmac_validate(struct phylink_config *config,
916 			    unsigned long *supported,
917 			    struct phylink_link_state *state)
918 {
919 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
920 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
921 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
922 	int tx_cnt = priv->plat->tx_queues_to_use;
923 	int max_speed = priv->plat->max_speed;
924 
925 	phylink_set(mac_supported, 10baseT_Half);
926 	phylink_set(mac_supported, 10baseT_Full);
927 	phylink_set(mac_supported, 100baseT_Half);
928 	phylink_set(mac_supported, 100baseT_Full);
929 	phylink_set(mac_supported, 1000baseT_Half);
930 	phylink_set(mac_supported, 1000baseT_Full);
931 	phylink_set(mac_supported, 1000baseKX_Full);
932 
933 	phylink_set(mac_supported, Autoneg);
934 	phylink_set(mac_supported, Pause);
935 	phylink_set(mac_supported, Asym_Pause);
936 	phylink_set_port_modes(mac_supported);
937 
938 	/* Cut down 1G if asked to */
939 	if ((max_speed > 0) && (max_speed < 1000)) {
940 		phylink_set(mask, 1000baseT_Full);
941 		phylink_set(mask, 1000baseX_Full);
942 	} else if (priv->plat->has_gmac4) {
943 		if (!max_speed || max_speed >= 2500) {
944 			phylink_set(mac_supported, 2500baseT_Full);
945 			phylink_set(mac_supported, 2500baseX_Full);
946 		}
947 	} else if (priv->plat->has_xgmac) {
948 		if (!max_speed || (max_speed >= 2500)) {
949 			phylink_set(mac_supported, 2500baseT_Full);
950 			phylink_set(mac_supported, 2500baseX_Full);
951 		}
952 		if (!max_speed || (max_speed >= 5000)) {
953 			phylink_set(mac_supported, 5000baseT_Full);
954 		}
955 		if (!max_speed || (max_speed >= 10000)) {
956 			phylink_set(mac_supported, 10000baseSR_Full);
957 			phylink_set(mac_supported, 10000baseLR_Full);
958 			phylink_set(mac_supported, 10000baseER_Full);
959 			phylink_set(mac_supported, 10000baseLRM_Full);
960 			phylink_set(mac_supported, 10000baseT_Full);
961 			phylink_set(mac_supported, 10000baseKX4_Full);
962 			phylink_set(mac_supported, 10000baseKR_Full);
963 		}
964 		if (!max_speed || (max_speed >= 25000)) {
965 			phylink_set(mac_supported, 25000baseCR_Full);
966 			phylink_set(mac_supported, 25000baseKR_Full);
967 			phylink_set(mac_supported, 25000baseSR_Full);
968 		}
969 		if (!max_speed || (max_speed >= 40000)) {
970 			phylink_set(mac_supported, 40000baseKR4_Full);
971 			phylink_set(mac_supported, 40000baseCR4_Full);
972 			phylink_set(mac_supported, 40000baseSR4_Full);
973 			phylink_set(mac_supported, 40000baseLR4_Full);
974 		}
975 		if (!max_speed || (max_speed >= 50000)) {
976 			phylink_set(mac_supported, 50000baseCR2_Full);
977 			phylink_set(mac_supported, 50000baseKR2_Full);
978 			phylink_set(mac_supported, 50000baseSR2_Full);
979 			phylink_set(mac_supported, 50000baseKR_Full);
980 			phylink_set(mac_supported, 50000baseSR_Full);
981 			phylink_set(mac_supported, 50000baseCR_Full);
982 			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
983 			phylink_set(mac_supported, 50000baseDR_Full);
984 		}
985 		if (!max_speed || (max_speed >= 100000)) {
986 			phylink_set(mac_supported, 100000baseKR4_Full);
987 			phylink_set(mac_supported, 100000baseSR4_Full);
988 			phylink_set(mac_supported, 100000baseCR4_Full);
989 			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
990 			phylink_set(mac_supported, 100000baseKR2_Full);
991 			phylink_set(mac_supported, 100000baseSR2_Full);
992 			phylink_set(mac_supported, 100000baseCR2_Full);
993 			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
994 			phylink_set(mac_supported, 100000baseDR2_Full);
995 		}
996 	}
997 
998 	/* Half-Duplex can only work with single queue */
999 	if (tx_cnt > 1) {
1000 		phylink_set(mask, 10baseT_Half);
1001 		phylink_set(mask, 100baseT_Half);
1002 		phylink_set(mask, 1000baseT_Half);
1003 	}
1004 
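	/* Final capability set: supported/advertising are first limited to
	 * what the MAC can do (mac_supported), then the explicitly excluded
	 * modes collected in 'mask' are removed.
	 */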
1005 	linkmode_and(supported, supported, mac_supported);
1006 	linkmode_andnot(supported, supported, mask);
1007 
1008 	linkmode_and(state->advertising, state->advertising, mac_supported);
1009 	linkmode_andnot(state->advertising, state->advertising, mask);
1010 
1011 	/* If PCS is supported, check which modes it supports. */
1012 	if (priv->hw->xpcs)
1013 		xpcs_validate(priv->hw->xpcs, supported, state);
1014 }
1015 
1016 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
1017 			      const struct phylink_link_state *state)
1018 {
1019 	/* Nothing to do, xpcs_config() handles everything */
1020 }
1021 
1022 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
1023 {
1024 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
1025 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
1026 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
1027 	bool *hs_enable = &fpe_cfg->hs_enable;
1028 
1029 	if (is_up && *hs_enable) {
1030 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
1031 	} else {
1032 		*lo_state = FPE_STATE_OFF;
1033 		*lp_state = FPE_STATE_OFF;
1034 	}
1035 }
1036 
1037 static void stmmac_mac_link_down(struct phylink_config *config,
1038 				 unsigned int mode, phy_interface_t interface)
1039 {
1040 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1041 
1042 	stmmac_mac_set(priv, priv->ioaddr, false);
1043 	priv->eee_active = false;
1044 	priv->tx_lpi_enabled = false;
1045 	priv->eee_enabled = stmmac_eee_init(priv);
1046 	stmmac_set_eee_pls(priv, priv->hw, false);
1047 
1048 	if (priv->dma_cap.fpesel)
1049 		stmmac_fpe_link_state_handle(priv, false);
1050 }
1051 
1052 static void stmmac_mac_link_up(struct phylink_config *config,
1053 			       struct phy_device *phy,
1054 			       unsigned int mode, phy_interface_t interface,
1055 			       int speed, int duplex,
1056 			       bool tx_pause, bool rx_pause)
1057 {
1058 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1059 	u32 ctrl;
1060 
1061 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1062 	ctrl &= ~priv->hw->link.speed_mask;
1063 
1064 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1065 		switch (speed) {
1066 		case SPEED_10000:
1067 			ctrl |= priv->hw->link.xgmii.speed10000;
1068 			break;
1069 		case SPEED_5000:
1070 			ctrl |= priv->hw->link.xgmii.speed5000;
1071 			break;
1072 		case SPEED_2500:
1073 			ctrl |= priv->hw->link.xgmii.speed2500;
1074 			break;
1075 		default:
1076 			return;
1077 		}
1078 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1079 		switch (speed) {
1080 		case SPEED_100000:
1081 			ctrl |= priv->hw->link.xlgmii.speed100000;
1082 			break;
1083 		case SPEED_50000:
1084 			ctrl |= priv->hw->link.xlgmii.speed50000;
1085 			break;
1086 		case SPEED_40000:
1087 			ctrl |= priv->hw->link.xlgmii.speed40000;
1088 			break;
1089 		case SPEED_25000:
1090 			ctrl |= priv->hw->link.xlgmii.speed25000;
1091 			break;
1092 		case SPEED_10000:
1093 			ctrl |= priv->hw->link.xgmii.speed10000;
1094 			break;
1095 		case SPEED_2500:
1096 			ctrl |= priv->hw->link.speed2500;
1097 			break;
1098 		case SPEED_1000:
1099 			ctrl |= priv->hw->link.speed1000;
1100 			break;
1101 		default:
1102 			return;
1103 		}
1104 	} else {
1105 		switch (speed) {
1106 		case SPEED_2500:
1107 			ctrl |= priv->hw->link.speed2500;
1108 			break;
1109 		case SPEED_1000:
1110 			ctrl |= priv->hw->link.speed1000;
1111 			break;
1112 		case SPEED_100:
1113 			ctrl |= priv->hw->link.speed100;
1114 			break;
1115 		case SPEED_10:
1116 			ctrl |= priv->hw->link.speed10;
1117 			break;
1118 		default:
1119 			return;
1120 		}
1121 	}
1122 
1123 	priv->speed = speed;
1124 
1125 	if (priv->plat->fix_mac_speed)
1126 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1127 
1128 	if (!duplex)
1129 		ctrl &= ~priv->hw->link.duplex;
1130 	else
1131 		ctrl |= priv->hw->link.duplex;
1132 
1133 	/* Flow Control operation */
1134 	if (tx_pause && rx_pause)
1135 		stmmac_mac_flow_ctrl(priv, duplex);
1136 
1137 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1138 
1139 	stmmac_mac_set(priv, priv->ioaddr, true);
1140 	if (phy && priv->dma_cap.eee) {
1141 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
1142 		priv->eee_enabled = stmmac_eee_init(priv);
1143 		priv->tx_lpi_enabled = priv->eee_enabled;
1144 		stmmac_set_eee_pls(priv, priv->hw, true);
1145 	}
1146 
1147 	if (priv->dma_cap.fpesel)
1148 		stmmac_fpe_link_state_handle(priv, true);
1149 }
1150 
1151 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1152 	.validate = stmmac_validate,
1153 	.mac_config = stmmac_mac_config,
1154 	.mac_link_down = stmmac_mac_link_down,
1155 	.mac_link_up = stmmac_mac_link_up,
1156 };
1157 
1158 /**
1159  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1160  * @priv: driver private structure
1161  * Description: this is to verify if the HW supports the Physical Coding
1162  * Sublayer (PCS) interface, which can be used when the MAC is
1163  * configured for the TBI, RTBI, or SGMII PHY interface.
1164  */
1165 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1166 {
1167 	int interface = priv->plat->interface;
1168 
1169 	if (priv->dma_cap.pcs) {
1170 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1171 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1172 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1173 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1174 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1175 			priv->hw->pcs = STMMAC_PCS_RGMII;
1176 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1177 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1178 			priv->hw->pcs = STMMAC_PCS_SGMII;
1179 		}
1180 	}
1181 }
1182 
1183 /**
1184  * stmmac_init_phy - PHY initialization
1185  * @dev: net device structure
1186  * Description: it initializes the driver's PHY state and attaches the PHY
1187  * to the MAC driver.
1188  *  Return value:
1189  *  0 on success
1190  */
1191 static int stmmac_init_phy(struct net_device *dev)
1192 {
1193 	struct stmmac_priv *priv = netdev_priv(dev);
1194 	struct device_node *node;
1195 	int ret;
1196 
1197 	node = priv->plat->phylink_node;
1198 
1199 	if (node)
1200 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
1201 
1202 	/* Some DT bindings do not set up the PHY handle. Let's try to
1203 	 * manually parse it
1204 	 */
1205 	if (!node || ret) {
1206 		int addr = priv->plat->phy_addr;
1207 		struct phy_device *phydev;
1208 
1209 		phydev = mdiobus_get_phy(priv->mii, addr);
1210 		if (!phydev) {
1211 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1212 			return -ENODEV;
1213 		}
1214 
1215 		ret = phylink_connect_phy(priv->phylink, phydev);
1216 	}
1217 
1218 	if (!priv->plat->pmt) {
1219 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1220 
1221 		phylink_ethtool_get_wol(priv->phylink, &wol);
1222 		device_set_wakeup_capable(priv->device, !!wol.supported);
1223 	}
1224 
1225 	return ret;
1226 }
1227 
1228 static int stmmac_phy_setup(struct stmmac_priv *priv)
1229 {
1230 	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1231 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1232 	int mode = priv->plat->phy_interface;
1233 	struct phylink *phylink;
1234 
1235 	priv->phylink_config.dev = &priv->dev->dev;
1236 	priv->phylink_config.type = PHYLINK_NETDEV;
1237 	priv->phylink_config.pcs_poll = true;
1238 	if (priv->plat->mdio_bus_data)
1239 		priv->phylink_config.ovr_an_inband =
1240 			mdio_bus_data->xpcs_an_inband;
1241 
1242 	if (!fwnode)
1243 		fwnode = dev_fwnode(priv->device);
1244 
1245 	phylink = phylink_create(&priv->phylink_config, fwnode,
1246 				 mode, &stmmac_phylink_mac_ops);
1247 	if (IS_ERR(phylink))
1248 		return PTR_ERR(phylink);
1249 
1250 	if (priv->hw->xpcs)
1251 		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
1252 
1253 	priv->phylink = phylink;
1254 	return 0;
1255 }
1256 
1257 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1258 {
1259 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1260 	unsigned int desc_size;
1261 	void *head_rx;
1262 	u32 queue;
1263 
1264 	/* Display RX rings */
1265 	for (queue = 0; queue < rx_cnt; queue++) {
1266 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1267 
1268 		pr_info("\tRX Queue %u rings\n", queue);
1269 
1270 		if (priv->extend_desc) {
1271 			head_rx = (void *)rx_q->dma_erx;
1272 			desc_size = sizeof(struct dma_extended_desc);
1273 		} else {
1274 			head_rx = (void *)rx_q->dma_rx;
1275 			desc_size = sizeof(struct dma_desc);
1276 		}
1277 
1278 		/* Display RX ring */
1279 		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1280 				    rx_q->dma_rx_phy, desc_size);
1281 	}
1282 }
1283 
1284 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1285 {
1286 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1287 	unsigned int desc_size;
1288 	void *head_tx;
1289 	u32 queue;
1290 
1291 	/* Display TX rings */
1292 	for (queue = 0; queue < tx_cnt; queue++) {
1293 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1294 
1295 		pr_info("\tTX Queue %d rings\n", queue);
1296 
1297 		if (priv->extend_desc) {
1298 			head_tx = (void *)tx_q->dma_etx;
1299 			desc_size = sizeof(struct dma_extended_desc);
1300 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1301 			head_tx = (void *)tx_q->dma_entx;
1302 			desc_size = sizeof(struct dma_edesc);
1303 		} else {
1304 			head_tx = (void *)tx_q->dma_tx;
1305 			desc_size = sizeof(struct dma_desc);
1306 		}
1307 
1308 		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1309 				    tx_q->dma_tx_phy, desc_size);
1310 	}
1311 }
1312 
1313 static void stmmac_display_rings(struct stmmac_priv *priv)
1314 {
1315 	/* Display RX ring */
1316 	stmmac_display_rx_rings(priv);
1317 
1318 	/* Display TX ring */
1319 	stmmac_display_tx_rings(priv);
1320 }
1321 
1322 static int stmmac_set_bfsize(int mtu, int bufsize)
1323 {
1324 	int ret = bufsize;
1325 
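	/* Pick the smallest supported DMA buffer size that still fits a whole
	 * frame for this MTU, e.g. mtu = 3000 selects BUF_SIZE_4KiB while the
	 * default 1500-byte MTU keeps DEFAULT_BUFSIZE.
	 */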
1326 	if (mtu >= BUF_SIZE_8KiB)
1327 		ret = BUF_SIZE_16KiB;
1328 	else if (mtu >= BUF_SIZE_4KiB)
1329 		ret = BUF_SIZE_8KiB;
1330 	else if (mtu >= BUF_SIZE_2KiB)
1331 		ret = BUF_SIZE_4KiB;
1332 	else if (mtu > DEFAULT_BUFSIZE)
1333 		ret = BUF_SIZE_2KiB;
1334 	else
1335 		ret = DEFAULT_BUFSIZE;
1336 
1337 	return ret;
1338 }
1339 
1340 /**
1341  * stmmac_clear_rx_descriptors - clear RX descriptors
1342  * @priv: driver private structure
1343  * @queue: RX queue index
1344  * Description: this function is called to clear the RX descriptors;
1345  * it handles both basic and extended descriptors.
1346  */
1347 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1348 {
1349 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1350 	int i;
1351 
1352 	/* Clear the RX descriptors */
1353 	for (i = 0; i < priv->dma_rx_size; i++)
1354 		if (priv->extend_desc)
1355 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1356 					priv->use_riwt, priv->mode,
1357 					(i == priv->dma_rx_size - 1),
1358 					priv->dma_buf_sz);
1359 		else
1360 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1361 					priv->use_riwt, priv->mode,
1362 					(i == priv->dma_rx_size - 1),
1363 					priv->dma_buf_sz);
1364 }
1365 
1366 /**
1367  * stmmac_clear_tx_descriptors - clear tx descriptors
1368  * @priv: driver private structure
1369  * @queue: TX queue index.
1370  * Description: this function is called to clear the TX descriptors;
1371  * it handles both basic and extended descriptors.
1372  */
1373 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1374 {
1375 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1376 	int i;
1377 
1378 	/* Clear the TX descriptors */
1379 	for (i = 0; i < priv->dma_tx_size; i++) {
1380 		int last = (i == (priv->dma_tx_size - 1));
1381 		struct dma_desc *p;
1382 
1383 		if (priv->extend_desc)
1384 			p = &tx_q->dma_etx[i].basic;
1385 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1386 			p = &tx_q->dma_entx[i].basic;
1387 		else
1388 			p = &tx_q->dma_tx[i];
1389 
1390 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1391 	}
1392 }
1393 
1394 /**
1395  * stmmac_clear_descriptors - clear descriptors
1396  * @priv: driver private structure
1397  * Description: this function is called to clear the TX and RX descriptors;
1398  * it handles both basic and extended descriptors.
1399  */
1400 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1401 {
1402 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1403 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1404 	u32 queue;
1405 
1406 	/* Clear the RX descriptors */
1407 	for (queue = 0; queue < rx_queue_cnt; queue++)
1408 		stmmac_clear_rx_descriptors(priv, queue);
1409 
1410 	/* Clear the TX descriptors */
1411 	for (queue = 0; queue < tx_queue_cnt; queue++)
1412 		stmmac_clear_tx_descriptors(priv, queue);
1413 }
1414 
1415 /**
1416  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1417  * @priv: driver private structure
1418  * @p: descriptor pointer
1419  * @i: descriptor index
1420  * @flags: gfp flag
1421  * @queue: RX queue index
1422  * Description: this function is called to allocate a receive buffer, perform
1423  * the DMA mapping and init the descriptor.
1424  */
1425 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1426 				  int i, gfp_t flags, u32 queue)
1427 {
1428 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1429 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1430 
1431 	if (!buf->page) {
1432 		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1433 		if (!buf->page)
1434 			return -ENOMEM;
1435 		buf->page_offset = stmmac_rx_offset(priv);
1436 	}
1437 
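	/* With Split Header (SPH) enabled the descriptor carries two buffers:
	 * the packet headers land in buf->page while the payload is DMA'd
	 * into the secondary buf->sec_page.
	 */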
1438 	if (priv->sph && !buf->sec_page) {
1439 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1440 		if (!buf->sec_page)
1441 			return -ENOMEM;
1442 
1443 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1444 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1445 	} else {
1446 		buf->sec_page = NULL;
1447 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1448 	}
1449 
1450 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1451 
1452 	stmmac_set_desc_addr(priv, p, buf->addr);
1453 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1454 		stmmac_init_desc3(priv, p);
1455 
1456 	return 0;
1457 }
1458 
1459 /**
1460  * stmmac_free_rx_buffer - free RX dma buffers
1461  * @priv: private structure
1462  * @queue: RX queue index
1463  * @i: buffer index.
1464  */
1465 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1466 {
1467 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1468 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1469 
1470 	if (buf->page)
1471 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1472 	buf->page = NULL;
1473 
1474 	if (buf->sec_page)
1475 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1476 	buf->sec_page = NULL;
1477 }
1478 
1479 /**
1480  * stmmac_free_tx_buffer - free TX dma buffers
1481  * @priv: private structure
1482  * @queue: TX queue index
1483  * @i: buffer index.
1484  */
1485 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1486 {
1487 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1488 
1489 	if (tx_q->tx_skbuff_dma[i].buf &&
1490 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1491 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1492 			dma_unmap_page(priv->device,
1493 				       tx_q->tx_skbuff_dma[i].buf,
1494 				       tx_q->tx_skbuff_dma[i].len,
1495 				       DMA_TO_DEVICE);
1496 		else
1497 			dma_unmap_single(priv->device,
1498 					 tx_q->tx_skbuff_dma[i].buf,
1499 					 tx_q->tx_skbuff_dma[i].len,
1500 					 DMA_TO_DEVICE);
1501 	}
1502 
1503 	if (tx_q->xdpf[i] &&
1504 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1505 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1506 		xdp_return_frame(tx_q->xdpf[i]);
1507 		tx_q->xdpf[i] = NULL;
1508 	}
1509 
1510 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1511 		tx_q->xsk_frames_done++;
1512 
1513 	if (tx_q->tx_skbuff[i] &&
1514 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1515 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1516 		tx_q->tx_skbuff[i] = NULL;
1517 	}
1518 
1519 	tx_q->tx_skbuff_dma[i].buf = 0;
1520 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1521 }
1522 
1523 /**
1524  * dma_free_rx_skbufs - free RX dma buffers
1525  * @priv: private structure
1526  * @queue: RX queue index
1527  */
1528 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1529 {
1530 	int i;
1531 
1532 	for (i = 0; i < priv->dma_rx_size; i++)
1533 		stmmac_free_rx_buffer(priv, queue, i);
1534 }
1535 
1536 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
1537 				   gfp_t flags)
1538 {
1539 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1540 	int i;
1541 
1542 	for (i = 0; i < priv->dma_rx_size; i++) {
1543 		struct dma_desc *p;
1544 		int ret;
1545 
1546 		if (priv->extend_desc)
1547 			p = &((rx_q->dma_erx + i)->basic);
1548 		else
1549 			p = rx_q->dma_rx + i;
1550 
1551 		ret = stmmac_init_rx_buffers(priv, p, i, flags,
1552 					     queue);
1553 		if (ret)
1554 			return ret;
1555 
1556 		rx_q->buf_alloc_num++;
1557 	}
1558 
1559 	return 0;
1560 }
1561 
1562 /**
1563  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1564  * @priv: private structure
1565  * @queue: RX queue index
1566  */
1567 static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1568 {
1569 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1570 	int i;
1571 
1572 	for (i = 0; i < priv->dma_rx_size; i++) {
1573 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1574 
1575 		if (!buf->xdp)
1576 			continue;
1577 
1578 		xsk_buff_free(buf->xdp);
1579 		buf->xdp = NULL;
1580 	}
1581 }
1582 
1583 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1584 {
1585 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1586 	int i;
1587 
1588 	for (i = 0; i < priv->dma_rx_size; i++) {
1589 		struct stmmac_rx_buffer *buf;
1590 		dma_addr_t dma_addr;
1591 		struct dma_desc *p;
1592 
1593 		if (priv->extend_desc)
1594 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1595 		else
1596 			p = rx_q->dma_rx + i;
1597 
1598 		buf = &rx_q->buf_pool[i];
1599 
1600 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1601 		if (!buf->xdp)
1602 			return -ENOMEM;
1603 
1604 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1605 		stmmac_set_desc_addr(priv, p, dma_addr);
1606 		rx_q->buf_alloc_num++;
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1613 {
1614 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1615 		return NULL;
1616 
1617 	return xsk_get_pool_from_qid(priv->dev, queue);
1618 }
1619 
1620 /**
1621  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1622  * @priv: driver private structure
1623  * @queue: RX queue index
1624  * @flags: gfp flag.
1625  * Description: this function initializes the DMA RX descriptors
1626  * and allocates the socket buffers. It supports the chained and ring
1627  * modes.
1628  */
1629 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
1630 {
1631 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1632 	int ret;
1633 
1634 	netif_dbg(priv, probe, priv->dev,
1635 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1636 		  (u32)rx_q->dma_rx_phy);
1637 
1638 	stmmac_clear_rx_descriptors(priv, queue);
1639 
1640 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1641 
1642 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1643 
1644 	if (rx_q->xsk_pool) {
1645 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1646 						   MEM_TYPE_XSK_BUFF_POOL,
1647 						   NULL));
1648 		netdev_info(priv->dev,
1649 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1650 			    rx_q->queue_index);
1651 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1652 	} else {
1653 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1654 						   MEM_TYPE_PAGE_POOL,
1655 						   rx_q->page_pool));
1656 		netdev_info(priv->dev,
1657 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1658 			    rx_q->queue_index);
1659 	}
1660 
1661 	if (rx_q->xsk_pool) {
1662 		/* RX XDP ZC buffer pool may not be populated, e.g.
1663 		 * xdpsock TX-only.
1664 		 */
1665 		stmmac_alloc_rx_buffers_zc(priv, queue);
1666 	} else {
1667 		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
1668 		if (ret < 0)
1669 			return -ENOMEM;
1670 	}
1671 
1672 	rx_q->cur_rx = 0;
1673 	rx_q->dirty_rx = 0;
1674 
1675 	/* Setup the chained descriptor addresses */
1676 	if (priv->mode == STMMAC_CHAIN_MODE) {
1677 		if (priv->extend_desc)
1678 			stmmac_mode_init(priv, rx_q->dma_erx,
1679 					 rx_q->dma_rx_phy,
1680 					 priv->dma_rx_size, 1);
1681 		else
1682 			stmmac_mode_init(priv, rx_q->dma_rx,
1683 					 rx_q->dma_rx_phy,
1684 					 priv->dma_rx_size, 0);
1685 	}
1686 
1687 	return 0;
1688 }
1689 
1690 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1691 {
1692 	struct stmmac_priv *priv = netdev_priv(dev);
1693 	u32 rx_count = priv->plat->rx_queues_to_use;
1694 	u32 queue;
1695 	int ret;
1696 
1697 	/* RX INITIALIZATION */
1698 	netif_dbg(priv, probe, priv->dev,
1699 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1700 
1701 	for (queue = 0; queue < rx_count; queue++) {
1702 		ret = __init_dma_rx_desc_rings(priv, queue, flags);
1703 		if (ret)
1704 			goto err_init_rx_buffers;
1705 	}
1706 
1707 	return 0;
1708 
1709 err_init_rx_buffers:
1710 	while (queue >= 0) {
1711 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1712 
1713 		if (rx_q->xsk_pool)
1714 			dma_free_rx_xskbufs(priv, queue);
1715 		else
1716 			dma_free_rx_skbufs(priv, queue);
1717 
1718 		rx_q->buf_alloc_num = 0;
1719 		rx_q->xsk_pool = NULL;
1720 
1721 		if (queue == 0)
1722 			break;
1723 
1724 		queue--;
1725 	}
1726 
1727 	return ret;
1728 }
1729 
1730 /**
1731  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1732  * @priv: driver private structure
1733  * @queue : TX queue index
1734  * Description: this function initializes the DMA TX descriptors
1735  * and the TX buffer bookkeeping. It supports the chained and ring
1736  * modes.
1737  */
1738 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
1739 {
1740 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1741 	int i;
1742 
1743 	netif_dbg(priv, probe, priv->dev,
1744 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1745 		  (u32)tx_q->dma_tx_phy);
1746 
1747 	/* Setup the chained descriptor addresses */
1748 	if (priv->mode == STMMAC_CHAIN_MODE) {
1749 		if (priv->extend_desc)
1750 			stmmac_mode_init(priv, tx_q->dma_etx,
1751 					 tx_q->dma_tx_phy,
1752 					 priv->dma_tx_size, 1);
1753 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1754 			stmmac_mode_init(priv, tx_q->dma_tx,
1755 					 tx_q->dma_tx_phy,
1756 					 priv->dma_tx_size, 0);
1757 	}
1758 
1759 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1760 
1761 	for (i = 0; i < priv->dma_tx_size; i++) {
1762 		struct dma_desc *p;
1763 
1764 		if (priv->extend_desc)
1765 			p = &((tx_q->dma_etx + i)->basic);
1766 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1767 			p = &((tx_q->dma_entx + i)->basic);
1768 		else
1769 			p = tx_q->dma_tx + i;
1770 
1771 		stmmac_clear_desc(priv, p);
1772 
1773 		tx_q->tx_skbuff_dma[i].buf = 0;
1774 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1775 		tx_q->tx_skbuff_dma[i].len = 0;
1776 		tx_q->tx_skbuff_dma[i].last_segment = false;
1777 		tx_q->tx_skbuff[i] = NULL;
1778 	}
1779 
1780 	tx_q->dirty_tx = 0;
1781 	tx_q->cur_tx = 0;
1782 	tx_q->mss = 0;
1783 
1784 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1785 
1786 	return 0;
1787 }
1788 
1789 static int init_dma_tx_desc_rings(struct net_device *dev)
1790 {
1791 	struct stmmac_priv *priv = netdev_priv(dev);
1792 	u32 tx_queue_cnt;
1793 	u32 queue;
1794 
1795 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1796 
1797 	for (queue = 0; queue < tx_queue_cnt; queue++)
1798 		__init_dma_tx_desc_rings(priv, queue);
1799 
1800 	return 0;
1801 }
1802 
1803 /**
1804  * init_dma_desc_rings - init the RX/TX descriptor rings
1805  * @dev: net device structure
1806  * @flags: gfp flag.
1807  * Description: this function initializes the DMA RX/TX descriptors
1808  * and allocates the socket buffers. It supports the chained and ring
1809  * modes.
1810  */
1811 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1812 {
1813 	struct stmmac_priv *priv = netdev_priv(dev);
1814 	int ret;
1815 
1816 	ret = init_dma_rx_desc_rings(dev, flags);
1817 	if (ret)
1818 		return ret;
1819 
1820 	ret = init_dma_tx_desc_rings(dev);
1821 
1822 	stmmac_clear_descriptors(priv);
1823 
1824 	if (netif_msg_hw(priv))
1825 		stmmac_display_rings(priv);
1826 
1827 	return ret;
1828 }
1829 
1830 /**
1831  * dma_free_tx_skbufs - free TX dma buffers
1832  * @priv: private structure
1833  * @queue: TX queue index
1834  */
1835 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1836 {
1837 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1838 	int i;
1839 
1840 	tx_q->xsk_frames_done = 0;
1841 
1842 	for (i = 0; i < priv->dma_tx_size; i++)
1843 		stmmac_free_tx_buffer(priv, queue, i);
1844 
1845 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1846 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1847 		tx_q->xsk_frames_done = 0;
1848 		tx_q->xsk_pool = NULL;
1849 	}
1850 }
1851 
1852 /**
1853  * stmmac_free_tx_skbufs - free TX skb buffers
1854  * @priv: private structure
1855  */
1856 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1857 {
1858 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1859 	u32 queue;
1860 
1861 	for (queue = 0; queue < tx_queue_cnt; queue++)
1862 		dma_free_tx_skbufs(priv, queue);
1863 }
1864 
1865 /**
1866  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1867  * @priv: private structure
1868  * @queue: RX queue index
1869  */
1870 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1871 {
1872 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1873 
1874 	/* Release the DMA RX socket buffers */
1875 	if (rx_q->xsk_pool)
1876 		dma_free_rx_xskbufs(priv, queue);
1877 	else
1878 		dma_free_rx_skbufs(priv, queue);
1879 
1880 	rx_q->buf_alloc_num = 0;
1881 	rx_q->xsk_pool = NULL;
1882 
1883 	/* Free DMA regions of consistent memory previously allocated */
1884 	if (!priv->extend_desc)
1885 		dma_free_coherent(priv->device, priv->dma_rx_size *
1886 				  sizeof(struct dma_desc),
1887 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1888 	else
1889 		dma_free_coherent(priv->device, priv->dma_rx_size *
1890 				  sizeof(struct dma_extended_desc),
1891 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1892 
1893 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1894 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1895 
1896 	kfree(rx_q->buf_pool);
1897 	if (rx_q->page_pool)
1898 		page_pool_destroy(rx_q->page_pool);
1899 }
1900 
1901 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1902 {
1903 	u32 rx_count = priv->plat->rx_queues_to_use;
1904 	u32 queue;
1905 
1906 	/* Free RX queue resources */
1907 	for (queue = 0; queue < rx_count; queue++)
1908 		__free_dma_rx_desc_resources(priv, queue);
1909 }
1910 
1911 /**
1912  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1913  * @priv: private structure
1914  * @queue: TX queue index
1915  */
1916 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1917 {
1918 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1919 	size_t size;
1920 	void *addr;
1921 
1922 	/* Release the DMA TX socket buffers */
1923 	dma_free_tx_skbufs(priv, queue);
1924 
1925 	if (priv->extend_desc) {
1926 		size = sizeof(struct dma_extended_desc);
1927 		addr = tx_q->dma_etx;
1928 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1929 		size = sizeof(struct dma_edesc);
1930 		addr = tx_q->dma_entx;
1931 	} else {
1932 		size = sizeof(struct dma_desc);
1933 		addr = tx_q->dma_tx;
1934 	}
1935 
1936 	size *= priv->dma_tx_size;
1937 
1938 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1939 
1940 	kfree(tx_q->tx_skbuff_dma);
1941 	kfree(tx_q->tx_skbuff);
1942 }
1943 
1944 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1945 {
1946 	u32 tx_count = priv->plat->tx_queues_to_use;
1947 	u32 queue;
1948 
1949 	/* Free TX queue resources */
1950 	for (queue = 0; queue < tx_count; queue++)
1951 		__free_dma_tx_desc_resources(priv, queue);
1952 }
1953 
1954 /**
1955  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1956  * @priv: private structure
1957  * @queue: RX queue index
1958  * Description: this function allocates the RX resources for a single queue:
1959  * the page pool, the driver buffer pool and the DMA descriptors (extended
1960  * or basic, depending on which descriptor type is in use), and registers
1961  * the XDP RX queue info used by the XDP/AF_XDP paths.
1962  */
1963 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1964 {
1965 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1966 	struct stmmac_channel *ch = &priv->channel[queue];
1967 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
1968 	struct page_pool_params pp_params = { 0 };
1969 	unsigned int num_pages;
1970 	unsigned int napi_id;
1971 	int ret;
1972 
1973 	rx_q->queue_index = queue;
1974 	rx_q->priv_data = priv;
1975 
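	/* One page_pool per RX queue backs the RX buffers. If a buffer is
	 * larger than one page, a higher-order pool is used (order =
	 * log2 of pages per buffer). With an XDP program attached the
	 * pages are mapped bidirectionally so XDP_TX can reuse them.
	 */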
1976 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1977 	pp_params.pool_size = priv->dma_rx_size;
1978 	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1979 	pp_params.order = ilog2(num_pages);
1980 	pp_params.nid = dev_to_node(priv->device);
1981 	pp_params.dev = priv->device;
1982 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
1983 	pp_params.offset = stmmac_rx_offset(priv);
1984 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
1985 
1986 	rx_q->page_pool = page_pool_create(&pp_params);
1987 	if (IS_ERR(rx_q->page_pool)) {
1988 		ret = PTR_ERR(rx_q->page_pool);
1989 		rx_q->page_pool = NULL;
1990 		return ret;
1991 	}
1992 
1993 	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1994 				 sizeof(*rx_q->buf_pool),
1995 				 GFP_KERNEL);
1996 	if (!rx_q->buf_pool)
1997 		return -ENOMEM;
1998 
1999 	if (priv->extend_desc) {
2000 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2001 						   priv->dma_rx_size *
2002 						   sizeof(struct dma_extended_desc),
2003 						   &rx_q->dma_rx_phy,
2004 						   GFP_KERNEL);
2005 		if (!rx_q->dma_erx)
2006 			return -ENOMEM;
2007 
2008 	} else {
2009 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2010 						  priv->dma_rx_size *
2011 						  sizeof(struct dma_desc),
2012 						  &rx_q->dma_rx_phy,
2013 						  GFP_KERNEL);
2014 		if (!rx_q->dma_rx)
2015 			return -ENOMEM;
2016 	}
2017 
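	/* Register the XDP RX queue info against the NAPI instance that will
	 * own this queue: the combined rxtx NAPI when AF_XDP zero-copy is
	 * enabled on it, the plain RX NAPI otherwise.
	 */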
2018 	if (stmmac_xdp_is_enabled(priv) &&
2019 	    test_bit(queue, priv->af_xdp_zc_qps))
2020 		napi_id = ch->rxtx_napi.napi_id;
2021 	else
2022 		napi_id = ch->rx_napi.napi_id;
2023 
2024 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2025 			       rx_q->queue_index,
2026 			       napi_id);
2027 	if (ret) {
2028 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2029 		return -EINVAL;
2030 	}
2031 
2032 	return 0;
2033 }
2034 
2035 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2036 {
2037 	u32 rx_count = priv->plat->rx_queues_to_use;
2038 	u32 queue;
2039 	int ret;
2040 
2041 	/* RX queues buffers and DMA */
2042 	for (queue = 0; queue < rx_count; queue++) {
2043 		ret = __alloc_dma_rx_desc_resources(priv, queue);
2044 		if (ret)
2045 			goto err_dma;
2046 	}
2047 
2048 	return 0;
2049 
2050 err_dma:
2051 	free_dma_rx_desc_resources(priv);
2052 
2053 	return ret;
2054 }
2055 
2056 /**
2057  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2058  * @priv: private structure
2059  * @queue: TX queue index
2060  * Description: this function allocates the TX resources for a single queue:
2061  * the tx_skbuff/tx_skbuff_dma bookkeeping arrays and the DMA descriptors
2062  * (extended, enhanced for TBS, or basic, depending on which descriptor
2063  * type is in use).
2064  */
2065 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
2066 {
2067 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2068 	size_t size;
2069 	void *addr;
2070 
2071 	tx_q->queue_index = queue;
2072 	tx_q->priv_data = priv;
2073 
2074 	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2075 				      sizeof(*tx_q->tx_skbuff_dma),
2076 				      GFP_KERNEL);
2077 	if (!tx_q->tx_skbuff_dma)
2078 		return -ENOMEM;
2079 
2080 	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2081 				  sizeof(struct sk_buff *),
2082 				  GFP_KERNEL);
2083 	if (!tx_q->tx_skbuff)
2084 		return -ENOMEM;
2085 
2086 	if (priv->extend_desc)
2087 		size = sizeof(struct dma_extended_desc);
2088 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2089 		size = sizeof(struct dma_edesc);
2090 	else
2091 		size = sizeof(struct dma_desc);
2092 
2093 	size *= priv->dma_tx_size;
2094 
2095 	addr = dma_alloc_coherent(priv->device, size,
2096 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2097 	if (!addr)
2098 		return -ENOMEM;
2099 
2100 	if (priv->extend_desc)
2101 		tx_q->dma_etx = addr;
2102 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2103 		tx_q->dma_entx = addr;
2104 	else
2105 		tx_q->dma_tx = addr;
2106 
2107 	return 0;
2108 }
2109 
2110 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2111 {
2112 	u32 tx_count = priv->plat->tx_queues_to_use;
2113 	u32 queue;
2114 	int ret;
2115 
2116 	/* TX queues buffers and DMA */
2117 	for (queue = 0; queue < tx_count; queue++) {
2118 		ret = __alloc_dma_tx_desc_resources(priv, queue);
2119 		if (ret)
2120 			goto err_dma;
2121 	}
2122 
2123 	return 0;
2124 
2125 err_dma:
2126 	free_dma_tx_desc_resources(priv);
2127 	return ret;
2128 }
2129 
2130 /**
2131  * alloc_dma_desc_resources - alloc TX/RX resources.
2132  * @priv: private structure
2133  * Description: according to which descriptor type is in use (extended or
2134  * basic), this function allocates the resources for the TX and RX paths.
2135  * For reception it also pre-allocates the RX buffers in order to allow a
2136  * zero-copy mechanism.
2137  */
2138 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2139 {
2140 	/* RX Allocation */
2141 	int ret = alloc_dma_rx_desc_resources(priv);
2142 
2143 	if (ret)
2144 		return ret;
2145 
2146 	ret = alloc_dma_tx_desc_resources(priv);
2147 
2148 	return ret;
2149 }
2150 
2151 /**
2152  * free_dma_desc_resources - free dma desc resources
2153  * @priv: private structure
2154  */
2155 static void free_dma_desc_resources(struct stmmac_priv *priv)
2156 {
2157 	/* Release the DMA TX socket buffers */
2158 	free_dma_tx_desc_resources(priv);
2159 
2160 	/* Release the DMA RX socket buffers later
2161 	 * to ensure all pending XDP_TX buffers are returned.
2162 	 */
2163 	free_dma_rx_desc_resources(priv);
2164 }
2165 
2166 /**
2167  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2168  *  @priv: driver private structure
2169  *  Description: It is used for enabling the rx queues in the MAC
2170  */
2171 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2172 {
2173 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2174 	int queue;
2175 	u8 mode;
2176 
2177 	for (queue = 0; queue < rx_queues_count; queue++) {
2178 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2179 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2180 	}
2181 }
2182 
2183 /**
2184  * stmmac_start_rx_dma - start RX DMA channel
2185  * @priv: driver private structure
2186  * @chan: RX channel index
2187  * Description:
2188  * This starts a RX DMA channel
2189  */
2190 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2191 {
2192 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2193 	stmmac_start_rx(priv, priv->ioaddr, chan);
2194 }
2195 
2196 /**
2197  * stmmac_start_tx_dma - start TX DMA channel
2198  * @priv: driver private structure
2199  * @chan: TX channel index
2200  * Description:
2201  * This starts a TX DMA channel
2202  */
2203 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2204 {
2205 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2206 	stmmac_start_tx(priv, priv->ioaddr, chan);
2207 }
2208 
2209 /**
2210  * stmmac_stop_rx_dma - stop RX DMA channel
2211  * @priv: driver private structure
2212  * @chan: RX channel index
2213  * Description:
2214  * This stops a RX DMA channel
2215  */
2216 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2217 {
2218 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2219 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2220 }
2221 
2222 /**
2223  * stmmac_stop_tx_dma - stop TX DMA channel
2224  * @priv: driver private structure
2225  * @chan: TX channel index
2226  * Description:
2227  * This stops a TX DMA channel
2228  */
2229 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2230 {
2231 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2232 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2233 }
2234 
2235 /**
2236  * stmmac_start_all_dma - start all RX and TX DMA channels
2237  * @priv: driver private structure
2238  * Description:
2239  * This starts all the RX and TX DMA channels
2240  */
2241 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2242 {
2243 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2244 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2245 	u32 chan = 0;
2246 
2247 	for (chan = 0; chan < rx_channels_count; chan++)
2248 		stmmac_start_rx_dma(priv, chan);
2249 
2250 	for (chan = 0; chan < tx_channels_count; chan++)
2251 		stmmac_start_tx_dma(priv, chan);
2252 }
2253 
2254 /**
2255  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2256  * @priv: driver private structure
2257  * Description:
2258  * This stops the RX and TX DMA channels
2259  */
2260 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2261 {
2262 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2263 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2264 	u32 chan = 0;
2265 
2266 	for (chan = 0; chan < rx_channels_count; chan++)
2267 		stmmac_stop_rx_dma(priv, chan);
2268 
2269 	for (chan = 0; chan < tx_channels_count; chan++)
2270 		stmmac_stop_tx_dma(priv, chan);
2271 }
2272 
2273 /**
2274  *  stmmac_dma_operation_mode - HW DMA operation mode
2275  *  @priv: driver private structure
2276  *  Description: it is used for configuring the DMA operation mode register in
2277  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2278  */
2279 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2280 {
2281 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2282 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2283 	int rxfifosz = priv->plat->rx_fifo_size;
2284 	int txfifosz = priv->plat->tx_fifo_size;
2285 	u32 txmode = 0;
2286 	u32 rxmode = 0;
2287 	u32 chan = 0;
2288 	u8 qmode = 0;
2289 
2290 	if (rxfifosz == 0)
2291 		rxfifosz = priv->dma_cap.rx_fifo_size;
2292 	if (txfifosz == 0)
2293 		txfifosz = priv->dma_cap.tx_fifo_size;
2294 
2295 	/* Adjust for real per queue fifo size */
2296 	rxfifosz /= rx_channels_count;
2297 	txfifosz /= tx_channels_count;
2298 
2299 	if (priv->plat->force_thresh_dma_mode) {
2300 		txmode = tc;
2301 		rxmode = tc;
2302 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2303 		/*
2304 		 * On GMAC, Store-and-Forward mode can be enabled to
2305 		 * perform the TX checksum offload (COE) in HW. This requires:
2306 		 * 1) TX COE being actually supported, and
2307 		 * 2) no buggy Jumbo frame support forcing the driver
2308 		 *    to skip csum insertion in the TDES.
2309 		 */
2310 		txmode = SF_DMA_MODE;
2311 		rxmode = SF_DMA_MODE;
2312 		priv->xstats.threshold = SF_DMA_MODE;
2313 	} else {
2314 		txmode = tc;
2315 		rxmode = SF_DMA_MODE;
2316 	}
2317 
2318 	/* configure all channels */
2319 	for (chan = 0; chan < rx_channels_count; chan++) {
2320 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2321 		u32 buf_size;
2322 
2323 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2324 
2325 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2326 				rxfifosz, qmode);
2327 
2328 		if (rx_q->xsk_pool) {
2329 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2330 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2331 					      buf_size,
2332 					      chan);
2333 		} else {
2334 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2335 					      priv->dma_buf_sz,
2336 					      chan);
2337 		}
2338 	}
2339 
2340 	for (chan = 0; chan < tx_channels_count; chan++) {
2341 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2342 
2343 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2344 				txfifosz, qmode);
2345 	}
2346 }
2347 
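/**
 * stmmac_xdp_xmit_zc - transmit pending AF_XDP (zero-copy) frames
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: pulls frames from the XSK pool and places them on the TX
 * ring, which is shared with the slow path. The return value tells the
 * caller whether the XSK TX work was completed within the budget.
 */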
2348 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2349 {
2350 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2351 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2352 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2353 	unsigned int entry = tx_q->cur_tx;
2354 	struct dma_desc *tx_desc = NULL;
2355 	struct xdp_desc xdp_desc;
2356 	bool work_done = true;
2357 
2358 	/* Avoids TX time-out as we are sharing with slow path */
2359 	nq->trans_start = jiffies;
2360 
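	/* Never submit more frames than there are free TX descriptors. */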
2361 	budget = min(budget, stmmac_tx_avail(priv, queue));
2362 
2363 	while (budget-- > 0) {
2364 		dma_addr_t dma_addr;
2365 		bool set_ic;
2366 
2367 		/* We share the ring with the slow path: stop XSK TX desc
2368 		 * submission when the available TX ring space drops below the threshold.
2369 		 */
2370 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2371 		    !netif_carrier_ok(priv->dev)) {
2372 			work_done = false;
2373 			break;
2374 		}
2375 
2376 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2377 			break;
2378 
2379 		if (likely(priv->extend_desc))
2380 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2381 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2382 			tx_desc = &tx_q->dma_entx[entry].basic;
2383 		else
2384 			tx_desc = tx_q->dma_tx + entry;
2385 
2386 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2387 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2388 
2389 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2390 
2391 		/* To return the XDP buffer to the XSK pool, we simply call
2392 		 * xsk_tx_completed(), so we don't need to fill up
2393 		 * 'buf' and 'xdpf'.
2394 		 */
2395 		tx_q->tx_skbuff_dma[entry].buf = 0;
2396 		tx_q->xdpf[entry] = NULL;
2397 
2398 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2399 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2400 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2401 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2402 
2403 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2404 
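		/* TX completion IRQ coalescing: only request an interrupt
		 * every tx_coal_frames descriptors (never, if the frame
		 * coalescing count is zero).
		 */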
2405 		tx_q->tx_count_frames++;
2406 
2407 		if (!priv->tx_coal_frames[queue])
2408 			set_ic = false;
2409 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2410 			set_ic = true;
2411 		else
2412 			set_ic = false;
2413 
2414 		if (set_ic) {
2415 			tx_q->tx_count_frames = 0;
2416 			stmmac_set_tx_ic(priv, tx_desc);
2417 			priv->xstats.tx_set_ic_bit++;
2418 		}
2419 
2420 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2421 				       true, priv->mode, true, true,
2422 				       xdp_desc.len);
2423 
2424 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2425 
2426 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2427 		entry = tx_q->cur_tx;
2428 	}
2429 
2430 	if (tx_desc) {
2431 		stmmac_flush_tx_descriptors(priv, queue);
2432 		xsk_tx_release(pool);
2433 	}
2434 
2435 	/* Return true only if both of these conditions are met:
2436 	 *  a) TX budget is still available
2437 	 *  b) work_done == true, i.e. the XSK TX desc peek came up empty
2438 	 *     (no more pending XSK TX frames to transmit)
2439 	 */
2440 	return !!budget && work_done;
2441 }
2442 
2443 /**
2444  * stmmac_tx_clean - to manage the transmission completion
2445  * @priv: driver private structure
2446  * @budget: napi budget limiting this function's packet handling
2447  * @queue: TX queue index
2448  * Description: it reclaims the transmit resources after transmission completes.
2449  */
2450 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2451 {
2452 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2453 	unsigned int bytes_compl = 0, pkts_compl = 0;
2454 	unsigned int entry, xmits = 0, count = 0;
2455 
2456 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2457 
2458 	priv->xstats.tx_clean++;
2459 
2460 	tx_q->xsk_frames_done = 0;
2461 
2462 	entry = tx_q->dirty_tx;
2463 
2464 	/* Try to clean all completed TX frames in one shot */
2465 	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
2466 		struct xdp_frame *xdpf;
2467 		struct sk_buff *skb;
2468 		struct dma_desc *p;
2469 		int status;
2470 
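		/* Work out what kind of buffer this descriptor carried so it
		 * can be released through the right path below (XDP frame,
		 * skb or XSK buffer).
		 */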
2471 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2472 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2473 			xdpf = tx_q->xdpf[entry];
2474 			skb = NULL;
2475 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2476 			xdpf = NULL;
2477 			skb = tx_q->tx_skbuff[entry];
2478 		} else {
2479 			xdpf = NULL;
2480 			skb = NULL;
2481 		}
2482 
2483 		if (priv->extend_desc)
2484 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2485 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2486 			p = &tx_q->dma_entx[entry].basic;
2487 		else
2488 			p = tx_q->dma_tx + entry;
2489 
2490 		status = stmmac_tx_status(priv, &priv->dev->stats,
2491 				&priv->xstats, p, priv->ioaddr);
2492 		/* Check if the descriptor is owned by the DMA */
2493 		if (unlikely(status & tx_dma_own))
2494 			break;
2495 
2496 		count++;
2497 
2498 		/* Make sure descriptor fields are read after reading
2499 		 * the own bit.
2500 		 */
2501 		dma_rmb();
2502 
2503 		/* Just consider the last segment and ...*/
2504 		if (likely(!(status & tx_not_ls))) {
2505 			/* ... verify the status error condition */
2506 			if (unlikely(status & tx_err)) {
2507 				priv->dev->stats.tx_errors++;
2508 			} else {
2509 				priv->dev->stats.tx_packets++;
2510 				priv->xstats.tx_pkt_n++;
2511 				priv->xstats.txq_stats[queue].tx_pkt_n++;
2512 			}
2513 			if (skb)
2514 				stmmac_get_tx_hwtstamp(priv, p, skb);
2515 		}
2516 
2517 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2518 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2519 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2520 				dma_unmap_page(priv->device,
2521 					       tx_q->tx_skbuff_dma[entry].buf,
2522 					       tx_q->tx_skbuff_dma[entry].len,
2523 					       DMA_TO_DEVICE);
2524 			else
2525 				dma_unmap_single(priv->device,
2526 						 tx_q->tx_skbuff_dma[entry].buf,
2527 						 tx_q->tx_skbuff_dma[entry].len,
2528 						 DMA_TO_DEVICE);
2529 			tx_q->tx_skbuff_dma[entry].buf = 0;
2530 			tx_q->tx_skbuff_dma[entry].len = 0;
2531 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2532 		}
2533 
2534 		stmmac_clean_desc3(priv, tx_q, p);
2535 
2536 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2537 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2538 
2539 		if (xdpf &&
2540 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2541 			xdp_return_frame_rx_napi(xdpf);
2542 			tx_q->xdpf[entry] = NULL;
2543 		}
2544 
2545 		if (xdpf &&
2546 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2547 			xdp_return_frame(xdpf);
2548 			tx_q->xdpf[entry] = NULL;
2549 		}
2550 
2551 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2552 			tx_q->xsk_frames_done++;
2553 
2554 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2555 			if (likely(skb)) {
2556 				pkts_compl++;
2557 				bytes_compl += skb->len;
2558 				dev_consume_skb_any(skb);
2559 				tx_q->tx_skbuff[entry] = NULL;
2560 			}
2561 		}
2562 
2563 		stmmac_release_tx_desc(priv, p, priv->mode);
2564 
2565 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2566 	}
2567 	tx_q->dirty_tx = entry;
2568 
2569 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2570 				  pkts_compl, bytes_compl);
2571 
2572 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2573 								queue))) &&
2574 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2575 
2576 		netif_dbg(priv, tx_done, priv->dev,
2577 			  "%s: restart transmit\n", __func__);
2578 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2579 	}
2580 
2581 	if (tx_q->xsk_pool) {
2582 		bool work_done;
2583 
2584 		if (tx_q->xsk_frames_done)
2585 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2586 
2587 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2588 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2589 
2590 		/* For XSK TX, we try to send as many as possible.
2591 		 * If XSK work done (XSK TX desc empty and budget still
2592 		 * available), return "budget - 1" to reenable TX IRQ.
2593 		 * Else, return "budget" to make NAPI continue polling.
2594 		 */
2595 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2596 					       STMMAC_XSK_TX_BUDGET_MAX);
2597 		if (work_done)
2598 			xmits = budget - 1;
2599 		else
2600 			xmits = budget;
2601 	}
2602 
2603 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2604 	    priv->eee_sw_timer_en) {
2605 		stmmac_enable_eee_mode(priv);
2606 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2607 	}
2608 
2609 	/* We still have pending packets, let's call for a new scheduling */
2610 	if (tx_q->dirty_tx != tx_q->cur_tx)
2611 		hrtimer_start(&tx_q->txtimer,
2612 			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2613 			      HRTIMER_MODE_REL);
2614 
2615 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2616 
2617 	/* Combine decisions from TX clean and XSK TX */
2618 	return max(count, xmits);
2619 }
2620 
2621 /**
2622  * stmmac_tx_err - to manage the tx error
2623  * @priv: driver private structure
2624  * @chan: channel index
2625  * Description: it cleans the descriptors and restarts the transmission
2626  * in case of transmission errors.
2627  */
2628 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2629 {
2630 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2631 
2632 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2633 
2634 	stmmac_stop_tx_dma(priv, chan);
2635 	dma_free_tx_skbufs(priv, chan);
2636 	stmmac_clear_tx_descriptors(priv, chan);
2637 	tx_q->dirty_tx = 0;
2638 	tx_q->cur_tx = 0;
2639 	tx_q->mss = 0;
2640 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2641 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2642 			    tx_q->dma_tx_phy, chan);
2643 	stmmac_start_tx_dma(priv, chan);
2644 
2645 	priv->dev->stats.tx_errors++;
2646 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2647 }
2648 
2649 /**
2650  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2651  *  @priv: driver private structure
2652  *  @txmode: TX operating mode
2653  *  @rxmode: RX operating mode
2654  *  @chan: channel index
2655  *  Description: it is used for configuring the DMA operation mode at
2656  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2657  *  mode.
2658  */
2659 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2660 					  u32 rxmode, u32 chan)
2661 {
2662 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2663 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2664 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2665 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2666 	int rxfifosz = priv->plat->rx_fifo_size;
2667 	int txfifosz = priv->plat->tx_fifo_size;
2668 
2669 	if (rxfifosz == 0)
2670 		rxfifosz = priv->dma_cap.rx_fifo_size;
2671 	if (txfifosz == 0)
2672 		txfifosz = priv->dma_cap.tx_fifo_size;
2673 
2674 	/* Adjust for real per queue fifo size */
2675 	rxfifosz /= rx_channels_count;
2676 	txfifosz /= tx_channels_count;
2677 
2678 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2679 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2680 }
2681 
2682 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2683 {
2684 	int ret;
2685 
2686 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2687 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2688 	if (ret && (ret != -EINVAL)) {
2689 		stmmac_global_err(priv);
2690 		return true;
2691 	}
2692 
2693 	return false;
2694 }
2695 
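/**
 * stmmac_napi_check - check DMA channel status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * @dir: interrupt direction (RX, TX or both)
 * Description: reads the per-channel DMA interrupt status and, when RX or
 * TX work is pending, masks the corresponding DMA interrupt and schedules
 * the matching NAPI instance (the shared rxtx NAPI if an XSK pool is in use).
 */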
2696 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2697 {
2698 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2699 						 &priv->xstats, chan, dir);
2700 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2701 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2702 	struct stmmac_channel *ch = &priv->channel[chan];
2703 	struct napi_struct *rx_napi;
2704 	struct napi_struct *tx_napi;
2705 	unsigned long flags;
2706 
2707 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2708 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2709 
2710 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2711 		if (napi_schedule_prep(rx_napi)) {
2712 			spin_lock_irqsave(&ch->lock, flags);
2713 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2714 			spin_unlock_irqrestore(&ch->lock, flags);
2715 			__napi_schedule(rx_napi);
2716 		}
2717 	}
2718 
2719 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2720 		if (napi_schedule_prep(tx_napi)) {
2721 			spin_lock_irqsave(&ch->lock, flags);
2722 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2723 			spin_unlock_irqrestore(&ch->lock, flags);
2724 			__napi_schedule(tx_napi);
2725 		}
2726 	}
2727 
2728 	return status;
2729 }
2730 
2731 /**
2732  * stmmac_dma_interrupt - DMA ISR
2733  * @priv: driver private structure
2734  * Description: this is the DMA ISR. It is called by the main ISR.
2735  * It invokes the dwmac DMA routine and schedules the NAPI poll method
2736  * when there is work that can be done.
2737  */
2738 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2739 {
2740 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2741 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2742 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2743 				tx_channel_count : rx_channel_count;
2744 	u32 chan;
2745 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2746 
2747 	/* Make sure we never check beyond our status buffer. */
2748 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2749 		channels_to_check = ARRAY_SIZE(status);
2750 
2751 	for (chan = 0; chan < channels_to_check; chan++)
2752 		status[chan] = stmmac_napi_check(priv, chan,
2753 						 DMA_DIR_RXTX);
2754 
2755 	for (chan = 0; chan < tx_channel_count; chan++) {
2756 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2757 			/* Try to bump up the dma threshold on this failure */
2758 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2759 			    (tc <= 256)) {
2760 				tc += 64;
2761 				if (priv->plat->force_thresh_dma_mode)
2762 					stmmac_set_dma_operation_mode(priv,
2763 								      tc,
2764 								      tc,
2765 								      chan);
2766 				else
2767 					stmmac_set_dma_operation_mode(priv,
2768 								    tc,
2769 								    SF_DMA_MODE,
2770 								    chan);
2771 				priv->xstats.threshold = tc;
2772 			}
2773 		} else if (unlikely(status[chan] == tx_hard_error)) {
2774 			stmmac_tx_err(priv, chan);
2775 		}
2776 	}
2777 }
2778 
2779 /**
2780  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2781  * @priv: driver private structure
2782  * Description: this masks the MMC irq; the counters are managed in SW.
2783  */
2784 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2785 {
2786 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2787 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2788 
2789 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2790 
2791 	if (priv->dma_cap.rmon) {
2792 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2793 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2794 	} else
2795 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2796 }
2797 
2798 /**
2799  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2800  * @priv: driver private structure
2801  * Description:
2802  *  new GMAC chip generations have a new register to indicate the
2803  *  presence of the optional feature/functions.
2804  *  This can also be used to override the value passed through the
2805  *  platform, which is still necessary for old MAC10/100 and GMAC chips.
2806  */
2807 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2808 {
2809 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2810 }
2811 
2812 /**
2813  * stmmac_check_ether_addr - check if the MAC addr is valid
2814  * @priv: driver private structure
2815  * Description:
2816  * it verifies whether the MAC address is valid; if it is not, a random
2817  * MAC address is generated.
2818  */
2819 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2820 {
2821 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2822 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2823 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2824 			eth_hw_addr_random(priv->dev);
2825 		dev_info(priv->device, "device MAC address %pM\n",
2826 			 priv->dev->dev_addr);
2827 	}
2828 }
2829 
2830 /**
2831  * stmmac_init_dma_engine - DMA init.
2832  * @priv: driver private structure
2833  * Description:
2834  * It inits the DMA by invoking the specific MAC/GMAC callback.
2835  * Some DMA parameters can be passed from the platform;
2836  * if they are not passed, a default is kept for the MAC or GMAC.
2837  */
2838 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2839 {
2840 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2841 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2842 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2843 	struct stmmac_rx_queue *rx_q;
2844 	struct stmmac_tx_queue *tx_q;
2845 	u32 chan = 0;
2846 	int atds = 0;
2847 	int ret = 0;
2848 
2849 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2850 		dev_err(priv->device, "Invalid DMA configuration\n");
2851 		return -EINVAL;
2852 	}
2853 
2854 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2855 		atds = 1;
2856 
2857 	ret = stmmac_reset(priv, priv->ioaddr);
2858 	if (ret) {
2859 		dev_err(priv->device, "Failed to reset the dma\n");
2860 		return ret;
2861 	}
2862 
2863 	/* DMA Configuration */
2864 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2865 
2866 	if (priv->plat->axi)
2867 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2868 
2869 	/* DMA CSR Channel configuration */
2870 	for (chan = 0; chan < dma_csr_ch; chan++)
2871 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2872 
2873 	/* DMA RX Channel Configuration */
2874 	for (chan = 0; chan < rx_channels_count; chan++) {
2875 		rx_q = &priv->rx_queue[chan];
2876 
2877 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2878 				    rx_q->dma_rx_phy, chan);
2879 
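		/* The RX tail pointer covers only the descriptors whose
		 * buffers have actually been allocated; an XSK pool may be
		 * only partially (or not at all) populated at this point.
		 */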
2880 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2881 				     (rx_q->buf_alloc_num *
2882 				      sizeof(struct dma_desc));
2883 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2884 				       rx_q->rx_tail_addr, chan);
2885 	}
2886 
2887 	/* DMA TX Channel Configuration */
2888 	for (chan = 0; chan < tx_channels_count; chan++) {
2889 		tx_q = &priv->tx_queue[chan];
2890 
2891 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2892 				    tx_q->dma_tx_phy, chan);
2893 
2894 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2895 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2896 				       tx_q->tx_tail_addr, chan);
2897 	}
2898 
2899 	return ret;
2900 }
2901 
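/* (Re)arm the per-queue TX coalescing timer. */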
2902 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2903 {
2904 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2905 
2906 	hrtimer_start(&tx_q->txtimer,
2907 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2908 		      HRTIMER_MODE_REL);
2909 }
2910 
2911 /**
2912  * stmmac_tx_timer - mitigation sw timer for tx.
2913  * @t: pointer to the expired hrtimer (embedded in the TX queue)
2914  * Description:
2915  * This is the timer handler used to schedule the NAPI context that runs stmmac_tx_clean.
2916  */
2917 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2918 {
2919 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2920 	struct stmmac_priv *priv = tx_q->priv_data;
2921 	struct stmmac_channel *ch;
2922 	struct napi_struct *napi;
2923 
2924 	ch = &priv->channel[tx_q->queue_index];
2925 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2926 
2927 	if (likely(napi_schedule_prep(napi))) {
2928 		unsigned long flags;
2929 
2930 		spin_lock_irqsave(&ch->lock, flags);
2931 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2932 		spin_unlock_irqrestore(&ch->lock, flags);
2933 		__napi_schedule(napi);
2934 	}
2935 
2936 	return HRTIMER_NORESTART;
2937 }
2938 
2939 /**
2940  * stmmac_init_coalesce - init mitigation options.
2941  * @priv: driver private structure
2942  * Description:
2943  * This inits the coalesce parameters: i.e. timer rate,
2944  * timer handler and default threshold used for enabling the
2945  * interrupt on completion bit.
2946  */
2947 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2948 {
2949 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2950 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2951 	u32 chan;
2952 
2953 	for (chan = 0; chan < tx_channel_count; chan++) {
2954 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2955 
2956 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2957 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2958 
2959 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2960 		tx_q->txtimer.function = stmmac_tx_timer;
2961 	}
2962 
2963 	for (chan = 0; chan < rx_channel_count; chan++)
2964 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
2965 }
2966 
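/**
 * stmmac_set_rings_length - program the RX/TX descriptor ring lengths
 * @priv: driver private structure
 */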
2967 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2968 {
2969 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2970 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2971 	u32 chan;
2972 
2973 	/* set TX ring length */
2974 	for (chan = 0; chan < tx_channels_count; chan++)
2975 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2976 				       (priv->dma_tx_size - 1), chan);
2977 
2978 	/* set RX ring length */
2979 	for (chan = 0; chan < rx_channels_count; chan++)
2980 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2981 				       (priv->dma_rx_size - 1), chan);
2982 }
2983 
2984 /**
2985  *  stmmac_set_tx_queue_weight - Set TX queue weight
2986  *  @priv: driver private structure
2987  *  Description: It is used for setting the TX queue weights
2988  */
2989 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2990 {
2991 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2992 	u32 weight;
2993 	u32 queue;
2994 
2995 	for (queue = 0; queue < tx_queues_count; queue++) {
2996 		weight = priv->plat->tx_queues_cfg[queue].weight;
2997 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2998 	}
2999 }
3000 
3001 /**
3002  *  stmmac_configure_cbs - Configure CBS in TX queue
3003  *  @priv: driver private structure
3004  *  Description: It is used for configuring CBS in AVB TX queues
3005  */
3006 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3007 {
3008 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3009 	u32 mode_to_use;
3010 	u32 queue;
3011 
3012 	/* queue 0 is reserved for legacy traffic */
3013 	for (queue = 1; queue < tx_queues_count; queue++) {
3014 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3015 		if (mode_to_use == MTL_QUEUE_DCB)
3016 			continue;
3017 
3018 		stmmac_config_cbs(priv, priv->hw,
3019 				priv->plat->tx_queues_cfg[queue].send_slope,
3020 				priv->plat->tx_queues_cfg[queue].idle_slope,
3021 				priv->plat->tx_queues_cfg[queue].high_credit,
3022 				priv->plat->tx_queues_cfg[queue].low_credit,
3023 				queue);
3024 	}
3025 }
3026 
3027 /**
3028  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3029  *  @priv: driver private structure
3030  *  Description: It is used for mapping RX queues to RX dma channels
3031  */
3032 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3033 {
3034 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3035 	u32 queue;
3036 	u32 chan;
3037 
3038 	for (queue = 0; queue < rx_queues_count; queue++) {
3039 		chan = priv->plat->rx_queues_cfg[queue].chan;
3040 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3041 	}
3042 }
3043 
3044 /**
3045  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3046  *  @priv: driver private structure
3047  *  Description: It is used for configuring the RX Queue Priority
3048  */
3049 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3050 {
3051 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3052 	u32 queue;
3053 	u32 prio;
3054 
3055 	for (queue = 0; queue < rx_queues_count; queue++) {
3056 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3057 			continue;
3058 
3059 		prio = priv->plat->rx_queues_cfg[queue].prio;
3060 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3061 	}
3062 }
3063 
3064 /**
3065  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3066  *  @priv: driver private structure
3067  *  Description: It is used for configuring the TX Queue Priority
3068  */
3069 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3070 {
3071 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3072 	u32 queue;
3073 	u32 prio;
3074 
3075 	for (queue = 0; queue < tx_queues_count; queue++) {
3076 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3077 			continue;
3078 
3079 		prio = priv->plat->tx_queues_cfg[queue].prio;
3080 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3081 	}
3082 }
3083 
3084 /**
3085  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3086  *  @priv: driver private structure
3087  *  Description: It is used for configuring the RX queue routing
3088  */
3089 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3090 {
3091 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3092 	u32 queue;
3093 	u8 packet;
3094 
3095 	for (queue = 0; queue < rx_queues_count; queue++) {
3096 		/* no specific packet type routing specified for the queue */
3097 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3098 			continue;
3099 
3100 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3101 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3102 	}
3103 }
3104 
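/**
 * stmmac_mac_config_rss - configure Receive Side Scaling
 * @priv: driver private structure
 * Description: RSS is enabled only when both the HW capability and the
 * platform allow it and NETIF_F_RXHASH is set on the netdev.
 */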
3105 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3106 {
3107 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3108 		priv->rss.enable = false;
3109 		return;
3110 	}
3111 
3112 	if (priv->dev->features & NETIF_F_RXHASH)
3113 		priv->rss.enable = true;
3114 	else
3115 		priv->rss.enable = false;
3116 
3117 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3118 			     priv->plat->rx_queues_to_use);
3119 }
3120 
3121 /**
3122  *  stmmac_mtl_configuration - Configure MTL
3123  *  @priv: driver private structure
3124  *  Description: It is used for configuring MTL
3125  */
3126 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3127 {
3128 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3129 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3130 
3131 	if (tx_queues_count > 1)
3132 		stmmac_set_tx_queue_weight(priv);
3133 
3134 	/* Configure MTL RX algorithms */
3135 	if (rx_queues_count > 1)
3136 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3137 				priv->plat->rx_sched_algorithm);
3138 
3139 	/* Configure MTL TX algorithms */
3140 	if (tx_queues_count > 1)
3141 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3142 				priv->plat->tx_sched_algorithm);
3143 
3144 	/* Configure CBS in AVB TX queues */
3145 	if (tx_queues_count > 1)
3146 		stmmac_configure_cbs(priv);
3147 
3148 	/* Map RX MTL to DMA channels */
3149 	stmmac_rx_queue_dma_chan_map(priv);
3150 
3151 	/* Enable MAC RX Queues */
3152 	stmmac_mac_enable_rx_queues(priv);
3153 
3154 	/* Set RX priorities */
3155 	if (rx_queues_count > 1)
3156 		stmmac_mac_config_rx_queues_prio(priv);
3157 
3158 	/* Set TX priorities */
3159 	if (tx_queues_count > 1)
3160 		stmmac_mac_config_tx_queues_prio(priv);
3161 
3162 	/* Set RX routing */
3163 	if (rx_queues_count > 1)
3164 		stmmac_mac_config_rx_queues_routing(priv);
3165 
3166 	/* Receive Side Scaling */
3167 	if (rx_queues_count > 1)
3168 		stmmac_mac_config_rss(priv);
3169 }
3170 
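/**
 * stmmac_safety_feat_configuration - enable the HW safety features
 * @priv: driver private structure
 * Description: enables the Automotive Safety Package features when the HW
 * advertises support (dma_cap.asp).
 */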
3171 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3172 {
3173 	if (priv->dma_cap.asp) {
3174 		netdev_info(priv->dev, "Enabling Safety Features\n");
3175 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3176 					  priv->plat->safety_feat_cfg);
3177 	} else {
3178 		netdev_info(priv->dev, "No Safety Features support found\n");
3179 	}
3180 }
3181 
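/**
 * stmmac_fpe_start_wq - create the Frame Preemption (FPE) workqueue
 * @priv: driver private structure
 * Description: creates the single-threaded workqueue used for the FPE
 * handshake work. Returns 0 on success or -ENOMEM on failure.
 */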
3182 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3183 {
3184 	char *name;
3185 
3186 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3187 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3188 
3189 	name = priv->wq_name;
3190 	sprintf(name, "%s-fpe", priv->dev->name);
3191 
3192 	priv->fpe_wq = create_singlethread_workqueue(name);
3193 	if (!priv->fpe_wq) {
3194 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3195 
3196 		return -ENOMEM;
3197 	}
3198 	netdev_info(priv->dev, "FPE workqueue start");
3199 
3200 	return 0;
3201 }
3202 
3203 /**
3204  * stmmac_hw_setup - setup mac in a usable state.
3205  *  @dev: pointer to the device structure.
3206  *  @init_ptp: initialize PTP if set
3207  *  Description:
3208  *  this is the main function to setup the HW in a usable state: the DMA
3209  *  engine is reset, the core registers are configured (e.g. AXI,
3210  *  Checksum features, timers) and the DMA is ready to start receiving
3211  *  and transmitting.
3212  *  Return value:
3213  *  0 on success and an appropriate negative error code, as defined in
3214  *  errno.h, on failure.
3215  */
3216 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3217 {
3218 	struct stmmac_priv *priv = netdev_priv(dev);
3219 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3220 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3221 	bool sph_en;
3222 	u32 chan;
3223 	int ret;
3224 
3225 	/* DMA initialization and SW reset */
3226 	ret = stmmac_init_dma_engine(priv);
3227 	if (ret < 0) {
3228 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3229 			   __func__);
3230 		return ret;
3231 	}
3232 
3233 	/* Copy the MAC addr into the HW  */
3234 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3235 
3236 	/* PS and related bits will be programmed according to the speed */
3237 	if (priv->hw->pcs) {
3238 		int speed = priv->plat->mac_port_sel_speed;
3239 
3240 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3241 		    (speed == SPEED_1000)) {
3242 			priv->hw->ps = speed;
3243 		} else {
3244 			dev_warn(priv->device, "invalid port speed\n");
3245 			priv->hw->ps = 0;
3246 		}
3247 	}
3248 
3249 	/* Initialize the MAC Core */
3250 	stmmac_core_init(priv, priv->hw, dev);
3251 
3252 	/* Initialize MTL*/
3253 	stmmac_mtl_configuration(priv);
3254 
3255 	/* Initialize Safety Features */
3256 	stmmac_safety_feat_configuration(priv);
3257 
3258 	ret = stmmac_rx_ipc(priv, priv->hw);
3259 	if (!ret) {
3260 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3261 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3262 		priv->hw->rx_csum = 0;
3263 	}
3264 
3265 	/* Enable the MAC Rx/Tx */
3266 	stmmac_mac_set(priv, priv->ioaddr, true);
3267 
3268 	/* Set the HW DMA mode and the COE */
3269 	stmmac_dma_operation_mode(priv);
3270 
3271 	stmmac_mmc_setup(priv);
3272 
3273 	if (init_ptp) {
3274 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3275 		if (ret < 0)
3276 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
3277 
3278 		ret = stmmac_init_ptp(priv);
3279 		if (ret == -EOPNOTSUPP)
3280 			netdev_warn(priv->dev, "PTP not supported by HW\n");
3281 		else if (ret)
3282 			netdev_warn(priv->dev, "PTP init failed\n");
3283 	}
3284 
3285 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3286 
3287 	/* Convert the timer from msec to usec */
3288 	if (!priv->tx_lpi_timer)
3289 		priv->tx_lpi_timer = eee_timer * 1000;
3290 
3291 	if (priv->use_riwt) {
3292 		u32 queue;
3293 
3294 		for (queue = 0; queue < rx_cnt; queue++) {
3295 			if (!priv->rx_riwt[queue])
3296 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3297 
3298 			stmmac_rx_watchdog(priv, priv->ioaddr,
3299 					   priv->rx_riwt[queue], queue);
3300 		}
3301 	}
3302 
3303 	if (priv->hw->pcs)
3304 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3305 
3306 	/* set TX and RX rings length */
3307 	stmmac_set_rings_length(priv);
3308 
3309 	/* Enable TSO */
3310 	if (priv->tso) {
3311 		for (chan = 0; chan < tx_cnt; chan++) {
3312 			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3313 
3314 			/* TSO and TBS cannot co-exist */
3315 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3316 				continue;
3317 
3318 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3319 		}
3320 	}
3321 
3322 	/* Enable Split Header */
3323 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3324 	for (chan = 0; chan < rx_cnt; chan++)
3325 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3326 
3328 	/* VLAN Tag Insertion */
3329 	if (priv->dma_cap.vlins)
3330 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3331 
3332 	/* TBS */
3333 	for (chan = 0; chan < tx_cnt; chan++) {
3334 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3335 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3336 
3337 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3338 	}
3339 
3340 	/* Configure real RX and TX queues */
3341 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3342 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3343 
3344 	/* Start the ball rolling... */
3345 	stmmac_start_all_dma(priv);
3346 
3347 	if (priv->dma_cap.fpesel) {
3348 		stmmac_fpe_start_wq(priv);
3349 
3350 		if (priv->plat->fpe_cfg->enable)
3351 			stmmac_fpe_handshake(priv, true);
3352 	}
3353 
3354 	return 0;
3355 }
3356 
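/* Undo the PTP reference clock setup done in stmmac_hw_setup(). */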
3357 static void stmmac_hw_teardown(struct net_device *dev)
3358 {
3359 	struct stmmac_priv *priv = netdev_priv(dev);
3360 
3361 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3362 }
3363 
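/**
 * stmmac_free_irq - free the IRQs requested so far
 * @dev: net device structure
 * @irq_err: stage reached by the IRQ request helpers (or REQ_IRQ_ERR_ALL)
 * @irq_idx: number of per-queue vectors already requested at that stage
 * Description: walks the request stages in reverse order (falling through
 * the cases) so it can be used both on the error path of the IRQ request
 * helpers and on a full teardown.
 */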
3364 static void stmmac_free_irq(struct net_device *dev,
3365 			    enum request_irq_err irq_err, int irq_idx)
3366 {
3367 	struct stmmac_priv *priv = netdev_priv(dev);
3368 	int j;
3369 
3370 	switch (irq_err) {
3371 	case REQ_IRQ_ERR_ALL:
3372 		irq_idx = priv->plat->tx_queues_to_use;
3373 		fallthrough;
3374 	case REQ_IRQ_ERR_TX:
3375 		for (j = irq_idx - 1; j >= 0; j--) {
3376 			if (priv->tx_irq[j] > 0) {
3377 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3378 				free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
3379 			}
3380 		}
3381 		irq_idx = priv->plat->rx_queues_to_use;
3382 		fallthrough;
3383 	case REQ_IRQ_ERR_RX:
3384 		for (j = irq_idx - 1; j >= 0; j--) {
3385 			if (priv->rx_irq[j] > 0) {
3386 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3387 				free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
3388 			}
3389 		}
3390 
3391 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3392 			free_irq(priv->sfty_ue_irq, dev);
3393 		fallthrough;
3394 	case REQ_IRQ_ERR_SFTY_UE:
3395 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3396 			free_irq(priv->sfty_ce_irq, dev);
3397 		fallthrough;
3398 	case REQ_IRQ_ERR_SFTY_CE:
3399 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3400 			free_irq(priv->lpi_irq, dev);
3401 		fallthrough;
3402 	case REQ_IRQ_ERR_LPI:
3403 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3404 			free_irq(priv->wol_irq, dev);
3405 		fallthrough;
3406 	case REQ_IRQ_ERR_WOL:
3407 		free_irq(dev->irq, dev);
3408 		fallthrough;
3409 	case REQ_IRQ_ERR_MAC:
3410 	case REQ_IRQ_ERR_NO:
3411 		/* If MAC IRQ request error, no more IRQ to free */
3412 		break;
3413 	}
3414 }
3415 
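/* MSI mode: request one vector per RX/TX queue plus the MAC, WoL, LPI and
 * safety lines, and spread the per-queue vectors across the online CPUs.
 */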
3416 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3417 {
3418 	struct stmmac_priv *priv = netdev_priv(dev);
3419 	enum request_irq_err irq_err;
3420 	cpumask_t cpu_mask;
3421 	int irq_idx = 0;
3422 	char *int_name;
3423 	int ret;
3424 	int i;
3425 
3426 	/* For common interrupt */
3427 	int_name = priv->int_name_mac;
3428 	sprintf(int_name, "%s:%s", dev->name, "mac");
3429 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3430 			  0, int_name, dev);
3431 	if (unlikely(ret < 0)) {
3432 		netdev_err(priv->dev,
3433 			   "%s: alloc mac MSI %d (error: %d)\n",
3434 			   __func__, dev->irq, ret);
3435 		irq_err = REQ_IRQ_ERR_MAC;
3436 		goto irq_error;
3437 	}
3438 
3439 	/* Request the Wake IRQ in case another line
3440 	 * is used for WoL
3441 	 */
3442 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3443 		int_name = priv->int_name_wol;
3444 		sprintf(int_name, "%s:%s", dev->name, "wol");
3445 		ret = request_irq(priv->wol_irq,
3446 				  stmmac_mac_interrupt,
3447 				  0, int_name, dev);
3448 		if (unlikely(ret < 0)) {
3449 			netdev_err(priv->dev,
3450 				   "%s: alloc wol MSI %d (error: %d)\n",
3451 				   __func__, priv->wol_irq, ret);
3452 			irq_err = REQ_IRQ_ERR_WOL;
3453 			goto irq_error;
3454 		}
3455 	}
3456 
3457 	/* Request the LPI IRQ in case another line
3458 	 * is used for LPI
3459 	 */
3460 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3461 		int_name = priv->int_name_lpi;
3462 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3463 		ret = request_irq(priv->lpi_irq,
3464 				  stmmac_mac_interrupt,
3465 				  0, int_name, dev);
3466 		if (unlikely(ret < 0)) {
3467 			netdev_err(priv->dev,
3468 				   "%s: alloc lpi MSI %d (error: %d)\n",
3469 				   __func__, priv->lpi_irq, ret);
3470 			irq_err = REQ_IRQ_ERR_LPI;
3471 			goto irq_error;
3472 		}
3473 	}
3474 
3475 	/* Request the Safety Feature Correctable Error line in
3476 	 * case another line is used
3477 	 */
3478 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3479 		int_name = priv->int_name_sfty_ce;
3480 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3481 		ret = request_irq(priv->sfty_ce_irq,
3482 				  stmmac_safety_interrupt,
3483 				  0, int_name, dev);
3484 		if (unlikely(ret < 0)) {
3485 			netdev_err(priv->dev,
3486 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3487 				   __func__, priv->sfty_ce_irq, ret);
3488 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3489 			goto irq_error;
3490 		}
3491 	}
3492 
3493 	/* Request the Safety Feature Uncorrectable Error line in
3494 	 * case another line is used
3495 	 */
3496 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3497 		int_name = priv->int_name_sfty_ue;
3498 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3499 		ret = request_irq(priv->sfty_ue_irq,
3500 				  stmmac_safety_interrupt,
3501 				  0, int_name, dev);
3502 		if (unlikely(ret < 0)) {
3503 			netdev_err(priv->dev,
3504 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3505 				   __func__, priv->sfty_ue_irq, ret);
3506 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3507 			goto irq_error;
3508 		}
3509 	}
3510 
3511 	/* Request Rx MSI irq */
3512 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3513 		if (i >= MTL_MAX_RX_QUEUES)
3514 			break;
3515 		if (priv->rx_irq[i] == 0)
3516 			continue;
3517 
3518 		int_name = priv->int_name_rx_irq[i];
3519 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3520 		ret = request_irq(priv->rx_irq[i],
3521 				  stmmac_msi_intr_rx,
3522 				  0, int_name, &priv->rx_queue[i]);
3523 		if (unlikely(ret < 0)) {
3524 			netdev_err(priv->dev,
3525 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3526 				   __func__, i, priv->rx_irq[i], ret);
3527 			irq_err = REQ_IRQ_ERR_RX;
3528 			irq_idx = i;
3529 			goto irq_error;
3530 		}
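		/* Spread the per-queue RX IRQs round-robin over the online
		 * CPUs. irq_set_affinity_hint() only installs a hint, so
		 * user space can still override the placement.
		 */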
3531 		cpumask_clear(&cpu_mask);
3532 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3533 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3534 	}
3535 
3536 	/* Request Tx MSI irq */
3537 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3538 		if (i >= MTL_MAX_TX_QUEUES)
3539 			break;
3540 		if (priv->tx_irq[i] == 0)
3541 			continue;
3542 
3543 		int_name = priv->int_name_tx_irq[i];
3544 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3545 		ret = request_irq(priv->tx_irq[i],
3546 				  stmmac_msi_intr_tx,
3547 				  0, int_name, &priv->tx_queue[i]);
3548 		if (unlikely(ret < 0)) {
3549 			netdev_err(priv->dev,
3550 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3551 				   __func__, i, priv->tx_irq[i], ret);
3552 			irq_err = REQ_IRQ_ERR_TX;
3553 			irq_idx = i;
3554 			goto irq_error;
3555 		}
3556 		cpumask_clear(&cpu_mask);
3557 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3558 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3559 	}
3560 
3561 	return 0;
3562 
3563 irq_error:
3564 	stmmac_free_irq(dev, irq_err, irq_idx);
3565 	return ret;
3566 }
3567 
3568 static int stmmac_request_irq_single(struct net_device *dev)
3569 {
3570 	struct stmmac_priv *priv = netdev_priv(dev);
3571 	enum request_irq_err irq_err;
3572 	int ret;
3573 
3574 	ret = request_irq(dev->irq, stmmac_interrupt,
3575 			  IRQF_SHARED, dev->name, dev);
3576 	if (unlikely(ret < 0)) {
3577 		netdev_err(priv->dev,
3578 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3579 			   __func__, dev->irq, ret);
3580 		irq_err = REQ_IRQ_ERR_MAC;
3581 		goto irq_error;
3582 	}
3583 
3584 	/* Request the Wake IRQ if a dedicated line
3585 	 * is used for WoL
3586 	 */
3587 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3588 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3589 				  IRQF_SHARED, dev->name, dev);
3590 		if (unlikely(ret < 0)) {
3591 			netdev_err(priv->dev,
3592 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3593 				   __func__, priv->wol_irq, ret);
3594 			irq_err = REQ_IRQ_ERR_WOL;
3595 			goto irq_error;
3596 		}
3597 	}
3598 
3599 	/* Request the LPI IRQ if a dedicated line is used for LPI */
3600 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3601 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3602 				  IRQF_SHARED, dev->name, dev);
3603 		if (unlikely(ret < 0)) {
3604 			netdev_err(priv->dev,
3605 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3606 				   __func__, priv->lpi_irq, ret);
3607 			irq_err = REQ_IRQ_ERR_LPI;
3608 			goto irq_error;
3609 		}
3610 	}
3611 
3612 	return 0;
3613 
3614 irq_error:
3615 	stmmac_free_irq(dev, irq_err, 0);
3616 	return ret;
3617 }
3618 
3619 static int stmmac_request_irq(struct net_device *dev)
3620 {
3621 	struct stmmac_priv *priv = netdev_priv(dev);
3622 	int ret;
3623 
3624 	/* Request the IRQ lines */
3625 	if (priv->plat->multi_msi_en)
3626 		ret = stmmac_request_irq_multi_msi(dev);
3627 	else
3628 		ret = stmmac_request_irq_single(dev);
3629 
3630 	return ret;
3631 }
3632 
3633 /**
3634  *  stmmac_open - open entry point of the driver
3635  *  @dev : pointer to the device structure.
3636  *  Description:
3637  *  This function is the open entry point of the driver.
3638  *  Return value:
3639  *  0 on success and a negative errno value on failure.
3641  */
3642 int stmmac_open(struct net_device *dev)
3643 {
3644 	struct stmmac_priv *priv = netdev_priv(dev);
3645 	int mode = priv->plat->phy_interface;
3646 	int bfsize = 0;
3647 	u32 chan;
3648 	int ret;
3649 
3650 	ret = pm_runtime_get_sync(priv->device);
3651 	if (ret < 0) {
3652 		pm_runtime_put_noidle(priv->device);
3653 		return ret;
3654 	}
3655 
3656 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3657 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3658 	    (!priv->hw->xpcs ||
3659 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3660 		ret = stmmac_init_phy(dev);
3661 		if (ret) {
3662 			netdev_err(priv->dev,
3663 				   "%s: Cannot attach to PHY (error: %d)\n",
3664 				   __func__, ret);
3665 			goto init_phy_error;
3666 		}
3667 	}
3668 
3669 	/* Extra statistics */
3670 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3671 	priv->xstats.threshold = tc;
3672 
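	/* Select the DMA RX buffer size: the 16 KiB bucket is used when the
	 * MTU calls for it and the descriptor mode supports it, otherwise
	 * the size is derived from the MTU and the current default.
	 */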
3673 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3674 	if (bfsize < 0)
3675 		bfsize = 0;
3676 
3677 	if (bfsize < BUF_SIZE_16KiB)
3678 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3679 
3680 	priv->dma_buf_sz = bfsize;
3681 	buf_sz = bfsize;
3682 
3683 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3684 
3685 	if (!priv->dma_tx_size)
3686 		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3687 	if (!priv->dma_rx_size)
3688 		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3689 
3690 	/* Check TBS early, before TX descriptor allocation */
3691 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3692 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3693 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3694 
3695 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3696 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3697 	}
3698 
3699 	ret = alloc_dma_desc_resources(priv);
3700 	if (ret < 0) {
3701 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3702 			   __func__);
3703 		goto dma_desc_error;
3704 	}
3705 
3706 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
3707 	if (ret < 0) {
3708 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3709 			   __func__);
3710 		goto init_error;
3711 	}
3712 
3713 	ret = stmmac_hw_setup(dev, true);
3714 	if (ret < 0) {
3715 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3716 		goto init_error;
3717 	}
3718 
3719 	stmmac_init_coalesce(priv);
3720 
3721 	phylink_start(priv->phylink);
3722 	/* We may have called phylink_speed_down before */
3723 	phylink_speed_up(priv->phylink);
3724 
3725 	ret = stmmac_request_irq(dev);
3726 	if (ret)
3727 		goto irq_error;
3728 
3729 	stmmac_enable_all_queues(priv);
3730 	netif_tx_start_all_queues(priv->dev);
3731 
3732 	return 0;
3733 
3734 irq_error:
3735 	phylink_stop(priv->phylink);
3736 
3737 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3738 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3739 
3740 	stmmac_hw_teardown(dev);
3741 init_error:
3742 	free_dma_desc_resources(priv);
3743 dma_desc_error:
3744 	phylink_disconnect_phy(priv->phylink);
3745 init_phy_error:
3746 	pm_runtime_put(priv->device);
3747 	return ret;
3748 }
3749 
3750 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3751 {
3752 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3753 
3754 	if (priv->fpe_wq)
3755 		destroy_workqueue(priv->fpe_wq);
3756 
3757 	netdev_info(priv->dev, "FPE workqueue stopped\n");
3758 }
3759 
3760 /**
3761  *  stmmac_release - close entry point of the driver
3762  *  @dev : device pointer.
3763  *  Description:
3764  *  This is the stop entry point of the driver.
3765  */
3766 int stmmac_release(struct net_device *dev)
3767 {
3768 	struct stmmac_priv *priv = netdev_priv(dev);
3769 	u32 chan;
3770 
3771 	if (device_may_wakeup(priv->device))
3772 		phylink_speed_down(priv->phylink, false);
3773 	/* Stop and disconnect the PHY */
3774 	phylink_stop(priv->phylink);
3775 	phylink_disconnect_phy(priv->phylink);
3776 
3777 	stmmac_disable_all_queues(priv);
3778 
3779 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3780 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3781 
3782 	/* Free the IRQ lines */
3783 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3784 
3785 	if (priv->eee_enabled) {
3786 		priv->tx_path_in_lpi_mode = false;
3787 		del_timer_sync(&priv->eee_ctrl_timer);
3788 	}
3789 
3790 	/* Stop TX/RX DMA and clear the descriptors */
3791 	stmmac_stop_all_dma(priv);
3792 
3793 	/* Release and free the Rx/Tx resources */
3794 	free_dma_desc_resources(priv);
3795 
3796 	/* Disable the MAC Rx/Tx */
3797 	stmmac_mac_set(priv, priv->ioaddr, false);
3798 
3799 	netif_carrier_off(dev);
3800 
3801 	stmmac_release_ptp(priv);
3802 
3803 	pm_runtime_put(priv->device);
3804 
3805 	if (priv->dma_cap.fpesel)
3806 		stmmac_fpe_stop_wq(priv);
3807 
3808 	return 0;
3809 }
3810 
3811 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3812 			       struct stmmac_tx_queue *tx_q)
3813 {
3814 	u16 tag = 0x0, inner_tag = 0x0;
3815 	u32 inner_type = 0x0;
3816 	struct dma_desc *p;
3817 
3818 	if (!priv->dma_cap.vlins)
3819 		return false;
3820 	if (!skb_vlan_tag_present(skb))
3821 		return false;
3822 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3823 		inner_tag = skb_vlan_tag_get(skb);
3824 		inner_type = STMMAC_VLAN_INSERT;
3825 	}
3826 
3827 	tag = skb_vlan_tag_get(skb);
3828 
3829 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3830 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3831 	else
3832 		p = &tx_q->dma_tx[tx_q->cur_tx];
3833 
3834 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3835 		return false;
3836 
3837 	stmmac_set_tx_owner(priv, p);
3838 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3839 	return true;
3840 }
3841 
3842 /**
3843  *  stmmac_tso_allocator - fill the TSO payload descriptors
3844  *  @priv: driver private structure
3845  *  @des: buffer start address
3846  *  @total_len: total length to fill in descriptors
3847  *  @last_segment: condition for the last descriptor
3848  *  @queue: TX queue index
3849  *  Description:
3850  *  This function fills descriptors and requests new ones according to the
3851  *  buffer length to fill
3852  */
3853 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3854 				 int total_len, bool last_segment, u32 queue)
3855 {
3856 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3857 	struct dma_desc *desc;
3858 	u32 buff_size;
3859 	int tmp_len;
3860 
3861 	tmp_len = total_len;
3862 
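	/* Split the payload into chunks of at most TSO_MAX_BUFF_SIZE bytes,
	 * one descriptor per chunk; e.g. a 32 KiB payload takes three
	 * descriptors (16383 + 16383 + 2 bytes).
	 */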
3863 	while (tmp_len > 0) {
3864 		dma_addr_t curr_addr;
3865 
3866 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3867 						priv->dma_tx_size);
3868 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3869 
3870 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3871 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3872 		else
3873 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3874 
3875 		curr_addr = des + (total_len - tmp_len);
3876 		if (priv->dma_cap.addr64 <= 32)
3877 			desc->des0 = cpu_to_le32(curr_addr);
3878 		else
3879 			stmmac_set_desc_addr(priv, desc, curr_addr);
3880 
3881 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3882 			    TSO_MAX_BUFF_SIZE : tmp_len;
3883 
3884 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3885 				0, 1,
3886 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3887 				0, 0);
3888 
3889 		tmp_len -= TSO_MAX_BUFF_SIZE;
3890 	}
3891 }
3892 
3893 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3894 {
3895 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3896 	int desc_size;
3897 
3898 	if (likely(priv->extend_desc))
3899 		desc_size = sizeof(struct dma_extended_desc);
3900 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3901 		desc_size = sizeof(struct dma_edesc);
3902 	else
3903 		desc_size = sizeof(struct dma_desc);
3904 
3905 	/* The own bit must be the latest setting done when preparing the
3906 	 * descriptor, and a barrier is needed to make sure that
3907 	 * everything is coherent before granting the DMA engine.
3908 	 */
3909 	wmb();
3910 
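	/* The tail pointer is the bus address of the descriptor right after
	 * the last one prepared: ring base + cur_tx * descriptor size.
	 */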
3911 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3912 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3913 }
3914 
3915 /**
3916  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3917  *  @skb : the socket buffer
3918  *  @dev : device pointer
3919  *  Description: this is the transmit function that is called on TSO frames
3920  *  (support available on GMAC4 and newer chips).
3921  *  The diagram below shows the ring programming for TSO frames:
3922  *
3923  *  First Descriptor
3924  *   --------
3925  *   | DES0 |---> buffer1 = L2/L3/L4 header
3926  *   | DES1 |---> TCP Payload (can continue on next descr...)
3927  *   | DES2 |---> buffer 1 and 2 len
3928  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3929  *   --------
3930  *	|
3931  *     ...
3932  *	|
3933  *   --------
3934  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3935  *   | DES1 | --|
3936  *   | DES2 | --> buffer 1 and 2 len
3937  *   | DES3 |
3938  *   --------
3939  *
3940  * The MSS is fixed while TSO is enabled, so the TDES3 context field is only programmed when the MSS changes.
3941  */
3942 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3943 {
3944 	struct dma_desc *desc, *first, *mss_desc = NULL;
3945 	struct stmmac_priv *priv = netdev_priv(dev);
3946 	int nfrags = skb_shinfo(skb)->nr_frags;
3947 	u32 queue = skb_get_queue_mapping(skb);
3948 	unsigned int first_entry, tx_packets;
3949 	int tmp_pay_len = 0, first_tx;
3950 	struct stmmac_tx_queue *tx_q;
3951 	bool has_vlan, set_ic;
3952 	u8 proto_hdr_len, hdr;
3953 	u32 pay_len, mss;
3954 	dma_addr_t des;
3955 	int i;
3956 
3957 	tx_q = &priv->tx_queue[queue];
3958 	first_tx = tx_q->cur_tx;
3959 
3960 	/* Compute header lengths */
3961 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3962 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3963 		hdr = sizeof(struct udphdr);
3964 	} else {
3965 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3966 		hdr = tcp_hdrlen(skb);
3967 	}
3968 
3969 	/* Descriptor availability based on the threshold should be safe enough */
3970 	if (unlikely(stmmac_tx_avail(priv, queue) <
3971 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3972 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3973 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3974 								queue));
3975 			/* This is a hard error, log it. */
3976 			netdev_err(priv->dev,
3977 				   "%s: Tx Ring full when queue awake\n",
3978 				   __func__);
3979 		}
3980 		return NETDEV_TX_BUSY;
3981 	}
3982 
3983 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3984 
3985 	mss = skb_shinfo(skb)->gso_size;
3986 
3987 	/* set new MSS value if needed */
3988 	if (mss != tx_q->mss) {
3989 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3990 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3991 		else
3992 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3993 
3994 		stmmac_set_mss(priv, mss_desc, mss);
3995 		tx_q->mss = mss;
3996 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3997 						priv->dma_tx_size);
3998 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3999 	}
4000 
4001 	if (netif_msg_tx_queued(priv)) {
4002 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4003 			__func__, hdr, proto_hdr_len, pay_len, mss);
4004 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4005 			skb->data_len);
4006 	}
4007 
4008 	/* Check if VLAN can be inserted by HW */
4009 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4010 
4011 	first_entry = tx_q->cur_tx;
4012 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4013 
4014 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4015 		desc = &tx_q->dma_entx[first_entry].basic;
4016 	else
4017 		desc = &tx_q->dma_tx[first_entry];
4018 	first = desc;
4019 
4020 	if (has_vlan)
4021 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4022 
4023 	/* first descriptor: fill Headers on Buf1 */
4024 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4025 			     DMA_TO_DEVICE);
4026 	if (dma_mapping_error(priv->device, des))
4027 		goto dma_map_err;
4028 
4029 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4030 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4031 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4032 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4033 
4034 	if (priv->dma_cap.addr64 <= 32) {
4035 		first->des0 = cpu_to_le32(des);
4036 
4037 		/* Fill start of payload in buff2 of first descriptor */
4038 		if (pay_len)
4039 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4040 
4041 		/* If needed take extra descriptors to fill the remaining payload */
4042 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4043 	} else {
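		/* With more than 32 address bits there is no second buffer
		 * pointer in the descriptor, so the first descriptor carries
		 * only the header and the whole payload goes into the
		 * descriptors allocated below.
		 */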
4044 		stmmac_set_desc_addr(priv, first, des);
4045 		tmp_pay_len = pay_len;
4046 		des += proto_hdr_len;
4047 		pay_len = 0;
4048 	}
4049 
4050 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4051 
4052 	/* Prepare fragments */
4053 	for (i = 0; i < nfrags; i++) {
4054 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4055 
4056 		des = skb_frag_dma_map(priv->device, frag, 0,
4057 				       skb_frag_size(frag),
4058 				       DMA_TO_DEVICE);
4059 		if (dma_mapping_error(priv->device, des))
4060 			goto dma_map_err;
4061 
4062 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4063 				     (i == nfrags - 1), queue);
4064 
4065 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4066 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4067 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4068 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4069 	}
4070 
4071 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4072 
4073 	/* Only the last descriptor gets to point to the skb. */
4074 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4075 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4076 
4077 	/* Manage tx mitigation */
4078 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4079 	tx_q->tx_count_frames += tx_packets;
4080 
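	/* Decide whether this frame should raise a TX completion interrupt:
	 * always when a HW timestamp is requested, otherwise roughly once
	 * every tx_coal_frames packets. With frame coalescing disabled the
	 * cleanup relies on the TX coalescing timer instead.
	 */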
4081 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4082 		set_ic = true;
4083 	else if (!priv->tx_coal_frames[queue])
4084 		set_ic = false;
4085 	else if (tx_packets > priv->tx_coal_frames[queue])
4086 		set_ic = true;
4087 	else if ((tx_q->tx_count_frames %
4088 		  priv->tx_coal_frames[queue]) < tx_packets)
4089 		set_ic = true;
4090 	else
4091 		set_ic = false;
4092 
4093 	if (set_ic) {
4094 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4095 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4096 		else
4097 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4098 
4099 		tx_q->tx_count_frames = 0;
4100 		stmmac_set_tx_ic(priv, desc);
4101 		priv->xstats.tx_set_ic_bit++;
4102 	}
4103 
4104 	/* We've used all descriptors we need for this skb, however,
4105 	 * advance cur_tx so that it references a fresh descriptor.
4106 	 * ndo_start_xmit will fill this descriptor the next time it's
4107 	 * called and stmmac_tx_clean may clean up to this descriptor.
4108 	 */
4109 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4110 
4111 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4112 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4113 			  __func__);
4114 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4115 	}
4116 
4117 	dev->stats.tx_bytes += skb->len;
4118 	priv->xstats.tx_tso_frames++;
4119 	priv->xstats.tx_tso_nfrags += nfrags;
4120 
4121 	if (priv->sarc_type)
4122 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4123 
4124 	skb_tx_timestamp(skb);
4125 
4126 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4127 		     priv->hwts_tx_en)) {
4128 		/* declare that device is doing timestamping */
4129 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4130 		stmmac_enable_tx_timestamp(priv, first);
4131 	}
4132 
4133 	/* Complete the first descriptor before granting the DMA */
4134 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4135 			proto_hdr_len,
4136 			pay_len,
4137 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4138 			hdr / 4, (skb->len - proto_hdr_len));
4139 
4140 	/* If context desc is used to change MSS */
4141 	if (mss_desc) {
4142 		/* Make sure that first descriptor has been completely
4143 		 * written, including its own bit. This is because MSS is
4144 		 * actually before first descriptor, so we need to make
4145 		 * sure that MSS's own bit is the last thing written.
4146 		 */
4147 		dma_wmb();
4148 		stmmac_set_tx_owner(priv, mss_desc);
4149 	}
4150 
4151 	if (netif_msg_pktdata(priv)) {
4152 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4153 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4154 			tx_q->cur_tx, first, nfrags);
4155 		pr_info(">>> frame to be transmitted: ");
4156 		print_pkt(skb->data, skb_headlen(skb));
4157 	}
4158 
4159 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4160 
4161 	stmmac_flush_tx_descriptors(priv, queue);
4162 	stmmac_tx_timer_arm(priv, queue);
4163 
4164 	return NETDEV_TX_OK;
4165 
4166 dma_map_err:
4167 	dev_err(priv->device, "Tx dma map failed\n");
4168 	dev_kfree_skb(skb);
4169 	priv->dev->stats.tx_dropped++;
4170 	return NETDEV_TX_OK;
4171 }
4172 
4173 /**
4174  *  stmmac_xmit - Tx entry point of the driver
4175  *  @skb : the socket buffer
4176  *  @dev : device pointer
4177  *  Description : this is the tx entry point of the driver.
4178  *  It programs the chain or the ring and supports oversized frames
4179  *  and the SG feature.
4180  */
4181 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4182 {
4183 	unsigned int first_entry, tx_packets, enh_desc;
4184 	struct stmmac_priv *priv = netdev_priv(dev);
4185 	unsigned int nopaged_len = skb_headlen(skb);
4186 	int i, csum_insertion = 0, is_jumbo = 0;
4187 	u32 queue = skb_get_queue_mapping(skb);
4188 	int nfrags = skb_shinfo(skb)->nr_frags;
4189 	int gso = skb_shinfo(skb)->gso_type;
4190 	struct dma_edesc *tbs_desc = NULL;
4191 	struct dma_desc *desc, *first;
4192 	struct stmmac_tx_queue *tx_q;
4193 	bool has_vlan, set_ic;
4194 	int entry, first_tx;
4195 	dma_addr_t des;
4196 
4197 	tx_q = &priv->tx_queue[queue];
4198 	first_tx = tx_q->cur_tx;
4199 
4200 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4201 		stmmac_disable_eee_mode(priv);
4202 
4203 	/* Manage oversized TCP frames for GMAC4 device */
4204 	if (skb_is_gso(skb) && priv->tso) {
4205 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4206 			return stmmac_tso_xmit(skb, dev);
4207 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4208 			return stmmac_tso_xmit(skb, dev);
4209 	}
4210 
4211 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4212 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4213 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4214 								queue));
4215 			/* This is a hard error, log it. */
4216 			netdev_err(priv->dev,
4217 				   "%s: Tx Ring full when queue awake\n",
4218 				   __func__);
4219 		}
4220 		return NETDEV_TX_BUSY;
4221 	}
4222 
4223 	/* Check if VLAN can be inserted by HW */
4224 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4225 
4226 	entry = tx_q->cur_tx;
4227 	first_entry = entry;
4228 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4229 
4230 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4231 
4232 	if (likely(priv->extend_desc))
4233 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4234 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4235 		desc = &tx_q->dma_entx[entry].basic;
4236 	else
4237 		desc = tx_q->dma_tx + entry;
4238 
4239 	first = desc;
4240 
4241 	if (has_vlan)
4242 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4243 
4244 	enh_desc = priv->plat->enh_desc;
4245 	/* To program the descriptors according to the size of the frame */
4246 	if (enh_desc)
4247 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4248 
4249 	if (unlikely(is_jumbo)) {
4250 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4251 		if (unlikely(entry < 0) && (entry != -EINVAL))
4252 			goto dma_map_err;
4253 	}
4254 
4255 	for (i = 0; i < nfrags; i++) {
4256 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4257 		int len = skb_frag_size(frag);
4258 		bool last_segment = (i == (nfrags - 1));
4259 
4260 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4261 		WARN_ON(tx_q->tx_skbuff[entry]);
4262 
4263 		if (likely(priv->extend_desc))
4264 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4265 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4266 			desc = &tx_q->dma_entx[entry].basic;
4267 		else
4268 			desc = tx_q->dma_tx + entry;
4269 
4270 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4271 				       DMA_TO_DEVICE);
4272 		if (dma_mapping_error(priv->device, des))
4273 			goto dma_map_err; /* should reuse desc w/o issues */
4274 
4275 		tx_q->tx_skbuff_dma[entry].buf = des;
4276 
4277 		stmmac_set_desc_addr(priv, desc, des);
4278 
4279 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4280 		tx_q->tx_skbuff_dma[entry].len = len;
4281 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4282 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4283 
4284 		/* Prepare the descriptor and set the own bit too */
4285 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4286 				priv->mode, 1, last_segment, skb->len);
4287 	}
4288 
4289 	/* Only the last descriptor gets to point to the skb. */
4290 	tx_q->tx_skbuff[entry] = skb;
4291 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4292 
4293 	/* According to the coalesce parameter the IC bit for the latest
4294 	 * segment is reset and the timer re-started to clean the tx status.
4295 	 * This approach takes care of the fragments: desc is the first
4296 	 * element in case of no SG.
4297 	 */
4298 	tx_packets = (entry + 1) - first_tx;
4299 	tx_q->tx_count_frames += tx_packets;
4300 
4301 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4302 		set_ic = true;
4303 	else if (!priv->tx_coal_frames[queue])
4304 		set_ic = false;
4305 	else if (tx_packets > priv->tx_coal_frames[queue])
4306 		set_ic = true;
4307 	else if ((tx_q->tx_count_frames %
4308 		  priv->tx_coal_frames[queue]) < tx_packets)
4309 		set_ic = true;
4310 	else
4311 		set_ic = false;
4312 
4313 	if (set_ic) {
4314 		if (likely(priv->extend_desc))
4315 			desc = &tx_q->dma_etx[entry].basic;
4316 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4317 			desc = &tx_q->dma_entx[entry].basic;
4318 		else
4319 			desc = &tx_q->dma_tx[entry];
4320 
4321 		tx_q->tx_count_frames = 0;
4322 		stmmac_set_tx_ic(priv, desc);
4323 		priv->xstats.tx_set_ic_bit++;
4324 	}
4325 
4326 	/* We've used all descriptors we need for this skb, however,
4327 	 * advance cur_tx so that it references a fresh descriptor.
4328 	 * ndo_start_xmit will fill this descriptor the next time it's
4329 	 * called and stmmac_tx_clean may clean up to this descriptor.
4330 	 */
4331 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4332 	tx_q->cur_tx = entry;
4333 
4334 	if (netif_msg_pktdata(priv)) {
4335 		netdev_dbg(priv->dev,
4336 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4337 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4338 			   entry, first, nfrags);
4339 
4340 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4341 		print_pkt(skb->data, skb->len);
4342 	}
4343 
4344 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4345 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4346 			  __func__);
4347 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4348 	}
4349 
4350 	dev->stats.tx_bytes += skb->len;
4351 
4352 	if (priv->sarc_type)
4353 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4354 
4355 	skb_tx_timestamp(skb);
4356 
4357 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4358 	 * problems because all the descriptors are actually ready to be
4359 	 * passed to the DMA engine.
4360 	 */
4361 	if (likely(!is_jumbo)) {
4362 		bool last_segment = (nfrags == 0);
4363 
4364 		des = dma_map_single(priv->device, skb->data,
4365 				     nopaged_len, DMA_TO_DEVICE);
4366 		if (dma_mapping_error(priv->device, des))
4367 			goto dma_map_err;
4368 
4369 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4370 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4371 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4372 
4373 		stmmac_set_desc_addr(priv, first, des);
4374 
4375 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4376 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4377 
4378 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4379 			     priv->hwts_tx_en)) {
4380 			/* declare that device is doing timestamping */
4381 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4382 			stmmac_enable_tx_timestamp(priv, first);
4383 		}
4384 
4385 		/* Prepare the first descriptor setting the OWN bit too */
4386 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4387 				csum_insertion, priv->mode, 0, last_segment,
4388 				skb->len);
4389 	}
4390 
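	/* When TBS is enabled on this queue, program the launch time taken
	 * from skb->tstamp into the enhanced TX descriptor.
	 */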
4391 	if (tx_q->tbs & STMMAC_TBS_EN) {
4392 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4393 
4394 		tbs_desc = &tx_q->dma_entx[first_entry];
4395 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4396 	}
4397 
4398 	stmmac_set_tx_owner(priv, first);
4399 
4400 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4401 
4402 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4403 
4404 	stmmac_flush_tx_descriptors(priv, queue);
4405 	stmmac_tx_timer_arm(priv, queue);
4406 
4407 	return NETDEV_TX_OK;
4408 
4409 dma_map_err:
4410 	netdev_err(priv->dev, "Tx DMA map failed\n");
4411 	dev_kfree_skb(skb);
4412 	priv->dev->stats.tx_dropped++;
4413 	return NETDEV_TX_OK;
4414 }
4415 
4416 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4417 {
4418 	struct vlan_ethhdr *veth;
4419 	__be16 vlan_proto;
4420 	u16 vlanid;
4421 
4422 	veth = (struct vlan_ethhdr *)skb->data;
4423 	vlan_proto = veth->h_vlan_proto;
4424 
4425 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4426 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4427 	    (vlan_proto == htons(ETH_P_8021AD) &&
4428 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4429 		/* pop the vlan tag */
4430 		vlanid = ntohs(veth->h_vlan_TCI);
4431 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4432 		skb_pull(skb, VLAN_HLEN);
4433 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4434 	}
4435 }
4436 
4437 /**
4438  * stmmac_rx_refill - refill used skb preallocated buffers
4439  * @priv: driver private structure
4440  * @queue: RX queue index
4441  * Description : this refills the used RX buffers (page pool pages) for the
4442  * zero-copy based reception process.
4443  */
4444 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4445 {
4446 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4447 	int dirty = stmmac_rx_dirty(priv, queue);
4448 	unsigned int entry = rx_q->dirty_rx;
4449 
4450 	while (dirty-- > 0) {
4451 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4452 		struct dma_desc *p;
4453 		bool use_rx_wd;
4454 
4455 		if (priv->extend_desc)
4456 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4457 		else
4458 			p = rx_q->dma_rx + entry;
4459 
4460 		if (!buf->page) {
4461 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
4462 			if (!buf->page)
4463 				break;
4464 		}
4465 
4466 		if (priv->sph && !buf->sec_page) {
4467 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
4468 			if (!buf->sec_page)
4469 				break;
4470 
4471 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4472 		}
4473 
4474 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4475 
4476 		stmmac_set_desc_addr(priv, p, buf->addr);
4477 		if (priv->sph)
4478 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4479 		else
4480 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4481 		stmmac_refill_desc3(priv, rx_q, p);
4482 
4483 		rx_q->rx_count_frames++;
4484 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4485 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4486 			rx_q->rx_count_frames = 0;
4487 
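		/* use_rx_wd effectively asks the descriptor setup to leave
		 * completion signalling to the RX watchdog (RIWT) rather
		 * than a per-descriptor interrupt; it only takes effect
		 * when the watchdog is actually in use.
		 */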
4488 		use_rx_wd = !priv->rx_coal_frames[queue];
4489 		use_rx_wd |= rx_q->rx_count_frames > 0;
4490 		if (!priv->use_riwt)
4491 			use_rx_wd = false;
4492 
4493 		dma_wmb();
4494 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4495 
4496 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4497 	}
4498 	rx_q->dirty_rx = entry;
4499 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4500 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4501 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4502 }
4503 
4504 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4505 				       struct dma_desc *p,
4506 				       int status, unsigned int len)
4507 {
4508 	unsigned int plen = 0, hlen = 0;
4509 	int coe = priv->hw->rx_csum;
4510 
4511 	/* Not first descriptor, buffer is always zero */
4512 	if (priv->sph && len)
4513 		return 0;
4514 
4515 	/* First descriptor, get split header length */
4516 	stmmac_get_rx_header_len(priv, p, &hlen);
4517 	if (priv->sph && hlen) {
4518 		priv->xstats.rx_split_hdr_pkt_n++;
4519 		return hlen;
4520 	}
4521 
4522 	/* First descriptor, not last descriptor and not split header */
4523 	if (status & rx_not_ls)
4524 		return priv->dma_buf_sz;
4525 
4526 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4527 
4528 	/* First descriptor and last descriptor and not split header */
4529 	return min_t(unsigned int, priv->dma_buf_sz, plen);
4530 }
4531 
4532 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4533 				       struct dma_desc *p,
4534 				       int status, unsigned int len)
4535 {
4536 	int coe = priv->hw->rx_csum;
4537 	unsigned int plen = 0;
4538 
4539 	/* Not split header, buffer is not available */
4540 	if (!priv->sph)
4541 		return 0;
4542 
4543 	/* Not last descriptor */
4544 	if (status & rx_not_ls)
4545 		return priv->dma_buf_sz;
4546 
4547 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4548 
4549 	/* Last descriptor */
4550 	return plen - len;
4551 }
4552 
4553 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4554 				struct xdp_frame *xdpf, bool dma_map)
4555 {
4556 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4557 	unsigned int entry = tx_q->cur_tx;
4558 	struct dma_desc *tx_desc;
4559 	dma_addr_t dma_addr;
4560 	bool set_ic;
4561 
4562 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4563 		return STMMAC_XDP_CONSUMED;
4564 
4565 	if (likely(priv->extend_desc))
4566 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4567 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4568 		tx_desc = &tx_q->dma_entx[entry].basic;
4569 	else
4570 		tx_desc = tx_q->dma_tx + entry;
4571 
4572 	if (dma_map) {
4573 		dma_addr = dma_map_single(priv->device, xdpf->data,
4574 					  xdpf->len, DMA_TO_DEVICE);
4575 		if (dma_mapping_error(priv->device, dma_addr))
4576 			return STMMAC_XDP_CONSUMED;
4577 
4578 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4579 	} else {
4580 		struct page *page = virt_to_page(xdpf->data);
4581 
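		/* For XDP_TX the frame still lives in its page_pool page:
		 * the packet data sits behind the xdp_frame struct and the
		 * remaining headroom, so offset the pool DMA address by both.
		 */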
4582 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4583 			   xdpf->headroom;
4584 		dma_sync_single_for_device(priv->device, dma_addr,
4585 					   xdpf->len, DMA_BIDIRECTIONAL);
4586 
4587 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4588 	}
4589 
4590 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4591 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4592 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4593 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4594 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4595 
4596 	tx_q->xdpf[entry] = xdpf;
4597 
4598 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4599 
4600 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4601 			       true, priv->mode, true, true,
4602 			       xdpf->len);
4603 
4604 	tx_q->tx_count_frames++;
4605 
4606 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4607 		set_ic = true;
4608 	else
4609 		set_ic = false;
4610 
4611 	if (set_ic) {
4612 		tx_q->tx_count_frames = 0;
4613 		stmmac_set_tx_ic(priv, tx_desc);
4614 		priv->xstats.tx_set_ic_bit++;
4615 	}
4616 
4617 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4618 
4619 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4620 	tx_q->cur_tx = entry;
4621 
4622 	return STMMAC_XDP_TX;
4623 }
4624 
4625 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4626 				   int cpu)
4627 {
4628 	int index = cpu;
4629 
4630 	if (unlikely(index < 0))
4631 		index = 0;
4632 
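	/* Fold the CPU number onto the available TX queues (equivalent to
	 * index % tx_queues_to_use) so XDP transmit from any CPU lands on
	 * a valid queue.
	 */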
4633 	while (index >= priv->plat->tx_queues_to_use)
4634 		index -= priv->plat->tx_queues_to_use;
4635 
4636 	return index;
4637 }
4638 
4639 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4640 				struct xdp_buff *xdp)
4641 {
4642 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4643 	int cpu = smp_processor_id();
4644 	struct netdev_queue *nq;
4645 	int queue;
4646 	int res;
4647 
4648 	if (unlikely(!xdpf))
4649 		return STMMAC_XDP_CONSUMED;
4650 
4651 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4652 	nq = netdev_get_tx_queue(priv->dev, queue);
4653 
4654 	__netif_tx_lock(nq, cpu);
4655 	/* Avoids TX time-out as we are sharing with slow path */
4656 	nq->trans_start = jiffies;
4657 
4658 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4659 	if (res == STMMAC_XDP_TX)
4660 		stmmac_flush_tx_descriptors(priv, queue);
4661 
4662 	__netif_tx_unlock(nq);
4663 
4664 	return res;
4665 }
4666 
4667 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4668 				 struct bpf_prog *prog,
4669 				 struct xdp_buff *xdp)
4670 {
4671 	u32 act;
4672 	int res;
4673 
4674 	act = bpf_prog_run_xdp(prog, xdp);
4675 	switch (act) {
4676 	case XDP_PASS:
4677 		res = STMMAC_XDP_PASS;
4678 		break;
4679 	case XDP_TX:
4680 		res = stmmac_xdp_xmit_back(priv, xdp);
4681 		break;
4682 	case XDP_REDIRECT:
4683 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4684 			res = STMMAC_XDP_CONSUMED;
4685 		else
4686 			res = STMMAC_XDP_REDIRECT;
4687 		break;
4688 	default:
4689 		bpf_warn_invalid_xdp_action(act);
4690 		fallthrough;
4691 	case XDP_ABORTED:
4692 		trace_xdp_exception(priv->dev, prog, act);
4693 		fallthrough;
4694 	case XDP_DROP:
4695 		res = STMMAC_XDP_CONSUMED;
4696 		break;
4697 	}
4698 
4699 	return res;
4700 }
4701 
4702 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4703 					   struct xdp_buff *xdp)
4704 {
4705 	struct bpf_prog *prog;
4706 	int res;
4707 
4708 	prog = READ_ONCE(priv->xdp_prog);
4709 	if (!prog) {
4710 		res = STMMAC_XDP_PASS;
4711 		goto out;
4712 	}
4713 
4714 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4715 out:
4716 	return ERR_PTR(-res);
4717 }
4718 
4719 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4720 				   int xdp_status)
4721 {
4722 	int cpu = smp_processor_id();
4723 	int queue;
4724 
4725 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4726 
4727 	if (xdp_status & STMMAC_XDP_TX)
4728 		stmmac_tx_timer_arm(priv, queue);
4729 
4730 	if (xdp_status & STMMAC_XDP_REDIRECT)
4731 		xdp_do_flush();
4732 }
4733 
4734 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4735 					       struct xdp_buff *xdp)
4736 {
4737 	unsigned int metasize = xdp->data - xdp->data_meta;
4738 	unsigned int datasize = xdp->data_end - xdp->data;
4739 	struct sk_buff *skb;
4740 
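	/* Copy the received frame out of the XSK buffer into a freshly
	 * allocated skb so the zero-copy umem buffer can be returned to
	 * the pool by the caller.
	 */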
4741 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4742 			       xdp->data_end - xdp->data_hard_start,
4743 			       GFP_ATOMIC | __GFP_NOWARN);
4744 	if (unlikely(!skb))
4745 		return NULL;
4746 
4747 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4748 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4749 	if (metasize)
4750 		skb_metadata_set(skb, metasize);
4751 
4752 	return skb;
4753 }
4754 
4755 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4756 				   struct dma_desc *p, struct dma_desc *np,
4757 				   struct xdp_buff *xdp)
4758 {
4759 	struct stmmac_channel *ch = &priv->channel[queue];
4760 	unsigned int len = xdp->data_end - xdp->data;
4761 	enum pkt_hash_types hash_type;
4762 	int coe = priv->hw->rx_csum;
4763 	struct sk_buff *skb;
4764 	u32 hash;
4765 
4766 	skb = stmmac_construct_skb_zc(ch, xdp);
4767 	if (!skb) {
4768 		priv->dev->stats.rx_dropped++;
4769 		return;
4770 	}
4771 
4772 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4773 	stmmac_rx_vlan(priv->dev, skb);
4774 	skb->protocol = eth_type_trans(skb, priv->dev);
4775 
4776 	if (unlikely(!coe))
4777 		skb_checksum_none_assert(skb);
4778 	else
4779 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4780 
4781 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4782 		skb_set_hash(skb, hash, hash_type);
4783 
4784 	skb_record_rx_queue(skb, queue);
4785 	napi_gro_receive(&ch->rxtx_napi, skb);
4786 
4787 	priv->dev->stats.rx_packets++;
4788 	priv->dev->stats.rx_bytes += len;
4789 }
4790 
4791 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4792 {
4793 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4794 	unsigned int entry = rx_q->dirty_rx;
4795 	struct dma_desc *rx_desc = NULL;
4796 	bool ret = true;
4797 
4798 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4799 
4800 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4801 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4802 		dma_addr_t dma_addr;
4803 		bool use_rx_wd;
4804 
4805 		if (!buf->xdp) {
4806 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4807 			if (!buf->xdp) {
4808 				ret = false;
4809 				break;
4810 			}
4811 		}
4812 
4813 		if (priv->extend_desc)
4814 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4815 		else
4816 			rx_desc = rx_q->dma_rx + entry;
4817 
4818 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4819 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4820 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4821 		stmmac_refill_desc3(priv, rx_q, rx_desc);
4822 
4823 		rx_q->rx_count_frames++;
4824 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4825 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4826 			rx_q->rx_count_frames = 0;
4827 
4828 		use_rx_wd = !priv->rx_coal_frames[queue];
4829 		use_rx_wd |= rx_q->rx_count_frames > 0;
4830 		if (!priv->use_riwt)
4831 			use_rx_wd = false;
4832 
4833 		dma_wmb();
4834 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4835 
4836 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4837 	}
4838 
4839 	if (rx_desc) {
4840 		rx_q->dirty_rx = entry;
4841 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4842 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
4843 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4844 	}
4845 
4846 	return ret;
4847 }
4848 
4849 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4850 {
4851 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4852 	unsigned int count = 0, error = 0, len = 0;
4853 	int dirty = stmmac_rx_dirty(priv, queue);
4854 	unsigned int next_entry = rx_q->cur_rx;
4855 	unsigned int desc_size;
4856 	struct bpf_prog *prog;
4857 	bool failure = false;
4858 	int xdp_status = 0;
4859 	int status = 0;
4860 
4861 	if (netif_msg_rx_status(priv)) {
4862 		void *rx_head;
4863 
4864 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4865 		if (priv->extend_desc) {
4866 			rx_head = (void *)rx_q->dma_erx;
4867 			desc_size = sizeof(struct dma_extended_desc);
4868 		} else {
4869 			rx_head = (void *)rx_q->dma_rx;
4870 			desc_size = sizeof(struct dma_desc);
4871 		}
4872 
4873 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4874 				    rx_q->dma_rx_phy, desc_size);
4875 	}
4876 	while (count < limit) {
4877 		struct stmmac_rx_buffer *buf;
4878 		unsigned int buf1_len = 0;
4879 		struct dma_desc *np, *p;
4880 		int entry;
4881 		int res;
4882 
4883 		if (!count && rx_q->state_saved) {
4884 			error = rx_q->state.error;
4885 			len = rx_q->state.len;
4886 		} else {
4887 			rx_q->state_saved = false;
4888 			error = 0;
4889 			len = 0;
4890 		}
4891 
4892 		if (count >= limit)
4893 			break;
4894 
4895 read_again:
4896 		buf1_len = 0;
4897 		entry = next_entry;
4898 		buf = &rx_q->buf_pool[entry];
4899 
4900 		if (dirty >= STMMAC_RX_FILL_BATCH) {
4901 			failure = failure ||
4902 				  !stmmac_rx_refill_zc(priv, queue, dirty);
4903 			dirty = 0;
4904 		}
4905 
4906 		if (priv->extend_desc)
4907 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4908 		else
4909 			p = rx_q->dma_rx + entry;
4910 
4911 		/* read the status of the incoming frame */
4912 		status = stmmac_rx_status(priv, &priv->dev->stats,
4913 					  &priv->xstats, p);
4914 		/* check if managed by the DMA otherwise go ahead */
4915 		if (unlikely(status & dma_own))
4916 			break;
4917 
4918 		/* Prefetch the next RX descriptor */
4919 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4920 						priv->dma_rx_size);
4921 		next_entry = rx_q->cur_rx;
4922 
4923 		if (priv->extend_desc)
4924 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4925 		else
4926 			np = rx_q->dma_rx + next_entry;
4927 
4928 		prefetch(np);
4929 
4930 		/* Ensure a valid XSK buffer before proceeding */
4931 		if (!buf->xdp)
4932 			break;
4933 
4934 		if (priv->extend_desc)
4935 			stmmac_rx_extended_status(priv, &priv->dev->stats,
4936 						  &priv->xstats,
4937 						  rx_q->dma_erx + entry);
4938 		if (unlikely(status == discard_frame)) {
4939 			xsk_buff_free(buf->xdp);
4940 			buf->xdp = NULL;
4941 			dirty++;
4942 			error = 1;
4943 			if (!priv->hwts_rx_en)
4944 				priv->dev->stats.rx_errors++;
4945 		}
4946 
4947 		if (unlikely(error && (status & rx_not_ls)))
4948 			goto read_again;
4949 		if (unlikely(error)) {
4950 			count++;
4951 			continue;
4952 		}
4953 
4954 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
4955 		if (likely(status & rx_not_ls)) {
4956 			xsk_buff_free(buf->xdp);
4957 			buf->xdp = NULL;
4958 			dirty++;
4959 			count++;
4960 			goto read_again;
4961 		}
4962 
4963 		/* XDP ZC frames only support primary buffers for now */
4964 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4965 		len += buf1_len;
4966 
4967 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4968 		 * Type frames (LLC/LLC-SNAP)
4969 		 *
4970 		 * llc_snap is never checked in GMAC >= 4, so this ACS
4971 		 * feature is always disabled and packets need to be
4972 		 * stripped manually.
4973 		 */
4974 		if (likely(!(status & rx_not_ls)) &&
4975 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4976 		     unlikely(status != llc_snap))) {
4977 			buf1_len -= ETH_FCS_LEN;
4978 			len -= ETH_FCS_LEN;
4979 		}
4980 
4981 		/* RX buffer is good and fits into an XSK pool buffer */
4982 		buf->xdp->data_end = buf->xdp->data + buf1_len;
4983 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
4984 
4985 		prog = READ_ONCE(priv->xdp_prog);
4986 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
4987 
4988 		switch (res) {
4989 		case STMMAC_XDP_PASS:
4990 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
4991 			xsk_buff_free(buf->xdp);
4992 			break;
4993 		case STMMAC_XDP_CONSUMED:
4994 			xsk_buff_free(buf->xdp);
4995 			priv->dev->stats.rx_dropped++;
4996 			break;
4997 		case STMMAC_XDP_TX:
4998 		case STMMAC_XDP_REDIRECT:
4999 			xdp_status |= res;
5000 			break;
5001 		}
5002 
5003 		buf->xdp = NULL;
5004 		dirty++;
5005 		count++;
5006 	}
5007 
5008 	if (status & rx_not_ls) {
5009 		rx_q->state_saved = true;
5010 		rx_q->state.error = error;
5011 		rx_q->state.len = len;
5012 	}
5013 
5014 	stmmac_finalize_xdp_rx(priv, xdp_status);
5015 
5016 	priv->xstats.rx_pkt_n += count;
5017 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5018 
5019 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5020 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5021 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5022 		else
5023 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5024 
5025 		return (int)count;
5026 	}
5027 
5028 	return failure ? limit : (int)count;
5029 }
5030 
5031 /**
5032  * stmmac_rx - manage the receive process
5033  * @priv: driver private structure
5034  * @limit: napi budget
5035  * @queue: RX queue index.
5036  * Description :  this is the function called by the napi poll method.
5037  * It gets all the frames inside the ring.
5038  */
5039 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5040 {
5041 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5042 	struct stmmac_channel *ch = &priv->channel[queue];
5043 	unsigned int count = 0, error = 0, len = 0;
5044 	int status = 0, coe = priv->hw->rx_csum;
5045 	unsigned int next_entry = rx_q->cur_rx;
5046 	enum dma_data_direction dma_dir;
5047 	unsigned int desc_size;
5048 	struct sk_buff *skb = NULL;
5049 	struct xdp_buff xdp;
5050 	int xdp_status = 0;
5051 	int buf_sz;
5052 
5053 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
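	/* Round the RX buffer size up to a whole number of pages: this is
	 * the frame size handed to xdp_init_buff() for each received
	 * packet.
	 */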
5054 	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5055 
5056 	if (netif_msg_rx_status(priv)) {
5057 		void *rx_head;
5058 
5059 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5060 		if (priv->extend_desc) {
5061 			rx_head = (void *)rx_q->dma_erx;
5062 			desc_size = sizeof(struct dma_extended_desc);
5063 		} else {
5064 			rx_head = (void *)rx_q->dma_rx;
5065 			desc_size = sizeof(struct dma_desc);
5066 		}
5067 
5068 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5069 				    rx_q->dma_rx_phy, desc_size);
5070 	}
5071 	while (count < limit) {
5072 		unsigned int buf1_len = 0, buf2_len = 0;
5073 		enum pkt_hash_types hash_type;
5074 		struct stmmac_rx_buffer *buf;
5075 		struct dma_desc *np, *p;
5076 		int entry;
5077 		u32 hash;
5078 
5079 		if (!count && rx_q->state_saved) {
5080 			skb = rx_q->state.skb;
5081 			error = rx_q->state.error;
5082 			len = rx_q->state.len;
5083 		} else {
5084 			rx_q->state_saved = false;
5085 			skb = NULL;
5086 			error = 0;
5087 			len = 0;
5088 		}
5089 
5090 		if (count >= limit)
5091 			break;
5092 
5093 read_again:
5094 		buf1_len = 0;
5095 		buf2_len = 0;
5096 		entry = next_entry;
5097 		buf = &rx_q->buf_pool[entry];
5098 
5099 		if (priv->extend_desc)
5100 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5101 		else
5102 			p = rx_q->dma_rx + entry;
5103 
5104 		/* read the status of the incoming frame */
5105 		status = stmmac_rx_status(priv, &priv->dev->stats,
5106 				&priv->xstats, p);
5107 		/* check if managed by the DMA otherwise go ahead */
5108 		if (unlikely(status & dma_own))
5109 			break;
5110 
5111 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5112 						priv->dma_rx_size);
5113 		next_entry = rx_q->cur_rx;
5114 
5115 		if (priv->extend_desc)
5116 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5117 		else
5118 			np = rx_q->dma_rx + next_entry;
5119 
5120 		prefetch(np);
5121 
5122 		if (priv->extend_desc)
5123 			stmmac_rx_extended_status(priv, &priv->dev->stats,
5124 					&priv->xstats, rx_q->dma_erx + entry);
5125 		if (unlikely(status == discard_frame)) {
5126 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5127 			buf->page = NULL;
5128 			error = 1;
5129 			if (!priv->hwts_rx_en)
5130 				priv->dev->stats.rx_errors++;
5131 		}
5132 
5133 		if (unlikely(error && (status & rx_not_ls)))
5134 			goto read_again;
5135 		if (unlikely(error)) {
5136 			dev_kfree_skb(skb);
5137 			skb = NULL;
5138 			count++;
5139 			continue;
5140 		}
5141 
5142 		/* Buffer is good. Go on. */
5143 
5144 		prefetch(page_address(buf->page) + buf->page_offset);
5145 		if (buf->sec_page)
5146 			prefetch(page_address(buf->sec_page));
5147 
5148 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5149 		len += buf1_len;
5150 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5151 		len += buf2_len;
5152 
5153 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5154 		 * Type frames (LLC/LLC-SNAP)
5155 		 *
5156 		 * llc_snap is never checked in GMAC >= 4, so this ACS
5157 		 * feature is always disabled and packets need to be
5158 		 * stripped manually.
5159 		 */
5160 		if (likely(!(status & rx_not_ls)) &&
5161 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5162 		     unlikely(status != llc_snap))) {
5163 			if (buf2_len)
5164 				buf2_len -= ETH_FCS_LEN;
5165 			else
5166 				buf1_len -= ETH_FCS_LEN;
5167 
5168 			len -= ETH_FCS_LEN;
5169 		}
5170 
5171 		if (!skb) {
5172 			unsigned int pre_len, sync_len;
5173 
5174 			dma_sync_single_for_cpu(priv->device, buf->addr,
5175 						buf1_len, dma_dir);
5176 
5177 			xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5178 			xdp_prepare_buff(&xdp, page_address(buf->page),
5179 					 buf->page_offset, buf1_len, false);
5180 
5181 			pre_len = xdp.data_end - xdp.data_hard_start -
5182 				  buf->page_offset;
5183 			skb = stmmac_xdp_run_prog(priv, &xdp);
5184 			/* Due to xdp_adjust_tail: the DMA sync for_device
5185 			 * must cover the max length the CPU touched
5186 			 */
5187 			sync_len = xdp.data_end - xdp.data_hard_start -
5188 				   buf->page_offset;
5189 			sync_len = max(sync_len, pre_len);
5190 
5191 			/* For non-XDP_PASS verdicts */
5192 			if (IS_ERR(skb)) {
5193 				unsigned int xdp_res = -PTR_ERR(skb);
5194 
5195 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5196 					page_pool_put_page(rx_q->page_pool,
5197 							   virt_to_head_page(xdp.data),
5198 							   sync_len, true);
5199 					buf->page = NULL;
5200 					priv->dev->stats.rx_dropped++;
5201 
5202 					/* Clear skb as it holds an encoded
5203 					 * XDP verdict, not a real buffer.
5204 					 */
5205 					skb = NULL;
5206 
5207 					if (unlikely((status & rx_not_ls)))
5208 						goto read_again;
5209 
5210 					count++;
5211 					continue;
5212 				} else if (xdp_res & (STMMAC_XDP_TX |
5213 						      STMMAC_XDP_REDIRECT)) {
5214 					xdp_status |= xdp_res;
5215 					buf->page = NULL;
5216 					skb = NULL;
5217 					count++;
5218 					continue;
5219 				}
5220 			}
5221 		}
5222 
5223 		if (!skb) {
5224 			/* XDP program may expand or reduce tail */
5225 			buf1_len = xdp.data_end - xdp.data;
5226 
5227 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5228 			if (!skb) {
5229 				priv->dev->stats.rx_dropped++;
5230 				count++;
5231 				goto drain_data;
5232 			}
5233 
5234 			/* XDP program may adjust header */
5235 			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5236 			skb_put(skb, buf1_len);
5237 
5238 			/* Data payload copied into SKB, page ready for recycle */
5239 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5240 			buf->page = NULL;
5241 		} else if (buf1_len) {
5242 			dma_sync_single_for_cpu(priv->device, buf->addr,
5243 						buf1_len, dma_dir);
5244 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5245 					buf->page, buf->page_offset, buf1_len,
5246 					priv->dma_buf_sz);
5247 
5248 			/* Data payload appended into SKB */
5249 			page_pool_release_page(rx_q->page_pool, buf->page);
5250 			buf->page = NULL;
5251 		}
5252 
5253 		if (buf2_len) {
5254 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5255 						buf2_len, dma_dir);
5256 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5257 					buf->sec_page, 0, buf2_len,
5258 					priv->dma_buf_sz);
5259 
5260 			/* Data payload appended into SKB */
5261 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
5262 			buf->sec_page = NULL;
5263 		}
5264 
5265 drain_data:
5266 		if (likely(status & rx_not_ls))
5267 			goto read_again;
5268 		if (!skb)
5269 			continue;
5270 
5271 		/* Got entire packet into SKB. Finish it. */
5272 
5273 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5274 		stmmac_rx_vlan(priv->dev, skb);
5275 		skb->protocol = eth_type_trans(skb, priv->dev);
5276 
5277 		if (unlikely(!coe))
5278 			skb_checksum_none_assert(skb);
5279 		else
5280 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5281 
5282 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5283 			skb_set_hash(skb, hash, hash_type);
5284 
5285 		skb_record_rx_queue(skb, queue);
5286 		napi_gro_receive(&ch->rx_napi, skb);
5287 		skb = NULL;
5288 
5289 		priv->dev->stats.rx_packets++;
5290 		priv->dev->stats.rx_bytes += len;
5291 		count++;
5292 	}
5293 
5294 	if (status & rx_not_ls || skb) {
5295 		rx_q->state_saved = true;
5296 		rx_q->state.skb = skb;
5297 		rx_q->state.error = error;
5298 		rx_q->state.len = len;
5299 	}
5300 
5301 	stmmac_finalize_xdp_rx(priv, xdp_status);
5302 
5303 	stmmac_rx_refill(priv, queue);
5304 
5305 	priv->xstats.rx_pkt_n += count;
5306 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5307 
5308 	return count;
5309 }
5310 
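/**
 *  stmmac_napi_poll_rx - RX NAPI poll callback
 *  @napi: pointer to the per-channel RX napi structure
 *  @budget: maximum number of packets that can be processed in this poll
 *  Description: process received frames on the channel and, once less than
 *  the budget has been consumed, complete NAPI and re-enable the RX DMA
 *  interrupt for the channel.
 */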
5311 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5312 {
5313 	struct stmmac_channel *ch =
5314 		container_of(napi, struct stmmac_channel, rx_napi);
5315 	struct stmmac_priv *priv = ch->priv_data;
5316 	u32 chan = ch->index;
5317 	int work_done;
5318 
5319 	priv->xstats.napi_poll++;
5320 
5321 	work_done = stmmac_rx(priv, budget, chan);
5322 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5323 		unsigned long flags;
5324 
5325 		spin_lock_irqsave(&ch->lock, flags);
5326 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5327 		spin_unlock_irqrestore(&ch->lock, flags);
5328 	}
5329 
5330 	return work_done;
5331 }
5332 
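/**
 *  stmmac_napi_poll_tx - TX NAPI poll callback
 *  @napi: pointer to the per-channel TX napi structure
 *  @budget: maximum amount of TX clean work allowed in this poll
 *  Description: reclaim completed TX descriptors on the channel and, once
 *  less than the budget has been consumed, complete NAPI and re-enable the
 *  TX DMA interrupt for the channel.
 */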
5333 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5334 {
5335 	struct stmmac_channel *ch =
5336 		container_of(napi, struct stmmac_channel, tx_napi);
5337 	struct stmmac_priv *priv = ch->priv_data;
5338 	u32 chan = ch->index;
5339 	int work_done;
5340 
5341 	priv->xstats.napi_poll++;
5342 
5343 	work_done = stmmac_tx_clean(priv, budget, chan);
5344 	work_done = min(work_done, budget);
5345 
5346 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5347 		unsigned long flags;
5348 
5349 		spin_lock_irqsave(&ch->lock, flags);
5350 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5351 		spin_unlock_irqrestore(&ch->lock, flags);
5352 	}
5353 
5354 	return work_done;
5355 }
5356 
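/**
 *  stmmac_napi_poll_rxtx - combined RX/TX NAPI poll callback
 *  @napi: pointer to the per-channel rxtx napi structure
 *  @budget: maximum number of packets that can be processed in this poll
 *  Description: used for channels where AF_XDP zero-copy is in use. It
 *  cleans the TX ring, processes the zero-copy RX ring and re-enables both
 *  RX and TX DMA interrupts only once all the work is done.
 */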
5357 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5358 {
5359 	struct stmmac_channel *ch =
5360 		container_of(napi, struct stmmac_channel, rxtx_napi);
5361 	struct stmmac_priv *priv = ch->priv_data;
5362 	int rx_done, tx_done, rxtx_done;
5363 	u32 chan = ch->index;
5364 
5365 	priv->xstats.napi_poll++;
5366 
5367 	tx_done = stmmac_tx_clean(priv, budget, chan);
5368 	tx_done = min(tx_done, budget);
5369 
5370 	rx_done = stmmac_rx_zc(priv, budget, chan);
5371 
5372 	rxtx_done = max(tx_done, rx_done);
5373 
5374 	/* If either TX or RX work is not complete, return budget
5375 	 * and keep polling.
5376 	 */
5377 	if (rxtx_done >= budget)
5378 		return budget;
5379 
5380 	/* all work done, exit the polling mode */
5381 	if (napi_complete_done(napi, rxtx_done)) {
5382 		unsigned long flags;
5383 
5384 		spin_lock_irqsave(&ch->lock, flags);
5385 		/* Both RX and TX work are complete,
5386 		 * so enable both RX & TX IRQs.
5387 		 */
5388 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5389 		spin_unlock_irqrestore(&ch->lock, flags);
5390 	}
5391 
5392 	return min(rxtx_done, budget - 1);
5393 }
5394 
5395 /**
5396  *  stmmac_tx_timeout
5397  *  @dev : Pointer to net device structure
5398  *  @txqueue: the index of the hanging transmit queue
5399  *  Description: this function is called when a packet transmission fails to
5400  *   complete within a reasonable time. The driver will mark the error in the
5401  *   netdev structure and arrange for the device to be reset to a sane state
5402  *   in order to transmit a new packet.
5403  */
5404 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5405 {
5406 	struct stmmac_priv *priv = netdev_priv(dev);
5407 
5408 	stmmac_global_err(priv);
5409 }
5410 
5411 /**
5412  *  stmmac_set_rx_mode - entry point for multicast addressing
5413  *  @dev : pointer to the device structure
5414  *  Description:
5415  *  This function is a driver entry point which gets called by the kernel
5416  *  whenever multicast addresses must be enabled/disabled.
5417  *  Return value:
5418  *  void.
5419  */
5420 static void stmmac_set_rx_mode(struct net_device *dev)
5421 {
5422 	struct stmmac_priv *priv = netdev_priv(dev);
5423 
5424 	stmmac_set_filter(priv, priv->hw, dev);
5425 }
5426 
5427 /**
5428  *  stmmac_change_mtu - entry point to change MTU size for the device.
5429  *  @dev : device pointer.
5430  *  @new_mtu : the new MTU size for the device.
5431  *  Description: the Maximum Transmission Unit (MTU) is used by the network
5432  *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
5433  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5434  *  Return value:
5435  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5436  *  file on failure.
5437  */
5438 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5439 {
5440 	struct stmmac_priv *priv = netdev_priv(dev);
5441 	int txfifosz = priv->plat->tx_fifo_size;
5442 	const int mtu = new_mtu;
5443 
5444 	if (txfifosz == 0)
5445 		txfifosz = priv->dma_cap.tx_fifo_size;
5446 
5447 	txfifosz /= priv->plat->tx_queues_to_use;
5448 
5449 	if (netif_running(dev)) {
5450 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
5451 		return -EBUSY;
5452 	}
5453 
5454 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5455 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5456 		return -EINVAL;
5457 	}
5458 
5459 	new_mtu = STMMAC_ALIGN(new_mtu);
5460 
5461 	/* If condition true, FIFO is too small or MTU too large */
5462 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5463 		return -EINVAL;
5464 
5465 	dev->mtu = mtu;
5466 
5467 	netdev_update_features(dev);
5468 
5469 	return 0;
5470 }
5471 
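/**
 *  stmmac_fix_features - adjust the requested netdev features
 *  @dev: device pointer
 *  @features: features requested by the stack
 *  Description: drop the checksum offload flags that the current HW
 *  configuration cannot honour (no RX/TX COE, buggy Jumbo support) and
 *  track the TSO setting requested via ethtool.
 */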
5472 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5473 					     netdev_features_t features)
5474 {
5475 	struct stmmac_priv *priv = netdev_priv(dev);
5476 
5477 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5478 		features &= ~NETIF_F_RXCSUM;
5479 
5480 	if (!priv->plat->tx_coe)
5481 		features &= ~NETIF_F_CSUM_MASK;
5482 
5483 	/* Some GMAC devices have buggy Jumbo frame support that
5484 	 * requires Tx COE to be disabled for oversized frames
5485 	 * (due to limited buffer sizes). In this case we disable
5486 	 * the TX csum insertion in the TDES and do not use SF.
5487 	 */
5488 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5489 		features &= ~NETIF_F_CSUM_MASK;
5490 
5491 	/* Enable or disable TSO as requested via ethtool */
5492 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5493 		if (features & NETIF_F_TSO)
5494 			priv->tso = true;
5495 		else
5496 			priv->tso = false;
5497 	}
5498 
5499 	return features;
5500 }
5501 
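/**
 *  stmmac_set_features - apply a new netdev feature set to the HW
 *  @netdev: device pointer
 *  @features: features to be enabled
 *  Description: program the RX checksum offload engine and the Split
 *  Header setting on all RX queues according to the new feature set.
 */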
5502 static int stmmac_set_features(struct net_device *netdev,
5503 			       netdev_features_t features)
5504 {
5505 	struct stmmac_priv *priv = netdev_priv(netdev);
5506 	bool sph_en;
5507 	u32 chan;
5508 
5509 	/* Keep the COE Type if RX checksum offload is supported */
5510 	if (features & NETIF_F_RXCSUM)
5511 		priv->hw->rx_csum = priv->plat->rx_coe;
5512 	else
5513 		priv->hw->rx_csum = 0;
5514 	/* No check needed because rx_coe has been set before and it will be
5515 	 * fixed up in case of problems.
5516 	 */
5517 	stmmac_rx_ipc(priv, priv->hw);
5518 
5519 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5520 
5521 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5522 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5523 
5524 	return 0;
5525 }
5526 
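/**
 *  stmmac_fpe_event_status - handle Frame Preemption (FPE) handshake events
 *  @priv: driver private structure
 *  @status: FPE IRQ status reported by the HW
 *  Description: update the local and link-partner FPE states according to
 *  the verify/response mPackets that were sent or received and, if needed,
 *  schedule the FPE handshake work.
 */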
5527 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5528 {
5529 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5530 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5531 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5532 	bool *hs_enable = &fpe_cfg->hs_enable;
5533 
5534 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5535 		return;
5536 
5537 	/* If LP has sent verify mPacket, LP is FPE capable */
5538 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5539 		if (*lp_state < FPE_STATE_CAPABLE)
5540 			*lp_state = FPE_STATE_CAPABLE;
5541 
5542 		/* If the user has requested FPE enable, respond quickly */
5543 		if (*hs_enable)
5544 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5545 						MPACKET_RESPONSE);
5546 	}
5547 
5548 	/* If Local has sent verify mPacket, Local is FPE capable */
5549 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5550 		if (*lo_state < FPE_STATE_CAPABLE)
5551 			*lo_state = FPE_STATE_CAPABLE;
5552 	}
5553 
5554 	/* If LP has sent response mPacket, LP is entering FPE ON */
5555 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5556 		*lp_state = FPE_STATE_ENTERING_ON;
5557 
5558 	/* If Local has sent response mPacket, Local is entering FPE ON */
5559 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5560 		*lo_state = FPE_STATE_ENTERING_ON;
5561 
5562 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5563 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5564 	    priv->fpe_wq) {
5565 		queue_work(priv->fpe_wq, &priv->fpe_task);
5566 	}
5567 }
5568 
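/**
 *  stmmac_common_interrupt - handle the MAC core (non-DMA) interrupts
 *  @priv: driver private structure
 *  Description: service the common interrupt sources: wake-up events, EST,
 *  FPE, LPI, MTL queue status, PCS link changes and timestamp events.
 */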
5569 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5570 {
5571 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5572 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5573 	u32 queues_count;
5574 	u32 queue;
5575 	bool xmac;
5576 
5577 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5578 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5579 
5580 	if (priv->irq_wake)
5581 		pm_wakeup_event(priv->device, 0);
5582 
5583 	if (priv->dma_cap.estsel)
5584 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5585 				      &priv->xstats, tx_cnt);
5586 
5587 	if (priv->dma_cap.fpesel) {
5588 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5589 						   priv->dev);
5590 
5591 		stmmac_fpe_event_status(priv, status);
5592 	}
5593 
5594 	/* To handle the GMAC core's own interrupts */
5595 	if ((priv->plat->has_gmac) || xmac) {
5596 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5597 
5598 		if (unlikely(status)) {
5599 			/* For LPI we need to save the tx status */
5600 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5601 				priv->tx_path_in_lpi_mode = true;
5602 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5603 				priv->tx_path_in_lpi_mode = false;
5604 		}
5605 
5606 		for (queue = 0; queue < queues_count; queue++) {
5607 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5608 							    queue);
5609 		}
5610 
5611 		/* PCS link status */
5612 		if (priv->hw->pcs) {
5613 			if (priv->xstats.pcs_link)
5614 				netif_carrier_on(priv->dev);
5615 			else
5616 				netif_carrier_off(priv->dev);
5617 		}
5618 
5619 		stmmac_timestamp_interrupt(priv, priv);
5620 	}
5621 }
5622 
5623 /**
5624  *  stmmac_interrupt - main ISR
5625  *  @irq: interrupt number.
5626  *  @dev_id: to pass the net device pointer.
5627  *  Description: this is the main driver interrupt service routine.
5628  *  It can call:
5629  *  o DMA service routine (to manage incoming frame reception and transmission
5630  *    status)
5631  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5632  *    interrupts.
5633  */
5634 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5635 {
5636 	struct net_device *dev = (struct net_device *)dev_id;
5637 	struct stmmac_priv *priv = netdev_priv(dev);
5638 
5639 	/* Check if adapter is up */
5640 	if (test_bit(STMMAC_DOWN, &priv->state))
5641 		return IRQ_HANDLED;
5642 
5643 	/* Check if a fatal error happened */
5644 	if (stmmac_safety_feat_interrupt(priv))
5645 		return IRQ_HANDLED;
5646 
5647 	/* To handle Common interrupts */
5648 	stmmac_common_interrupt(priv);
5649 
5650 	/* To handle DMA interrupts */
5651 	stmmac_dma_interrupt(priv);
5652 
5653 	return IRQ_HANDLED;
5654 }
5655 
5656 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5657 {
5658 	struct net_device *dev = (struct net_device *)dev_id;
5659 	struct stmmac_priv *priv = netdev_priv(dev);
5660 
5661 	if (unlikely(!dev)) {
5662 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5663 		return IRQ_NONE;
5664 	}
5665 
5666 	/* Check if adapter is up */
5667 	if (test_bit(STMMAC_DOWN, &priv->state))
5668 		return IRQ_HANDLED;
5669 
5670 	/* To handle Common interrupts */
5671 	stmmac_common_interrupt(priv);
5672 
5673 	return IRQ_HANDLED;
5674 }
5675 
5676 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5677 {
5678 	struct net_device *dev = (struct net_device *)dev_id;
5679 	struct stmmac_priv *priv = netdev_priv(dev);
5680 
5681 	if (unlikely(!dev)) {
5682 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5683 		return IRQ_NONE;
5684 	}
5685 
5686 	/* Check if adapter is up */
5687 	if (test_bit(STMMAC_DOWN, &priv->state))
5688 		return IRQ_HANDLED;
5689 
5690 	/* Check if a fatal error happened */
5691 	stmmac_safety_feat_interrupt(priv);
5692 
5693 	return IRQ_HANDLED;
5694 }
5695 
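/**
 *  stmmac_msi_intr_tx - per-channel TX MSI interrupt handler
 *  @irq: interrupt number
 *  @data: pointer to the TX queue structure passed at request_irq() time
 *  Description: schedule NAPI for the channel and, on TX errors, either
 *  bump the DMA threshold or start the TX error recovery.
 */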
5696 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5697 {
5698 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5699 	int chan = tx_q->queue_index;
5700 	struct stmmac_priv *priv;
5701 	int status;
5702 
5703 	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5704 
5705 	if (unlikely(!data)) {
5706 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5707 		return IRQ_NONE;
5708 	}
5709 
5710 	/* Check if adapter is up */
5711 	if (test_bit(STMMAC_DOWN, &priv->state))
5712 		return IRQ_HANDLED;
5713 
5714 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5715 
5716 	if (unlikely(status & tx_hard_error_bump_tc)) {
5717 		/* Try to bump up the dma threshold on this failure */
5718 		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
5719 		    tc <= 256) {
5720 			tc += 64;
5721 			if (priv->plat->force_thresh_dma_mode)
5722 				stmmac_set_dma_operation_mode(priv,
5723 							      tc,
5724 							      tc,
5725 							      chan);
5726 			else
5727 				stmmac_set_dma_operation_mode(priv,
5728 							      tc,
5729 							      SF_DMA_MODE,
5730 							      chan);
5731 			priv->xstats.threshold = tc;
5732 		}
5733 	} else if (unlikely(status == tx_hard_error)) {
5734 		stmmac_tx_err(priv, chan);
5735 	}
5736 
5737 	return IRQ_HANDLED;
5738 }
5739 
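/**
 *  stmmac_msi_intr_rx - per-channel RX MSI interrupt handler
 *  @irq: interrupt number
 *  @data: pointer to the RX queue structure passed at request_irq() time
 *  Description: schedule RX NAPI for the channel.
 */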
5740 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5741 {
5742 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5743 	int chan = rx_q->queue_index;
5744 	struct stmmac_priv *priv;
5745 
5746 	priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5747 
5748 	if (unlikely(!data)) {
5749 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5750 		return IRQ_NONE;
5751 	}
5752 
5753 	/* Check if adapter is up */
5754 	if (test_bit(STMMAC_DOWN, &priv->state))
5755 		return IRQ_HANDLED;
5756 
5757 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
5758 
5759 	return IRQ_HANDLED;
5760 }
5761 
5762 #ifdef CONFIG_NET_POLL_CONTROLLER
5763 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5764  * to allow network I/O with interrupts disabled.
5765  */
5766 static void stmmac_poll_controller(struct net_device *dev)
5767 {
5768 	struct stmmac_priv *priv = netdev_priv(dev);
5769 	int i;
5770 
5771 	/* If adapter is down, do nothing */
5772 	if (test_bit(STMMAC_DOWN, &priv->state))
5773 		return;
5774 
5775 	if (priv->plat->multi_msi_en) {
5776 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5777 			stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5778 
5779 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5780 			stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5781 	} else {
5782 		disable_irq(dev->irq);
5783 		stmmac_interrupt(dev->irq, dev);
5784 		enable_irq(dev->irq);
5785 	}
5786 }
5787 #endif
5788 
5789 /**
5790  *  stmmac_ioctl - Entry point for the Ioctl
5791  *  @dev: Device pointer.
5792  *  @rq: An IOCTL specific structure that can contain a pointer to
5793  *  a proprietary structure used to pass information to the driver.
5794  *  @cmd: IOCTL command
5795  *  Description:
5796  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5797  */
5798 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5799 {
5800 	struct stmmac_priv *priv = netdev_priv(dev);
5801 	int ret = -EOPNOTSUPP;
5802 
5803 	if (!netif_running(dev))
5804 		return -EINVAL;
5805 
5806 	switch (cmd) {
5807 	case SIOCGMIIPHY:
5808 	case SIOCGMIIREG:
5809 	case SIOCSMIIREG:
5810 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5811 		break;
5812 	case SIOCSHWTSTAMP:
5813 		ret = stmmac_hwtstamp_set(dev, rq);
5814 		break;
5815 	case SIOCGHWTSTAMP:
5816 		ret = stmmac_hwtstamp_get(dev, rq);
5817 		break;
5818 	default:
5819 		break;
5820 	}
5821 
5822 	return ret;
5823 }
5824 
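/**
 *  stmmac_setup_tc_block_cb - tc block offload callback
 *  @type: classifier offload type
 *  @type_data: type specific data
 *  @cb_priv: driver private structure
 *  Description: dispatch CLSU32 and FLOWER offload requests to the driver
 *  TC core with all the queues temporarily disabled.
 */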
5825 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5826 				    void *cb_priv)
5827 {
5828 	struct stmmac_priv *priv = cb_priv;
5829 	int ret = -EOPNOTSUPP;
5830 
5831 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5832 		return ret;
5833 
5834 	__stmmac_disable_all_queues(priv);
5835 
5836 	switch (type) {
5837 	case TC_SETUP_CLSU32:
5838 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5839 		break;
5840 	case TC_SETUP_CLSFLOWER:
5841 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
5842 		break;
5843 	default:
5844 		break;
5845 	}
5846 
5847 	stmmac_enable_all_queues(priv);
5848 	return ret;
5849 }
5850 
5851 static LIST_HEAD(stmmac_block_cb_list);
5852 
5853 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5854 			   void *type_data)
5855 {
5856 	struct stmmac_priv *priv = netdev_priv(ndev);
5857 
5858 	switch (type) {
5859 	case TC_SETUP_BLOCK:
5860 		return flow_block_cb_setup_simple(type_data,
5861 						  &stmmac_block_cb_list,
5862 						  stmmac_setup_tc_block_cb,
5863 						  priv, priv, true);
5864 	case TC_SETUP_QDISC_CBS:
5865 		return stmmac_tc_setup_cbs(priv, priv, type_data);
5866 	case TC_SETUP_QDISC_TAPRIO:
5867 		return stmmac_tc_setup_taprio(priv, priv, type_data);
5868 	case TC_SETUP_QDISC_ETF:
5869 		return stmmac_tc_setup_etf(priv, priv, type_data);
5870 	default:
5871 		return -EOPNOTSUPP;
5872 	}
5873 }
5874 
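/**
 *  stmmac_select_queue - select the TX queue for an skb
 *  @dev: device pointer
 *  @skb: buffer to be transmitted
 *  @sb_dev: subordinate device (not used)
 *  Description: force TSO/USO traffic onto Queue 0 and use the default
 *  queue selection for everything else.
 */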
5875 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5876 			       struct net_device *sb_dev)
5877 {
5878 	int gso = skb_shinfo(skb)->gso_type;
5879 
5880 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
5881 		/*
5882 		 * There is no way to determine the number of TSO/USO
5883 		 * capable Queues. Let's always use Queue 0 because if
5884 		 * TSO/USO is supported then at least this one will be
5885 		 * capable.
5886 		 */
5887 		return 0;
5888 	}
5889 
5890 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
5891 }
5892 
5893 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5894 {
5895 	struct stmmac_priv *priv = netdev_priv(ndev);
5896 	int ret = 0;
5897 
5898 	ret = pm_runtime_get_sync(priv->device);
5899 	if (ret < 0) {
5900 		pm_runtime_put_noidle(priv->device);
5901 		return ret;
5902 	}
5903 
5904 	ret = eth_mac_addr(ndev, addr);
5905 	if (ret)
5906 		goto set_mac_error;
5907 
5908 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
5909 
5910 set_mac_error:
5911 	pm_runtime_put(priv->device);
5912 
5913 	return ret;
5914 }
5915 
5916 #ifdef CONFIG_DEBUG_FS
5917 static struct dentry *stmmac_fs_dir;
5918 
5919 static void sysfs_display_ring(void *head, int size, int extend_desc,
5920 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
5921 {
5922 	int i;
5923 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5924 	struct dma_desc *p = (struct dma_desc *)head;
5925 	dma_addr_t dma_addr;
5926 
5927 	for (i = 0; i < size; i++) {
5928 		if (extend_desc) {
5929 			dma_addr = dma_phy_addr + i * sizeof(*ep);
5930 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5931 				   i, &dma_addr,
5932 				   le32_to_cpu(ep->basic.des0),
5933 				   le32_to_cpu(ep->basic.des1),
5934 				   le32_to_cpu(ep->basic.des2),
5935 				   le32_to_cpu(ep->basic.des3));
5936 			ep++;
5937 		} else {
5938 			dma_addr = dma_phy_addr + i * sizeof(*p);
5939 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5940 				   i, &dma_addr,
5941 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5942 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5943 			p++;
5944 		}
5945 		seq_printf(seq, "\n");
5946 	}
5947 }
5948 
5949 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
5950 {
5951 	struct net_device *dev = seq->private;
5952 	struct stmmac_priv *priv = netdev_priv(dev);
5953 	u32 rx_count = priv->plat->rx_queues_to_use;
5954 	u32 tx_count = priv->plat->tx_queues_to_use;
5955 	u32 queue;
5956 
5957 	if ((dev->flags & IFF_UP) == 0)
5958 		return 0;
5959 
5960 	for (queue = 0; queue < rx_count; queue++) {
5961 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5962 
5963 		seq_printf(seq, "RX Queue %d:\n", queue);
5964 
5965 		if (priv->extend_desc) {
5966 			seq_printf(seq, "Extended descriptor ring:\n");
5967 			sysfs_display_ring((void *)rx_q->dma_erx,
5968 					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
5969 		} else {
5970 			seq_printf(seq, "Descriptor ring:\n");
5971 			sysfs_display_ring((void *)rx_q->dma_rx,
5972 					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
5973 		}
5974 	}
5975 
5976 	for (queue = 0; queue < tx_count; queue++) {
5977 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5978 
5979 		seq_printf(seq, "TX Queue %d:\n", queue);
5980 
5981 		if (priv->extend_desc) {
5982 			seq_printf(seq, "Extended descriptor ring:\n");
5983 			sysfs_display_ring((void *)tx_q->dma_etx,
5984 					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
5985 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
5986 			seq_printf(seq, "Descriptor ring:\n");
5987 			sysfs_display_ring((void *)tx_q->dma_tx,
5988 					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
5989 		}
5990 	}
5991 
5992 	return 0;
5993 }
5994 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
5995 
5996 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
5997 {
5998 	struct net_device *dev = seq->private;
5999 	struct stmmac_priv *priv = netdev_priv(dev);
6000 
6001 	if (!priv->hw_cap_support) {
6002 		seq_printf(seq, "DMA HW features not supported\n");
6003 		return 0;
6004 	}
6005 
6006 	seq_printf(seq, "==============================\n");
6007 	seq_printf(seq, "\tDMA HW features\n");
6008 	seq_printf(seq, "==============================\n");
6009 
6010 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6011 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6012 	seq_printf(seq, "\t1000 Mbps: %s\n",
6013 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6014 	seq_printf(seq, "\tHalf duplex: %s\n",
6015 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6016 	seq_printf(seq, "\tHash Filter: %s\n",
6017 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
6018 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6019 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
6020 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6021 		   (priv->dma_cap.pcs) ? "Y" : "N");
6022 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6023 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6024 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6025 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6026 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6027 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6028 	seq_printf(seq, "\tRMON module: %s\n",
6029 		   (priv->dma_cap.rmon) ? "Y" : "N");
6030 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6031 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6032 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6033 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6034 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6035 		   (priv->dma_cap.eee) ? "Y" : "N");
6036 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6037 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6038 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6039 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6040 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6041 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6042 	} else {
6043 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6044 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6045 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6046 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6047 	}
6048 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6049 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6050 	seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
6051 		   priv->dma_cap.number_rx_channel);
6052 	seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
6053 		   priv->dma_cap.number_tx_channel);
6054 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6055 		   priv->dma_cap.number_rx_queues);
6056 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6057 		   priv->dma_cap.number_tx_queues);
6058 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6059 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6060 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6061 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6062 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6063 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6064 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6065 		   priv->dma_cap.pps_out_num);
6066 	seq_printf(seq, "\tSafety Features: %s\n",
6067 		   priv->dma_cap.asp ? "Y" : "N");
6068 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6069 		   priv->dma_cap.frpsel ? "Y" : "N");
6070 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6071 		   priv->dma_cap.addr64);
6072 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6073 		   priv->dma_cap.rssen ? "Y" : "N");
6074 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6075 		   priv->dma_cap.vlhash ? "Y" : "N");
6076 	seq_printf(seq, "\tSplit Header: %s\n",
6077 		   priv->dma_cap.sphen ? "Y" : "N");
6078 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6079 		   priv->dma_cap.vlins ? "Y" : "N");
6080 	seq_printf(seq, "\tDouble VLAN: %s\n",
6081 		   priv->dma_cap.dvlan ? "Y" : "N");
6082 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6083 		   priv->dma_cap.l3l4fnum);
6084 	seq_printf(seq, "\tARP Offloading: %s\n",
6085 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6086 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6087 		   priv->dma_cap.estsel ? "Y" : "N");
6088 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6089 		   priv->dma_cap.fpesel ? "Y" : "N");
6090 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6091 		   priv->dma_cap.tbssel ? "Y" : "N");
6092 	return 0;
6093 }
6094 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6095 
6096 /* Use network device events to rename debugfs file entries.
6097  */
6098 static int stmmac_device_event(struct notifier_block *unused,
6099 			       unsigned long event, void *ptr)
6100 {
6101 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6102 	struct stmmac_priv *priv = netdev_priv(dev);
6103 
6104 	if (dev->netdev_ops != &stmmac_netdev_ops)
6105 		goto done;
6106 
6107 	switch (event) {
6108 	case NETDEV_CHANGENAME:
6109 		if (priv->dbgfs_dir)
6110 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6111 							 priv->dbgfs_dir,
6112 							 stmmac_fs_dir,
6113 							 dev->name);
6114 		break;
6115 	}
6116 done:
6117 	return NOTIFY_DONE;
6118 }
6119 
6120 static struct notifier_block stmmac_notifier = {
6121 	.notifier_call = stmmac_device_event,
6122 };
6123 
6124 static void stmmac_init_fs(struct net_device *dev)
6125 {
6126 	struct stmmac_priv *priv = netdev_priv(dev);
6127 
6128 	rtnl_lock();
6129 
6130 	/* Create per netdev entries */
6131 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6132 
6133 	/* Entry to report DMA RX/TX rings */
6134 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6135 			    &stmmac_rings_status_fops);
6136 
6137 	/* Entry to report the DMA HW features */
6138 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6139 			    &stmmac_dma_cap_fops);
6140 
6141 	rtnl_unlock();
6142 }
6143 
6144 static void stmmac_exit_fs(struct net_device *dev)
6145 {
6146 	struct stmmac_priv *priv = netdev_priv(dev);
6147 
6148 	debugfs_remove_recursive(priv->dbgfs_dir);
6149 }
6150 #endif /* CONFIG_DEBUG_FS */
6151 
6152 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6153 {
6154 	unsigned char *data = (unsigned char *)&vid_le;
6155 	unsigned char data_byte = 0;
6156 	u32 crc = ~0x0;
6157 	u32 temp = 0;
6158 	int i, bits;
6159 
6160 	bits = get_bitmask_order(VLAN_VID_MASK);
6161 	for (i = 0; i < bits; i++) {
6162 		if ((i % 8) == 0)
6163 			data_byte = data[i / 8];
6164 
6165 		temp = ((crc & 1) ^ data_byte) & 1;
6166 		crc >>= 1;
6167 		data_byte >>= 1;
6168 
6169 		if (temp)
6170 			crc ^= 0xedb88320;
6171 	}
6172 
6173 	return crc;
6174 }
6175 
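/**
 *  stmmac_vlan_update - reprogram the HW VLAN filter
 *  @priv: driver private structure
 *  @is_double: true when dealing with double (S-TAG) VLAN
 *  Description: rebuild the VLAN hash from the currently active VIDs, or
 *  fall back to a single perfect-match entry when the HW has no VLAN hash
 *  filtering support.
 */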
6176 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6177 {
6178 	u32 crc, hash = 0;
6179 	__le16 pmatch = 0;
6180 	int count = 0;
6181 	u16 vid = 0;
6182 
6183 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6184 		__le16 vid_le = cpu_to_le16(vid);
6185 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6186 		hash |= (1 << crc);
6187 		count++;
6188 	}
6189 
6190 	if (!priv->dma_cap.vlhash) {
6191 		if (count > 2) /* VID = 0 always passes filter */
6192 			return -EOPNOTSUPP;
6193 
6194 		pmatch = cpu_to_le16(vid);
6195 		hash = 0;
6196 	}
6197 
6198 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6199 }
6200 
6201 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6202 {
6203 	struct stmmac_priv *priv = netdev_priv(ndev);
6204 	bool is_double = false;
6205 	int ret;
6206 
6207 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6208 		is_double = true;
6209 
6210 	set_bit(vid, priv->active_vlans);
6211 	ret = stmmac_vlan_update(priv, is_double);
6212 	if (ret) {
6213 		clear_bit(vid, priv->active_vlans);
6214 		return ret;
6215 	}
6216 
6217 	if (priv->hw->num_vlan) {
6218 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6219 		if (ret)
6220 			return ret;
6221 	}
6222 
6223 	return 0;
6224 }
6225 
6226 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6227 {
6228 	struct stmmac_priv *priv = netdev_priv(ndev);
6229 	bool is_double = false;
6230 	int ret;
6231 
6232 	ret = pm_runtime_get_sync(priv->device);
6233 	if (ret < 0) {
6234 		pm_runtime_put_noidle(priv->device);
6235 		return ret;
6236 	}
6237 
6238 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6239 		is_double = true;
6240 
6241 	clear_bit(vid, priv->active_vlans);
6242 
6243 	if (priv->hw->num_vlan) {
6244 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6245 		if (ret)
6246 			goto del_vlan_error;
6247 	}
6248 
6249 	ret = stmmac_vlan_update(priv, is_double);
6250 
6251 del_vlan_error:
6252 	pm_runtime_put(priv->device);
6253 
6254 	return ret;
6255 }
6256 
6257 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6258 {
6259 	struct stmmac_priv *priv = netdev_priv(dev);
6260 
6261 	switch (bpf->command) {
6262 	case XDP_SETUP_PROG:
6263 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6264 	case XDP_SETUP_XSK_POOL:
6265 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6266 					     bpf->xsk.queue_id);
6267 	default:
6268 		return -EOPNOTSUPP;
6269 	}
6270 }
6271 
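/**
 *  stmmac_xdp_xmit - .ndo_xdp_xmit entry point
 *  @dev: device pointer
 *  @num_frames: number of XDP frames to transmit
 *  @frames: array of XDP frames
 *  @flags: XDP_XMIT_* flags
 *  Description: queue the frames on the XDP TX queue selected for the
 *  current CPU and, when XDP_XMIT_FLUSH is set, kick the DMA and arm the
 *  TX timer. Returns the number of frames sent.
 */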
6272 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6273 			   struct xdp_frame **frames, u32 flags)
6274 {
6275 	struct stmmac_priv *priv = netdev_priv(dev);
6276 	int cpu = smp_processor_id();
6277 	struct netdev_queue *nq;
6278 	int i, nxmit = 0;
6279 	int queue;
6280 
6281 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6282 		return -ENETDOWN;
6283 
6284 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6285 		return -EINVAL;
6286 
6287 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6288 	nq = netdev_get_tx_queue(priv->dev, queue);
6289 
6290 	__netif_tx_lock(nq, cpu);
6291 	/* Avoids TX time-out as we are sharing with slow path */
6292 	nq->trans_start = jiffies;
6293 
6294 	for (i = 0; i < num_frames; i++) {
6295 		int res;
6296 
6297 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6298 		if (res == STMMAC_XDP_CONSUMED)
6299 			break;
6300 
6301 		nxmit++;
6302 	}
6303 
6304 	if (flags & XDP_XMIT_FLUSH) {
6305 		stmmac_flush_tx_descriptors(priv, queue);
6306 		stmmac_tx_timer_arm(priv, queue);
6307 	}
6308 
6309 	__netif_tx_unlock(nq);
6310 
6311 	return nxmit;
6312 }
6313 
6314 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6315 {
6316 	struct stmmac_channel *ch = &priv->channel[queue];
6317 	unsigned long flags;
6318 
6319 	spin_lock_irqsave(&ch->lock, flags);
6320 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6321 	spin_unlock_irqrestore(&ch->lock, flags);
6322 
6323 	stmmac_stop_rx_dma(priv, queue);
6324 	__free_dma_rx_desc_resources(priv, queue);
6325 }
6326 
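/**
 *  stmmac_enable_rx_queue - (re)start a single RX queue
 *  @priv: driver private structure
 *  @queue: RX queue index
 *  Description: allocate and initialize the RX descriptor ring, program the
 *  DMA channel (tail pointer and buffer size, XSK pool aware), start the RX
 *  DMA and re-enable the RX interrupt.
 */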
6327 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6328 {
6329 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6330 	struct stmmac_channel *ch = &priv->channel[queue];
6331 	unsigned long flags;
6332 	u32 buf_size;
6333 	int ret;
6334 
6335 	ret = __alloc_dma_rx_desc_resources(priv, queue);
6336 	if (ret) {
6337 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6338 		return;
6339 	}
6340 
6341 	ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
6342 	if (ret) {
6343 		__free_dma_rx_desc_resources(priv, queue);
6344 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6345 		return;
6346 	}
6347 
6348 	stmmac_clear_rx_descriptors(priv, queue);
6349 
6350 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6351 			    rx_q->dma_rx_phy, rx_q->queue_index);
6352 
6353 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6354 			     sizeof(struct dma_desc));
6355 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6356 			       rx_q->rx_tail_addr, rx_q->queue_index);
6357 
6358 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6359 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6360 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6361 				      buf_size,
6362 				      rx_q->queue_index);
6363 	} else {
6364 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6365 				      priv->dma_buf_sz,
6366 				      rx_q->queue_index);
6367 	}
6368 
6369 	stmmac_start_rx_dma(priv, queue);
6370 
6371 	spin_lock_irqsave(&ch->lock, flags);
6372 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6373 	spin_unlock_irqrestore(&ch->lock, flags);
6374 }
6375 
6376 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6377 {
6378 	struct stmmac_channel *ch = &priv->channel[queue];
6379 	unsigned long flags;
6380 
6381 	spin_lock_irqsave(&ch->lock, flags);
6382 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6383 	spin_unlock_irqrestore(&ch->lock, flags);
6384 
6385 	stmmac_stop_tx_dma(priv, queue);
6386 	__free_dma_tx_desc_resources(priv, queue);
6387 }
6388 
6389 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6390 {
6391 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6392 	struct stmmac_channel *ch = &priv->channel[queue];
6393 	unsigned long flags;
6394 	int ret;
6395 
6396 	ret = __alloc_dma_tx_desc_resources(priv, queue);
6397 	if (ret) {
6398 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6399 		return;
6400 	}
6401 
6402 	ret = __init_dma_tx_desc_rings(priv, queue);
6403 	if (ret) {
6404 		__free_dma_tx_desc_resources(priv, queue);
6405 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6406 		return;
6407 	}
6408 
6409 	stmmac_clear_tx_descriptors(priv, queue);
6410 
6411 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6412 			    tx_q->dma_tx_phy, tx_q->queue_index);
6413 
6414 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6415 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6416 
6417 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6418 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6419 			       tx_q->tx_tail_addr, tx_q->queue_index);
6420 
6421 	stmmac_start_tx_dma(priv, queue);
6422 
6423 	spin_lock_irqsave(&ch->lock, flags);
6424 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6425 	spin_unlock_irqrestore(&ch->lock, flags);
6426 }
6427 
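/**
 *  stmmac_xsk_wakeup - .ndo_xsk_wakeup entry point for AF_XDP
 *  @dev: device pointer
 *  @queue: queue index
 *  @flags: XDP_WAKEUP_* flags
 *  Description: schedule the rxtx NAPI of the channel so that pending
 *  zero-copy RX/TX work gets processed.
 */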
6428 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6429 {
6430 	struct stmmac_priv *priv = netdev_priv(dev);
6431 	struct stmmac_rx_queue *rx_q;
6432 	struct stmmac_tx_queue *tx_q;
6433 	struct stmmac_channel *ch;
6434 
6435 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6436 	    !netif_carrier_ok(priv->dev))
6437 		return -ENETDOWN;
6438 
6439 	if (!stmmac_xdp_is_enabled(priv))
6440 		return -ENXIO;
6441 
6442 	if (queue >= priv->plat->rx_queues_to_use ||
6443 	    queue >= priv->plat->tx_queues_to_use)
6444 		return -EINVAL;
6445 
6446 	rx_q = &priv->rx_queue[queue];
6447 	tx_q = &priv->tx_queue[queue];
6448 	ch = &priv->channel[queue];
6449 
6450 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6451 		return -ENXIO;
6452 
6453 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6454 		/* EQoS does not have a per-DMA channel SW interrupt,
6455 		 * so we schedule the RX NAPI straight away.
6456 		 */
6457 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6458 			__napi_schedule(&ch->rxtx_napi);
6459 	}
6460 
6461 	return 0;
6462 }
6463 
6464 static const struct net_device_ops stmmac_netdev_ops = {
6465 	.ndo_open = stmmac_open,
6466 	.ndo_start_xmit = stmmac_xmit,
6467 	.ndo_stop = stmmac_release,
6468 	.ndo_change_mtu = stmmac_change_mtu,
6469 	.ndo_fix_features = stmmac_fix_features,
6470 	.ndo_set_features = stmmac_set_features,
6471 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6472 	.ndo_tx_timeout = stmmac_tx_timeout,
6473 	.ndo_eth_ioctl = stmmac_ioctl,
6474 	.ndo_setup_tc = stmmac_setup_tc,
6475 	.ndo_select_queue = stmmac_select_queue,
6476 #ifdef CONFIG_NET_POLL_CONTROLLER
6477 	.ndo_poll_controller = stmmac_poll_controller,
6478 #endif
6479 	.ndo_set_mac_address = stmmac_set_mac_address,
6480 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6481 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6482 	.ndo_bpf = stmmac_bpf,
6483 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6484 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6485 };
6486 
6487 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6488 {
6489 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6490 		return;
6491 	if (test_bit(STMMAC_DOWN, &priv->state))
6492 		return;
6493 
6494 	netdev_err(priv->dev, "Reset adapter.\n");
6495 
6496 	rtnl_lock();
6497 	netif_trans_update(priv->dev);
6498 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6499 		usleep_range(1000, 2000);
6500 
6501 	set_bit(STMMAC_DOWN, &priv->state);
6502 	dev_close(priv->dev);
6503 	dev_open(priv->dev, NULL);
6504 	clear_bit(STMMAC_DOWN, &priv->state);
6505 	clear_bit(STMMAC_RESETING, &priv->state);
6506 	rtnl_unlock();
6507 }
6508 
6509 static void stmmac_service_task(struct work_struct *work)
6510 {
6511 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6512 			service_task);
6513 
6514 	stmmac_reset_subtask(priv);
6515 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6516 }
6517 
6518 /**
6519  *  stmmac_hw_init - Init the MAC device
6520  *  @priv: driver private structure
6521  *  Description: this function is to configure the MAC device according to
6522  *  some platform parameters or the HW capability register. It prepares the
6523  *  driver to use either ring or chain modes and to setup either enhanced or
6524  *  normal descriptors.
6525  */
6526 static int stmmac_hw_init(struct stmmac_priv *priv)
6527 {
6528 	int ret;
6529 
6530 	/* dwmac-sun8i only works in chain mode */
6531 	if (priv->plat->has_sun8i)
6532 		chain_mode = 1;
6533 	priv->chain_mode = chain_mode;
6534 
6535 	/* Initialize HW Interface */
6536 	ret = stmmac_hwif_init(priv);
6537 	if (ret)
6538 		return ret;
6539 
6540 	/* Get the HW capability (GMAC cores newer than 3.50a) */
6541 	priv->hw_cap_support = stmmac_get_hw_features(priv);
6542 	if (priv->hw_cap_support) {
6543 		dev_info(priv->device, "DMA HW capability register supported\n");
6544 
6545 		/* We can override some gmac/dma configuration fields
6546 		 * (e.g. enh_desc, tx_coe) that are passed through the
6547 		 * platform with the values from the HW capability
6548 		 * register (if supported).
6549 		 */
6550 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
6551 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6552 				!priv->plat->use_phy_wol;
6553 		priv->hw->pmt = priv->plat->pmt;
6554 		if (priv->dma_cap.hash_tb_sz) {
6555 			priv->hw->multicast_filter_bins =
6556 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
6557 			priv->hw->mcast_bits_log2 =
6558 					ilog2(priv->hw->multicast_filter_bins);
6559 		}
6560 
6561 		/* TXCOE doesn't work in thresh DMA mode */
6562 		if (priv->plat->force_thresh_dma_mode)
6563 			priv->plat->tx_coe = 0;
6564 		else
6565 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
6566 
6567 		/* In case of GMAC4 rx_coe is from HW cap register. */
6568 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
6569 
6570 		if (priv->dma_cap.rx_coe_type2)
6571 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6572 		else if (priv->dma_cap.rx_coe_type1)
6573 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6574 
6575 	} else {
6576 		dev_info(priv->device, "No HW DMA feature register supported\n");
6577 	}
6578 
6579 	if (priv->plat->rx_coe) {
6580 		priv->hw->rx_csum = priv->plat->rx_coe;
6581 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6582 		if (priv->synopsys_id < DWMAC_CORE_4_00)
6583 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6584 	}
6585 	if (priv->plat->tx_coe)
6586 		dev_info(priv->device, "TX Checksum insertion supported\n");
6587 
6588 	if (priv->plat->pmt) {
6589 		dev_info(priv->device, "Wake-Up On LAN supported\n");
6590 		device_set_wakeup_capable(priv->device, 1);
6591 	}
6592 
6593 	if (priv->dma_cap.tsoen)
6594 		dev_info(priv->device, "TSO supported\n");
6595 
6596 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6597 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6598 
6599 	/* Run HW quirks, if any */
6600 	if (priv->hwif_quirks) {
6601 		ret = priv->hwif_quirks(priv);
6602 		if (ret)
6603 			return ret;
6604 	}
6605 
6606 	/* Rx Watchdog is available in cores newer than 3.40.
6607 	 * In some cases, for example on buggy HW, this feature
6608 	 * has to be disabled; this can be done by passing the
6609 	 * riwt_off field from the platform.
6610 	 */
6611 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6612 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6613 		priv->use_riwt = 1;
6614 		dev_info(priv->device,
6615 			 "Enable RX Mitigation via HW Watchdog Timer\n");
6616 	}
6617 
6618 	return 0;
6619 }
6620 
6621 static void stmmac_napi_add(struct net_device *dev)
6622 {
6623 	struct stmmac_priv *priv = netdev_priv(dev);
6624 	u32 queue, maxq;
6625 
6626 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6627 
6628 	for (queue = 0; queue < maxq; queue++) {
6629 		struct stmmac_channel *ch = &priv->channel[queue];
6630 
6631 		ch->priv_data = priv;
6632 		ch->index = queue;
6633 		spin_lock_init(&ch->lock);
6634 
6635 		if (queue < priv->plat->rx_queues_to_use) {
6636 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6637 				       NAPI_POLL_WEIGHT);
6638 		}
6639 		if (queue < priv->plat->tx_queues_to_use) {
6640 			netif_tx_napi_add(dev, &ch->tx_napi,
6641 					  stmmac_napi_poll_tx,
6642 					  NAPI_POLL_WEIGHT);
6643 		}
6644 		if (queue < priv->plat->rx_queues_to_use &&
6645 		    queue < priv->plat->tx_queues_to_use) {
6646 			netif_napi_add(dev, &ch->rxtx_napi,
6647 				       stmmac_napi_poll_rxtx,
6648 				       NAPI_POLL_WEIGHT);
6649 		}
6650 	}
6651 }
6652 
6653 static void stmmac_napi_del(struct net_device *dev)
6654 {
6655 	struct stmmac_priv *priv = netdev_priv(dev);
6656 	u32 queue, maxq;
6657 
6658 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6659 
6660 	for (queue = 0; queue < maxq; queue++) {
6661 		struct stmmac_channel *ch = &priv->channel[queue];
6662 
6663 		if (queue < priv->plat->rx_queues_to_use)
6664 			netif_napi_del(&ch->rx_napi);
6665 		if (queue < priv->plat->tx_queues_to_use)
6666 			netif_napi_del(&ch->tx_napi);
6667 		if (queue < priv->plat->rx_queues_to_use &&
6668 		    queue < priv->plat->tx_queues_to_use) {
6669 			netif_napi_del(&ch->rxtx_napi);
6670 		}
6671 	}
6672 }
6673 
6674 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6675 {
6676 	struct stmmac_priv *priv = netdev_priv(dev);
6677 	int ret = 0;
6678 
6679 	if (netif_running(dev))
6680 		stmmac_release(dev);
6681 
6682 	stmmac_napi_del(dev);
6683 
6684 	priv->plat->rx_queues_to_use = rx_cnt;
6685 	priv->plat->tx_queues_to_use = tx_cnt;
6686 
6687 	stmmac_napi_add(dev);
6688 
6689 	if (netif_running(dev))
6690 		ret = stmmac_open(dev);
6691 
6692 	return ret;
6693 }
6694 
6695 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6696 {
6697 	struct stmmac_priv *priv = netdev_priv(dev);
6698 	int ret = 0;
6699 
6700 	if (netif_running(dev))
6701 		stmmac_release(dev);
6702 
6703 	priv->dma_rx_size = rx_size;
6704 	priv->dma_tx_size = tx_size;
6705 
6706 	if (netif_running(dev))
6707 		ret = stmmac_open(dev);
6708 
6709 	return ret;
6710 }
6711 
6712 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
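/**
 *  stmmac_fpe_lp_task - Frame Preemption handshake worker
 *  @work: work structure
 *  Description: keep sending verify mPackets to the link partner and, once
 *  both stations report entering the ON state, configure FPE in the HW.
 */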
6713 static void stmmac_fpe_lp_task(struct work_struct *work)
6714 {
6715 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6716 						fpe_task);
6717 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6718 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6719 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6720 	bool *hs_enable = &fpe_cfg->hs_enable;
6721 	bool *enable = &fpe_cfg->enable;
6722 	int retries = 20;
6723 
6724 	while (retries-- > 0) {
6725 		/* Bail out immediately if FPE handshake is OFF */
6726 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6727 			break;
6728 
6729 		if (*lo_state == FPE_STATE_ENTERING_ON &&
6730 		    *lp_state == FPE_STATE_ENTERING_ON) {
6731 			stmmac_fpe_configure(priv, priv->ioaddr,
6732 					     priv->plat->tx_queues_to_use,
6733 					     priv->plat->rx_queues_to_use,
6734 					     *enable);
6735 
6736 			netdev_info(priv->dev, "configured FPE\n");
6737 
6738 			*lo_state = FPE_STATE_ON;
6739 			*lp_state = FPE_STATE_ON;
6740 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
6741 			break;
6742 		}
6743 
6744 		if ((*lo_state == FPE_STATE_CAPABLE ||
6745 		     *lo_state == FPE_STATE_ENTERING_ON) &&
6746 		     *lp_state != FPE_STATE_ON) {
6747 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
6748 				    *lo_state, *lp_state);
6749 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6750 						MPACKET_VERIFY);
6751 		}
6752 		/* Sleep then retry */
6753 		msleep(500);
6754 	}
6755 
6756 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
6757 }
6758 
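/**
 *  stmmac_fpe_handshake - start or stop the FPE verification handshake
 *  @priv: driver private structure
 *  @enable: true to start the handshake, false to abort it
 *  Description: on enable, send a verify mPacket to the link partner; on
 *  disable, reset both the local and link-partner FPE states to OFF.
 */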
6759 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
6760 {
6761 	if (priv->plat->fpe_cfg->hs_enable != enable) {
6762 		if (enable) {
6763 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6764 						MPACKET_VERIFY);
6765 		} else {
6766 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
6767 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
6768 		}
6769 
6770 		priv->plat->fpe_cfg->hs_enable = enable;
6771 	}
6772 }
6773 
6774 /**
6775  * stmmac_dvr_probe
6776  * @device: device pointer
6777  * @plat_dat: platform data pointer
6778  * @res: stmmac resource pointer
6779  * Description: this is the main probe function used to
6780  * call the alloc_etherdev, allocate the priv structure.
6781  * Return:
6782  * returns 0 on success, otherwise errno.
6783  */
6784 int stmmac_dvr_probe(struct device *device,
6785 		     struct plat_stmmacenet_data *plat_dat,
6786 		     struct stmmac_resources *res)
6787 {
6788 	struct net_device *ndev = NULL;
6789 	struct stmmac_priv *priv;
6790 	u32 rxq;
6791 	int i, ret = 0;
6792 
6793 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
6794 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
6795 	if (!ndev)
6796 		return -ENOMEM;
6797 
6798 	SET_NETDEV_DEV(ndev, device);
6799 
6800 	priv = netdev_priv(ndev);
6801 	priv->device = device;
6802 	priv->dev = ndev;
6803 
6804 	stmmac_set_ethtool_ops(ndev);
6805 	priv->pause = pause;
6806 	priv->plat = plat_dat;
6807 	priv->ioaddr = res->addr;
6808 	priv->dev->base_addr = (unsigned long)res->addr;
6809 	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
6810 
6811 	priv->dev->irq = res->irq;
6812 	priv->wol_irq = res->wol_irq;
6813 	priv->lpi_irq = res->lpi_irq;
6814 	priv->sfty_ce_irq = res->sfty_ce_irq;
6815 	priv->sfty_ue_irq = res->sfty_ue_irq;
6816 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
6817 		priv->rx_irq[i] = res->rx_irq[i];
6818 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
6819 		priv->tx_irq[i] = res->tx_irq[i];
6820 
6821 	if (!is_zero_ether_addr(res->mac))
6822 		eth_hw_addr_set(priv->dev, res->mac);
6823 
6824 	dev_set_drvdata(device, priv->dev);
6825 
6826 	/* Verify driver arguments */
6827 	stmmac_verify_args();
6828 
6829 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
6830 	if (!priv->af_xdp_zc_qps)
6831 		return -ENOMEM;
6832 
6833 	/* Allocate workqueue */
6834 	priv->wq = create_singlethread_workqueue("stmmac_wq");
6835 	if (!priv->wq) {
6836 		dev_err(priv->device, "failed to create workqueue\n");
6837 		return -ENOMEM;
6838 	}
6839 
6840 	INIT_WORK(&priv->service_task, stmmac_service_task);
6841 
6842 	/* Initialize Link Partner FPE workqueue */
6843 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
6844 
6845 	/* Override with kernel parameters if supplied XXX CRS XXX
6846 	 * this needs to have multiple instances
6847 	 */
6848 	if ((phyaddr >= 0) && (phyaddr <= 31))
6849 		priv->plat->phy_addr = phyaddr;
6850 
6851 	if (priv->plat->stmmac_rst) {
6852 		ret = reset_control_assert(priv->plat->stmmac_rst);
6853 		reset_control_deassert(priv->plat->stmmac_rst);
6854 		/* Some reset controllers only provide a reset callback
6855 		 * instead of an assert + deassert callback pair.
6856 		 */
6857 		if (ret == -ENOTSUPP)
6858 			reset_control_reset(priv->plat->stmmac_rst);
6859 	}
6860 
6861 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
6862 	if (ret == -ENOTSUPP)
6863 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
6864 			ERR_PTR(ret));
6865 
6866 	/* Init MAC and get the capabilities */
6867 	ret = stmmac_hw_init(priv);
6868 	if (ret)
6869 		goto error_hw_init;
6870 
6871 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
6872 	 */
6873 	if (priv->synopsys_id < DWMAC_CORE_5_20)
6874 		priv->plat->dma_cfg->dche = false;
6875 
6876 	stmmac_check_ether_addr(priv);
6877 
6878 	ndev->netdev_ops = &stmmac_netdev_ops;
6879 
6880 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6881 			    NETIF_F_RXCSUM;
6882 
6883 	ret = stmmac_tc_init(priv, priv);
6884 	if (!ret) {
6885 		ndev->hw_features |= NETIF_F_HW_TC;
6886 	}
6887 
6888 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
6889 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
6890 		if (priv->plat->has_gmac4)
6891 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
6892 		priv->tso = true;
6893 		dev_info(priv->device, "TSO feature enabled\n");
6894 	}
6895 
6896 	if (priv->dma_cap.sphen) {
6897 		ndev->hw_features |= NETIF_F_GRO;
6898 		priv->sph_cap = true;
6899 		priv->sph = priv->sph_cap;
6900 		dev_info(priv->device, "SPH feature enabled\n");
6901 	}
6902 
6903 	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
6904 	 * 32/40/64 bit widths, but some SoCs support other widths: the
6905 	 * i.MX8MP, for example, supports 34 bits, reported as a 40 bit
6906 	 * width in MAC_HW_Feature1[ADDR64]. So overwrite dma_cap.addr64
6907 	 * according to the real HW design.
	 */
6908 	if (priv->plat->addr64)
6909 		priv->dma_cap.addr64 = priv->plat->addr64;
6910 
6911 	if (priv->dma_cap.addr64) {
6912 		ret = dma_set_mask_and_coherent(device,
6913 				DMA_BIT_MASK(priv->dma_cap.addr64));
6914 		if (!ret) {
6915 			dev_info(priv->device, "Using %d bits DMA width\n",
6916 				 priv->dma_cap.addr64);
6917 
6918 			/*
6919 			 * If more than 32 bits can be addressed, make sure to
6920 			 * enable enhanced addressing mode.
6921 			 */
6922 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
6923 				priv->plat->dma_cfg->eame = true;
6924 		} else {
6925 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
6926 			if (ret) {
6927 				dev_err(priv->device, "Failed to set DMA Mask\n");
6928 				goto error_hw_init;
6929 			}
6930 
6931 			priv->dma_cap.addr64 = 32;
6932 		}
6933 	}
6934 
6935 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
6936 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
6937 #ifdef STMMAC_VLAN_TAG_USED
6938 	/* Both mac100 and gmac support receive VLAN tag detection */
6939 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
6940 	if (priv->dma_cap.vlhash) {
6941 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6942 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
6943 	}
6944 	if (priv->dma_cap.vlins) {
6945 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
6946 		if (priv->dma_cap.dvlan)
6947 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
6948 	}
6949 #endif
6950 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
6951 
6952 	/* Initialize RSS */
6953 	rxq = priv->plat->rx_queues_to_use;
6954 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
6955 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
6956 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
6957 
6958 	if (priv->dma_cap.rssen && priv->plat->rss_en)
6959 		ndev->features |= NETIF_F_RXHASH;
6960 
6961 	/* MTU range: 46 - hw-specific max */
6962 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
6963 	if (priv->plat->has_xgmac)
6964 		ndev->max_mtu = XGMAC_JUMBO_LEN;
6965 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
6966 		ndev->max_mtu = JUMBO_LEN;
6967 	else
6968 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
6969 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
6970 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
6971 	 */
6972 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
6973 	    (priv->plat->maxmtu >= ndev->min_mtu))
6974 		ndev->max_mtu = priv->plat->maxmtu;
6975 	else if (priv->plat->maxmtu < ndev->min_mtu)
6976 		dev_warn(priv->device,
6977 			 "%s: warning: maxmtu having invalid value (%d)\n",
6978 			 __func__, priv->plat->maxmtu);
6979 
6980 	if (flow_ctrl)
6981 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
6982 
6983 	/* Setup channels NAPI */
6984 	stmmac_napi_add(ndev);
6985 
6986 	mutex_init(&priv->lock);
6987 
6988 	/* If a specific clk_csr value is passed from the platform
6989 	 * this means that the CSR Clock Range selection cannot be
6990 	 * changed at run-time and it is fixed. Otherwise the driver will try
6991 	 * to set the MDC clock dynamically according to the actual CSR
6992 	 * clock input.
6993 	 */
6994 	if (priv->plat->clk_csr >= 0)
6995 		priv->clk_csr = priv->plat->clk_csr;
6996 	else
6997 		stmmac_clk_csr_set(priv);
6998 
6999 	stmmac_check_pcs_mode(priv);
7000 
7001 	pm_runtime_get_noresume(device);
7002 	pm_runtime_set_active(device);
7003 	pm_runtime_enable(device);
7004 
7005 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7006 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7007 		/* MDIO bus Registration */
7008 		ret = stmmac_mdio_register(ndev);
7009 		if (ret < 0) {
7010 			dev_err(priv->device,
7011 				"%s: MDIO bus (id: %d) registration failed",
7012 				__func__, priv->plat->bus_id);
7013 			goto error_mdio_register;
7014 		}
7015 	}
7016 
7017 	if (priv->plat->speed_mode_2500)
7018 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7019 
7020 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7021 		ret = stmmac_xpcs_setup(priv->mii);
7022 		if (ret)
7023 			goto error_xpcs_setup;
7024 	}
7025 
7026 	ret = stmmac_phy_setup(priv);
7027 	if (ret) {
7028 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7029 		goto error_phy_setup;
7030 	}
7031 
7032 	ret = register_netdev(ndev);
7033 	if (ret) {
7034 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7035 			__func__, ret);
7036 		goto error_netdev_register;
7037 	}
7038 
7039 	if (priv->plat->serdes_powerup) {
7040 		ret = priv->plat->serdes_powerup(ndev,
7041 						 priv->plat->bsp_priv);
7042 
7043 		if (ret < 0)
7044 			goto error_serdes_powerup;
7045 	}
7046 
7047 #ifdef CONFIG_DEBUG_FS
7048 	stmmac_init_fs(ndev);
7049 #endif
7050 
7051 	/* Let pm_runtime_put() disable the clocks.
7052 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7053 	 */
7054 	pm_runtime_put(device);
7055 
7056 	return ret;
7057 
7058 error_serdes_powerup:
7059 	unregister_netdev(ndev);
7060 error_netdev_register:
7061 	phylink_destroy(priv->phylink);
7062 error_xpcs_setup:
7063 error_phy_setup:
7064 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7065 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7066 		stmmac_mdio_unregister(ndev);
7067 error_mdio_register:
7068 	stmmac_napi_del(ndev);
7069 error_hw_init:
7070 	destroy_workqueue(priv->wq);
7071 	bitmap_free(priv->af_xdp_zc_qps);
7072 
7073 	return ret;
7074 }
7075 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7076 
/**
 * stmmac_dvr_remove - remove the driver instance
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status, and releases the DMA descriptor rings.
 */
7083 int stmmac_dvr_remove(struct device *dev)
7084 {
7085 	struct net_device *ndev = dev_get_drvdata(dev);
7086 	struct stmmac_priv *priv = netdev_priv(ndev);
7087 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7089 
7090 	stmmac_stop_all_dma(priv);
7091 	stmmac_mac_set(priv, priv->ioaddr, false);
7092 	netif_carrier_off(ndev);
7093 	unregister_netdev(ndev);
7094 
	/* Serdes power down needs to happen after the VLAN filter
	 * deletion that is triggered by unregister_netdev().
	 */
7098 	if (priv->plat->serdes_powerdown)
7099 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7100 
7101 #ifdef CONFIG_DEBUG_FS
7102 	stmmac_exit_fs(ndev);
7103 #endif
7104 	phylink_destroy(priv->phylink);
7105 	if (priv->plat->stmmac_rst)
7106 		reset_control_assert(priv->plat->stmmac_rst);
7107 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7108 	pm_runtime_put(dev);
7109 	pm_runtime_disable(dev);
7110 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7111 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7112 		stmmac_mdio_unregister(ndev);
7113 	destroy_workqueue(priv->wq);
7114 	mutex_destroy(&priv->lock);
7115 	bitmap_free(priv->af_xdp_zc_qps);
7116 
7117 	return 0;
7118 }
7119 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7120 
/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queues, release driver resources
 * and program the PMT register (for WoL).
 */
7128 int stmmac_suspend(struct device *dev)
7129 {
7130 	struct net_device *ndev = dev_get_drvdata(dev);
7131 	struct stmmac_priv *priv = netdev_priv(ndev);
7132 	u32 chan;
7133 
7134 	if (!ndev || !netif_running(ndev))
7135 		return 0;
7136 
7137 	mutex_lock(&priv->lock);
7138 
7139 	netif_device_detach(ndev);
7140 
7141 	stmmac_disable_all_queues(priv);
7142 
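	/* Cancel the per-queue TX coalescing timers */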
7143 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7144 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
7145 
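	/* Stop software EEE/LPI handling while the interface is suspended */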
7146 	if (priv->eee_enabled) {
7147 		priv->tx_path_in_lpi_mode = false;
7148 		del_timer_sync(&priv->eee_ctrl_timer);
7149 	}
7150 
7151 	/* Stop TX/RX DMA */
7152 	stmmac_stop_all_dma(priv);
7153 
7154 	if (priv->plat->serdes_powerdown)
7155 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7156 
7157 	/* Enable Power down mode by programming the PMT regs */
7158 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7159 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7160 		priv->irq_wake = 1;
7161 	} else {
7162 		stmmac_mac_set(priv, priv->ioaddr, false);
7163 		pinctrl_pm_select_sleep_state(priv->device);
7164 	}
7165 
7166 	mutex_unlock(&priv->lock);
7167 
7168 	rtnl_lock();
7169 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7170 		phylink_suspend(priv->phylink, true);
7171 	} else {
7172 		if (device_may_wakeup(priv->device))
7173 			phylink_speed_down(priv->phylink, false);
7174 		phylink_suspend(priv->phylink, false);
7175 	}
7176 	rtnl_unlock();
7177 
7178 	if (priv->dma_cap.fpesel) {
7179 		/* Disable FPE */
7180 		stmmac_fpe_configure(priv, priv->ioaddr,
7181 				     priv->plat->tx_queues_to_use,
7182 				     priv->plat->rx_queues_to_use, false);
7183 
7184 		stmmac_fpe_handshake(priv, false);
7185 		stmmac_fpe_stop_wq(priv);
7186 	}
7187 
7188 	priv->speed = SPEED_UNKNOWN;
7189 	return 0;
7190 }
7191 EXPORT_SYMBOL_GPL(stmmac_suspend);
7192 
7193 /**
7194  * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
7196  */
7197 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7198 {
7199 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7200 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7201 	u32 queue;
7202 
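	/* Rewind the RX ring indices so refill restarts from a clean state */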
7203 	for (queue = 0; queue < rx_cnt; queue++) {
7204 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
7205 
7206 		rx_q->cur_rx = 0;
7207 		rx_q->dirty_rx = 0;
7208 	}
7209 
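	/* Rewind the TX ring indices, clear the cached TSO MSS and reset the
	 * BQL state of each TX queue.
	 */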
7210 	for (queue = 0; queue < tx_cnt; queue++) {
7211 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7212 
7213 		tx_q->cur_tx = 0;
7214 		tx_q->dirty_tx = 0;
7215 		tx_q->mss = 0;
7216 
7217 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7218 	}
7219 }
7220 
7221 /**
7222  * stmmac_resume - resume callback
7223  * @dev: device pointer
 * Description: on resume, this function is invoked to bring the DMA and CORE
 * back to a usable state.
7226  */
7227 int stmmac_resume(struct device *dev)
7228 {
7229 	struct net_device *ndev = dev_get_drvdata(dev);
7230 	struct stmmac_priv *priv = netdev_priv(ndev);
7231 	int ret;
7232 
7233 	if (!netif_running(ndev))
7234 		return 0;
7235 
	/* The Power Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Even so, it is better to clear this bit manually because it can
	 * cause problems when resuming from other devices (e.g. a serial
	 * console).
	 */
7242 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7243 		mutex_lock(&priv->lock);
7244 		stmmac_pmt(priv, priv->hw, 0);
7245 		mutex_unlock(&priv->lock);
7246 		priv->irq_wake = 0;
7247 	} else {
7248 		pinctrl_pm_select_default_state(priv->device);
		/* Reset the PHY so that it is ready */
7250 		if (priv->mii)
7251 			stmmac_mdio_reset(priv->mii);
7252 	}
7253 
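	/* Power the SerDes lanes back up before the link is brought up */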
7254 	if (priv->plat->serdes_powerup) {
7255 		ret = priv->plat->serdes_powerup(ndev,
7256 						 priv->plat->bsp_priv);
7257 
7258 		if (ret < 0)
7259 			return ret;
7260 	}
7261 
7262 	rtnl_lock();
7263 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7264 		phylink_resume(priv->phylink);
7265 	} else {
7266 		phylink_resume(priv->phylink);
7267 		if (device_may_wakeup(priv->device))
7268 			phylink_speed_up(priv->phylink);
7269 	}
7270 	rtnl_unlock();
7271 
7272 	rtnl_lock();
7273 	mutex_lock(&priv->lock);
7274 
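	/* Re-initialize the DMA rings and reprogram the hardware state that
	 * was lost across suspend.
	 */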
7275 	stmmac_reset_queues_param(priv);
7276 
7277 	stmmac_free_tx_skbufs(priv);
7278 	stmmac_clear_descriptors(priv);
7279 
7280 	stmmac_hw_setup(ndev, false);
7281 	stmmac_init_coalesce(priv);
7282 	stmmac_set_rx_mode(ndev);
7283 
7284 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7285 
7286 	stmmac_enable_all_queues(priv);
7287 
7288 	mutex_unlock(&priv->lock);
7289 	rtnl_unlock();
7290 
7291 	netif_device_attach(ndev);
7292 
7293 	return 0;
7294 }
7295 EXPORT_SYMBOL_GPL(stmmac_resume);
7296 
7297 #ifndef MODULE
7298 static int __init stmmac_cmdline_opt(char *str)
7299 {
7300 	char *opt;
7301 
7302 	if (!str || !*str)
7303 		return -EINVAL;
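	/* Parse comma-separated "option:value" pairs passed on the kernel
	 * command line via the "stmmaceth=" option.
	 */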
7304 	while ((opt = strsep(&str, ",")) != NULL) {
7305 		if (!strncmp(opt, "debug:", 6)) {
7306 			if (kstrtoint(opt + 6, 0, &debug))
7307 				goto err;
7308 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7309 			if (kstrtoint(opt + 8, 0, &phyaddr))
7310 				goto err;
7311 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7312 			if (kstrtoint(opt + 7, 0, &buf_sz))
7313 				goto err;
7314 		} else if (!strncmp(opt, "tc:", 3)) {
7315 			if (kstrtoint(opt + 3, 0, &tc))
7316 				goto err;
7317 		} else if (!strncmp(opt, "watchdog:", 9)) {
7318 			if (kstrtoint(opt + 9, 0, &watchdog))
7319 				goto err;
7320 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7321 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7322 				goto err;
7323 		} else if (!strncmp(opt, "pause:", 6)) {
7324 			if (kstrtoint(opt + 6, 0, &pause))
7325 				goto err;
7326 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7327 			if (kstrtoint(opt + 10, 0, &eee_timer))
7328 				goto err;
7329 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7330 			if (kstrtoint(opt + 11, 0, &chain_mode))
7331 				goto err;
7332 		}
7333 	}
7334 	return 0;
7335 
7336 err:
	pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
7338 	return -EINVAL;
7339 }
7340 
7341 __setup("stmmaceth=", stmmac_cmdline_opt);
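
/* Example: "stmmaceth=watchdog:4000,debug:16" on the kernel command line.
 * This path is boot-time only; when stmmac is built as a module, the
 * equivalent module parameters are used instead.
 */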
7342 #endif /* MODULE */
7343 
7344 static int __init stmmac_init(void)
7345 {
7346 #ifdef CONFIG_DEBUG_FS
7347 	/* Create debugfs main directory if it doesn't exist yet */
7348 	if (!stmmac_fs_dir)
7349 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7350 	register_netdevice_notifier(&stmmac_notifier);
7351 #endif
7352 
7353 	return 0;
7354 }
7355 
7356 static void __exit stmmac_exit(void)
7357 {
7358 #ifdef CONFIG_DEBUG_FS
7359 	unregister_netdevice_notifier(&stmmac_notifier);
7360 	debugfs_remove_recursive(stmmac_fs_dir);
7361 #endif
7362 }
7363 
7364 module_init(stmmac_init)
7365 module_exit(stmmac_exit)
7366 
7367 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7368 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7369 MODULE_LICENSE("GPL");
7370