1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52 
53 /* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
55  * (clock jumps) when changing timestamping settings at runtime.
56  */
57 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58 				 PTP_TCR_TSCTRLSSR)
59 
60 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
62 
63 /* Module parameters */
64 #define TX_TIMEO	5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68 
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72 
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76 
77 #define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
79 
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX	256
82 #define STMMAC_TX_XSK_AVAIL		16
83 #define STMMAC_RX_FILL_BATCH		16
84 
85 #define STMMAC_XDP_PASS		0
86 #define STMMAC_XDP_CONSUMED	BIT(0)
87 #define STMMAC_XDP_TX		BIT(1)
88 #define STMMAC_XDP_REDIRECT	BIT(2)
89 
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93 
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97 
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102 
103 #define	DEFAULT_BUFSIZE	1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107 
108 #define	STMMAC_RX_COPYBREAK	256
109 
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
112 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113 
114 #define STMMAC_DEFAULT_LPI_TIMER	1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119 
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but the user can force the use of chain mode instead of ring mode
122  */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
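
/* Usage sketch (illustrative only): when the core is built as the "stmmac"
 * module, the parameters above can be set at load time, e.g.:
 *
 *	modprobe stmmac eee_timer=2000 buf_sz=4096 chain_mode=1
 *
 * or, when built in, via "stmmac.<param>=<value>" on the kernel command line.
 */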
126 
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
135 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
136 					  u32 rxmode, u32 chan);
137 
138 #ifdef CONFIG_DEBUG_FS
139 static const struct net_device_ops stmmac_netdev_ops;
140 static void stmmac_init_fs(struct net_device *dev);
141 static void stmmac_exit_fs(struct net_device *dev);
142 #endif
143 
144 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
145 
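/**
 * stmmac_bus_clks_config - enable or disable the platform bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: prepare/enable (or disable/unprepare) the stmmac and pclk
 * bus clocks and, if provided, call the platform clks_config() callback.
 */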
146 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
147 {
148 	int ret = 0;
149 
150 	if (enabled) {
151 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
152 		if (ret)
153 			return ret;
154 		ret = clk_prepare_enable(priv->plat->pclk);
155 		if (ret) {
156 			clk_disable_unprepare(priv->plat->stmmac_clk);
157 			return ret;
158 		}
159 		if (priv->plat->clks_config) {
160 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
161 			if (ret) {
162 				clk_disable_unprepare(priv->plat->stmmac_clk);
163 				clk_disable_unprepare(priv->plat->pclk);
164 				return ret;
165 			}
166 		}
167 	} else {
168 		clk_disable_unprepare(priv->plat->stmmac_clk);
169 		clk_disable_unprepare(priv->plat->pclk);
170 		if (priv->plat->clks_config)
171 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
172 	}
173 
174 	return ret;
175 }
176 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
177 
178 /**
179  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
181  * errors.
182  */
183 static void stmmac_verify_args(void)
184 {
185 	if (unlikely(watchdog < 0))
186 		watchdog = TX_TIMEO;
187 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
188 		buf_sz = DEFAULT_BUFSIZE;
189 	if (unlikely(flow_ctrl > 1))
190 		flow_ctrl = FLOW_AUTO;
191 	else if (likely(flow_ctrl < 0))
192 		flow_ctrl = FLOW_OFF;
193 	if (unlikely((pause < 0) || (pause > 0xffff)))
194 		pause = PAUSE_TIME;
195 	if (eee_timer < 0)
196 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
197 }
198 
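/**
 * __stmmac_disable_all_queues - disable NAPI on all queues
 * @priv: driver private structure
 * Description: disable the rx/tx NAPI instances of every channel, or the
 * combined rxtx NAPI instance for AF_XDP zero-copy queues.
 */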
199 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
200 {
201 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
202 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
203 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
204 	u32 queue;
205 
206 	for (queue = 0; queue < maxq; queue++) {
207 		struct stmmac_channel *ch = &priv->channel[queue];
208 
209 		if (stmmac_xdp_is_enabled(priv) &&
210 		    test_bit(queue, priv->af_xdp_zc_qps)) {
211 			napi_disable(&ch->rxtx_napi);
212 			continue;
213 		}
214 
215 		if (queue < rx_queues_cnt)
216 			napi_disable(&ch->rx_napi);
217 		if (queue < tx_queues_cnt)
218 			napi_disable(&ch->tx_napi);
219 	}
220 }
221 
222 /**
223  * stmmac_disable_all_queues - Disable all queues
224  * @priv: driver private structure
225  */
226 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
227 {
228 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
229 	struct stmmac_rx_queue *rx_q;
230 	u32 queue;
231 
232 	/* synchronize_rcu() needed for pending XDP buffers to drain */
233 	for (queue = 0; queue < rx_queues_cnt; queue++) {
234 		rx_q = &priv->rx_queue[queue];
235 		if (rx_q->xsk_pool) {
236 			synchronize_rcu();
237 			break;
238 		}
239 	}
240 
241 	__stmmac_disable_all_queues(priv);
242 }
243 
244 /**
245  * stmmac_enable_all_queues - Enable all queues
246  * @priv: driver private structure
247  */
248 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
249 {
250 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
251 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
252 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
253 	u32 queue;
254 
255 	for (queue = 0; queue < maxq; queue++) {
256 		struct stmmac_channel *ch = &priv->channel[queue];
257 
258 		if (stmmac_xdp_is_enabled(priv) &&
259 		    test_bit(queue, priv->af_xdp_zc_qps)) {
260 			napi_enable(&ch->rxtx_napi);
261 			continue;
262 		}
263 
264 		if (queue < rx_queues_cnt)
265 			napi_enable(&ch->rx_napi);
266 		if (queue < tx_queues_cnt)
267 			napi_enable(&ch->tx_napi);
268 	}
269 }
270 
271 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
272 {
273 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
274 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
275 		queue_work(priv->wq, &priv->service_task);
276 }
277 
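/**
 * stmmac_global_err - handle a fatal global error
 * @priv: driver private structure
 * Description: take the carrier down, flag that a reset has been requested
 * and schedule the service task to perform the recovery.
 */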
278 static void stmmac_global_err(struct stmmac_priv *priv)
279 {
280 	netif_carrier_off(priv->dev);
281 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
282 	stmmac_service_event_schedule(priv);
283 }
284 
285 /**
286  * stmmac_clk_csr_set - dynamically set the MDC clock
287  * @priv: driver private structure
288  * Description: this is to dynamically set the MDC clock according to the csr
289  * clock input.
290  * Note:
291  *	If a specific clk_csr value is passed from the platform
292  *	this means that the CSR Clock Range selection cannot be
293  *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise, the driver will try to set the MDC
295  *	clock dynamically according to the actual clock input.
296  */
297 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
298 {
299 	u32 clk_rate;
300 
301 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
302 
	/* The platform-provided default clk_csr is assumed valid for all
	 * cases except the ones handled below. For values higher than the
	 * IEEE 802.3 specified frequency range we cannot estimate the
	 * proper divider, because the frequency of clk_csr_i is not known,
	 * so the default divider is left unchanged.
	 */
310 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
311 		if (clk_rate < CSR_F_35M)
312 			priv->clk_csr = STMMAC_CSR_20_35M;
313 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
314 			priv->clk_csr = STMMAC_CSR_35_60M;
315 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
316 			priv->clk_csr = STMMAC_CSR_60_100M;
317 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
318 			priv->clk_csr = STMMAC_CSR_100_150M;
319 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
320 			priv->clk_csr = STMMAC_CSR_150_250M;
321 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
322 			priv->clk_csr = STMMAC_CSR_250_300M;
323 	}
324 
325 	if (priv->plat->has_sun8i) {
326 		if (clk_rate > 160000000)
327 			priv->clk_csr = 0x03;
328 		else if (clk_rate > 80000000)
329 			priv->clk_csr = 0x02;
330 		else if (clk_rate > 40000000)
331 			priv->clk_csr = 0x01;
332 		else
333 			priv->clk_csr = 0;
334 	}
335 
336 	if (priv->plat->has_xgmac) {
337 		if (clk_rate > 400000000)
338 			priv->clk_csr = 0x5;
339 		else if (clk_rate > 350000000)
340 			priv->clk_csr = 0x4;
341 		else if (clk_rate > 300000000)
342 			priv->clk_csr = 0x3;
343 		else if (clk_rate > 250000000)
344 			priv->clk_csr = 0x2;
345 		else if (clk_rate > 150000000)
346 			priv->clk_csr = 0x1;
347 		else
348 			priv->clk_csr = 0x0;
349 	}
350 }
351 
352 static void print_pkt(unsigned char *buf, int len)
353 {
354 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
355 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
356 }
357 
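/**
 * stmmac_tx_avail - get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 */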
358 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
359 {
360 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
361 	u32 avail;
362 
363 	if (tx_q->dirty_tx > tx_q->cur_tx)
364 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
365 	else
366 		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
367 
368 	return avail;
369 }
370 
371 /**
372  * stmmac_rx_dirty - Get RX queue dirty
373  * @priv: driver private structure
374  * @queue: RX queue index
375  */
376 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
377 {
378 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
379 	u32 dirty;
380 
381 	if (rx_q->dirty_rx <= rx_q->cur_rx)
382 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
383 	else
384 		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
385 
386 	return dirty;
387 }
388 
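/**
 * stmmac_lpi_entry_timer_config - configure the HW LPI entry timer
 * @priv: driver private structure
 * @en: enable (true) or disable (false) the HW LPI entry timer
 * Description: the SW EEE timer flag is set to the opposite of @en, and the
 * HW timer is programmed with tx_lpi_timer when enabled, or with 0 when
 * disabled.
 */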
389 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
390 {
391 	int tx_lpi_timer;
392 
393 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
394 	priv->eee_sw_timer_en = en ? 0 : 1;
395 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
396 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
397 }
398 
399 /**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function checks that all TX queues are idle and, if so,
 * puts the MAC into LPI mode when EEE is enabled.
404  */
405 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
406 {
407 	u32 tx_cnt = priv->plat->tx_queues_to_use;
408 	u32 queue;
409 
410 	/* check if all TX queues have the work finished */
411 	for (queue = 0; queue < tx_cnt; queue++) {
412 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
413 
414 		if (tx_q->dirty_tx != tx_q->cur_tx)
415 			return; /* still unfinished work */
416 	}
417 
418 	/* Check and enter in LPI mode */
419 	if (!priv->tx_path_in_lpi_mode)
420 		stmmac_set_eee_mode(priv, priv->hw,
421 				priv->plat->en_tx_lpi_clockgating);
422 }
423 
424 /**
425  * stmmac_disable_eee_mode - disable and exit from LPI mode
426  * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state
 * is active. It is called from the xmit path.
429  */
430 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
431 {
432 	if (!priv->eee_sw_timer_en) {
433 		stmmac_lpi_entry_timer_config(priv, 0);
434 		return;
435 	}
436 
437 	stmmac_reset_eee_mode(priv, priv->hw);
438 	del_timer_sync(&priv->eee_ctrl_timer);
439 	priv->tx_path_in_lpi_mode = false;
440 }
441 
442 /**
443  * stmmac_eee_ctrl_timer - EEE TX SW timer.
444  * @t:  timer_list struct containing private info
445  * Description:
 *  if there is no data transfer and we are not already in the LPI state,
 *  then the MAC transmitter can be moved to the LPI state.
448  */
449 static void stmmac_eee_ctrl_timer(struct timer_list *t)
450 {
451 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
452 
453 	stmmac_enable_eee_mode(priv);
454 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
455 }
456 
457 /**
458  * stmmac_eee_init - init EEE
459  * @priv: driver private structure
460  * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
464  */
465 bool stmmac_eee_init(struct stmmac_priv *priv)
466 {
467 	int eee_tw_timer = priv->eee_tw_timer;
468 
	/* When using the PCS we cannot deal with the phy registers at this
	 * stage, so we do not support extra features like EEE.
471 	 */
472 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
473 	    priv->hw->pcs == STMMAC_PCS_RTBI)
474 		return false;
475 
476 	/* Check if MAC core supports the EEE feature. */
477 	if (!priv->dma_cap.eee)
478 		return false;
479 
480 	mutex_lock(&priv->lock);
481 
482 	/* Check if it needs to be deactivated */
483 	if (!priv->eee_active) {
484 		if (priv->eee_enabled) {
485 			netdev_dbg(priv->dev, "disable EEE\n");
486 			stmmac_lpi_entry_timer_config(priv, 0);
487 			del_timer_sync(&priv->eee_ctrl_timer);
488 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
489 			if (priv->hw->xpcs)
490 				xpcs_config_eee(priv->hw->xpcs,
491 						priv->plat->mult_fact_100ns,
492 						false);
493 		}
494 		mutex_unlock(&priv->lock);
495 		return false;
496 	}
497 
498 	if (priv->eee_active && !priv->eee_enabled) {
499 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
500 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
501 				     eee_tw_timer);
502 		if (priv->hw->xpcs)
503 			xpcs_config_eee(priv->hw->xpcs,
504 					priv->plat->mult_fact_100ns,
505 					true);
506 	}
507 
508 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
509 		del_timer_sync(&priv->eee_ctrl_timer);
510 		priv->tx_path_in_lpi_mode = false;
511 		stmmac_lpi_entry_timer_config(priv, 1);
512 	} else {
513 		stmmac_lpi_entry_timer_config(priv, 0);
514 		mod_timer(&priv->eee_ctrl_timer,
515 			  STMMAC_LPI_T(priv->tx_lpi_timer));
516 	}
517 
518 	mutex_unlock(&priv->lock);
519 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
520 	return true;
521 }
522 
523 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
524  * @priv: driver private structure
525  * @p : descriptor pointer
526  * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
530  */
531 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
532 				   struct dma_desc *p, struct sk_buff *skb)
533 {
534 	struct skb_shared_hwtstamps shhwtstamp;
535 	bool found = false;
536 	u64 ns = 0;
537 
538 	if (!priv->hwts_tx_en)
539 		return;
540 
541 	/* exit if skb doesn't support hw tstamp */
542 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
543 		return;
544 
545 	/* check tx tstamp status */
546 	if (stmmac_get_tx_timestamp_status(priv, p)) {
547 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
548 		found = true;
549 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
550 		found = true;
551 	}
552 
553 	if (found) {
554 		ns -= priv->plat->cdc_error_adj;
555 
556 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
557 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
558 
559 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
560 		/* pass tstamp to stack */
561 		skb_tstamp_tx(skb, &shhwtstamp);
562 	}
563 }
564 
565 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
566  * @priv: driver private structure
567  * @p : descriptor pointer
568  * @np : next descriptor pointer
569  * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
573  */
574 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
575 				   struct dma_desc *np, struct sk_buff *skb)
576 {
577 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
578 	struct dma_desc *desc = p;
579 	u64 ns = 0;
580 
581 	if (!priv->hwts_rx_en)
582 		return;
583 	/* For GMAC4, the valid timestamp is from CTX next desc. */
584 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
585 		desc = np;
586 
587 	/* Check if timestamp is available */
588 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
589 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
590 
591 		ns -= priv->plat->cdc_error_adj;
592 
593 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
594 		shhwtstamp = skb_hwtstamps(skb);
595 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
596 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
597 	} else  {
598 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
599 	}
600 }
601 
602 /**
603  *  stmmac_hwtstamp_set - control hardware timestamping.
604  *  @dev: device pointer.
605  *  @ifr: An IOCTL specific structure, that can contain a pointer to
606  *  a proprietary structure used to pass information to the driver.
607  *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative integer on failure.
612  */
613 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
614 {
615 	struct stmmac_priv *priv = netdev_priv(dev);
616 	struct hwtstamp_config config;
617 	u32 ptp_v2 = 0;
618 	u32 tstamp_all = 0;
619 	u32 ptp_over_ipv4_udp = 0;
620 	u32 ptp_over_ipv6_udp = 0;
621 	u32 ptp_over_ethernet = 0;
622 	u32 snap_type_sel = 0;
623 	u32 ts_master_en = 0;
624 	u32 ts_event_en = 0;
625 
626 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
627 		netdev_alert(priv->dev, "No support for HW time stamping\n");
628 		priv->hwts_tx_en = 0;
629 		priv->hwts_rx_en = 0;
630 
631 		return -EOPNOTSUPP;
632 	}
633 
634 	if (copy_from_user(&config, ifr->ifr_data,
635 			   sizeof(config)))
636 		return -EFAULT;
637 
638 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
639 		   __func__, config.flags, config.tx_type, config.rx_filter);
640 
641 	if (config.tx_type != HWTSTAMP_TX_OFF &&
642 	    config.tx_type != HWTSTAMP_TX_ON)
643 		return -ERANGE;
644 
645 	if (priv->adv_ts) {
646 		switch (config.rx_filter) {
647 		case HWTSTAMP_FILTER_NONE:
648 			/* time stamp no incoming packet at all */
649 			config.rx_filter = HWTSTAMP_FILTER_NONE;
650 			break;
651 
652 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
653 			/* PTP v1, UDP, any kind of event packet */
654 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
655 			/* 'xmac' hardware can support Sync, Pdelay_Req and
656 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
657 			 * This leaves Delay_Req timestamps out.
658 			 * Enable all events *and* general purpose message
659 			 * timestamping
660 			 */
661 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
662 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
663 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
664 			break;
665 
666 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
667 			/* PTP v1, UDP, Sync packet */
668 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
669 			/* take time stamp for SYNC messages only */
670 			ts_event_en = PTP_TCR_TSEVNTENA;
671 
672 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
673 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
674 			break;
675 
676 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
677 			/* PTP v1, UDP, Delay_req packet */
678 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
679 			/* take time stamp for Delay_Req messages only */
680 			ts_master_en = PTP_TCR_TSMSTRENA;
681 			ts_event_en = PTP_TCR_TSEVNTENA;
682 
683 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
684 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
685 			break;
686 
687 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
688 			/* PTP v2, UDP, any kind of event packet */
689 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
690 			ptp_v2 = PTP_TCR_TSVER2ENA;
691 			/* take time stamp for all event messages */
692 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
693 
694 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
695 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
696 			break;
697 
698 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
699 			/* PTP v2, UDP, Sync packet */
700 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
701 			ptp_v2 = PTP_TCR_TSVER2ENA;
702 			/* take time stamp for SYNC messages only */
703 			ts_event_en = PTP_TCR_TSEVNTENA;
704 
705 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
706 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
707 			break;
708 
709 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
710 			/* PTP v2, UDP, Delay_req packet */
711 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
712 			ptp_v2 = PTP_TCR_TSVER2ENA;
713 			/* take time stamp for Delay_Req messages only */
714 			ts_master_en = PTP_TCR_TSMSTRENA;
715 			ts_event_en = PTP_TCR_TSEVNTENA;
716 
717 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
718 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
719 			break;
720 
721 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
722 			/* PTP v2/802.AS1 any layer, any kind of event packet */
723 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
724 			ptp_v2 = PTP_TCR_TSVER2ENA;
725 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
726 			if (priv->synopsys_id < DWMAC_CORE_4_10)
727 				ts_event_en = PTP_TCR_TSEVNTENA;
728 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
729 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
730 			ptp_over_ethernet = PTP_TCR_TSIPENA;
731 			break;
732 
733 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
734 			/* PTP v2/802.AS1, any layer, Sync packet */
735 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
736 			ptp_v2 = PTP_TCR_TSVER2ENA;
737 			/* take time stamp for SYNC messages only */
738 			ts_event_en = PTP_TCR_TSEVNTENA;
739 
740 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
741 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
742 			ptp_over_ethernet = PTP_TCR_TSIPENA;
743 			break;
744 
745 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
746 			/* PTP v2/802.AS1, any layer, Delay_req packet */
747 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
748 			ptp_v2 = PTP_TCR_TSVER2ENA;
749 			/* take time stamp for Delay_Req messages only */
750 			ts_master_en = PTP_TCR_TSMSTRENA;
751 			ts_event_en = PTP_TCR_TSEVNTENA;
752 
753 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
754 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
755 			ptp_over_ethernet = PTP_TCR_TSIPENA;
756 			break;
757 
758 		case HWTSTAMP_FILTER_NTP_ALL:
759 		case HWTSTAMP_FILTER_ALL:
760 			/* time stamp any incoming packet */
761 			config.rx_filter = HWTSTAMP_FILTER_ALL;
762 			tstamp_all = PTP_TCR_TSENALL;
763 			break;
764 
765 		default:
766 			return -ERANGE;
767 		}
768 	} else {
769 		switch (config.rx_filter) {
770 		case HWTSTAMP_FILTER_NONE:
771 			config.rx_filter = HWTSTAMP_FILTER_NONE;
772 			break;
773 		default:
774 			/* PTP v1, UDP, any kind of event packet */
775 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
776 			break;
777 		}
778 	}
779 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
780 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
781 
782 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
783 
784 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
785 		priv->systime_flags |= tstamp_all | ptp_v2 |
786 				       ptp_over_ethernet | ptp_over_ipv6_udp |
787 				       ptp_over_ipv4_udp | ts_event_en |
788 				       ts_master_en | snap_type_sel;
789 	}
790 
791 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
792 
793 	memcpy(&priv->tstamp_config, &config, sizeof(config));
794 
795 	return copy_to_user(ifr->ifr_data, &config,
796 			    sizeof(config)) ? -EFAULT : 0;
797 }
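
/* Usage sketch from user space (illustrative only, not part of this driver;
 * needs <linux/net_tstamp.h> and <linux/sockios.h>):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * "eth0" and fd (an open datagram socket) are placeholders.
 */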
798 
799 /**
800  *  stmmac_hwtstamp_get - read hardware timestamping.
801  *  @dev: device pointer.
802  *  @ifr: An IOCTL specific structure, that can contain a pointer to
803  *  a proprietary structure used to pass information to the driver.
804  *  Description:
 *  This function obtains the current hardware timestamping settings
806  *  as requested.
807  */
808 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
809 {
810 	struct stmmac_priv *priv = netdev_priv(dev);
811 	struct hwtstamp_config *config = &priv->tstamp_config;
812 
813 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
814 		return -EOPNOTSUPP;
815 
816 	return copy_to_user(ifr->ifr_data, config,
817 			    sizeof(*config)) ? -EFAULT : 0;
818 }
819 
820 /**
821  * stmmac_init_tstamp_counter - init hardware timestamping counter
822  * @priv: driver private structure
823  * @systime_flags: timestamping flags
824  * Description:
825  * Initialize hardware counter for packet timestamping.
826  * This is valid as long as the interface is open and not suspended.
 * It is re-run after resuming from suspend, in which case the timestamping
828  * flags updated by stmmac_hwtstamp_set() also need to be restored.
829  */
830 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
831 {
832 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
833 	struct timespec64 now;
834 	u32 sec_inc = 0;
835 	u64 temp = 0;
836 	int ret;
837 
838 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
839 		return -EOPNOTSUPP;
840 
841 	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
842 	if (ret < 0) {
843 		netdev_warn(priv->dev,
844 			    "failed to enable PTP reference clock: %pe\n",
845 			    ERR_PTR(ret));
846 		return ret;
847 	}
848 
849 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
850 	priv->systime_flags = systime_flags;
851 
852 	/* program Sub Second Increment reg */
853 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
854 					   priv->plat->clk_ptp_rate,
855 					   xmac, &sec_inc);
856 	temp = div_u64(1000000000ULL, sec_inc);
857 
858 	/* Store sub second increment for later use */
859 	priv->sub_second_inc = sec_inc;
860 
	/* Calculate the default addend so that the accumulator overflows
	 * (2^32) at the rate needed for the chosen sub-second increment:
	 * addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate
	 */
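	/* Worked example (illustrative numbers only, not a specific platform):
	 * with clk_ptp_rate = 50 MHz and sec_inc = 40 ns,
	 * 1e9 / sec_inc = 25,000,000 and
	 * addend = (25,000,000 << 32) / 50,000,000 = 0x80000000.
	 */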
866 	temp = (u64)(temp << 32);
867 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
868 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
869 
870 	/* initialize system time */
871 	ktime_get_real_ts64(&now);
872 
873 	/* lower 32 bits of tv_sec are safe until y2106 */
874 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
875 
876 	return 0;
877 }
878 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
879 
880 /**
881  * stmmac_init_ptp - init PTP
882  * @priv: driver private structure
 * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
884  * This is done by looking at the HW cap. register.
885  * This function also registers the ptp driver.
886  */
887 static int stmmac_init_ptp(struct stmmac_priv *priv)
888 {
889 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
890 	int ret;
891 
892 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
893 	if (ret)
894 		return ret;
895 
896 	priv->adv_ts = 0;
897 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
898 	if (xmac && priv->dma_cap.atime_stamp)
899 		priv->adv_ts = 1;
900 	/* Dwmac 3.x core with extend_desc can support adv_ts */
901 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
902 		priv->adv_ts = 1;
903 
904 	if (priv->dma_cap.time_stamp)
905 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
906 
907 	if (priv->adv_ts)
908 		netdev_info(priv->dev,
909 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
910 
911 	priv->hwts_tx_en = 0;
912 	priv->hwts_rx_en = 0;
913 
914 	stmmac_ptp_register(priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static void stmmac_validate(struct phylink_config *config,
940 			    unsigned long *supported,
941 			    struct phylink_link_state *state)
942 {
943 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
944 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
945 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
946 	int tx_cnt = priv->plat->tx_queues_to_use;
947 	int max_speed = priv->plat->max_speed;
948 
949 	phylink_set(mac_supported, 10baseT_Half);
950 	phylink_set(mac_supported, 10baseT_Full);
951 	phylink_set(mac_supported, 100baseT_Half);
952 	phylink_set(mac_supported, 100baseT_Full);
953 	phylink_set(mac_supported, 1000baseT_Half);
954 	phylink_set(mac_supported, 1000baseT_Full);
955 	phylink_set(mac_supported, 1000baseKX_Full);
956 
957 	phylink_set(mac_supported, Autoneg);
958 	phylink_set(mac_supported, Pause);
959 	phylink_set(mac_supported, Asym_Pause);
960 	phylink_set_port_modes(mac_supported);
961 
962 	/* Cut down 1G if asked to */
963 	if ((max_speed > 0) && (max_speed < 1000)) {
964 		phylink_set(mask, 1000baseT_Full);
965 		phylink_set(mask, 1000baseX_Full);
966 	} else if (priv->plat->has_gmac4) {
967 		if (!max_speed || max_speed >= 2500) {
968 			phylink_set(mac_supported, 2500baseT_Full);
969 			phylink_set(mac_supported, 2500baseX_Full);
970 		}
971 	} else if (priv->plat->has_xgmac) {
972 		if (!max_speed || (max_speed >= 2500)) {
973 			phylink_set(mac_supported, 2500baseT_Full);
974 			phylink_set(mac_supported, 2500baseX_Full);
975 		}
976 		if (!max_speed || (max_speed >= 5000)) {
977 			phylink_set(mac_supported, 5000baseT_Full);
978 		}
979 		if (!max_speed || (max_speed >= 10000)) {
980 			phylink_set(mac_supported, 10000baseSR_Full);
981 			phylink_set(mac_supported, 10000baseLR_Full);
982 			phylink_set(mac_supported, 10000baseER_Full);
983 			phylink_set(mac_supported, 10000baseLRM_Full);
984 			phylink_set(mac_supported, 10000baseT_Full);
985 			phylink_set(mac_supported, 10000baseKX4_Full);
986 			phylink_set(mac_supported, 10000baseKR_Full);
987 		}
988 		if (!max_speed || (max_speed >= 25000)) {
989 			phylink_set(mac_supported, 25000baseCR_Full);
990 			phylink_set(mac_supported, 25000baseKR_Full);
991 			phylink_set(mac_supported, 25000baseSR_Full);
992 		}
993 		if (!max_speed || (max_speed >= 40000)) {
994 			phylink_set(mac_supported, 40000baseKR4_Full);
995 			phylink_set(mac_supported, 40000baseCR4_Full);
996 			phylink_set(mac_supported, 40000baseSR4_Full);
997 			phylink_set(mac_supported, 40000baseLR4_Full);
998 		}
999 		if (!max_speed || (max_speed >= 50000)) {
1000 			phylink_set(mac_supported, 50000baseCR2_Full);
1001 			phylink_set(mac_supported, 50000baseKR2_Full);
1002 			phylink_set(mac_supported, 50000baseSR2_Full);
1003 			phylink_set(mac_supported, 50000baseKR_Full);
1004 			phylink_set(mac_supported, 50000baseSR_Full);
1005 			phylink_set(mac_supported, 50000baseCR_Full);
1006 			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
1007 			phylink_set(mac_supported, 50000baseDR_Full);
1008 		}
1009 		if (!max_speed || (max_speed >= 100000)) {
1010 			phylink_set(mac_supported, 100000baseKR4_Full);
1011 			phylink_set(mac_supported, 100000baseSR4_Full);
1012 			phylink_set(mac_supported, 100000baseCR4_Full);
1013 			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
1014 			phylink_set(mac_supported, 100000baseKR2_Full);
1015 			phylink_set(mac_supported, 100000baseSR2_Full);
1016 			phylink_set(mac_supported, 100000baseCR2_Full);
1017 			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
1018 			phylink_set(mac_supported, 100000baseDR2_Full);
1019 		}
1020 	}
1021 
	/* Half-duplex can only work with a single queue */
1023 	if (tx_cnt > 1) {
1024 		phylink_set(mask, 10baseT_Half);
1025 		phylink_set(mask, 100baseT_Half);
1026 		phylink_set(mask, 1000baseT_Half);
1027 	}
1028 
1029 	linkmode_and(supported, supported, mac_supported);
1030 	linkmode_andnot(supported, supported, mask);
1031 
1032 	linkmode_and(state->advertising, state->advertising, mac_supported);
1033 	linkmode_andnot(state->advertising, state->advertising, mask);
1034 
1035 	/* If PCS is supported, check which modes it supports. */
1036 	if (priv->hw->xpcs)
1037 		xpcs_validate(priv->hw->xpcs, supported, state);
1038 }
1039 
1040 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
1041 			      const struct phylink_link_state *state)
1042 {
1043 	/* Nothing to do, xpcs_config() handles everything */
1044 }
1045 
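/**
 * stmmac_fpe_link_state_handle - update the FPE state on a link change
 * @priv: driver private structure
 * @is_up: new link state
 * Description: on link up, if the FPE handshake is enabled, send a verify
 * mPacket; on link down, reset the local and link-partner FPE states.
 */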
1046 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
1047 {
1048 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
1049 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
1050 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
1051 	bool *hs_enable = &fpe_cfg->hs_enable;
1052 
1053 	if (is_up && *hs_enable) {
1054 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
1055 	} else {
1056 		*lo_state = FPE_STATE_OFF;
1057 		*lp_state = FPE_STATE_OFF;
1058 	}
1059 }
1060 
1061 static void stmmac_mac_link_down(struct phylink_config *config,
1062 				 unsigned int mode, phy_interface_t interface)
1063 {
1064 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1065 
1066 	stmmac_mac_set(priv, priv->ioaddr, false);
1067 	priv->eee_active = false;
1068 	priv->tx_lpi_enabled = false;
1069 	priv->eee_enabled = stmmac_eee_init(priv);
1070 	stmmac_set_eee_pls(priv, priv->hw, false);
1071 
1072 	if (priv->dma_cap.fpesel)
1073 		stmmac_fpe_link_state_handle(priv, false);
1074 }
1075 
1076 static void stmmac_mac_link_up(struct phylink_config *config,
1077 			       struct phy_device *phy,
1078 			       unsigned int mode, phy_interface_t interface,
1079 			       int speed, int duplex,
1080 			       bool tx_pause, bool rx_pause)
1081 {
1082 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1083 	u32 ctrl;
1084 
1085 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1086 	ctrl &= ~priv->hw->link.speed_mask;
1087 
1088 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1089 		switch (speed) {
1090 		case SPEED_10000:
1091 			ctrl |= priv->hw->link.xgmii.speed10000;
1092 			break;
1093 		case SPEED_5000:
1094 			ctrl |= priv->hw->link.xgmii.speed5000;
1095 			break;
1096 		case SPEED_2500:
1097 			ctrl |= priv->hw->link.xgmii.speed2500;
1098 			break;
1099 		default:
1100 			return;
1101 		}
1102 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1103 		switch (speed) {
1104 		case SPEED_100000:
1105 			ctrl |= priv->hw->link.xlgmii.speed100000;
1106 			break;
1107 		case SPEED_50000:
1108 			ctrl |= priv->hw->link.xlgmii.speed50000;
1109 			break;
1110 		case SPEED_40000:
1111 			ctrl |= priv->hw->link.xlgmii.speed40000;
1112 			break;
1113 		case SPEED_25000:
1114 			ctrl |= priv->hw->link.xlgmii.speed25000;
1115 			break;
1116 		case SPEED_10000:
1117 			ctrl |= priv->hw->link.xgmii.speed10000;
1118 			break;
1119 		case SPEED_2500:
1120 			ctrl |= priv->hw->link.speed2500;
1121 			break;
1122 		case SPEED_1000:
1123 			ctrl |= priv->hw->link.speed1000;
1124 			break;
1125 		default:
1126 			return;
1127 		}
1128 	} else {
1129 		switch (speed) {
1130 		case SPEED_2500:
1131 			ctrl |= priv->hw->link.speed2500;
1132 			break;
1133 		case SPEED_1000:
1134 			ctrl |= priv->hw->link.speed1000;
1135 			break;
1136 		case SPEED_100:
1137 			ctrl |= priv->hw->link.speed100;
1138 			break;
1139 		case SPEED_10:
1140 			ctrl |= priv->hw->link.speed10;
1141 			break;
1142 		default:
1143 			return;
1144 		}
1145 	}
1146 
1147 	priv->speed = speed;
1148 
1149 	if (priv->plat->fix_mac_speed)
1150 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1151 
1152 	if (!duplex)
1153 		ctrl &= ~priv->hw->link.duplex;
1154 	else
1155 		ctrl |= priv->hw->link.duplex;
1156 
1157 	/* Flow Control operation */
1158 	if (tx_pause && rx_pause)
1159 		stmmac_mac_flow_ctrl(priv, duplex);
1160 
1161 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1162 
1163 	stmmac_mac_set(priv, priv->ioaddr, true);
1164 	if (phy && priv->dma_cap.eee) {
1165 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
1166 		priv->eee_enabled = stmmac_eee_init(priv);
1167 		priv->tx_lpi_enabled = priv->eee_enabled;
1168 		stmmac_set_eee_pls(priv, priv->hw, true);
1169 	}
1170 
1171 	if (priv->dma_cap.fpesel)
1172 		stmmac_fpe_link_state_handle(priv, true);
1173 }
1174 
1175 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1176 	.validate = stmmac_validate,
1177 	.mac_config = stmmac_mac_config,
1178 	.mac_link_down = stmmac_mac_link_down,
1179 	.mac_link_up = stmmac_mac_link_up,
1180 };
1181 
1182 /**
1183  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1184  * @priv: driver private structure
 * Description: this is to verify if the HW supports the Physical Coding
 * Sublayer (PCS), which can be used when the MAC is configured for the
 * TBI, RTBI, or SGMII PHY interface.
1188  */
1189 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1190 {
1191 	int interface = priv->plat->interface;
1192 
1193 	if (priv->dma_cap.pcs) {
1194 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1195 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1196 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1197 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1198 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1199 			priv->hw->pcs = STMMAC_PCS_RGMII;
1200 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1201 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1202 			priv->hw->pcs = STMMAC_PCS_SGMII;
1203 		}
1204 	}
1205 }
1206 
1207 /**
1208  * stmmac_init_phy - PHY initialization
1209  * @dev: net device structure
 * Description: it initializes the driver's PHY state and attaches the PHY
 * to the MAC driver.
1212  *  Return value:
1213  *  0 on success
1214  */
1215 static int stmmac_init_phy(struct net_device *dev)
1216 {
1217 	struct stmmac_priv *priv = netdev_priv(dev);
1218 	struct device_node *node;
1219 	int ret;
1220 
1221 	node = priv->plat->phylink_node;
1222 
1223 	if (node)
1224 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
1225 
	/* Some DT bindings do not set up the PHY handle. Let's try to
	 * parse it manually.
	 */
1229 	if (!node || ret) {
1230 		int addr = priv->plat->phy_addr;
1231 		struct phy_device *phydev;
1232 
1233 		phydev = mdiobus_get_phy(priv->mii, addr);
1234 		if (!phydev) {
1235 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1236 			return -ENODEV;
1237 		}
1238 
1239 		ret = phylink_connect_phy(priv->phylink, phydev);
1240 	}
1241 
1242 	if (!priv->plat->pmt) {
1243 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1244 
1245 		phylink_ethtool_get_wol(priv->phylink, &wol);
1246 		device_set_wakeup_capable(priv->device, !!wol.supported);
1247 	}
1248 
1249 	return ret;
1250 }
1251 
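/**
 * stmmac_phy_setup - create and configure the phylink instance
 * @priv: driver private structure
 * Description: fill in the phylink configuration, create the phylink
 * instance and, if an XPCS is present, attach it as the PCS.
 */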
1252 static int stmmac_phy_setup(struct stmmac_priv *priv)
1253 {
1254 	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1255 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1256 	int mode = priv->plat->phy_interface;
1257 	struct phylink *phylink;
1258 
1259 	priv->phylink_config.dev = &priv->dev->dev;
1260 	priv->phylink_config.type = PHYLINK_NETDEV;
1261 	priv->phylink_config.pcs_poll = true;
1262 	if (priv->plat->mdio_bus_data)
1263 		priv->phylink_config.ovr_an_inband =
1264 			mdio_bus_data->xpcs_an_inband;
1265 
1266 	if (!fwnode)
1267 		fwnode = dev_fwnode(priv->device);
1268 
1269 	phylink = phylink_create(&priv->phylink_config, fwnode,
1270 				 mode, &stmmac_phylink_mac_ops);
1271 	if (IS_ERR(phylink))
1272 		return PTR_ERR(phylink);
1273 
1274 	if (priv->hw->xpcs)
1275 		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
1276 
1277 	priv->phylink = phylink;
1278 	return 0;
1279 }
1280 
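/**
 * stmmac_display_rx_rings - dump the RX descriptor rings
 * @priv: driver private structure
 * Description: print the RX descriptor ring of every RX queue, handling
 * both basic and extended descriptors.
 */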
1281 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1282 {
1283 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1284 	unsigned int desc_size;
1285 	void *head_rx;
1286 	u32 queue;
1287 
1288 	/* Display RX rings */
1289 	for (queue = 0; queue < rx_cnt; queue++) {
1290 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1291 
1292 		pr_info("\tRX Queue %u rings\n", queue);
1293 
1294 		if (priv->extend_desc) {
1295 			head_rx = (void *)rx_q->dma_erx;
1296 			desc_size = sizeof(struct dma_extended_desc);
1297 		} else {
1298 			head_rx = (void *)rx_q->dma_rx;
1299 			desc_size = sizeof(struct dma_desc);
1300 		}
1301 
1302 		/* Display RX ring */
1303 		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1304 				    rx_q->dma_rx_phy, desc_size);
1305 	}
1306 }
1307 
1308 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1309 {
1310 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1311 	unsigned int desc_size;
1312 	void *head_tx;
1313 	u32 queue;
1314 
1315 	/* Display TX rings */
1316 	for (queue = 0; queue < tx_cnt; queue++) {
1317 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1318 
1319 		pr_info("\tTX Queue %d rings\n", queue);
1320 
1321 		if (priv->extend_desc) {
1322 			head_tx = (void *)tx_q->dma_etx;
1323 			desc_size = sizeof(struct dma_extended_desc);
1324 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1325 			head_tx = (void *)tx_q->dma_entx;
1326 			desc_size = sizeof(struct dma_edesc);
1327 		} else {
1328 			head_tx = (void *)tx_q->dma_tx;
1329 			desc_size = sizeof(struct dma_desc);
1330 		}
1331 
1332 		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1333 				    tx_q->dma_tx_phy, desc_size);
1334 	}
1335 }
1336 
1337 static void stmmac_display_rings(struct stmmac_priv *priv)
1338 {
1339 	/* Display RX ring */
1340 	stmmac_display_rx_rings(priv);
1341 
1342 	/* Display TX ring */
1343 	stmmac_display_tx_rings(priv);
1344 }
1345 
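/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: maximum transfer unit
 * @bufsize: current buffer size
 * Description: map the MTU to one of the supported DMA buffer sizes
 * (default, 2KiB, 4KiB, 8KiB or 16KiB).
 */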
1346 static int stmmac_set_bfsize(int mtu, int bufsize)
1347 {
1348 	int ret = bufsize;
1349 
1350 	if (mtu >= BUF_SIZE_8KiB)
1351 		ret = BUF_SIZE_16KiB;
1352 	else if (mtu >= BUF_SIZE_4KiB)
1353 		ret = BUF_SIZE_8KiB;
1354 	else if (mtu >= BUF_SIZE_2KiB)
1355 		ret = BUF_SIZE_4KiB;
1356 	else if (mtu > DEFAULT_BUFSIZE)
1357 		ret = BUF_SIZE_2KiB;
1358 	else
1359 		ret = DEFAULT_BUFSIZE;
1360 
1361 	return ret;
1362 }
1363 
1364 /**
1365  * stmmac_clear_rx_descriptors - clear RX descriptors
1366  * @priv: driver private structure
1367  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
1370  */
1371 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1372 {
1373 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1374 	int i;
1375 
1376 	/* Clear the RX descriptors */
1377 	for (i = 0; i < priv->dma_rx_size; i++)
1378 		if (priv->extend_desc)
1379 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1380 					priv->use_riwt, priv->mode,
1381 					(i == priv->dma_rx_size - 1),
1382 					priv->dma_buf_sz);
1383 		else
1384 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1385 					priv->use_riwt, priv->mode,
1386 					(i == priv->dma_rx_size - 1),
1387 					priv->dma_buf_sz);
1388 }
1389 
1390 /**
1391  * stmmac_clear_tx_descriptors - clear tx descriptors
1392  * @priv: driver private structure
1393  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
1396  */
1397 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1398 {
1399 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1400 	int i;
1401 
1402 	/* Clear the TX descriptors */
1403 	for (i = 0; i < priv->dma_tx_size; i++) {
1404 		int last = (i == (priv->dma_tx_size - 1));
1405 		struct dma_desc *p;
1406 
1407 		if (priv->extend_desc)
1408 			p = &tx_q->dma_etx[i].basic;
1409 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1410 			p = &tx_q->dma_entx[i].basic;
1411 		else
1412 			p = &tx_q->dma_tx[i];
1413 
1414 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1415 	}
1416 }
1417 
1418 /**
1419  * stmmac_clear_descriptors - clear descriptors
1420  * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
1423  */
1424 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1425 {
1426 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1427 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1428 	u32 queue;
1429 
1430 	/* Clear the RX descriptors */
1431 	for (queue = 0; queue < rx_queue_cnt; queue++)
1432 		stmmac_clear_rx_descriptors(priv, queue);
1433 
1434 	/* Clear the TX descriptors */
1435 	for (queue = 0; queue < tx_queue_cnt; queue++)
1436 		stmmac_clear_tx_descriptors(priv, queue);
1437 }
1438 
1439 /**
1440  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1441  * @priv: driver private structure
1442  * @p: descriptor pointer
1443  * @i: descriptor index
1444  * @flags: gfp flag
1445  * @queue: RX queue index
1446  * Description: this function is called to allocate a receive buffer, perform
1447  * the DMA mapping and init the descriptor.
1448  */
1449 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1450 				  int i, gfp_t flags, u32 queue)
1451 {
1452 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1453 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1454 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1455 
1456 	if (priv->dma_cap.addr64 <= 32)
1457 		gfp |= GFP_DMA32;
1458 
1459 	if (!buf->page) {
1460 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1461 		if (!buf->page)
1462 			return -ENOMEM;
1463 		buf->page_offset = stmmac_rx_offset(priv);
1464 	}
1465 
1466 	if (priv->sph && !buf->sec_page) {
1467 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1468 		if (!buf->sec_page)
1469 			return -ENOMEM;
1470 
1471 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1472 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1473 	} else {
1474 		buf->sec_page = NULL;
1475 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1476 	}
1477 
1478 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1479 
1480 	stmmac_set_desc_addr(priv, p, buf->addr);
1481 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1482 		stmmac_init_desc3(priv, p);
1483 
1484 	return 0;
1485 }
1486 
1487 /**
1488  * stmmac_free_rx_buffer - free RX dma buffers
1489  * @priv: private structure
1490  * @queue: RX queue index
1491  * @i: buffer index.
1492  */
1493 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1494 {
1495 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1496 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1497 
1498 	if (buf->page)
1499 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1500 	buf->page = NULL;
1501 
1502 	if (buf->sec_page)
1503 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1504 	buf->sec_page = NULL;
1505 }
1506 
1507 /**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
1511  * @i: buffer index.
1512  */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1514 {
1515 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1516 
1517 	if (tx_q->tx_skbuff_dma[i].buf &&
1518 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1519 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1520 			dma_unmap_page(priv->device,
1521 				       tx_q->tx_skbuff_dma[i].buf,
1522 				       tx_q->tx_skbuff_dma[i].len,
1523 				       DMA_TO_DEVICE);
1524 		else
1525 			dma_unmap_single(priv->device,
1526 					 tx_q->tx_skbuff_dma[i].buf,
1527 					 tx_q->tx_skbuff_dma[i].len,
1528 					 DMA_TO_DEVICE);
1529 	}
1530 
1531 	if (tx_q->xdpf[i] &&
1532 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1533 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1534 		xdp_return_frame(tx_q->xdpf[i]);
1535 		tx_q->xdpf[i] = NULL;
1536 	}
1537 
1538 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1539 		tx_q->xsk_frames_done++;
1540 
1541 	if (tx_q->tx_skbuff[i] &&
1542 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1543 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1544 		tx_q->tx_skbuff[i] = NULL;
1545 	}
1546 
1547 	tx_q->tx_skbuff_dma[i].buf = 0;
1548 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1549 }
1550 
1551 /**
1552  * dma_free_rx_skbufs - free RX dma buffers
1553  * @priv: private structure
1554  * @queue: RX queue index
1555  */
1556 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1557 {
1558 	int i;
1559 
1560 	for (i = 0; i < priv->dma_rx_size; i++)
1561 		stmmac_free_rx_buffer(priv, queue, i);
1562 }
1563 
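/**
 * stmmac_alloc_rx_buffers - allocate page-pool RX buffers for a queue
 * @priv: driver private structure
 * @queue: RX queue index
 * @flags: gfp flag
 * Description: allocate and map a buffer for every RX descriptor of the
 * queue via stmmac_init_rx_buffers().
 */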
1564 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
1565 				   gfp_t flags)
1566 {
1567 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1568 	int i;
1569 
1570 	for (i = 0; i < priv->dma_rx_size; i++) {
1571 		struct dma_desc *p;
1572 		int ret;
1573 
1574 		if (priv->extend_desc)
1575 			p = &((rx_q->dma_erx + i)->basic);
1576 		else
1577 			p = rx_q->dma_rx + i;
1578 
1579 		ret = stmmac_init_rx_buffers(priv, p, i, flags,
1580 					     queue);
1581 		if (ret)
1582 			return ret;
1583 
1584 		rx_q->buf_alloc_num++;
1585 	}
1586 
1587 	return 0;
1588 }
1589 
1590 /**
1591  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1592  * @priv: private structure
1593  * @queue: RX queue index
1594  */
1595 static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1596 {
1597 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1598 	int i;
1599 
1600 	for (i = 0; i < priv->dma_rx_size; i++) {
1601 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1602 
1603 		if (!buf->xdp)
1604 			continue;
1605 
1606 		xsk_buff_free(buf->xdp);
1607 		buf->xdp = NULL;
1608 	}
1609 }
1610 
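/**
 * stmmac_alloc_rx_buffers_zc - allocate XSK pool buffers for a queue
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: allocate an XDP buffer from the XSK pool for every RX
 * descriptor of the queue and program its DMA address in the descriptor.
 */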
1611 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1612 {
1613 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1614 	int i;
1615 
1616 	for (i = 0; i < priv->dma_rx_size; i++) {
1617 		struct stmmac_rx_buffer *buf;
1618 		dma_addr_t dma_addr;
1619 		struct dma_desc *p;
1620 
1621 		if (priv->extend_desc)
1622 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1623 		else
1624 			p = rx_q->dma_rx + i;
1625 
1626 		buf = &rx_q->buf_pool[i];
1627 
1628 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1629 		if (!buf->xdp)
1630 			return -ENOMEM;
1631 
1632 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1633 		stmmac_set_desc_addr(priv, p, dma_addr);
1634 		rx_q->buf_alloc_num++;
1635 	}
1636 
1637 	return 0;
1638 }
1639 
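/**
 * stmmac_get_xsk_pool - get the XSK buffer pool bound to a queue
 * @priv: driver private structure
 * @queue: queue index
 * Description: return the AF_XDP zero-copy pool registered for this queue,
 * or NULL when XDP is disabled or the queue is not in zero-copy mode.
 */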
1640 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1641 {
1642 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1643 		return NULL;
1644 
1645 	return xsk_get_pool_from_qid(priv->dev, queue);
1646 }
1647 
1648 /**
1649  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1650  * @priv: driver private structure
1651  * @queue: RX queue index
1652  * @flags: gfp flag.
1653  * Description: this function initializes the DMA RX descriptors
1654  * and allocates the socket buffers. It supports the chained and ring
1655  * modes.
1656  */
1657 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
1658 {
1659 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1660 	int ret;
1661 
1662 	netif_dbg(priv, probe, priv->dev,
1663 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1664 		  (u32)rx_q->dma_rx_phy);
1665 
1666 	stmmac_clear_rx_descriptors(priv, queue);
1667 
1668 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1669 
1670 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1671 
1672 	if (rx_q->xsk_pool) {
1673 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1674 						   MEM_TYPE_XSK_BUFF_POOL,
1675 						   NULL));
1676 		netdev_info(priv->dev,
1677 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1678 			    rx_q->queue_index);
1679 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1680 	} else {
1681 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1682 						   MEM_TYPE_PAGE_POOL,
1683 						   rx_q->page_pool));
1684 		netdev_info(priv->dev,
1685 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1686 			    rx_q->queue_index);
1687 	}
1688 
1689 	if (rx_q->xsk_pool) {
1690 		/* RX XDP ZC buffer pool may not be populated, e.g.
1691 		 * xdpsock TX-only.
1692 		 */
1693 		stmmac_alloc_rx_buffers_zc(priv, queue);
1694 	} else {
1695 		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
1696 		if (ret < 0)
1697 			return -ENOMEM;
1698 	}
1699 
1700 	rx_q->cur_rx = 0;
1701 	rx_q->dirty_rx = 0;
1702 
1703 	/* Setup the chained descriptor addresses */
1704 	if (priv->mode == STMMAC_CHAIN_MODE) {
1705 		if (priv->extend_desc)
1706 			stmmac_mode_init(priv, rx_q->dma_erx,
1707 					 rx_q->dma_rx_phy,
1708 					 priv->dma_rx_size, 1);
1709 		else
1710 			stmmac_mode_init(priv, rx_q->dma_rx,
1711 					 rx_q->dma_rx_phy,
1712 					 priv->dma_rx_size, 0);
1713 	}
1714 
1715 	return 0;
1716 }
1717 
1718 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1719 {
1720 	struct stmmac_priv *priv = netdev_priv(dev);
1721 	u32 rx_count = priv->plat->rx_queues_to_use;
1722 	u32 queue;
1723 	int ret;
1724 
1725 	/* RX INITIALIZATION */
1726 	netif_dbg(priv, probe, priv->dev,
1727 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1728 
1729 	for (queue = 0; queue < rx_count; queue++) {
1730 		ret = __init_dma_rx_desc_rings(priv, queue, flags);
1731 		if (ret)
1732 			goto err_init_rx_buffers;
1733 	}
1734 
1735 	return 0;
1736 
1737 err_init_rx_buffers:
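	/* Unwind the queues initialized so far. 'queue' is unsigned, so
	 * break out explicitly after cleaning queue 0 instead of
	 * decrementing it below zero.
	 */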
1738 	while (queue >= 0) {
1739 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1740 
1741 		if (rx_q->xsk_pool)
1742 			dma_free_rx_xskbufs(priv, queue);
1743 		else
1744 			dma_free_rx_skbufs(priv, queue);
1745 
1746 		rx_q->buf_alloc_num = 0;
1747 		rx_q->xsk_pool = NULL;
1748 
1749 		if (queue == 0)
1750 			break;
1751 
1752 		queue--;
1753 	}
1754 
1755 	return ret;
1756 }
1757 
1758 /**
1759  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1760  * @priv: driver private structure
 * @queue: TX queue index
1762  * Description: this function initializes the DMA TX descriptors
1763  * and allocates the socket buffers. It supports the chained and ring
1764  * modes.
1765  */
1766 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
1767 {
1768 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1769 	int i;
1770 
1771 	netif_dbg(priv, probe, priv->dev,
1772 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1773 		  (u32)tx_q->dma_tx_phy);
1774 
1775 	/* Setup the chained descriptor addresses */
1776 	if (priv->mode == STMMAC_CHAIN_MODE) {
1777 		if (priv->extend_desc)
1778 			stmmac_mode_init(priv, tx_q->dma_etx,
1779 					 tx_q->dma_tx_phy,
1780 					 priv->dma_tx_size, 1);
1781 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1782 			stmmac_mode_init(priv, tx_q->dma_tx,
1783 					 tx_q->dma_tx_phy,
1784 					 priv->dma_tx_size, 0);
1785 	}
1786 
1787 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1788 
1789 	for (i = 0; i < priv->dma_tx_size; i++) {
1790 		struct dma_desc *p;
1791 
1792 		if (priv->extend_desc)
1793 			p = &((tx_q->dma_etx + i)->basic);
1794 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1795 			p = &((tx_q->dma_entx + i)->basic);
1796 		else
1797 			p = tx_q->dma_tx + i;
1798 
1799 		stmmac_clear_desc(priv, p);
1800 
1801 		tx_q->tx_skbuff_dma[i].buf = 0;
1802 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1803 		tx_q->tx_skbuff_dma[i].len = 0;
1804 		tx_q->tx_skbuff_dma[i].last_segment = false;
1805 		tx_q->tx_skbuff[i] = NULL;
1806 	}
1807 
1808 	tx_q->dirty_tx = 0;
1809 	tx_q->cur_tx = 0;
1810 	tx_q->mss = 0;
1811 
1812 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1813 
1814 	return 0;
1815 }
1816 
1817 static int init_dma_tx_desc_rings(struct net_device *dev)
1818 {
1819 	struct stmmac_priv *priv = netdev_priv(dev);
1820 	u32 tx_queue_cnt;
1821 	u32 queue;
1822 
1823 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1824 
1825 	for (queue = 0; queue < tx_queue_cnt; queue++)
1826 		__init_dma_tx_desc_rings(priv, queue);
1827 
1828 	return 0;
1829 }
1830 
1831 /**
1832  * init_dma_desc_rings - init the RX/TX descriptor rings
1833  * @dev: net device structure
1834  * @flags: gfp flag.
1835  * Description: this function initializes the DMA RX/TX descriptors
1836  * and allocates the socket buffers. It supports the chained and ring
1837  * modes.
1838  */
1839 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1840 {
1841 	struct stmmac_priv *priv = netdev_priv(dev);
1842 	int ret;
1843 
1844 	ret = init_dma_rx_desc_rings(dev, flags);
1845 	if (ret)
1846 		return ret;
1847 
1848 	ret = init_dma_tx_desc_rings(dev);
1849 
1850 	stmmac_clear_descriptors(priv);
1851 
1852 	if (netif_msg_hw(priv))
1853 		stmmac_display_rings(priv);
1854 
1855 	return ret;
1856 }
1857 
1858 /**
1859  * dma_free_tx_skbufs - free TX dma buffers
1860  * @priv: private structure
1861  * @queue: TX queue index
1862  */
1863 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1864 {
1865 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1866 	int i;
1867 
1868 	tx_q->xsk_frames_done = 0;
1869 
1870 	for (i = 0; i < priv->dma_tx_size; i++)
1871 		stmmac_free_tx_buffer(priv, queue, i);
1872 
1873 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1874 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1875 		tx_q->xsk_frames_done = 0;
1876 		tx_q->xsk_pool = NULL;
1877 	}
1878 }
1879 
1880 /**
1881  * stmmac_free_tx_skbufs - free TX skb buffers
1882  * @priv: private structure
1883  */
1884 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1885 {
1886 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1887 	u32 queue;
1888 
1889 	for (queue = 0; queue < tx_queue_cnt; queue++)
1890 		dma_free_tx_skbufs(priv, queue);
1891 }
1892 
1893 /**
1894  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1895  * @priv: private structure
1896  * @queue: RX queue index
1897  */
1898 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1899 {
1900 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1901 
1902 	/* Release the DMA RX socket buffers */
1903 	if (rx_q->xsk_pool)
1904 		dma_free_rx_xskbufs(priv, queue);
1905 	else
1906 		dma_free_rx_skbufs(priv, queue);
1907 
1908 	rx_q->buf_alloc_num = 0;
1909 	rx_q->xsk_pool = NULL;
1910 
1911 	/* Free DMA regions of consistent memory previously allocated */
1912 	if (!priv->extend_desc)
1913 		dma_free_coherent(priv->device, priv->dma_rx_size *
1914 				  sizeof(struct dma_desc),
1915 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1916 	else
1917 		dma_free_coherent(priv->device, priv->dma_rx_size *
1918 				  sizeof(struct dma_extended_desc),
1919 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1920 
1921 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1922 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1923 
1924 	kfree(rx_q->buf_pool);
1925 	if (rx_q->page_pool)
1926 		page_pool_destroy(rx_q->page_pool);
1927 }
1928 
1929 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1930 {
1931 	u32 rx_count = priv->plat->rx_queues_to_use;
1932 	u32 queue;
1933 
1934 	/* Free RX queue resources */
1935 	for (queue = 0; queue < rx_count; queue++)
1936 		__free_dma_rx_desc_resources(priv, queue);
1937 }
1938 
1939 /**
1940  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1941  * @priv: private structure
1942  * @queue: TX queue index
1943  */
1944 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1945 {
1946 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1947 	size_t size;
1948 	void *addr;
1949 
1950 	/* Release the DMA TX socket buffers */
1951 	dma_free_tx_skbufs(priv, queue);
1952 
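	/* Pick the descriptor size and ring base address according to the
	 * descriptor type in use (extended, TBS enhanced or basic).
	 */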
1953 	if (priv->extend_desc) {
1954 		size = sizeof(struct dma_extended_desc);
1955 		addr = tx_q->dma_etx;
1956 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1957 		size = sizeof(struct dma_edesc);
1958 		addr = tx_q->dma_entx;
1959 	} else {
1960 		size = sizeof(struct dma_desc);
1961 		addr = tx_q->dma_tx;
1962 	}
1963 
1964 	size *= priv->dma_tx_size;
1965 
1966 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1967 
1968 	kfree(tx_q->tx_skbuff_dma);
1969 	kfree(tx_q->tx_skbuff);
1970 }
1971 
1972 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1973 {
1974 	u32 tx_count = priv->plat->tx_queues_to_use;
1975 	u32 queue;
1976 
1977 	/* Free TX queue resources */
1978 	for (queue = 0; queue < tx_count; queue++)
1979 		__free_dma_tx_desc_resources(priv, queue);
1980 }
1981 
1982 /**
1983  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1984  * @priv: private structure
1985  * @queue: RX queue index
 * Description: according to which descriptor type can be used (extended or
 * basic) this function allocates the resources for the RX path: the page
 * pool, the buffer bookkeeping array, the descriptor ring and the XDP RX
 * queue info, so that buffers can later be pre-allocated to allow the
 * zero-copy mechanism.
1990  */
1991 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1992 {
1993 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1994 	struct stmmac_channel *ch = &priv->channel[queue];
1995 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
1996 	struct page_pool_params pp_params = { 0 };
1997 	unsigned int num_pages;
1998 	unsigned int napi_id;
1999 	int ret;
2000 
2001 	rx_q->queue_index = queue;
2002 	rx_q->priv_data = priv;
2003 
2004 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2005 	pp_params.pool_size = priv->dma_rx_size;
2006 	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
2007 	pp_params.order = ilog2(num_pages);
2008 	pp_params.nid = dev_to_node(priv->device);
2009 	pp_params.dev = priv->device;
2010 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2011 	pp_params.offset = stmmac_rx_offset(priv);
2012 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2013 
2014 	rx_q->page_pool = page_pool_create(&pp_params);
2015 	if (IS_ERR(rx_q->page_pool)) {
2016 		ret = PTR_ERR(rx_q->page_pool);
2017 		rx_q->page_pool = NULL;
2018 		return ret;
2019 	}
2020 
2021 	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
2022 				 sizeof(*rx_q->buf_pool),
2023 				 GFP_KERNEL);
2024 	if (!rx_q->buf_pool)
2025 		return -ENOMEM;
2026 
2027 	if (priv->extend_desc) {
2028 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2029 						   priv->dma_rx_size *
2030 						   sizeof(struct dma_extended_desc),
2031 						   &rx_q->dma_rx_phy,
2032 						   GFP_KERNEL);
2033 		if (!rx_q->dma_erx)
2034 			return -ENOMEM;
2035 
2036 	} else {
2037 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2038 						  priv->dma_rx_size *
2039 						  sizeof(struct dma_desc),
2040 						  &rx_q->dma_rx_phy,
2041 						  GFP_KERNEL);
2042 		if (!rx_q->dma_rx)
2043 			return -ENOMEM;
2044 	}
2045 
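	/* XDP zero-copy queues are serviced by the combined RX/TX NAPI,
	 * so register the xdp_rxq against that NAPI instance; otherwise
	 * use the regular RX NAPI.
	 */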
2046 	if (stmmac_xdp_is_enabled(priv) &&
2047 	    test_bit(queue, priv->af_xdp_zc_qps))
2048 		napi_id = ch->rxtx_napi.napi_id;
2049 	else
2050 		napi_id = ch->rx_napi.napi_id;
2051 
2052 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2053 			       rx_q->queue_index,
2054 			       napi_id);
2055 	if (ret) {
2056 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2057 		return -EINVAL;
2058 	}
2059 
2060 	return 0;
2061 }
2062 
2063 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2064 {
2065 	u32 rx_count = priv->plat->rx_queues_to_use;
2066 	u32 queue;
2067 	int ret;
2068 
2069 	/* RX queues buffers and DMA */
2070 	for (queue = 0; queue < rx_count; queue++) {
2071 		ret = __alloc_dma_rx_desc_resources(priv, queue);
2072 		if (ret)
2073 			goto err_dma;
2074 	}
2075 
2076 	return 0;
2077 
2078 err_dma:
2079 	free_dma_rx_desc_resources(priv);
2080 
2081 	return ret;
2082 }
2083 
2084 /**
2085  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2086  * @priv: private structure
2087  * @queue: TX queue index
 * Description: according to which descriptor type can be used (extended or
 * basic) this function allocates the resources for the TX path: the
 * descriptor ring and the tx_skbuff/tx_skbuff_dma bookkeeping arrays.
2092  */
2093 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
2094 {
2095 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2096 	size_t size;
2097 	void *addr;
2098 
2099 	tx_q->queue_index = queue;
2100 	tx_q->priv_data = priv;
2101 
2102 	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2103 				      sizeof(*tx_q->tx_skbuff_dma),
2104 				      GFP_KERNEL);
2105 	if (!tx_q->tx_skbuff_dma)
2106 		return -ENOMEM;
2107 
2108 	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2109 				  sizeof(struct sk_buff *),
2110 				  GFP_KERNEL);
2111 	if (!tx_q->tx_skbuff)
2112 		return -ENOMEM;
2113 
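	/* The descriptor element size depends on whether extended or TBS
	 * enhanced descriptors are in use for this queue.
	 */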
2114 	if (priv->extend_desc)
2115 		size = sizeof(struct dma_extended_desc);
2116 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2117 		size = sizeof(struct dma_edesc);
2118 	else
2119 		size = sizeof(struct dma_desc);
2120 
2121 	size *= priv->dma_tx_size;
2122 
2123 	addr = dma_alloc_coherent(priv->device, size,
2124 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2125 	if (!addr)
2126 		return -ENOMEM;
2127 
2128 	if (priv->extend_desc)
2129 		tx_q->dma_etx = addr;
2130 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2131 		tx_q->dma_entx = addr;
2132 	else
2133 		tx_q->dma_tx = addr;
2134 
2135 	return 0;
2136 }
2137 
2138 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2139 {
2140 	u32 tx_count = priv->plat->tx_queues_to_use;
2141 	u32 queue;
2142 	int ret;
2143 
2144 	/* TX queues buffers and DMA */
2145 	for (queue = 0; queue < tx_count; queue++) {
2146 		ret = __alloc_dma_tx_desc_resources(priv, queue);
2147 		if (ret)
2148 			goto err_dma;
2149 	}
2150 
2151 	return 0;
2152 
2153 err_dma:
2154 	free_dma_tx_desc_resources(priv);
2155 	return ret;
2156 }
2157 
2158 /**
2159  * alloc_dma_desc_resources - alloc TX/RX resources.
2160  * @priv: private structure
2161  * Description: according to which descriptor can be used (extend or basic)
2162  * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
2165  */
2166 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2167 {
2168 	/* RX Allocation */
2169 	int ret = alloc_dma_rx_desc_resources(priv);
2170 
2171 	if (ret)
2172 		return ret;
2173 
2174 	ret = alloc_dma_tx_desc_resources(priv);
2175 
2176 	return ret;
2177 }
2178 
2179 /**
2180  * free_dma_desc_resources - free dma desc resources
2181  * @priv: private structure
2182  */
2183 static void free_dma_desc_resources(struct stmmac_priv *priv)
2184 {
2185 	/* Release the DMA TX socket buffers */
2186 	free_dma_tx_desc_resources(priv);
2187 
2188 	/* Release the DMA RX socket buffers later
2189 	 * to ensure all pending XDP_TX buffers are returned.
2190 	 */
2191 	free_dma_rx_desc_resources(priv);
2192 }
2193 
2194 /**
2195  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2196  *  @priv: driver private structure
2197  *  Description: It is used for enabling the rx queues in the MAC
2198  */
2199 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2200 {
2201 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2202 	int queue;
2203 	u8 mode;
2204 
2205 	for (queue = 0; queue < rx_queues_count; queue++) {
2206 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2207 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2208 	}
2209 }
2210 
2211 /**
2212  * stmmac_start_rx_dma - start RX DMA channel
2213  * @priv: driver private structure
2214  * @chan: RX channel index
2215  * Description:
 * This starts an RX DMA channel
2217  */
2218 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2219 {
2220 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2221 	stmmac_start_rx(priv, priv->ioaddr, chan);
2222 }
2223 
2224 /**
2225  * stmmac_start_tx_dma - start TX DMA channel
2226  * @priv: driver private structure
2227  * @chan: TX channel index
2228  * Description:
2229  * This starts a TX DMA channel
2230  */
2231 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2232 {
2233 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2234 	stmmac_start_tx(priv, priv->ioaddr, chan);
2235 }
2236 
2237 /**
2238  * stmmac_stop_rx_dma - stop RX DMA channel
2239  * @priv: driver private structure
2240  * @chan: RX channel index
2241  * Description:
 * This stops an RX DMA channel
2243  */
2244 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2245 {
2246 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2247 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2248 }
2249 
2250 /**
2251  * stmmac_stop_tx_dma - stop TX DMA channel
2252  * @priv: driver private structure
2253  * @chan: TX channel index
2254  * Description:
2255  * This stops a TX DMA channel
2256  */
2257 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2258 {
2259 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2260 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2261 }
2262 
2263 /**
2264  * stmmac_start_all_dma - start all RX and TX DMA channels
2265  * @priv: driver private structure
2266  * Description:
2267  * This starts all the RX and TX DMA channels
2268  */
2269 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2270 {
2271 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2272 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2273 	u32 chan = 0;
2274 
2275 	for (chan = 0; chan < rx_channels_count; chan++)
2276 		stmmac_start_rx_dma(priv, chan);
2277 
2278 	for (chan = 0; chan < tx_channels_count; chan++)
2279 		stmmac_start_tx_dma(priv, chan);
2280 }
2281 
2282 /**
2283  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2284  * @priv: driver private structure
2285  * Description:
2286  * This stops the RX and TX DMA channels
2287  */
2288 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2289 {
2290 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2291 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2292 	u32 chan = 0;
2293 
2294 	for (chan = 0; chan < rx_channels_count; chan++)
2295 		stmmac_stop_rx_dma(priv, chan);
2296 
2297 	for (chan = 0; chan < tx_channels_count; chan++)
2298 		stmmac_stop_tx_dma(priv, chan);
2299 }
2300 
2301 /**
2302  *  stmmac_dma_operation_mode - HW DMA operation mode
2303  *  @priv: driver private structure
2304  *  Description: it is used for configuring the DMA operation mode register in
2305  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2306  */
2307 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2308 {
2309 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2310 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2311 	int rxfifosz = priv->plat->rx_fifo_size;
2312 	int txfifosz = priv->plat->tx_fifo_size;
2313 	u32 txmode = 0;
2314 	u32 rxmode = 0;
2315 	u32 chan = 0;
2316 	u8 qmode = 0;
2317 
2318 	if (rxfifosz == 0)
2319 		rxfifosz = priv->dma_cap.rx_fifo_size;
2320 	if (txfifosz == 0)
2321 		txfifosz = priv->dma_cap.tx_fifo_size;
2322 
2323 	/* Adjust for real per queue fifo size */
2324 	rxfifosz /= rx_channels_count;
2325 	txfifosz /= tx_channels_count;
2326 
2327 	if (priv->plat->force_thresh_dma_mode) {
2328 		txmode = tc;
2329 		rxmode = tc;
2330 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2331 		/*
2332 		 * In case of GMAC, SF mode can be enabled
2333 		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported;
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
2337 		 */
2338 		txmode = SF_DMA_MODE;
2339 		rxmode = SF_DMA_MODE;
2340 		priv->xstats.threshold = SF_DMA_MODE;
2341 	} else {
2342 		txmode = tc;
2343 		rxmode = SF_DMA_MODE;
2344 	}
2345 
2346 	/* configure all channels */
2347 	for (chan = 0; chan < rx_channels_count; chan++) {
2348 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2349 		u32 buf_size;
2350 
2351 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2352 
2353 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2354 				rxfifosz, qmode);
2355 
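		/* For XSK zero-copy queues, program the DMA buffer size
		 * from the pool's frame size rather than dma_buf_sz.
		 */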
2356 		if (rx_q->xsk_pool) {
2357 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2358 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2359 					      buf_size,
2360 					      chan);
2361 		} else {
2362 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2363 					      priv->dma_buf_sz,
2364 					      chan);
2365 		}
2366 	}
2367 
2368 	for (chan = 0; chan < tx_channels_count; chan++) {
2369 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2370 
2371 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2372 				txfifosz, qmode);
2373 	}
2374 }
2375 
2376 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2377 {
2378 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2379 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2380 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2381 	unsigned int entry = tx_q->cur_tx;
2382 	struct dma_desc *tx_desc = NULL;
2383 	struct xdp_desc xdp_desc;
2384 	bool work_done = true;
2385 
2386 	/* Avoids TX time-out as we are sharing with slow path */
2387 	txq_trans_cond_update(nq);
2388 
2389 	budget = min(budget, stmmac_tx_avail(priv, queue));
2390 
2391 	while (budget-- > 0) {
2392 		dma_addr_t dma_addr;
2393 		bool set_ic;
2394 
		/* We are sharing with the slow path, so stop XSK TX desc
		 * submission when the available TX ring space drops below
		 * the threshold.
		 */
2398 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2399 		    !netif_carrier_ok(priv->dev)) {
2400 			work_done = false;
2401 			break;
2402 		}
2403 
2404 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2405 			break;
2406 
2407 		if (likely(priv->extend_desc))
2408 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2409 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2410 			tx_desc = &tx_q->dma_entx[entry].basic;
2411 		else
2412 			tx_desc = tx_q->dma_tx + entry;
2413 
2414 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2415 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2416 
2417 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2418 
		/* To return the XDP buffer to the XSK pool, we simply call
2420 		 * xsk_tx_completed(), so we don't need to fill up
2421 		 * 'buf' and 'xdpf'.
2422 		 */
2423 		tx_q->tx_skbuff_dma[entry].buf = 0;
2424 		tx_q->xdpf[entry] = NULL;
2425 
2426 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2427 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2428 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2429 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2430 
2431 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2432 
2433 		tx_q->tx_count_frames++;
2434 
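		/* Request a completion interrupt only once every
		 * tx_coal_frames descriptors to limit the TX IRQ rate.
		 */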
2435 		if (!priv->tx_coal_frames[queue])
2436 			set_ic = false;
2437 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2438 			set_ic = true;
2439 		else
2440 			set_ic = false;
2441 
2442 		if (set_ic) {
2443 			tx_q->tx_count_frames = 0;
2444 			stmmac_set_tx_ic(priv, tx_desc);
2445 			priv->xstats.tx_set_ic_bit++;
2446 		}
2447 
2448 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2449 				       true, priv->mode, true, true,
2450 				       xdp_desc.len);
2451 
2452 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2453 
2454 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2455 		entry = tx_q->cur_tx;
2456 	}
2457 
2458 	if (tx_desc) {
2459 		stmmac_flush_tx_descriptors(priv, queue);
2460 		xsk_tx_release(pool);
2461 	}
2462 
	/* Return true if both of the following conditions are met:
	 *  a) TX budget is still available
	 *  b) work_done = true, i.e. the XSK TX desc peek found nothing
	 *     (no more pending XSK TX frames for transmission)
2467 	 */
2468 	return !!budget && work_done;
2469 }
2470 
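/* Bump the TX DMA threshold in steps of 64 (while it is at most 256)
 * when the hardware reports threshold related transmit failures, unless
 * Store-And-Forward mode is already in use.
 */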
2471 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2472 {
2473 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2474 		tc += 64;
2475 
2476 		if (priv->plat->force_thresh_dma_mode)
2477 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2478 		else
2479 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2480 						      chan);
2481 
2482 		priv->xstats.threshold = tc;
2483 	}
2484 }
2485 
2486 /**
2487  * stmmac_tx_clean - to manage the transmission completion
2488  * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
2490  * @queue: TX queue index
2491  * Description: it reclaims the transmit resources after transmission completes.
2492  */
2493 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2494 {
2495 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2496 	unsigned int bytes_compl = 0, pkts_compl = 0;
2497 	unsigned int entry, xmits = 0, count = 0;
2498 
2499 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2500 
2501 	priv->xstats.tx_clean++;
2502 
2503 	tx_q->xsk_frames_done = 0;
2504 
2505 	entry = tx_q->dirty_tx;
2506 
	/* Try to clean all completed TX frames in one shot */
2508 	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
2509 		struct xdp_frame *xdpf;
2510 		struct sk_buff *skb;
2511 		struct dma_desc *p;
2512 		int status;
2513 
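		/* Work out whether this descriptor carried an XDP frame or
		 * an skb so that the matching release path is taken below.
		 */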
2514 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2515 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2516 			xdpf = tx_q->xdpf[entry];
2517 			skb = NULL;
2518 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2519 			xdpf = NULL;
2520 			skb = tx_q->tx_skbuff[entry];
2521 		} else {
2522 			xdpf = NULL;
2523 			skb = NULL;
2524 		}
2525 
2526 		if (priv->extend_desc)
2527 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2528 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2529 			p = &tx_q->dma_entx[entry].basic;
2530 		else
2531 			p = tx_q->dma_tx + entry;
2532 
2533 		status = stmmac_tx_status(priv, &priv->dev->stats,
2534 				&priv->xstats, p, priv->ioaddr);
2535 		/* Check if the descriptor is owned by the DMA */
2536 		if (unlikely(status & tx_dma_own))
2537 			break;
2538 
2539 		count++;
2540 
2541 		/* Make sure descriptor fields are read after reading
2542 		 * the own bit.
2543 		 */
2544 		dma_rmb();
2545 
		/* Just consider the last segment and ... */
2547 		if (likely(!(status & tx_not_ls))) {
2548 			/* ... verify the status error condition */
2549 			if (unlikely(status & tx_err)) {
2550 				priv->dev->stats.tx_errors++;
2551 				if (unlikely(status & tx_err_bump_tc))
2552 					stmmac_bump_dma_threshold(priv, queue);
2553 			} else {
2554 				priv->dev->stats.tx_packets++;
2555 				priv->xstats.tx_pkt_n++;
2556 				priv->xstats.txq_stats[queue].tx_pkt_n++;
2557 			}
2558 			if (skb)
2559 				stmmac_get_tx_hwtstamp(priv, p, skb);
2560 		}
2561 
2562 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2563 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2564 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2565 				dma_unmap_page(priv->device,
2566 					       tx_q->tx_skbuff_dma[entry].buf,
2567 					       tx_q->tx_skbuff_dma[entry].len,
2568 					       DMA_TO_DEVICE);
2569 			else
2570 				dma_unmap_single(priv->device,
2571 						 tx_q->tx_skbuff_dma[entry].buf,
2572 						 tx_q->tx_skbuff_dma[entry].len,
2573 						 DMA_TO_DEVICE);
2574 			tx_q->tx_skbuff_dma[entry].buf = 0;
2575 			tx_q->tx_skbuff_dma[entry].len = 0;
2576 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2577 		}
2578 
2579 		stmmac_clean_desc3(priv, tx_q, p);
2580 
2581 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2582 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2583 
2584 		if (xdpf &&
2585 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2586 			xdp_return_frame_rx_napi(xdpf);
2587 			tx_q->xdpf[entry] = NULL;
2588 		}
2589 
2590 		if (xdpf &&
2591 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2592 			xdp_return_frame(xdpf);
2593 			tx_q->xdpf[entry] = NULL;
2594 		}
2595 
2596 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2597 			tx_q->xsk_frames_done++;
2598 
2599 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2600 			if (likely(skb)) {
2601 				pkts_compl++;
2602 				bytes_compl += skb->len;
2603 				dev_consume_skb_any(skb);
2604 				tx_q->tx_skbuff[entry] = NULL;
2605 			}
2606 		}
2607 
2608 		stmmac_release_tx_desc(priv, p, priv->mode);
2609 
2610 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2611 	}
2612 	tx_q->dirty_tx = entry;
2613 
2614 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2615 				  pkts_compl, bytes_compl);
2616 
2617 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2618 								queue))) &&
2619 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2620 
2621 		netif_dbg(priv, tx_done, priv->dev,
2622 			  "%s: restart transmit\n", __func__);
2623 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2624 	}
2625 
2626 	if (tx_q->xsk_pool) {
2627 		bool work_done;
2628 
2629 		if (tx_q->xsk_frames_done)
2630 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2631 
2632 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2633 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2634 
2635 		/* For XSK TX, we try to send as many as possible.
2636 		 * If XSK work done (XSK TX desc empty and budget still
2637 		 * available), return "budget - 1" to reenable TX IRQ.
2638 		 * Else, return "budget" to make NAPI continue polling.
2639 		 */
2640 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2641 					       STMMAC_XSK_TX_BUDGET_MAX);
2642 		if (work_done)
2643 			xmits = budget - 1;
2644 		else
2645 			xmits = budget;
2646 	}
2647 
2648 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2649 	    priv->eee_sw_timer_en) {
2650 		stmmac_enable_eee_mode(priv);
2651 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2652 	}
2653 
2654 	/* We still have pending packets, let's call for a new scheduling */
2655 	if (tx_q->dirty_tx != tx_q->cur_tx)
2656 		hrtimer_start(&tx_q->txtimer,
2657 			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2658 			      HRTIMER_MODE_REL);
2659 
2660 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2661 
2662 	/* Combine decisions from TX clean and XSK TX */
2663 	return max(count, xmits);
2664 }
2665 
2666 /**
2667  * stmmac_tx_err - to manage the tx error
2668  * @priv: driver private structure
2669  * @chan: channel index
2670  * Description: it cleans the descriptors and restarts the transmission
2671  * in case of transmission errors.
2672  */
2673 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2674 {
2675 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2676 
2677 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2678 
2679 	stmmac_stop_tx_dma(priv, chan);
2680 	dma_free_tx_skbufs(priv, chan);
2681 	stmmac_clear_tx_descriptors(priv, chan);
2682 	tx_q->dirty_tx = 0;
2683 	tx_q->cur_tx = 0;
2684 	tx_q->mss = 0;
2685 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2686 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2687 			    tx_q->dma_tx_phy, chan);
2688 	stmmac_start_tx_dma(priv, chan);
2689 
2690 	priv->dev->stats.tx_errors++;
2691 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2692 }
2693 
2694 /**
2695  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2696  *  @priv: driver private structure
2697  *  @txmode: TX operating mode
2698  *  @rxmode: RX operating mode
2699  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2702  *  mode.
2703  */
2704 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2705 					  u32 rxmode, u32 chan)
2706 {
2707 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2708 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2709 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2710 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2711 	int rxfifosz = priv->plat->rx_fifo_size;
2712 	int txfifosz = priv->plat->tx_fifo_size;
2713 
2714 	if (rxfifosz == 0)
2715 		rxfifosz = priv->dma_cap.rx_fifo_size;
2716 	if (txfifosz == 0)
2717 		txfifosz = priv->dma_cap.tx_fifo_size;
2718 
2719 	/* Adjust for real per queue fifo size */
2720 	rxfifosz /= rx_channels_count;
2721 	txfifosz /= tx_channels_count;
2722 
2723 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2724 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2725 }
2726 
2727 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2728 {
2729 	int ret;
2730 
2731 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2732 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2733 	if (ret && (ret != -EINVAL)) {
2734 		stmmac_global_err(priv);
2735 		return true;
2736 	}
2737 
2738 	return false;
2739 }
2740 
2741 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2742 {
2743 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2744 						 &priv->xstats, chan, dir);
2745 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2746 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2747 	struct stmmac_channel *ch = &priv->channel[chan];
2748 	struct napi_struct *rx_napi;
2749 	struct napi_struct *tx_napi;
2750 	unsigned long flags;
2751 
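	/* XSK zero-copy queues are polled by the combined rxtx NAPI */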
2752 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2753 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2754 
2755 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2756 		if (napi_schedule_prep(rx_napi)) {
2757 			spin_lock_irqsave(&ch->lock, flags);
2758 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2759 			spin_unlock_irqrestore(&ch->lock, flags);
2760 			__napi_schedule(rx_napi);
2761 		}
2762 	}
2763 
2764 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2765 		if (napi_schedule_prep(tx_napi)) {
2766 			spin_lock_irqsave(&ch->lock, flags);
2767 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2768 			spin_unlock_irqrestore(&ch->lock, flags);
2769 			__napi_schedule(tx_napi);
2770 		}
2771 	}
2772 
2773 	return status;
2774 }
2775 
2776 /**
2777  * stmmac_dma_interrupt - DMA ISR
2778  * @priv: driver private structure
2779  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
2782  */
2783 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2784 {
2785 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2786 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2787 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2788 				tx_channel_count : rx_channel_count;
2789 	u32 chan;
2790 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2791 
2792 	/* Make sure we never check beyond our status buffer. */
2793 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2794 		channels_to_check = ARRAY_SIZE(status);
2795 
2796 	for (chan = 0; chan < channels_to_check; chan++)
2797 		status[chan] = stmmac_napi_check(priv, chan,
2798 						 DMA_DIR_RXTX);
2799 
2800 	for (chan = 0; chan < tx_channel_count; chan++) {
2801 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2802 			/* Try to bump up the dma threshold on this failure */
2803 			stmmac_bump_dma_threshold(priv, chan);
2804 		} else if (unlikely(status[chan] == tx_hard_error)) {
2805 			stmmac_tx_err(priv, chan);
2806 		}
2807 	}
2808 }
2809 
2810 /**
2811  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2812  * @priv: driver private structure
 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2814  */
2815 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2816 {
2817 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2818 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2819 
2820 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2821 
2822 	if (priv->dma_cap.rmon) {
2823 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2824 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else {
		netdev_info(priv->dev, "No MAC Management Counters available\n");
	}
2827 }
2828 
2829 /**
2830  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2831  * @priv: driver private structure
2832  * Description:
2833  *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional features/functions.
 *  This can also be used to override the value passed through the
 *  platform and is necessary for old MAC10/100 and GMAC chips.
2837  */
2838 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2839 {
2840 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2841 }
2842 
2843 /**
2844  * stmmac_check_ether_addr - check if the MAC addr is valid
2845  * @priv: driver private structure
2846  * Description:
 * it verifies that the MAC address is valid; in case it is not, a random
 * MAC address is generated
2849  */
2850 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2851 {
2852 	u8 addr[ETH_ALEN];
2853 
2854 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2855 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2856 		if (is_valid_ether_addr(addr))
2857 			eth_hw_addr_set(priv->dev, addr);
2858 		else
2859 			eth_hw_addr_random(priv->dev);
2860 		dev_info(priv->device, "device MAC address %pM\n",
2861 			 priv->dev->dev_addr);
2862 	}
2863 }
2864 
2865 /**
2866  * stmmac_init_dma_engine - DMA init.
2867  * @priv: driver private structure
2868  * Description:
2869  * It inits the DMA invoking the specific MAC/GMAC callback.
2870  * Some DMA parameters can be passed from the platform;
 * in case these are not passed, a default is kept for the MAC or GMAC.
2872  */
2873 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2874 {
2875 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2876 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2877 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2878 	struct stmmac_rx_queue *rx_q;
2879 	struct stmmac_tx_queue *tx_q;
2880 	u32 chan = 0;
2881 	int atds = 0;
2882 	int ret = 0;
2883 
2884 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2885 		dev_err(priv->device, "Invalid DMA configuration\n");
2886 		return -EINVAL;
2887 	}
2888 
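	/* The alternate (extended) descriptor size is only signalled to the
	 * DMA when extended descriptors are used in ring mode.
	 */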
2889 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2890 		atds = 1;
2891 
2892 	ret = stmmac_reset(priv, priv->ioaddr);
2893 	if (ret) {
2894 		dev_err(priv->device, "Failed to reset the dma\n");
2895 		return ret;
2896 	}
2897 
2898 	/* DMA Configuration */
2899 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2900 
2901 	if (priv->plat->axi)
2902 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2903 
2904 	/* DMA CSR Channel configuration */
2905 	for (chan = 0; chan < dma_csr_ch; chan++)
2906 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2907 
2908 	/* DMA RX Channel Configuration */
2909 	for (chan = 0; chan < rx_channels_count; chan++) {
2910 		rx_q = &priv->rx_queue[chan];
2911 
2912 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2913 				    rx_q->dma_rx_phy, chan);
2914 
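		/* Point the RX tail just past the descriptors that already
		 * own a buffer; buf_alloc_num may be smaller than the ring
		 * size (e.g. an XSK pool that was not fully populated).
		 */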
2915 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2916 				     (rx_q->buf_alloc_num *
2917 				      sizeof(struct dma_desc));
2918 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2919 				       rx_q->rx_tail_addr, chan);
2920 	}
2921 
2922 	/* DMA TX Channel Configuration */
2923 	for (chan = 0; chan < tx_channels_count; chan++) {
2924 		tx_q = &priv->tx_queue[chan];
2925 
2926 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2927 				    tx_q->dma_tx_phy, chan);
2928 
2929 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2930 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2931 				       tx_q->tx_tail_addr, chan);
2932 	}
2933 
2934 	return ret;
2935 }
2936 
2937 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2938 {
2939 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2940 
2941 	hrtimer_start(&tx_q->txtimer,
2942 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2943 		      HRTIMER_MODE_REL);
2944 }
2945 
2946 /**
2947  * stmmac_tx_timer - mitigation sw timer for tx.
2948  * @t: data pointer
2949  * Description:
 * This is the timer handler that schedules the TX NAPI, which in turn
 * invokes stmmac_tx_clean.
2951  */
2952 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2953 {
2954 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2955 	struct stmmac_priv *priv = tx_q->priv_data;
2956 	struct stmmac_channel *ch;
2957 	struct napi_struct *napi;
2958 
2959 	ch = &priv->channel[tx_q->queue_index];
2960 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2961 
2962 	if (likely(napi_schedule_prep(napi))) {
2963 		unsigned long flags;
2964 
2965 		spin_lock_irqsave(&ch->lock, flags);
2966 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2967 		spin_unlock_irqrestore(&ch->lock, flags);
2968 		__napi_schedule(napi);
2969 	}
2970 
2971 	return HRTIMER_NORESTART;
2972 }
2973 
2974 /**
2975  * stmmac_init_coalesce - init mitigation options.
2976  * @priv: driver private structure
2977  * Description:
2978  * This inits the coalesce parameters: i.e. timer rate,
2979  * timer handler and default threshold used for enabling the
2980  * interrupt on completion bit.
2981  */
2982 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2983 {
2984 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2985 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2986 	u32 chan;
2987 
2988 	for (chan = 0; chan < tx_channel_count; chan++) {
2989 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2990 
2991 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2992 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2993 
2994 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2995 		tx_q->txtimer.function = stmmac_tx_timer;
2996 	}
2997 
2998 	for (chan = 0; chan < rx_channel_count; chan++)
2999 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3000 }
3001 
3002 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3003 {
3004 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3005 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3006 	u32 chan;
3007 
3008 	/* set TX ring length */
3009 	for (chan = 0; chan < tx_channels_count; chan++)
3010 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3011 				       (priv->dma_tx_size - 1), chan);
3012 
3013 	/* set RX ring length */
3014 	for (chan = 0; chan < rx_channels_count; chan++)
3015 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3016 				       (priv->dma_rx_size - 1), chan);
3017 }
3018 
3019 /**
3020  *  stmmac_set_tx_queue_weight - Set TX queue weight
3021  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
3023  */
3024 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3025 {
3026 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3027 	u32 weight;
3028 	u32 queue;
3029 
3030 	for (queue = 0; queue < tx_queues_count; queue++) {
3031 		weight = priv->plat->tx_queues_cfg[queue].weight;
3032 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3033 	}
3034 }
3035 
3036 /**
3037  *  stmmac_configure_cbs - Configure CBS in TX queue
3038  *  @priv: driver private structure
3039  *  Description: It is used for configuring CBS in AVB TX queues
3040  */
3041 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3042 {
3043 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3044 	u32 mode_to_use;
3045 	u32 queue;
3046 
3047 	/* queue 0 is reserved for legacy traffic */
3048 	for (queue = 1; queue < tx_queues_count; queue++) {
3049 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3050 		if (mode_to_use == MTL_QUEUE_DCB)
3051 			continue;
3052 
3053 		stmmac_config_cbs(priv, priv->hw,
3054 				priv->plat->tx_queues_cfg[queue].send_slope,
3055 				priv->plat->tx_queues_cfg[queue].idle_slope,
3056 				priv->plat->tx_queues_cfg[queue].high_credit,
3057 				priv->plat->tx_queues_cfg[queue].low_credit,
3058 				queue);
3059 	}
3060 }
3061 
3062 /**
3063  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3064  *  @priv: driver private structure
3065  *  Description: It is used for mapping RX queues to RX dma channels
3066  */
3067 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3068 {
3069 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3070 	u32 queue;
3071 	u32 chan;
3072 
3073 	for (queue = 0; queue < rx_queues_count; queue++) {
3074 		chan = priv->plat->rx_queues_cfg[queue].chan;
3075 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3076 	}
3077 }
3078 
3079 /**
3080  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3081  *  @priv: driver private structure
3082  *  Description: It is used for configuring the RX Queue Priority
3083  */
3084 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3085 {
3086 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3087 	u32 queue;
3088 	u32 prio;
3089 
3090 	for (queue = 0; queue < rx_queues_count; queue++) {
3091 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3092 			continue;
3093 
3094 		prio = priv->plat->rx_queues_cfg[queue].prio;
3095 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3096 	}
3097 }
3098 
3099 /**
3100  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3101  *  @priv: driver private structure
3102  *  Description: It is used for configuring the TX Queue Priority
3103  */
3104 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3105 {
3106 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3107 	u32 queue;
3108 	u32 prio;
3109 
3110 	for (queue = 0; queue < tx_queues_count; queue++) {
3111 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3112 			continue;
3113 
3114 		prio = priv->plat->tx_queues_cfg[queue].prio;
3115 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3116 	}
3117 }
3118 
3119 /**
3120  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3121  *  @priv: driver private structure
3122  *  Description: It is used for configuring the RX queue routing
3123  */
3124 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3125 {
3126 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3127 	u32 queue;
3128 	u8 packet;
3129 
3130 	for (queue = 0; queue < rx_queues_count; queue++) {
3131 		/* no specific packet type routing specified for the queue */
3132 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3133 			continue;
3134 
3135 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3136 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3137 	}
3138 }
3139 
3140 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3141 {
3142 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3143 		priv->rss.enable = false;
3144 		return;
3145 	}
3146 
3147 	if (priv->dev->features & NETIF_F_RXHASH)
3148 		priv->rss.enable = true;
3149 	else
3150 		priv->rss.enable = false;
3151 
3152 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3153 			     priv->plat->rx_queues_to_use);
3154 }
3155 
3156 /**
3157  *  stmmac_mtl_configuration - Configure MTL
3158  *  @priv: driver private structure
 *  Description: It is used for configuring MTL
3160  */
3161 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3162 {
3163 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3164 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3165 
3166 	if (tx_queues_count > 1)
3167 		stmmac_set_tx_queue_weight(priv);
3168 
3169 	/* Configure MTL RX algorithms */
3170 	if (rx_queues_count > 1)
3171 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3172 				priv->plat->rx_sched_algorithm);
3173 
3174 	/* Configure MTL TX algorithms */
3175 	if (tx_queues_count > 1)
3176 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3177 				priv->plat->tx_sched_algorithm);
3178 
3179 	/* Configure CBS in AVB TX queues */
3180 	if (tx_queues_count > 1)
3181 		stmmac_configure_cbs(priv);
3182 
3183 	/* Map RX MTL to DMA channels */
3184 	stmmac_rx_queue_dma_chan_map(priv);
3185 
3186 	/* Enable MAC RX Queues */
3187 	stmmac_mac_enable_rx_queues(priv);
3188 
3189 	/* Set RX priorities */
3190 	if (rx_queues_count > 1)
3191 		stmmac_mac_config_rx_queues_prio(priv);
3192 
3193 	/* Set TX priorities */
3194 	if (tx_queues_count > 1)
3195 		stmmac_mac_config_tx_queues_prio(priv);
3196 
3197 	/* Set RX routing */
3198 	if (rx_queues_count > 1)
3199 		stmmac_mac_config_rx_queues_routing(priv);
3200 
3201 	/* Receive Side Scaling */
3202 	if (rx_queues_count > 1)
3203 		stmmac_mac_config_rss(priv);
3204 }
3205 
3206 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3207 {
3208 	if (priv->dma_cap.asp) {
3209 		netdev_info(priv->dev, "Enabling Safety Features\n");
3210 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3211 					  priv->plat->safety_feat_cfg);
3212 	} else {
3213 		netdev_info(priv->dev, "No Safety Features support found\n");
3214 	}
3215 }
3216 
3217 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3218 {
3219 	char *name;
3220 
3221 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3222 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3223 
3224 	name = priv->wq_name;
3225 	sprintf(name, "%s-fpe", priv->dev->name);
3226 
3227 	priv->fpe_wq = create_singlethread_workqueue(name);
3228 	if (!priv->fpe_wq) {
3229 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3230 
3231 		return -ENOMEM;
3232 	}
3233 	netdev_info(priv->dev, "FPE workqueue start");
3234 
3235 	return 0;
3236 }
3237 
3238 /**
3239  * stmmac_hw_setup - setup mac in a usable state.
 *  @dev: pointer to the device structure.
3241  *  @init_ptp: initialize PTP if set
3242  *  Description:
 *  this is the main function to setup the HW in a usable state: the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers) and the DMA is made ready to start
 *  receiving and transmitting.
3247  *  Return value:
3248  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3249  *  file on failure.
3250  */
3251 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3252 {
3253 	struct stmmac_priv *priv = netdev_priv(dev);
3254 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3255 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3256 	bool sph_en;
3257 	u32 chan;
3258 	int ret;
3259 
3260 	/* DMA initialization and SW reset */
3261 	ret = stmmac_init_dma_engine(priv);
3262 	if (ret < 0) {
3263 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3264 			   __func__);
3265 		return ret;
3266 	}
3267 
3268 	/* Copy the MAC addr into the HW  */
3269 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3270 
3271 	/* PS and related bits will be programmed according to the speed */
3272 	if (priv->hw->pcs) {
3273 		int speed = priv->plat->mac_port_sel_speed;
3274 
3275 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3276 		    (speed == SPEED_1000)) {
3277 			priv->hw->ps = speed;
3278 		} else {
3279 			dev_warn(priv->device, "invalid port speed\n");
3280 			priv->hw->ps = 0;
3281 		}
3282 	}
3283 
3284 	/* Initialize the MAC Core */
3285 	stmmac_core_init(priv, priv->hw, dev);
3286 
	/* Initialize MTL */
3288 	stmmac_mtl_configuration(priv);
3289 
3290 	/* Initialize Safety Features */
3291 	stmmac_safety_feat_configuration(priv);
3292 
3293 	ret = stmmac_rx_ipc(priv, priv->hw);
3294 	if (!ret) {
3295 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3296 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3297 		priv->hw->rx_csum = 0;
3298 	}
3299 
3300 	/* Enable the MAC Rx/Tx */
3301 	stmmac_mac_set(priv, priv->ioaddr, true);
3302 
3303 	/* Set the HW DMA mode and the COE */
3304 	stmmac_dma_operation_mode(priv);
3305 
3306 	stmmac_mmc_setup(priv);
3307 
3308 	if (init_ptp) {
3309 		ret = stmmac_init_ptp(priv);
3310 		if (ret == -EOPNOTSUPP)
3311 			netdev_warn(priv->dev, "PTP not supported by HW\n");
3312 		else if (ret)
3313 			netdev_warn(priv->dev, "PTP init failed\n");
3314 	}
3315 
3316 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3317 
3318 	/* Convert the timer from msec to usec */
3319 	if (!priv->tx_lpi_timer)
3320 		priv->tx_lpi_timer = eee_timer * 1000;
3321 
3322 	if (priv->use_riwt) {
3323 		u32 queue;
3324 
3325 		for (queue = 0; queue < rx_cnt; queue++) {
3326 			if (!priv->rx_riwt[queue])
3327 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3328 
3329 			stmmac_rx_watchdog(priv, priv->ioaddr,
3330 					   priv->rx_riwt[queue], queue);
3331 		}
3332 	}
3333 
3334 	if (priv->hw->pcs)
3335 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3336 
3337 	/* set TX and RX rings length */
3338 	stmmac_set_rings_length(priv);
3339 
3340 	/* Enable TSO */
3341 	if (priv->tso) {
3342 		for (chan = 0; chan < tx_cnt; chan++) {
3343 			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3344 
3345 			/* TSO and TBS cannot co-exist */
3346 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3347 				continue;
3348 
3349 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3350 		}
3351 	}
3352 
3353 	/* Enable Split Header */
3354 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3355 	for (chan = 0; chan < rx_cnt; chan++)
3356 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3357 
3359 	/* VLAN Tag Insertion */
3360 	if (priv->dma_cap.vlins)
3361 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3362 
3363 	/* TBS */
3364 	for (chan = 0; chan < tx_cnt; chan++) {
3365 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3366 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3367 
3368 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3369 	}
3370 
3371 	/* Configure real RX and TX queues */
3372 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3373 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3374 
3375 	/* Start the ball rolling... */
3376 	stmmac_start_all_dma(priv);
3377 
3378 	if (priv->dma_cap.fpesel) {
3379 		stmmac_fpe_start_wq(priv);
3380 
3381 		if (priv->plat->fpe_cfg->enable)
3382 			stmmac_fpe_handshake(priv, true);
3383 	}
3384 
3385 	return 0;
3386 }
3387 
3388 static void stmmac_hw_teardown(struct net_device *dev)
3389 {
3390 	struct stmmac_priv *priv = netdev_priv(dev);
3391 
3392 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3393 }
3394 
3395 static void stmmac_free_irq(struct net_device *dev,
3396 			    enum request_irq_err irq_err, int irq_idx)
3397 {
3398 	struct stmmac_priv *priv = netdev_priv(dev);
3399 	int j;
3400 
3401 	switch (irq_err) {
3402 	case REQ_IRQ_ERR_ALL:
3403 		irq_idx = priv->plat->tx_queues_to_use;
3404 		fallthrough;
3405 	case REQ_IRQ_ERR_TX:
3406 		for (j = irq_idx - 1; j >= 0; j--) {
3407 			if (priv->tx_irq[j] > 0) {
3408 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3409 				free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
3410 			}
3411 		}
3412 		irq_idx = priv->plat->rx_queues_to_use;
3413 		fallthrough;
3414 	case REQ_IRQ_ERR_RX:
3415 		for (j = irq_idx - 1; j >= 0; j--) {
3416 			if (priv->rx_irq[j] > 0) {
3417 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3418 				free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
3419 			}
3420 		}
3421 
3422 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3423 			free_irq(priv->sfty_ue_irq, dev);
3424 		fallthrough;
3425 	case REQ_IRQ_ERR_SFTY_UE:
3426 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3427 			free_irq(priv->sfty_ce_irq, dev);
3428 		fallthrough;
3429 	case REQ_IRQ_ERR_SFTY_CE:
3430 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3431 			free_irq(priv->lpi_irq, dev);
3432 		fallthrough;
3433 	case REQ_IRQ_ERR_LPI:
3434 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3435 			free_irq(priv->wol_irq, dev);
3436 		fallthrough;
3437 	case REQ_IRQ_ERR_WOL:
3438 		free_irq(dev->irq, dev);
3439 		fallthrough;
3440 	case REQ_IRQ_ERR_MAC:
3441 	case REQ_IRQ_ERR_NO:
3442 		/* If MAC IRQ request error, no more IRQ to free */
3443 		break;
3444 	}
3445 }
3446 
3447 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3448 {
3449 	struct stmmac_priv *priv = netdev_priv(dev);
3450 	enum request_irq_err irq_err;
3451 	cpumask_t cpu_mask;
3452 	int irq_idx = 0;
3453 	char *int_name;
3454 	int ret;
3455 	int i;
3456 
3457 	/* For common interrupt */
3458 	int_name = priv->int_name_mac;
3459 	sprintf(int_name, "%s:%s", dev->name, "mac");
3460 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3461 			  0, int_name, dev);
3462 	if (unlikely(ret < 0)) {
3463 		netdev_err(priv->dev,
3464 			   "%s: alloc mac MSI %d (error: %d)\n",
3465 			   __func__, dev->irq, ret);
3466 		irq_err = REQ_IRQ_ERR_MAC;
3467 		goto irq_error;
3468 	}
3469 
	/* Request the Wake IRQ in case another line
3471 	 * is used for WoL
3472 	 */
3473 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3474 		int_name = priv->int_name_wol;
3475 		sprintf(int_name, "%s:%s", dev->name, "wol");
3476 		ret = request_irq(priv->wol_irq,
3477 				  stmmac_mac_interrupt,
3478 				  0, int_name, dev);
3479 		if (unlikely(ret < 0)) {
3480 			netdev_err(priv->dev,
3481 				   "%s: alloc wol MSI %d (error: %d)\n",
3482 				   __func__, priv->wol_irq, ret);
3483 			irq_err = REQ_IRQ_ERR_WOL;
3484 			goto irq_error;
3485 		}
3486 	}
3487 
	/* Request the LPI IRQ in case another line
3489 	 * is used for LPI
3490 	 */
3491 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3492 		int_name = priv->int_name_lpi;
3493 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3494 		ret = request_irq(priv->lpi_irq,
3495 				  stmmac_mac_interrupt,
3496 				  0, int_name, dev);
3497 		if (unlikely(ret < 0)) {
3498 			netdev_err(priv->dev,
3499 				   "%s: alloc lpi MSI %d (error: %d)\n",
3500 				   __func__, priv->lpi_irq, ret);
3501 			irq_err = REQ_IRQ_ERR_LPI;
3502 			goto irq_error;
3503 		}
3504 	}
3505 
	/* Request the Safety Feature Correctable Error line in
	 * case another line is used
	 */
3509 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3510 		int_name = priv->int_name_sfty_ce;
3511 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3512 		ret = request_irq(priv->sfty_ce_irq,
3513 				  stmmac_safety_interrupt,
3514 				  0, int_name, dev);
3515 		if (unlikely(ret < 0)) {
3516 			netdev_err(priv->dev,
3517 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3518 				   __func__, priv->sfty_ce_irq, ret);
3519 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3520 			goto irq_error;
3521 		}
3522 	}
3523 
	/* Request the Safety Feature Uncorrectable Error line in
	 * case another line is used
	 */
3527 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3528 		int_name = priv->int_name_sfty_ue;
3529 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3530 		ret = request_irq(priv->sfty_ue_irq,
3531 				  stmmac_safety_interrupt,
3532 				  0, int_name, dev);
3533 		if (unlikely(ret < 0)) {
3534 			netdev_err(priv->dev,
3535 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3536 				   __func__, priv->sfty_ue_irq, ret);
3537 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3538 			goto irq_error;
3539 		}
3540 	}
3541 
3542 	/* Request Rx MSI irq */
3543 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3544 		if (i >= MTL_MAX_RX_QUEUES)
3545 			break;
3546 		if (priv->rx_irq[i] == 0)
3547 			continue;
3548 
3549 		int_name = priv->int_name_rx_irq[i];
3550 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3551 		ret = request_irq(priv->rx_irq[i],
3552 				  stmmac_msi_intr_rx,
3553 				  0, int_name, &priv->rx_queue[i]);
3554 		if (unlikely(ret < 0)) {
3555 			netdev_err(priv->dev,
3556 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3557 				   __func__, i, priv->rx_irq[i], ret);
3558 			irq_err = REQ_IRQ_ERR_RX;
3559 			irq_idx = i;
3560 			goto irq_error;
3561 		}
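		/* Spread the RX queue IRQs round-robin across online CPUs */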
3562 		cpumask_clear(&cpu_mask);
3563 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3564 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3565 	}
3566 
3567 	/* Request Tx MSI irq */
3568 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3569 		if (i >= MTL_MAX_TX_QUEUES)
3570 			break;
3571 		if (priv->tx_irq[i] == 0)
3572 			continue;
3573 
3574 		int_name = priv->int_name_tx_irq[i];
3575 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3576 		ret = request_irq(priv->tx_irq[i],
3577 				  stmmac_msi_intr_tx,
3578 				  0, int_name, &priv->tx_queue[i]);
3579 		if (unlikely(ret < 0)) {
3580 			netdev_err(priv->dev,
3581 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3582 				   __func__, i, priv->tx_irq[i], ret);
3583 			irq_err = REQ_IRQ_ERR_TX;
3584 			irq_idx = i;
3585 			goto irq_error;
3586 		}
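		/* Spread the TX queue IRQs round-robin across online CPUs */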
3587 		cpumask_clear(&cpu_mask);
3588 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3589 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3590 	}
3591 
3592 	return 0;
3593 
3594 irq_error:
3595 	stmmac_free_irq(dev, irq_err, irq_idx);
3596 	return ret;
3597 }
3598 
3599 static int stmmac_request_irq_single(struct net_device *dev)
3600 {
3601 	struct stmmac_priv *priv = netdev_priv(dev);
3602 	enum request_irq_err irq_err;
3603 	int ret;
3604 
3605 	ret = request_irq(dev->irq, stmmac_interrupt,
3606 			  IRQF_SHARED, dev->name, dev);
3607 	if (unlikely(ret < 0)) {
3608 		netdev_err(priv->dev,
3609 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3610 			   __func__, dev->irq, ret);
3611 		irq_err = REQ_IRQ_ERR_MAC;
3612 		goto irq_error;
3613 	}
3614 
	/* Request the Wake IRQ in case another line
	 * is used for WoL
	 */
3618 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3619 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3620 				  IRQF_SHARED, dev->name, dev);
3621 		if (unlikely(ret < 0)) {
3622 			netdev_err(priv->dev,
3623 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3624 				   __func__, priv->wol_irq, ret);
3625 			irq_err = REQ_IRQ_ERR_WOL;
3626 			goto irq_error;
3627 		}
3628 	}
3629 
	/* Request the LPI IRQ in case another line is used for LPI */
3631 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3632 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3633 				  IRQF_SHARED, dev->name, dev);
3634 		if (unlikely(ret < 0)) {
3635 			netdev_err(priv->dev,
3636 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3637 				   __func__, priv->lpi_irq, ret);
3638 			irq_err = REQ_IRQ_ERR_LPI;
3639 			goto irq_error;
3640 		}
3641 	}
3642 
3643 	return 0;
3644 
3645 irq_error:
3646 	stmmac_free_irq(dev, irq_err, 0);
3647 	return ret;
3648 }
3649 
3650 static int stmmac_request_irq(struct net_device *dev)
3651 {
3652 	struct stmmac_priv *priv = netdev_priv(dev);
3653 	int ret;
3654 
3655 	/* Request the IRQ lines */
3656 	if (priv->plat->multi_msi_en)
3657 		ret = stmmac_request_irq_multi_msi(dev);
3658 	else
3659 		ret = stmmac_request_irq_single(dev);
3660 
3661 	return ret;
3662 }
3663 
3664 /**
3665  *  stmmac_open - open entry point of the driver
3666  *  @dev : pointer to the device structure.
3667  *  Description:
3668  *  This function is the open entry point of the driver.
3669  *  Return value:
3670  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3671  *  file on failure.
3672  */
3673 static int stmmac_open(struct net_device *dev)
3674 {
3675 	struct stmmac_priv *priv = netdev_priv(dev);
3676 	int mode = priv->plat->phy_interface;
3677 	int bfsize = 0;
3678 	u32 chan;
3679 	int ret;
3680 
3681 	ret = pm_runtime_get_sync(priv->device);
3682 	if (ret < 0) {
3683 		pm_runtime_put_noidle(priv->device);
3684 		return ret;
3685 	}
3686 
3687 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3688 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3689 	    (!priv->hw->xpcs ||
3690 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3691 		ret = stmmac_init_phy(dev);
3692 		if (ret) {
3693 			netdev_err(priv->dev,
3694 				   "%s: Cannot attach to PHY (error: %d)\n",
3695 				   __func__, ret);
3696 			goto init_phy_error;
3697 		}
3698 	}
3699 
3700 	/* Extra statistics */
3701 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3702 	priv->xstats.threshold = tc;
3703 
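	/* Size the RX DMA buffers from the MTU: take the 16 KiB buffer size
	 * when the MTU requires it, otherwise fall back to the generic
	 * sizing helper.
	 */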
3704 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3705 	if (bfsize < 0)
3706 		bfsize = 0;
3707 
3708 	if (bfsize < BUF_SIZE_16KiB)
3709 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3710 
3711 	priv->dma_buf_sz = bfsize;
3712 	buf_sz = bfsize;
3713 
3714 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3715 
3716 	if (!priv->dma_tx_size)
3717 		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3718 	if (!priv->dma_rx_size)
3719 		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3720 
3721 	/* Earlier check for TBS */
3722 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3723 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3724 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3725 
3726 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3727 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3728 	}
3729 
3730 	ret = alloc_dma_desc_resources(priv);
3731 	if (ret < 0) {
3732 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3733 			   __func__);
3734 		goto dma_desc_error;
3735 	}
3736 
3737 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
3738 	if (ret < 0) {
3739 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3740 			   __func__);
3741 		goto init_error;
3742 	}
3743 
3744 	ret = stmmac_hw_setup(dev, true);
3745 	if (ret < 0) {
3746 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3747 		goto init_error;
3748 	}
3749 
3750 	stmmac_init_coalesce(priv);
3751 
3752 	phylink_start(priv->phylink);
3753 	/* We may have called phylink_speed_down before */
3754 	phylink_speed_up(priv->phylink);
3755 
3756 	ret = stmmac_request_irq(dev);
3757 	if (ret)
3758 		goto irq_error;
3759 
3760 	stmmac_enable_all_queues(priv);
3761 	netif_tx_start_all_queues(priv->dev);
3762 
3763 	return 0;
3764 
3765 irq_error:
3766 	phylink_stop(priv->phylink);
3767 
3768 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3769 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3770 
3771 	stmmac_hw_teardown(dev);
3772 init_error:
3773 	free_dma_desc_resources(priv);
3774 dma_desc_error:
3775 	phylink_disconnect_phy(priv->phylink);
3776 init_phy_error:
3777 	pm_runtime_put(priv->device);
3778 	return ret;
3779 }
3780 
3781 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3782 {
3783 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3784 
3785 	if (priv->fpe_wq)
3786 		destroy_workqueue(priv->fpe_wq);
3787 
3788 	netdev_info(priv->dev, "FPE workqueue stop");
3789 }
3790 
3791 /**
3792  *  stmmac_release - close entry point of the driver
3793  *  @dev : device pointer.
3794  *  Description:
3795  *  This is the stop entry point of the driver.
3796  */
3797 static int stmmac_release(struct net_device *dev)
3798 {
3799 	struct stmmac_priv *priv = netdev_priv(dev);
3800 	u32 chan;
3801 
3802 	netif_tx_disable(dev);
3803 
3804 	if (device_may_wakeup(priv->device))
3805 		phylink_speed_down(priv->phylink, false);
3806 	/* Stop and disconnect the PHY */
3807 	phylink_stop(priv->phylink);
3808 	phylink_disconnect_phy(priv->phylink);
3809 
3810 	stmmac_disable_all_queues(priv);
3811 
3812 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3813 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3814 
3815 	/* Free the IRQ lines */
3816 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3817 
3818 	if (priv->eee_enabled) {
3819 		priv->tx_path_in_lpi_mode = false;
3820 		del_timer_sync(&priv->eee_ctrl_timer);
3821 	}
3822 
3823 	/* Stop TX/RX DMA and clear the descriptors */
3824 	stmmac_stop_all_dma(priv);
3825 
3826 	/* Release and free the Rx/Tx resources */
3827 	free_dma_desc_resources(priv);
3828 
3829 	/* Disable the MAC Rx/Tx */
3830 	stmmac_mac_set(priv, priv->ioaddr, false);
3831 
3832 	netif_carrier_off(dev);
3833 
3834 	stmmac_release_ptp(priv);
3835 
3836 	pm_runtime_put(priv->device);
3837 
3838 	if (priv->dma_cap.fpesel)
3839 		stmmac_fpe_stop_wq(priv);
3840 
3841 	return 0;
3842 }
3843 
3844 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3845 			       struct stmmac_tx_queue *tx_q)
3846 {
3847 	u16 tag = 0x0, inner_tag = 0x0;
3848 	u32 inner_type = 0x0;
3849 	struct dma_desc *p;
3850 
3851 	if (!priv->dma_cap.vlins)
3852 		return false;
3853 	if (!skb_vlan_tag_present(skb))
3854 		return false;
3855 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3856 		inner_tag = skb_vlan_tag_get(skb);
3857 		inner_type = STMMAC_VLAN_INSERT;
3858 	}
3859 
3860 	tag = skb_vlan_tag_get(skb);
3861 
3862 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3863 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3864 	else
3865 		p = &tx_q->dma_tx[tx_q->cur_tx];
3866 
3867 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3868 		return false;
3869 
3870 	stmmac_set_tx_owner(priv, p);
3871 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3872 	return true;
3873 }
3874 
3875 /**
 *  stmmac_tso_allocator - allocate and fill descriptors for the TSO payload
3877  *  @priv: driver private structure
3878  *  @des: buffer start address
3879  *  @total_len: total length to fill in descriptors
3880  *  @last_segment: condition for the last descriptor
3881  *  @queue: TX queue index
3882  *  Description:
 *  This function fills the descriptors, requesting new ones as needed,
 *  according to the buffer length to fill.
3885  */
3886 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3887 				 int total_len, bool last_segment, u32 queue)
3888 {
3889 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3890 	struct dma_desc *desc;
3891 	u32 buff_size;
3892 	int tmp_len;
3893 
3894 	tmp_len = total_len;
3895 
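	/* Split the payload into TSO_MAX_BUFF_SIZE chunks, consuming one
	 * descriptor per chunk.
	 */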
3896 	while (tmp_len > 0) {
3897 		dma_addr_t curr_addr;
3898 
3899 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3900 						priv->dma_tx_size);
3901 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3902 
3903 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3904 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3905 		else
3906 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3907 
3908 		curr_addr = des + (total_len - tmp_len);
3909 		if (priv->dma_cap.addr64 <= 32)
3910 			desc->des0 = cpu_to_le32(curr_addr);
3911 		else
3912 			stmmac_set_desc_addr(priv, desc, curr_addr);
3913 
3914 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3915 			    TSO_MAX_BUFF_SIZE : tmp_len;
3916 
3917 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3918 				0, 1,
3919 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3920 				0, 0);
3921 
3922 		tmp_len -= TSO_MAX_BUFF_SIZE;
3923 	}
3924 }
3925 
3926 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3927 {
3928 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3929 	int desc_size;
3930 
3931 	if (likely(priv->extend_desc))
3932 		desc_size = sizeof(struct dma_extended_desc);
3933 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3934 		desc_size = sizeof(struct dma_edesc);
3935 	else
3936 		desc_size = sizeof(struct dma_desc);
3937 
	/* The own bit must be the last setting done when preparing the
	 * descriptor, and a barrier is needed to make sure that
	 * everything is coherent before handing control to the DMA engine.
3941 	 */
3942 	wmb();
3943 
3944 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3945 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3946 }
3947 
3948 /**
3949  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3950  *  @skb : the socket buffer
3951  *  @dev : device pointer
3952  *  Description: this is the transmit function that is called on TSO frames
3953  *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming in case of TSO frames:
3955  *
3956  *  First Descriptor
3957  *   --------
3958  *   | DES0 |---> buffer1 = L2/L3/L4 header
3959  *   | DES1 |---> TCP Payload (can continue on next descr...)
3960  *   | DES2 |---> buffer 1 and 2 len
3961  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3962  *   --------
3963  *	|
3964  *     ...
3965  *	|
3966  *   --------
3967  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3968  *   | DES1 | --|
3969  *   | DES2 | --> buffer 1 and 2 len
3970  *   | DES3 |
3971  *   --------
3972  *
 * The MSS is fixed once TSO is enabled, so the TDES3 context field only
 * needs to be programmed when the MSS changes.
3974  */
3975 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3976 {
3977 	struct dma_desc *desc, *first, *mss_desc = NULL;
3978 	struct stmmac_priv *priv = netdev_priv(dev);
3979 	int nfrags = skb_shinfo(skb)->nr_frags;
3980 	u32 queue = skb_get_queue_mapping(skb);
3981 	unsigned int first_entry, tx_packets;
3982 	int tmp_pay_len = 0, first_tx;
3983 	struct stmmac_tx_queue *tx_q;
3984 	bool has_vlan, set_ic;
3985 	u8 proto_hdr_len, hdr;
3986 	u32 pay_len, mss;
3987 	dma_addr_t des;
3988 	int i;
3989 
3990 	tx_q = &priv->tx_queue[queue];
3991 	first_tx = tx_q->cur_tx;
3992 
3993 	/* Compute header lengths */
3994 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3995 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3996 		hdr = sizeof(struct udphdr);
3997 	} else {
3998 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3999 		hdr = tcp_hdrlen(skb);
4000 	}
4001 
	/* Descriptor availability based on the threshold should be safe enough */
4003 	if (unlikely(stmmac_tx_avail(priv, queue) <
4004 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4005 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4006 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4007 								queue));
4008 			/* This is a hard error, log it. */
4009 			netdev_err(priv->dev,
4010 				   "%s: Tx Ring full when queue awake\n",
4011 				   __func__);
4012 		}
4013 		return NETDEV_TX_BUSY;
4014 	}
4015 
4016 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4017 
4018 	mss = skb_shinfo(skb)->gso_size;
4019 
4020 	/* set new MSS value if needed */
4021 	if (mss != tx_q->mss) {
4022 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4023 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4024 		else
4025 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4026 
4027 		stmmac_set_mss(priv, mss_desc, mss);
4028 		tx_q->mss = mss;
4029 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4030 						priv->dma_tx_size);
4031 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4032 	}
4033 
4034 	if (netif_msg_tx_queued(priv)) {
4035 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4036 			__func__, hdr, proto_hdr_len, pay_len, mss);
4037 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4038 			skb->data_len);
4039 	}
4040 
4041 	/* Check if VLAN can be inserted by HW */
4042 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4043 
4044 	first_entry = tx_q->cur_tx;
4045 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4046 
4047 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4048 		desc = &tx_q->dma_entx[first_entry].basic;
4049 	else
4050 		desc = &tx_q->dma_tx[first_entry];
4051 	first = desc;
4052 
4053 	if (has_vlan)
4054 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4055 
4056 	/* first descriptor: fill Headers on Buf1 */
4057 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4058 			     DMA_TO_DEVICE);
4059 	if (dma_mapping_error(priv->device, des))
4060 		goto dma_map_err;
4061 
4062 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4063 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4064 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4065 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4066 
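	/* With 32-bit DMA addressing the first descriptor carries the headers
	 * in buffer 1 and the start of the payload in buffer 2; with wider
	 * addressing it carries only the headers and the whole payload goes
	 * through stmmac_tso_allocator().
	 */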
4067 	if (priv->dma_cap.addr64 <= 32) {
4068 		first->des0 = cpu_to_le32(des);
4069 
4070 		/* Fill start of payload in buff2 of first descriptor */
4071 		if (pay_len)
4072 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4073 
4074 		/* If needed take extra descriptors to fill the remaining payload */
4075 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4076 	} else {
4077 		stmmac_set_desc_addr(priv, first, des);
4078 		tmp_pay_len = pay_len;
4079 		des += proto_hdr_len;
4080 		pay_len = 0;
4081 	}
4082 
4083 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4084 
4085 	/* Prepare fragments */
4086 	for (i = 0; i < nfrags; i++) {
4087 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4088 
4089 		des = skb_frag_dma_map(priv->device, frag, 0,
4090 				       skb_frag_size(frag),
4091 				       DMA_TO_DEVICE);
4092 		if (dma_mapping_error(priv->device, des))
4093 			goto dma_map_err;
4094 
4095 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4096 				     (i == nfrags - 1), queue);
4097 
4098 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4099 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4100 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4101 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4102 	}
4103 
4104 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4105 
4106 	/* Only the last descriptor gets to point to the skb. */
4107 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4108 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4109 
4110 	/* Manage tx mitigation */
4111 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4112 	tx_q->tx_count_frames += tx_packets;
4113 
4114 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4115 		set_ic = true;
4116 	else if (!priv->tx_coal_frames[queue])
4117 		set_ic = false;
4118 	else if (tx_packets > priv->tx_coal_frames[queue])
4119 		set_ic = true;
4120 	else if ((tx_q->tx_count_frames %
4121 		  priv->tx_coal_frames[queue]) < tx_packets)
4122 		set_ic = true;
4123 	else
4124 		set_ic = false;
4125 
4126 	if (set_ic) {
4127 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4128 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4129 		else
4130 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4131 
4132 		tx_q->tx_count_frames = 0;
4133 		stmmac_set_tx_ic(priv, desc);
4134 		priv->xstats.tx_set_ic_bit++;
4135 	}
4136 
4137 	/* We've used all descriptors we need for this skb, however,
4138 	 * advance cur_tx so that it references a fresh descriptor.
4139 	 * ndo_start_xmit will fill this descriptor the next time it's
4140 	 * called and stmmac_tx_clean may clean up to this descriptor.
4141 	 */
4142 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4143 
4144 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4145 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4146 			  __func__);
4147 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4148 	}
4149 
4150 	dev->stats.tx_bytes += skb->len;
4151 	priv->xstats.tx_tso_frames++;
4152 	priv->xstats.tx_tso_nfrags += nfrags;
4153 
4154 	if (priv->sarc_type)
4155 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4156 
4157 	skb_tx_timestamp(skb);
4158 
4159 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4160 		     priv->hwts_tx_en)) {
4161 		/* declare that device is doing timestamping */
4162 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4163 		stmmac_enable_tx_timestamp(priv, first);
4164 	}
4165 
4166 	/* Complete the first descriptor before granting the DMA */
4167 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4168 			proto_hdr_len,
4169 			pay_len,
4170 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4171 			hdr / 4, (skb->len - proto_hdr_len));
4172 
4173 	/* If context desc is used to change MSS */
4174 	if (mss_desc) {
4175 		/* Make sure that first descriptor has been completely
4176 		 * written, including its own bit. This is because MSS is
4177 		 * actually before first descriptor, so we need to make
4178 		 * sure that MSS's own bit is the last thing written.
4179 		 */
4180 		dma_wmb();
4181 		stmmac_set_tx_owner(priv, mss_desc);
4182 	}
4183 
4184 	if (netif_msg_pktdata(priv)) {
4185 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4186 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4187 			tx_q->cur_tx, first, nfrags);
4188 		pr_info(">>> frame to be transmitted: ");
4189 		print_pkt(skb->data, skb_headlen(skb));
4190 	}
4191 
4192 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4193 
4194 	stmmac_flush_tx_descriptors(priv, queue);
4195 	stmmac_tx_timer_arm(priv, queue);
4196 
4197 	return NETDEV_TX_OK;
4198 
4199 dma_map_err:
4200 	dev_err(priv->device, "Tx dma map failed\n");
4201 	dev_kfree_skb(skb);
4202 	priv->dev->stats.tx_dropped++;
4203 	return NETDEV_TX_OK;
4204 }
4205 
4206 /**
4207  *  stmmac_xmit - Tx entry point of the driver
4208  *  @skb : the socket buffer
4209  *  @dev : device pointer
4210  *  Description : this is the tx entry point of the driver.
4211  *  It programs the chain or the ring and supports oversized frames
 *  and the SG feature.
4213  */
4214 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4215 {
4216 	unsigned int first_entry, tx_packets, enh_desc;
4217 	struct stmmac_priv *priv = netdev_priv(dev);
4218 	unsigned int nopaged_len = skb_headlen(skb);
4219 	int i, csum_insertion = 0, is_jumbo = 0;
4220 	u32 queue = skb_get_queue_mapping(skb);
4221 	int nfrags = skb_shinfo(skb)->nr_frags;
4222 	int gso = skb_shinfo(skb)->gso_type;
4223 	struct dma_edesc *tbs_desc = NULL;
4224 	struct dma_desc *desc, *first;
4225 	struct stmmac_tx_queue *tx_q;
4226 	bool has_vlan, set_ic;
4227 	int entry, first_tx;
4228 	dma_addr_t des;
4229 
4230 	tx_q = &priv->tx_queue[queue];
4231 	first_tx = tx_q->cur_tx;
4232 
4233 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4234 		stmmac_disable_eee_mode(priv);
4235 
4236 	/* Manage oversized TCP frames for GMAC4 device */
4237 	if (skb_is_gso(skb) && priv->tso) {
4238 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4239 			return stmmac_tso_xmit(skb, dev);
4240 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4241 			return stmmac_tso_xmit(skb, dev);
4242 	}
4243 
4244 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4245 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4246 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4247 								queue));
4248 			/* This is a hard error, log it. */
4249 			netdev_err(priv->dev,
4250 				   "%s: Tx Ring full when queue awake\n",
4251 				   __func__);
4252 		}
4253 		return NETDEV_TX_BUSY;
4254 	}
4255 
4256 	/* Check if VLAN can be inserted by HW */
4257 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4258 
4259 	entry = tx_q->cur_tx;
4260 	first_entry = entry;
4261 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4262 
4263 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4264 
4265 	if (likely(priv->extend_desc))
4266 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4267 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4268 		desc = &tx_q->dma_entx[entry].basic;
4269 	else
4270 		desc = tx_q->dma_tx + entry;
4271 
4272 	first = desc;
4273 
4274 	if (has_vlan)
4275 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4276 
4277 	enh_desc = priv->plat->enh_desc;
4278 	/* To program the descriptors according to the size of the frame */
4279 	if (enh_desc)
4280 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4281 
4282 	if (unlikely(is_jumbo)) {
4283 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4284 		if (unlikely(entry < 0) && (entry != -EINVAL))
4285 			goto dma_map_err;
4286 	}
4287 
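	/* Map each SG fragment and program one descriptor per fragment,
	 * setting the OWN bit as we go.
	 */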
4288 	for (i = 0; i < nfrags; i++) {
4289 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4290 		int len = skb_frag_size(frag);
4291 		bool last_segment = (i == (nfrags - 1));
4292 
4293 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4294 		WARN_ON(tx_q->tx_skbuff[entry]);
4295 
4296 		if (likely(priv->extend_desc))
4297 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4298 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4299 			desc = &tx_q->dma_entx[entry].basic;
4300 		else
4301 			desc = tx_q->dma_tx + entry;
4302 
4303 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4304 				       DMA_TO_DEVICE);
4305 		if (dma_mapping_error(priv->device, des))
4306 			goto dma_map_err; /* should reuse desc w/o issues */
4307 
4308 		tx_q->tx_skbuff_dma[entry].buf = des;
4309 
4310 		stmmac_set_desc_addr(priv, desc, des);
4311 
4312 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4313 		tx_q->tx_skbuff_dma[entry].len = len;
4314 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4315 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4316 
4317 		/* Prepare the descriptor and set the own bit too */
4318 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4319 				priv->mode, 1, last_segment, skb->len);
4320 	}
4321 
4322 	/* Only the last descriptor gets to point to the skb. */
4323 	tx_q->tx_skbuff[entry] = skb;
4324 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4325 
	/* According to the coalesce parameter, the IC bit for the latest
	 * segment is reset and the timer is re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
	 * element in case of no SG.
4330 	 */
4331 	tx_packets = (entry + 1) - first_tx;
4332 	tx_q->tx_count_frames += tx_packets;
4333 
4334 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4335 		set_ic = true;
4336 	else if (!priv->tx_coal_frames[queue])
4337 		set_ic = false;
4338 	else if (tx_packets > priv->tx_coal_frames[queue])
4339 		set_ic = true;
4340 	else if ((tx_q->tx_count_frames %
4341 		  priv->tx_coal_frames[queue]) < tx_packets)
4342 		set_ic = true;
4343 	else
4344 		set_ic = false;
4345 
4346 	if (set_ic) {
4347 		if (likely(priv->extend_desc))
4348 			desc = &tx_q->dma_etx[entry].basic;
4349 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4350 			desc = &tx_q->dma_entx[entry].basic;
4351 		else
4352 			desc = &tx_q->dma_tx[entry];
4353 
4354 		tx_q->tx_count_frames = 0;
4355 		stmmac_set_tx_ic(priv, desc);
4356 		priv->xstats.tx_set_ic_bit++;
4357 	}
4358 
4359 	/* We've used all descriptors we need for this skb, however,
4360 	 * advance cur_tx so that it references a fresh descriptor.
4361 	 * ndo_start_xmit will fill this descriptor the next time it's
4362 	 * called and stmmac_tx_clean may clean up to this descriptor.
4363 	 */
4364 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4365 	tx_q->cur_tx = entry;
4366 
4367 	if (netif_msg_pktdata(priv)) {
4368 		netdev_dbg(priv->dev,
4369 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4370 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4371 			   entry, first, nfrags);
4372 
4373 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4374 		print_pkt(skb->data, skb->len);
4375 	}
4376 
4377 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4378 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4379 			  __func__);
4380 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4381 	}
4382 
4383 	dev->stats.tx_bytes += skb->len;
4384 
4385 	if (priv->sarc_type)
4386 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4387 
4388 	skb_tx_timestamp(skb);
4389 
4390 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4391 	 * problems because all the descriptors are actually ready to be
4392 	 * passed to the DMA engine.
4393 	 */
4394 	if (likely(!is_jumbo)) {
4395 		bool last_segment = (nfrags == 0);
4396 
4397 		des = dma_map_single(priv->device, skb->data,
4398 				     nopaged_len, DMA_TO_DEVICE);
4399 		if (dma_mapping_error(priv->device, des))
4400 			goto dma_map_err;
4401 
4402 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4403 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4404 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4405 
4406 		stmmac_set_desc_addr(priv, first, des);
4407 
4408 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4409 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4410 
4411 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4412 			     priv->hwts_tx_en)) {
4413 			/* declare that device is doing timestamping */
4414 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4415 			stmmac_enable_tx_timestamp(priv, first);
4416 		}
4417 
4418 		/* Prepare the first descriptor setting the OWN bit too */
4419 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4420 				csum_insertion, priv->mode, 0, last_segment,
4421 				skb->len);
4422 	}
4423 
4424 	if (tx_q->tbs & STMMAC_TBS_EN) {
4425 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4426 
4427 		tbs_desc = &tx_q->dma_entx[first_entry];
4428 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4429 	}
4430 
4431 	stmmac_set_tx_owner(priv, first);
4432 
4433 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4434 
4435 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4436 
4437 	stmmac_flush_tx_descriptors(priv, queue);
4438 	stmmac_tx_timer_arm(priv, queue);
4439 
4440 	return NETDEV_TX_OK;
4441 
4442 dma_map_err:
4443 	netdev_err(priv->dev, "Tx DMA map failed\n");
4444 	dev_kfree_skb(skb);
4445 	priv->dev->stats.tx_dropped++;
4446 	return NETDEV_TX_OK;
4447 }
4448 
4449 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4450 {
4451 	struct vlan_ethhdr *veth;
4452 	__be16 vlan_proto;
4453 	u16 vlanid;
4454 
4455 	veth = (struct vlan_ethhdr *)skb->data;
4456 	vlan_proto = veth->h_vlan_proto;
4457 
4458 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4459 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4460 	    (vlan_proto == htons(ETH_P_8021AD) &&
4461 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4462 		/* pop the vlan tag */
4463 		vlanid = ntohs(veth->h_vlan_TCI);
4464 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4465 		skb_pull(skb, VLAN_HLEN);
4466 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4467 	}
4468 }
4469 
4470 /**
 * stmmac_rx_refill - refill the used preallocated RX buffers
4472  * @priv: driver private structure
4473  * @queue: RX queue index
 * Description : this is to reallocate the RX buffers for the reception
 * process that is based on zero-copy.
4476  */
4477 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4478 {
4479 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4480 	int dirty = stmmac_rx_dirty(priv, queue);
4481 	unsigned int entry = rx_q->dirty_rx;
4482 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4483 
4484 	if (priv->dma_cap.addr64 <= 32)
4485 		gfp |= GFP_DMA32;
4486 
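	/* Re-arm every dirty entry with fresh page pool buffers and hand
	 * ownership of the descriptors back to the DMA.
	 */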
4487 	while (dirty-- > 0) {
4488 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4489 		struct dma_desc *p;
4490 		bool use_rx_wd;
4491 
4492 		if (priv->extend_desc)
4493 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4494 		else
4495 			p = rx_q->dma_rx + entry;
4496 
4497 		if (!buf->page) {
4498 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4499 			if (!buf->page)
4500 				break;
4501 		}
4502 
4503 		if (priv->sph && !buf->sec_page) {
4504 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4505 			if (!buf->sec_page)
4506 				break;
4507 
4508 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4509 		}
4510 
4511 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4512 
4513 		stmmac_set_desc_addr(priv, p, buf->addr);
4514 		if (priv->sph)
4515 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4516 		else
4517 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4518 		stmmac_refill_desc3(priv, rx_q, p);
4519 
4520 		rx_q->rx_count_frames++;
4521 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4522 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4523 			rx_q->rx_count_frames = 0;
4524 
4525 		use_rx_wd = !priv->rx_coal_frames[queue];
4526 		use_rx_wd |= rx_q->rx_count_frames > 0;
4527 		if (!priv->use_riwt)
4528 			use_rx_wd = false;
4529 
4530 		dma_wmb();
4531 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4532 
4533 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4534 	}
4535 	rx_q->dirty_rx = entry;
4536 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4537 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4538 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4539 }
4540 
4541 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4542 				       struct dma_desc *p,
4543 				       int status, unsigned int len)
4544 {
4545 	unsigned int plen = 0, hlen = 0;
4546 	int coe = priv->hw->rx_csum;
4547 
4548 	/* Not first descriptor, buffer is always zero */
4549 	if (priv->sph && len)
4550 		return 0;
4551 
4552 	/* First descriptor, get split header length */
4553 	stmmac_get_rx_header_len(priv, p, &hlen);
4554 	if (priv->sph && hlen) {
4555 		priv->xstats.rx_split_hdr_pkt_n++;
4556 		return hlen;
4557 	}
4558 
4559 	/* First descriptor, not last descriptor and not split header */
4560 	if (status & rx_not_ls)
4561 		return priv->dma_buf_sz;
4562 
4563 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4564 
4565 	/* First descriptor and last descriptor and not split header */
4566 	return min_t(unsigned int, priv->dma_buf_sz, plen);
4567 }
4568 
4569 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4570 				       struct dma_desc *p,
4571 				       int status, unsigned int len)
4572 {
4573 	int coe = priv->hw->rx_csum;
4574 	unsigned int plen = 0;
4575 
4576 	/* Not split header, buffer is not available */
4577 	if (!priv->sph)
4578 		return 0;
4579 
4580 	/* Not last descriptor */
4581 	if (status & rx_not_ls)
4582 		return priv->dma_buf_sz;
4583 
4584 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4585 
4586 	/* Last descriptor */
4587 	return plen - len;
4588 }
4589 
4590 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4591 				struct xdp_frame *xdpf, bool dma_map)
4592 {
4593 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4594 	unsigned int entry = tx_q->cur_tx;
4595 	struct dma_desc *tx_desc;
4596 	dma_addr_t dma_addr;
4597 	bool set_ic;
4598 
4599 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4600 		return STMMAC_XDP_CONSUMED;
4601 
4602 	if (likely(priv->extend_desc))
4603 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4604 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4605 		tx_desc = &tx_q->dma_entx[entry].basic;
4606 	else
4607 		tx_desc = tx_q->dma_tx + entry;
4608 
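	/* Frames coming from ndo_xdp_xmit() need a fresh DMA mapping, while
	 * XDP_TX frames reuse the page pool mapping and only need a sync.
	 */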
4609 	if (dma_map) {
4610 		dma_addr = dma_map_single(priv->device, xdpf->data,
4611 					  xdpf->len, DMA_TO_DEVICE);
4612 		if (dma_mapping_error(priv->device, dma_addr))
4613 			return STMMAC_XDP_CONSUMED;
4614 
4615 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4616 	} else {
4617 		struct page *page = virt_to_page(xdpf->data);
4618 
4619 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4620 			   xdpf->headroom;
4621 		dma_sync_single_for_device(priv->device, dma_addr,
4622 					   xdpf->len, DMA_BIDIRECTIONAL);
4623 
4624 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4625 	}
4626 
4627 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4628 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4629 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4630 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4631 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4632 
4633 	tx_q->xdpf[entry] = xdpf;
4634 
4635 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4636 
4637 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4638 			       true, priv->mode, true, true,
4639 			       xdpf->len);
4640 
4641 	tx_q->tx_count_frames++;
4642 
4643 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4644 		set_ic = true;
4645 	else
4646 		set_ic = false;
4647 
4648 	if (set_ic) {
4649 		tx_q->tx_count_frames = 0;
4650 		stmmac_set_tx_ic(priv, tx_desc);
4651 		priv->xstats.tx_set_ic_bit++;
4652 	}
4653 
4654 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4655 
4656 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4657 	tx_q->cur_tx = entry;
4658 
4659 	return STMMAC_XDP_TX;
4660 }
4661 
4662 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4663 				   int cpu)
4664 {
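	/* Map the current CPU to a TX queue, wrapping around when there are
	 * fewer TX queues than CPUs.
	 */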
4665 	int index = cpu;
4666 
4667 	if (unlikely(index < 0))
4668 		index = 0;
4669 
4670 	while (index >= priv->plat->tx_queues_to_use)
4671 		index -= priv->plat->tx_queues_to_use;
4672 
4673 	return index;
4674 }
4675 
4676 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4677 				struct xdp_buff *xdp)
4678 {
4679 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4680 	int cpu = smp_processor_id();
4681 	struct netdev_queue *nq;
4682 	int queue;
4683 	int res;
4684 
4685 	if (unlikely(!xdpf))
4686 		return STMMAC_XDP_CONSUMED;
4687 
4688 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4689 	nq = netdev_get_tx_queue(priv->dev, queue);
4690 
4691 	__netif_tx_lock(nq, cpu);
4692 	/* Avoids TX time-out as we are sharing with slow path */
4693 	txq_trans_cond_update(nq);
4694 
4695 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4696 	if (res == STMMAC_XDP_TX)
4697 		stmmac_flush_tx_descriptors(priv, queue);
4698 
4699 	__netif_tx_unlock(nq);
4700 
4701 	return res;
4702 }
4703 
4704 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4705 				 struct bpf_prog *prog,
4706 				 struct xdp_buff *xdp)
4707 {
4708 	u32 act;
4709 	int res;
4710 
4711 	act = bpf_prog_run_xdp(prog, xdp);
4712 	switch (act) {
4713 	case XDP_PASS:
4714 		res = STMMAC_XDP_PASS;
4715 		break;
4716 	case XDP_TX:
4717 		res = stmmac_xdp_xmit_back(priv, xdp);
4718 		break;
4719 	case XDP_REDIRECT:
4720 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4721 			res = STMMAC_XDP_CONSUMED;
4722 		else
4723 			res = STMMAC_XDP_REDIRECT;
4724 		break;
4725 	default:
4726 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4727 		fallthrough;
4728 	case XDP_ABORTED:
4729 		trace_xdp_exception(priv->dev, prog, act);
4730 		fallthrough;
4731 	case XDP_DROP:
4732 		res = STMMAC_XDP_CONSUMED;
4733 		break;
4734 	}
4735 
4736 	return res;
4737 }
4738 
4739 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4740 					   struct xdp_buff *xdp)
4741 {
4742 	struct bpf_prog *prog;
4743 	int res;
4744 
4745 	prog = READ_ONCE(priv->xdp_prog);
4746 	if (!prog) {
4747 		res = STMMAC_XDP_PASS;
4748 		goto out;
4749 	}
4750 
4751 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4752 out:
4753 	return ERR_PTR(-res);
4754 }
4755 
4756 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4757 				   int xdp_status)
4758 {
4759 	int cpu = smp_processor_id();
4760 	int queue;
4761 
4762 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4763 
4764 	if (xdp_status & STMMAC_XDP_TX)
4765 		stmmac_tx_timer_arm(priv, queue);
4766 
4767 	if (xdp_status & STMMAC_XDP_REDIRECT)
4768 		xdp_do_flush();
4769 }
4770 
4771 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4772 					       struct xdp_buff *xdp)
4773 {
4774 	unsigned int metasize = xdp->data - xdp->data_meta;
4775 	unsigned int datasize = xdp->data_end - xdp->data;
4776 	struct sk_buff *skb;
4777 
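	/* Copy the XDP buffer into a freshly allocated skb, preserving the
	 * headroom and any metadata set by the XDP program.
	 */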
4778 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4779 			       xdp->data_end - xdp->data_hard_start,
4780 			       GFP_ATOMIC | __GFP_NOWARN);
4781 	if (unlikely(!skb))
4782 		return NULL;
4783 
4784 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4785 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4786 	if (metasize)
4787 		skb_metadata_set(skb, metasize);
4788 
4789 	return skb;
4790 }
4791 
4792 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4793 				   struct dma_desc *p, struct dma_desc *np,
4794 				   struct xdp_buff *xdp)
4795 {
4796 	struct stmmac_channel *ch = &priv->channel[queue];
4797 	unsigned int len = xdp->data_end - xdp->data;
4798 	enum pkt_hash_types hash_type;
4799 	int coe = priv->hw->rx_csum;
4800 	struct sk_buff *skb;
4801 	u32 hash;
4802 
4803 	skb = stmmac_construct_skb_zc(ch, xdp);
4804 	if (!skb) {
4805 		priv->dev->stats.rx_dropped++;
4806 		return;
4807 	}
4808 
4809 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4810 	stmmac_rx_vlan(priv->dev, skb);
4811 	skb->protocol = eth_type_trans(skb, priv->dev);
4812 
4813 	if (unlikely(!coe))
4814 		skb_checksum_none_assert(skb);
4815 	else
4816 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4817 
4818 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4819 		skb_set_hash(skb, hash, hash_type);
4820 
4821 	skb_record_rx_queue(skb, queue);
4822 	napi_gro_receive(&ch->rxtx_napi, skb);
4823 
4824 	priv->dev->stats.rx_packets++;
4825 	priv->dev->stats.rx_bytes += len;
4826 }
4827 
4828 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4829 {
4830 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4831 	unsigned int entry = rx_q->dirty_rx;
4832 	struct dma_desc *rx_desc = NULL;
4833 	bool ret = true;
4834 
4835 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4836 
4837 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4838 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4839 		dma_addr_t dma_addr;
4840 		bool use_rx_wd;
4841 
4842 		if (!buf->xdp) {
4843 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4844 			if (!buf->xdp) {
4845 				ret = false;
4846 				break;
4847 			}
4848 		}
4849 
4850 		if (priv->extend_desc)
4851 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4852 		else
4853 			rx_desc = rx_q->dma_rx + entry;
4854 
4855 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4856 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4857 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4858 		stmmac_refill_desc3(priv, rx_q, rx_desc);
4859 
4860 		rx_q->rx_count_frames++;
4861 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4862 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4863 			rx_q->rx_count_frames = 0;
4864 
4865 		use_rx_wd = !priv->rx_coal_frames[queue];
4866 		use_rx_wd |= rx_q->rx_count_frames > 0;
4867 		if (!priv->use_riwt)
4868 			use_rx_wd = false;
4869 
4870 		dma_wmb();
4871 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4872 
4873 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4874 	}
4875 
4876 	if (rx_desc) {
4877 		rx_q->dirty_rx = entry;
4878 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4879 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
4880 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4881 	}
4882 
4883 	return ret;
4884 }
4885 
4886 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4887 {
4888 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4889 	unsigned int count = 0, error = 0, len = 0;
4890 	int dirty = stmmac_rx_dirty(priv, queue);
4891 	unsigned int next_entry = rx_q->cur_rx;
4892 	unsigned int desc_size;
4893 	struct bpf_prog *prog;
4894 	bool failure = false;
4895 	int xdp_status = 0;
4896 	int status = 0;
4897 
4898 	if (netif_msg_rx_status(priv)) {
4899 		void *rx_head;
4900 
4901 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4902 		if (priv->extend_desc) {
4903 			rx_head = (void *)rx_q->dma_erx;
4904 			desc_size = sizeof(struct dma_extended_desc);
4905 		} else {
4906 			rx_head = (void *)rx_q->dma_rx;
4907 			desc_size = sizeof(struct dma_desc);
4908 		}
4909 
4910 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4911 				    rx_q->dma_rx_phy, desc_size);
4912 	}
4913 	while (count < limit) {
4914 		struct stmmac_rx_buffer *buf;
4915 		unsigned int buf1_len = 0;
4916 		struct dma_desc *np, *p;
4917 		int entry;
4918 		int res;
4919 
4920 		if (!count && rx_q->state_saved) {
4921 			error = rx_q->state.error;
4922 			len = rx_q->state.len;
4923 		} else {
4924 			rx_q->state_saved = false;
4925 			error = 0;
4926 			len = 0;
4927 		}
4928 
4929 		if (count >= limit)
4930 			break;
4931 
4932 read_again:
4933 		buf1_len = 0;
4934 		entry = next_entry;
4935 		buf = &rx_q->buf_pool[entry];
4936 
4937 		if (dirty >= STMMAC_RX_FILL_BATCH) {
4938 			failure = failure ||
4939 				  !stmmac_rx_refill_zc(priv, queue, dirty);
4940 			dirty = 0;
4941 		}
4942 
4943 		if (priv->extend_desc)
4944 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4945 		else
4946 			p = rx_q->dma_rx + entry;
4947 
4948 		/* read the status of the incoming frame */
4949 		status = stmmac_rx_status(priv, &priv->dev->stats,
4950 					  &priv->xstats, p);
4951 		/* check if managed by the DMA otherwise go ahead */
4952 		if (unlikely(status & dma_own))
4953 			break;
4954 
4955 		/* Prefetch the next RX descriptor */
4956 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4957 						priv->dma_rx_size);
4958 		next_entry = rx_q->cur_rx;
4959 
4960 		if (priv->extend_desc)
4961 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4962 		else
4963 			np = rx_q->dma_rx + next_entry;
4964 
4965 		prefetch(np);
4966 
4967 		/* Ensure a valid XSK buffer before proceed */
4968 		if (!buf->xdp)
4969 			break;
4970 
4971 		if (priv->extend_desc)
4972 			stmmac_rx_extended_status(priv, &priv->dev->stats,
4973 						  &priv->xstats,
4974 						  rx_q->dma_erx + entry);
4975 		if (unlikely(status == discard_frame)) {
4976 			xsk_buff_free(buf->xdp);
4977 			buf->xdp = NULL;
4978 			dirty++;
4979 			error = 1;
4980 			if (!priv->hwts_rx_en)
4981 				priv->dev->stats.rx_errors++;
4982 		}
4983 
4984 		if (unlikely(error && (status & rx_not_ls)))
4985 			goto read_again;
4986 		if (unlikely(error)) {
4987 			count++;
4988 			continue;
4989 		}
4990 
4991 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
4992 		if (likely(status & rx_not_ls)) {
4993 			xsk_buff_free(buf->xdp);
4994 			buf->xdp = NULL;
4995 			dirty++;
4996 			count++;
4997 			goto read_again;
4998 		}
4999 
		/* XDP ZC frames only support primary buffers for now */
5001 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5002 		len += buf1_len;
5003 
5004 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5005 		 * Type frames (LLC/LLC-SNAP)
5006 		 *
5007 		 * llc_snap is never checked in GMAC >= 4, so this ACS
5008 		 * feature is always disabled and packets need to be
5009 		 * stripped manually.
5010 		 */
5011 		if (likely(!(status & rx_not_ls)) &&
5012 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5013 		     unlikely(status != llc_snap))) {
5014 			buf1_len -= ETH_FCS_LEN;
5015 			len -= ETH_FCS_LEN;
5016 		}
5017 
5018 		/* RX buffer is good and fit into a XSK pool buffer */
5019 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5020 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5021 
5022 		prog = READ_ONCE(priv->xdp_prog);
5023 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5024 
5025 		switch (res) {
5026 		case STMMAC_XDP_PASS:
5027 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5028 			xsk_buff_free(buf->xdp);
5029 			break;
5030 		case STMMAC_XDP_CONSUMED:
5031 			xsk_buff_free(buf->xdp);
5032 			priv->dev->stats.rx_dropped++;
5033 			break;
5034 		case STMMAC_XDP_TX:
5035 		case STMMAC_XDP_REDIRECT:
5036 			xdp_status |= res;
5037 			break;
5038 		}
5039 
5040 		buf->xdp = NULL;
5041 		dirty++;
5042 		count++;
5043 	}
5044 
5045 	if (status & rx_not_ls) {
5046 		rx_q->state_saved = true;
5047 		rx_q->state.error = error;
5048 		rx_q->state.len = len;
5049 	}
5050 
5051 	stmmac_finalize_xdp_rx(priv, xdp_status);
5052 
5053 	priv->xstats.rx_pkt_n += count;
5054 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5055 
5056 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5057 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5058 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5059 		else
5060 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5061 
5062 		return (int)count;
5063 	}
5064 
5065 	return failure ? limit : (int)count;
5066 }
5067 
5068 /**
5069  * stmmac_rx - manage the receive process
5070  * @priv: driver private structure
 * @limit: napi budget
5072  * @queue: RX queue index.
 * Description :  this is the function called by the napi poll method.
5074  * It gets all the frames inside the ring.
5075  */
5076 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5077 {
5078 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5079 	struct stmmac_channel *ch = &priv->channel[queue];
5080 	unsigned int count = 0, error = 0, len = 0;
5081 	int status = 0, coe = priv->hw->rx_csum;
5082 	unsigned int next_entry = rx_q->cur_rx;
5083 	enum dma_data_direction dma_dir;
5084 	unsigned int desc_size;
5085 	struct sk_buff *skb = NULL;
5086 	struct xdp_buff xdp;
5087 	int xdp_status = 0;
5088 	int buf_sz;
5089 
5090 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
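	/* The XDP frame size handed to xdp_init_buff() is the buffer size
	 * rounded up to whole pages.
	 */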
5091 	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5092 
5093 	if (netif_msg_rx_status(priv)) {
5094 		void *rx_head;
5095 
5096 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5097 		if (priv->extend_desc) {
5098 			rx_head = (void *)rx_q->dma_erx;
5099 			desc_size = sizeof(struct dma_extended_desc);
5100 		} else {
5101 			rx_head = (void *)rx_q->dma_rx;
5102 			desc_size = sizeof(struct dma_desc);
5103 		}
5104 
5105 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5106 				    rx_q->dma_rx_phy, desc_size);
5107 	}
5108 	while (count < limit) {
5109 		unsigned int buf1_len = 0, buf2_len = 0;
5110 		enum pkt_hash_types hash_type;
5111 		struct stmmac_rx_buffer *buf;
5112 		struct dma_desc *np, *p;
5113 		int entry;
5114 		u32 hash;
5115 
5116 		if (!count && rx_q->state_saved) {
5117 			skb = rx_q->state.skb;
5118 			error = rx_q->state.error;
5119 			len = rx_q->state.len;
5120 		} else {
5121 			rx_q->state_saved = false;
5122 			skb = NULL;
5123 			error = 0;
5124 			len = 0;
5125 		}
5126 
5127 		if (count >= limit)
5128 			break;
5129 
5130 read_again:
5131 		buf1_len = 0;
5132 		buf2_len = 0;
5133 		entry = next_entry;
5134 		buf = &rx_q->buf_pool[entry];
5135 
5136 		if (priv->extend_desc)
5137 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5138 		else
5139 			p = rx_q->dma_rx + entry;
5140 
5141 		/* read the status of the incoming frame */
5142 		status = stmmac_rx_status(priv, &priv->dev->stats,
5143 				&priv->xstats, p);
5144 		/* check if managed by the DMA otherwise go ahead */
5145 		if (unlikely(status & dma_own))
5146 			break;
5147 
5148 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5149 						priv->dma_rx_size);
5150 		next_entry = rx_q->cur_rx;
5151 
5152 		if (priv->extend_desc)
5153 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5154 		else
5155 			np = rx_q->dma_rx + next_entry;
5156 
5157 		prefetch(np);
5158 
5159 		if (priv->extend_desc)
5160 			stmmac_rx_extended_status(priv, &priv->dev->stats,
5161 					&priv->xstats, rx_q->dma_erx + entry);
5162 		if (unlikely(status == discard_frame)) {
5163 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5164 			buf->page = NULL;
5165 			error = 1;
5166 			if (!priv->hwts_rx_en)
5167 				priv->dev->stats.rx_errors++;
5168 		}
5169 
5170 		if (unlikely(error && (status & rx_not_ls)))
5171 			goto read_again;
5172 		if (unlikely(error)) {
5173 			dev_kfree_skb(skb);
5174 			skb = NULL;
5175 			count++;
5176 			continue;
5177 		}
5178 
5179 		/* Buffer is good. Go on. */
5180 
5181 		prefetch(page_address(buf->page) + buf->page_offset);
5182 		if (buf->sec_page)
5183 			prefetch(page_address(buf->sec_page));
5184 
5185 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5186 		len += buf1_len;
5187 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5188 		len += buf2_len;
5189 
5190 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5191 		 * Type frames (LLC/LLC-SNAP)
5192 		 *
5193 		 * llc_snap is never checked in GMAC >= 4, so this ACS
5194 		 * feature is always disabled and packets need to be
5195 		 * stripped manually.
5196 		 */
5197 		if (likely(!(status & rx_not_ls)) &&
5198 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5199 		     unlikely(status != llc_snap))) {
5200 			if (buf2_len) {
5201 				buf2_len -= ETH_FCS_LEN;
5202 				len -= ETH_FCS_LEN;
5203 			} else if (buf1_len) {
5204 				buf1_len -= ETH_FCS_LEN;
5205 				len -= ETH_FCS_LEN;
5206 			}
5207 		}
5208 
5209 		if (!skb) {
5210 			unsigned int pre_len, sync_len;
5211 
5212 			dma_sync_single_for_cpu(priv->device, buf->addr,
5213 						buf1_len, dma_dir);
5214 
5215 			xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5216 			xdp_prepare_buff(&xdp, page_address(buf->page),
5217 					 buf->page_offset, buf1_len, false);
5218 
5219 			pre_len = xdp.data_end - xdp.data_hard_start -
5220 				  buf->page_offset;
5221 			skb = stmmac_xdp_run_prog(priv, &xdp);
			/* Due to xdp_adjust_tail, the for_device DMA sync
			 * must cover the maximum length the CPU touched.
			 */
5225 			sync_len = xdp.data_end - xdp.data_hard_start -
5226 				   buf->page_offset;
5227 			sync_len = max(sync_len, pre_len);
5228 
			/* For any verdict other than XDP_PASS */
5230 			if (IS_ERR(skb)) {
5231 				unsigned int xdp_res = -PTR_ERR(skb);
5232 
5233 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5234 					page_pool_put_page(rx_q->page_pool,
5235 							   virt_to_head_page(xdp.data),
5236 							   sync_len, true);
5237 					buf->page = NULL;
5238 					priv->dev->stats.rx_dropped++;
5239 
					/* Clear skb, as it only carried the
					 * status returned by the XDP program.
					 */
5243 					skb = NULL;
5244 
5245 					if (unlikely((status & rx_not_ls)))
5246 						goto read_again;
5247 
5248 					count++;
5249 					continue;
5250 				} else if (xdp_res & (STMMAC_XDP_TX |
5251 						      STMMAC_XDP_REDIRECT)) {
5252 					xdp_status |= xdp_res;
5253 					buf->page = NULL;
5254 					skb = NULL;
5255 					count++;
5256 					continue;
5257 				}
5258 			}
5259 		}
5260 
5261 		if (!skb) {
5262 			/* XDP program may expand or reduce tail */
5263 			buf1_len = xdp.data_end - xdp.data;
5264 
5265 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5266 			if (!skb) {
5267 				priv->dev->stats.rx_dropped++;
5268 				count++;
5269 				goto drain_data;
5270 			}
5271 
5272 			/* XDP program may adjust header */
5273 			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5274 			skb_put(skb, buf1_len);
5275 
5276 			/* Data payload copied into SKB, page ready for recycle */
5277 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5278 			buf->page = NULL;
5279 		} else if (buf1_len) {
5280 			dma_sync_single_for_cpu(priv->device, buf->addr,
5281 						buf1_len, dma_dir);
5282 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5283 					buf->page, buf->page_offset, buf1_len,
5284 					priv->dma_buf_sz);
5285 
5286 			/* Data payload appended into SKB */
5287 			page_pool_release_page(rx_q->page_pool, buf->page);
5288 			buf->page = NULL;
5289 		}
5290 
5291 		if (buf2_len) {
5292 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5293 						buf2_len, dma_dir);
5294 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5295 					buf->sec_page, 0, buf2_len,
5296 					priv->dma_buf_sz);
5297 
5298 			/* Data payload appended into SKB */
5299 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
5300 			buf->sec_page = NULL;
5301 		}
5302 
5303 drain_data:
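		/* If this was not the last descriptor of the frame, keep
		 * gathering buffers before handing the SKB to the stack.
		 */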
5304 		if (likely(status & rx_not_ls))
5305 			goto read_again;
5306 		if (!skb)
5307 			continue;
5308 
5309 		/* Got entire packet into SKB. Finish it. */
5310 
5311 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5312 		stmmac_rx_vlan(priv->dev, skb);
5313 		skb->protocol = eth_type_trans(skb, priv->dev);
5314 
5315 		if (unlikely(!coe))
5316 			skb_checksum_none_assert(skb);
5317 		else
5318 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5319 
5320 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5321 			skb_set_hash(skb, hash, hash_type);
5322 
5323 		skb_record_rx_queue(skb, queue);
5324 		napi_gro_receive(&ch->rx_napi, skb);
5325 		skb = NULL;
5326 
5327 		priv->dev->stats.rx_packets++;
5328 		priv->dev->stats.rx_bytes += len;
5329 		count++;
5330 	}
5331 
5332 	if (status & rx_not_ls || skb) {
5333 		rx_q->state_saved = true;
5334 		rx_q->state.skb = skb;
5335 		rx_q->state.error = error;
5336 		rx_q->state.len = len;
5337 	}
5338 
5339 	stmmac_finalize_xdp_rx(priv, xdp_status);
5340 
5341 	stmmac_rx_refill(priv, queue);
5342 
5343 	priv->xstats.rx_pkt_n += count;
5344 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5345 
5346 	return count;
5347 }
5348 
5349 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5350 {
5351 	struct stmmac_channel *ch =
5352 		container_of(napi, struct stmmac_channel, rx_napi);
5353 	struct stmmac_priv *priv = ch->priv_data;
5354 	u32 chan = ch->index;
5355 	int work_done;
5356 
5357 	priv->xstats.napi_poll++;
5358 
5359 	work_done = stmmac_rx(priv, budget, chan);
5360 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5361 		unsigned long flags;
5362 
5363 		spin_lock_irqsave(&ch->lock, flags);
5364 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5365 		spin_unlock_irqrestore(&ch->lock, flags);
5366 	}
5367 
5368 	return work_done;
5369 }
5370 
5371 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5372 {
5373 	struct stmmac_channel *ch =
5374 		container_of(napi, struct stmmac_channel, tx_napi);
5375 	struct stmmac_priv *priv = ch->priv_data;
5376 	u32 chan = ch->index;
5377 	int work_done;
5378 
5379 	priv->xstats.napi_poll++;
5380 
5381 	work_done = stmmac_tx_clean(priv, budget, chan);
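	/* Never report more than the budget to the NAPI core */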
5382 	work_done = min(work_done, budget);
5383 
5384 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5385 		unsigned long flags;
5386 
5387 		spin_lock_irqsave(&ch->lock, flags);
5388 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5389 		spin_unlock_irqrestore(&ch->lock, flags);
5390 	}
5391 
5392 	return work_done;
5393 }
5394 
5395 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5396 {
5397 	struct stmmac_channel *ch =
5398 		container_of(napi, struct stmmac_channel, rxtx_napi);
5399 	struct stmmac_priv *priv = ch->priv_data;
5400 	int rx_done, tx_done, rxtx_done;
5401 	u32 chan = ch->index;
5402 
5403 	priv->xstats.napi_poll++;
5404 
5405 	tx_done = stmmac_tx_clean(priv, budget, chan);
5406 	tx_done = min(tx_done, budget);
5407 
5408 	rx_done = stmmac_rx_zc(priv, budget, chan);
5409 
5410 	rxtx_done = max(tx_done, rx_done);
5411 
5412 	/* If either TX or RX work is not complete, return budget
5413 	 * and keep polling
5414 	 */
5415 	if (rxtx_done >= budget)
5416 		return budget;
5417 
5418 	/* all work done, exit the polling mode */
5419 	if (napi_complete_done(napi, rxtx_done)) {
5420 		unsigned long flags;
5421 
5422 		spin_lock_irqsave(&ch->lock, flags);
5423 		/* Both RX and TX work are complete,
5424 		 * so enable both RX & TX IRQs.
5425 		 */
5426 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5427 		spin_unlock_irqrestore(&ch->lock, flags);
5428 	}
5429 
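	/* napi_complete_done() has already been called (or the NAPI was
	 * rescheduled), so never report the full budget back to the core.
	 */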
5430 	return min(rxtx_done, budget - 1);
5431 }
5432 
5433 /**
5434  *  stmmac_tx_timeout
5435  *  @dev : Pointer to net device structure
5436  *  @txqueue: the index of the hanging transmit queue
5437  *  Description: this function is called when a packet transmission fails to
5438  *   complete within a reasonable time. The driver will mark the error in the
5439  *   netdev structure and arrange for the device to be reset to a sane state
5440  *   in order to transmit a new packet.
5441  */
5442 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5443 {
5444 	struct stmmac_priv *priv = netdev_priv(dev);
5445 
5446 	stmmac_global_err(priv);
5447 }
5448 
5449 /**
5450  *  stmmac_set_rx_mode - entry point for multicast addressing
5451  *  @dev : pointer to the device structure
5452  *  Description:
5453  *  This function is a driver entry point which gets called by the kernel
5454  *  whenever multicast addresses must be enabled/disabled.
5455  *  Return value:
5456  *  void.
5457  */
5458 static void stmmac_set_rx_mode(struct net_device *dev)
5459 {
5460 	struct stmmac_priv *priv = netdev_priv(dev);
5461 
5462 	stmmac_set_filter(priv, priv->hw, dev);
5463 }
5464 
5465 /**
5466  *  stmmac_change_mtu - entry point to change MTU size for the device.
5467  *  @dev : device pointer.
5468  *  @new_mtu : the new MTU size for the device.
5469  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5470  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5471  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5472  *  Return value:
5473  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5474  *  file on failure.
5475  */
5476 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5477 {
5478 	struct stmmac_priv *priv = netdev_priv(dev);
5479 	int txfifosz = priv->plat->tx_fifo_size;
5480 	const int mtu = new_mtu;
5481 
5482 	if (txfifosz == 0)
5483 		txfifosz = priv->dma_cap.tx_fifo_size;
5484 
5485 	txfifosz /= priv->plat->tx_queues_to_use;
5486 
5487 	if (netif_running(dev)) {
5488 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
5489 		return -EBUSY;
5490 	}
5491 
5492 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5493 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5494 		return -EINVAL;
5495 	}
5496 
5497 	new_mtu = STMMAC_ALIGN(new_mtu);
5498 
5499 	/* Reject if the per-queue TX FIFO is too small or the MTU is too large */
5500 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5501 		return -EINVAL;
5502 
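	/* Program the MTU originally requested by the caller; the
	 * STMMAC_ALIGN()ed value was only used for the checks above.
	 */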
5503 	dev->mtu = mtu;
5504 
5505 	netdev_update_features(dev);
5506 
5507 	return 0;
5508 }
5509 
5510 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5511 					     netdev_features_t features)
5512 {
5513 	struct stmmac_priv *priv = netdev_priv(dev);
5514 
5515 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5516 		features &= ~NETIF_F_RXCSUM;
5517 
5518 	if (!priv->plat->tx_coe)
5519 		features &= ~NETIF_F_CSUM_MASK;
5520 
5521 	/* Some GMAC devices have buggy Jumbo frame support that
5522 	 * needs to have the Tx COE disabled for oversized frames
5523 	 * (due to limited buffer sizes). In this case we disable
5524 	 * the TX csum insertion in the TDES and don't use SF.
5525 	 */
5526 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5527 		features &= ~NETIF_F_CSUM_MASK;
5528 
5529 	/* Enable/disable TSO based on the NETIF_F_TSO feature from ethtool */
5530 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5531 		if (features & NETIF_F_TSO)
5532 			priv->tso = true;
5533 		else
5534 			priv->tso = false;
5535 	}
5536 
5537 	return features;
5538 }
5539 
5540 static int stmmac_set_features(struct net_device *netdev,
5541 			       netdev_features_t features)
5542 {
5543 	struct stmmac_priv *priv = netdev_priv(netdev);
5544 
5545 	/* Keep the COE Type if RX checksum offload is supported */
5546 	if (features & NETIF_F_RXCSUM)
5547 		priv->hw->rx_csum = priv->plat->rx_coe;
5548 	else
5549 		priv->hw->rx_csum = 0;
5550 	/* No check needed because rx_coe was set earlier and will be
5551 	 * corrected if there is an issue.
5552 	 */
5553 	stmmac_rx_ipc(priv, priv->hw);
5554 
5555 	if (priv->sph_cap) {
5556 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5557 		u32 chan;
5558 
5559 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5560 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5561 	}
5562 
5563 	return 0;
5564 }
5565 
5566 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5567 {
5568 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5569 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5570 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5571 	bool *hs_enable = &fpe_cfg->hs_enable;
5572 
5573 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5574 		return;
5575 
5576 	/* If LP has sent verify mPacket, LP is FPE capable */
5577 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5578 		if (*lp_state < FPE_STATE_CAPABLE)
5579 			*lp_state = FPE_STATE_CAPABLE;
5580 
5581 		/* If the user has requested FPE enable, respond quickly */
5582 		if (*hs_enable)
5583 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5584 						MPACKET_RESPONSE);
5585 	}
5586 
5587 	/* If Local has sent verify mPacket, Local is FPE capable */
5588 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5589 		if (*lo_state < FPE_STATE_CAPABLE)
5590 			*lo_state = FPE_STATE_CAPABLE;
5591 	}
5592 
5593 	/* If LP has sent response mPacket, LP is entering FPE ON */
5594 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5595 		*lp_state = FPE_STATE_ENTERING_ON;
5596 
5597 	/* If Local has sent response mPacket, Local is entering FPE ON */
5598 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5599 		*lo_state = FPE_STATE_ENTERING_ON;
5600 
5601 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5602 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5603 	    priv->fpe_wq) {
5604 		queue_work(priv->fpe_wq, &priv->fpe_task);
5605 	}
5606 }
5607 
5608 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5609 {
5610 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5611 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5612 	u32 queues_count;
5613 	u32 queue;
5614 	bool xmac;
5615 
5616 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5617 	queues_count = max(rx_cnt, tx_cnt);
5618 
5619 	if (priv->irq_wake)
5620 		pm_wakeup_event(priv->device, 0);
5621 
5622 	if (priv->dma_cap.estsel)
5623 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5624 				      &priv->xstats, tx_cnt);
5625 
5626 	if (priv->dma_cap.fpesel) {
5627 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5628 						   priv->dev);
5629 
5630 		stmmac_fpe_event_status(priv, status);
5631 	}
5632 
5633 	/* Handle the GMAC's own interrupts */
5634 	if ((priv->plat->has_gmac) || xmac) {
5635 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5636 
5637 		if (unlikely(status)) {
5638 			/* For LPI we need to save the tx status */
5639 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5640 				priv->tx_path_in_lpi_mode = true;
5641 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5642 				priv->tx_path_in_lpi_mode = false;
5643 		}
5644 
5645 		for (queue = 0; queue < queues_count; queue++) {
5646 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5647 							    queue);
5648 		}
5649 
5650 		/* PCS link status */
5651 		if (priv->hw->pcs) {
5652 			if (priv->xstats.pcs_link)
5653 				netif_carrier_on(priv->dev);
5654 			else
5655 				netif_carrier_off(priv->dev);
5656 		}
5657 
5658 		stmmac_timestamp_interrupt(priv, priv);
5659 	}
5660 }
5661 
5662 /**
5663  *  stmmac_interrupt - main ISR
5664  *  @irq: interrupt number.
5665  *  @dev_id: to pass the net device pointer.
5666  *  Description: this is the main driver interrupt service routine.
5667  *  It can call:
5668  *  o DMA service routine (to manage incoming frame reception and transmission
5669  *    status)
5670  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5671  *    interrupts.
5672  */
5673 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5674 {
5675 	struct net_device *dev = (struct net_device *)dev_id;
5676 	struct stmmac_priv *priv = netdev_priv(dev);
5677 
5678 	/* Check if adapter is up */
5679 	if (test_bit(STMMAC_DOWN, &priv->state))
5680 		return IRQ_HANDLED;
5681 
5682 	/* Check if a fatal error happened */
5683 	if (stmmac_safety_feat_interrupt(priv))
5684 		return IRQ_HANDLED;
5685 
5686 	/* To handle Common interrupts */
5687 	stmmac_common_interrupt(priv);
5688 
5689 	/* To handle DMA interrupts */
5690 	stmmac_dma_interrupt(priv);
5691 
5692 	return IRQ_HANDLED;
5693 }
5694 
5695 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5696 {
5697 	struct net_device *dev = (struct net_device *)dev_id;
5698 	struct stmmac_priv *priv = netdev_priv(dev);
5699 
5700 	if (unlikely(!dev)) {
5701 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5702 		return IRQ_NONE;
5703 	}
5704 
5705 	/* Check if adapter is up */
5706 	if (test_bit(STMMAC_DOWN, &priv->state))
5707 		return IRQ_HANDLED;
5708 
5709 	/* To handle Common interrupts */
5710 	stmmac_common_interrupt(priv);
5711 
5712 	return IRQ_HANDLED;
5713 }
5714 
5715 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5716 {
5717 	struct net_device *dev = (struct net_device *)dev_id;
5718 	struct stmmac_priv *priv = netdev_priv(dev);
5719 
5720 	if (unlikely(!dev)) {
5721 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5722 		return IRQ_NONE;
5723 	}
5724 
5725 	/* Check if adapter is up */
5726 	if (test_bit(STMMAC_DOWN, &priv->state))
5727 		return IRQ_HANDLED;
5728 
5729 	/* Check if a fatal error happened */
5730 	stmmac_safety_feat_interrupt(priv);
5731 
5732 	return IRQ_HANDLED;
5733 }
5734 
5735 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5736 {
5737 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5738 	int chan = tx_q->queue_index;
5739 	struct stmmac_priv *priv;
5740 	int status;
5741 
5742 	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5743 
5744 	if (unlikely(!data)) {
5745 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5746 		return IRQ_NONE;
5747 	}
5748 
5749 	/* Check if adapter is up */
5750 	if (test_bit(STMMAC_DOWN, &priv->state))
5751 		return IRQ_HANDLED;
5752 
5753 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5754 
5755 	if (unlikely(status & tx_hard_error_bump_tc)) {
5756 		/* Try to bump up the dma threshold on this failure */
5757 		stmmac_bump_dma_threshold(priv, chan);
5758 	} else if (unlikely(status == tx_hard_error)) {
5759 		stmmac_tx_err(priv, chan);
5760 	}
5761 
5762 	return IRQ_HANDLED;
5763 }
5764 
5765 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5766 {
5767 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5768 	int chan = rx_q->queue_index;
5769 	struct stmmac_priv *priv;
5770 
5771 	priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5772 
5773 	if (unlikely(!data)) {
5774 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5775 		return IRQ_NONE;
5776 	}
5777 
5778 	/* Check if adapter is up */
5779 	if (test_bit(STMMAC_DOWN, &priv->state))
5780 		return IRQ_HANDLED;
5781 
5782 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
5783 
5784 	return IRQ_HANDLED;
5785 }
5786 
5787 #ifdef CONFIG_NET_POLL_CONTROLLER
5788 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5789  * to allow network I/O with interrupts disabled.
5790  */
5791 static void stmmac_poll_controller(struct net_device *dev)
5792 {
5793 	struct stmmac_priv *priv = netdev_priv(dev);
5794 	int i;
5795 
5796 	/* If adapter is down, do nothing */
5797 	if (test_bit(STMMAC_DOWN, &priv->state))
5798 		return;
5799 
5800 	if (priv->plat->multi_msi_en) {
5801 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5802 			stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5803 
5804 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5805 			stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5806 	} else {
5807 		disable_irq(dev->irq);
5808 		stmmac_interrupt(dev->irq, dev);
5809 		enable_irq(dev->irq);
5810 	}
5811 }
5812 #endif
5813 
5814 /**
5815  *  stmmac_ioctl - Entry point for the Ioctl
5816  *  @dev: Device pointer.
5817  *  @rq: An IOCTL-specific structure that can contain a pointer to
5818  *  a proprietary structure used to pass information to the driver.
5819  *  @cmd: IOCTL command
5820  *  Description:
5821  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5822  */
5823 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5824 {
5825 	struct stmmac_priv *priv = netdev_priv(dev);
5826 	int ret = -EOPNOTSUPP;
5827 
5828 	if (!netif_running(dev))
5829 		return -EINVAL;
5830 
5831 	switch (cmd) {
5832 	case SIOCGMIIPHY:
5833 	case SIOCGMIIREG:
5834 	case SIOCSMIIREG:
5835 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5836 		break;
5837 	case SIOCSHWTSTAMP:
5838 		ret = stmmac_hwtstamp_set(dev, rq);
5839 		break;
5840 	case SIOCGHWTSTAMP:
5841 		ret = stmmac_hwtstamp_get(dev, rq);
5842 		break;
5843 	default:
5844 		break;
5845 	}
5846 
5847 	return ret;
5848 }
5849 
5850 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5851 				    void *cb_priv)
5852 {
5853 	struct stmmac_priv *priv = cb_priv;
5854 	int ret = -EOPNOTSUPP;
5855 
5856 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5857 		return ret;
5858 
5859 	__stmmac_disable_all_queues(priv);
5860 
5861 	switch (type) {
5862 	case TC_SETUP_CLSU32:
5863 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5864 		break;
5865 	case TC_SETUP_CLSFLOWER:
5866 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
5867 		break;
5868 	default:
5869 		break;
5870 	}
5871 
5872 	stmmac_enable_all_queues(priv);
5873 	return ret;
5874 }
5875 
5876 static LIST_HEAD(stmmac_block_cb_list);
5877 
5878 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5879 			   void *type_data)
5880 {
5881 	struct stmmac_priv *priv = netdev_priv(ndev);
5882 
5883 	switch (type) {
5884 	case TC_SETUP_BLOCK:
5885 		return flow_block_cb_setup_simple(type_data,
5886 						  &stmmac_block_cb_list,
5887 						  stmmac_setup_tc_block_cb,
5888 						  priv, priv, true);
5889 	case TC_SETUP_QDISC_CBS:
5890 		return stmmac_tc_setup_cbs(priv, priv, type_data);
5891 	case TC_SETUP_QDISC_TAPRIO:
5892 		return stmmac_tc_setup_taprio(priv, priv, type_data);
5893 	case TC_SETUP_QDISC_ETF:
5894 		return stmmac_tc_setup_etf(priv, priv, type_data);
5895 	default:
5896 		return -EOPNOTSUPP;
5897 	}
5898 }
5899 
5900 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5901 			       struct net_device *sb_dev)
5902 {
5903 	int gso = skb_shinfo(skb)->gso_type;
5904 
5905 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
5906 		/*
5907 		 * There is no way to determine the number of TSO/USO
5908 		 * capable Queues. Let's always use Queue 0
5909 		 * because if TSO/USO is supported then at least this
5910 		 * one will be capable.
5911 		 */
5912 		return 0;
5913 	}
5914 
5915 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
5916 }
5917 
5918 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5919 {
5920 	struct stmmac_priv *priv = netdev_priv(ndev);
5921 	int ret = 0;
5922 
5923 	ret = pm_runtime_get_sync(priv->device);
5924 	if (ret < 0) {
5925 		pm_runtime_put_noidle(priv->device);
5926 		return ret;
5927 	}
5928 
5929 	ret = eth_mac_addr(ndev, addr);
5930 	if (ret)
5931 		goto set_mac_error;
5932 
5933 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
5934 
5935 set_mac_error:
5936 	pm_runtime_put(priv->device);
5937 
5938 	return ret;
5939 }
5940 
5941 #ifdef CONFIG_DEBUG_FS
5942 static struct dentry *stmmac_fs_dir;
5943 
5944 static void sysfs_display_ring(void *head, int size, int extend_desc,
5945 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
5946 {
5947 	int i;
5948 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5949 	struct dma_desc *p = (struct dma_desc *)head;
5950 	dma_addr_t dma_addr;
5951 
5952 	for (i = 0; i < size; i++) {
5953 		if (extend_desc) {
5954 			dma_addr = dma_phy_addr + i * sizeof(*ep);
5955 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5956 				   i, &dma_addr,
5957 				   le32_to_cpu(ep->basic.des0),
5958 				   le32_to_cpu(ep->basic.des1),
5959 				   le32_to_cpu(ep->basic.des2),
5960 				   le32_to_cpu(ep->basic.des3));
5961 			ep++;
5962 		} else {
5963 			dma_addr = dma_phy_addr + i * sizeof(*p);
5964 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5965 				   i, &dma_addr,
5966 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5967 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5968 			p++;
5969 		}
5970 		seq_printf(seq, "\n");
5971 	}
5972 }
5973 
5974 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
5975 {
5976 	struct net_device *dev = seq->private;
5977 	struct stmmac_priv *priv = netdev_priv(dev);
5978 	u32 rx_count = priv->plat->rx_queues_to_use;
5979 	u32 tx_count = priv->plat->tx_queues_to_use;
5980 	u32 queue;
5981 
5982 	if ((dev->flags & IFF_UP) == 0)
5983 		return 0;
5984 
5985 	for (queue = 0; queue < rx_count; queue++) {
5986 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5987 
5988 		seq_printf(seq, "RX Queue %d:\n", queue);
5989 
5990 		if (priv->extend_desc) {
5991 			seq_printf(seq, "Extended descriptor ring:\n");
5992 			sysfs_display_ring((void *)rx_q->dma_erx,
5993 					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
5994 		} else {
5995 			seq_printf(seq, "Descriptor ring:\n");
5996 			sysfs_display_ring((void *)rx_q->dma_rx,
5997 					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
5998 		}
5999 	}
6000 
6001 	for (queue = 0; queue < tx_count; queue++) {
6002 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6003 
6004 		seq_printf(seq, "TX Queue %d:\n", queue);
6005 
6006 		if (priv->extend_desc) {
6007 			seq_printf(seq, "Extended descriptor ring:\n");
6008 			sysfs_display_ring((void *)tx_q->dma_etx,
6009 					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6010 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6011 			seq_printf(seq, "Descriptor ring:\n");
6012 			sysfs_display_ring((void *)tx_q->dma_tx,
6013 					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6014 		}
6015 	}
6016 
6017 	return 0;
6018 }
6019 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6020 
6021 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6022 {
6023 	struct net_device *dev = seq->private;
6024 	struct stmmac_priv *priv = netdev_priv(dev);
6025 
6026 	if (!priv->hw_cap_support) {
6027 		seq_printf(seq, "DMA HW features not supported\n");
6028 		return 0;
6029 	}
6030 
6031 	seq_printf(seq, "==============================\n");
6032 	seq_printf(seq, "\tDMA HW features\n");
6033 	seq_printf(seq, "==============================\n");
6034 
6035 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6036 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6037 	seq_printf(seq, "\t1000 Mbps: %s\n",
6038 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6039 	seq_printf(seq, "\tHalf duplex: %s\n",
6040 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6041 	seq_printf(seq, "\tHash Filter: %s\n",
6042 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
6043 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6044 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
6045 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6046 		   (priv->dma_cap.pcs) ? "Y" : "N");
6047 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6048 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6049 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6050 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6051 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6052 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6053 	seq_printf(seq, "\tRMON module: %s\n",
6054 		   (priv->dma_cap.rmon) ? "Y" : "N");
6055 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6056 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6057 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6058 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6059 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6060 		   (priv->dma_cap.eee) ? "Y" : "N");
6061 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6062 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6063 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6064 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6065 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6066 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6067 	} else {
6068 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6069 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6070 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6071 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6072 	}
6073 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6074 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6075 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6076 		   priv->dma_cap.number_rx_channel);
6077 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6078 		   priv->dma_cap.number_tx_channel);
6079 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6080 		   priv->dma_cap.number_rx_queues);
6081 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6082 		   priv->dma_cap.number_tx_queues);
6083 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6084 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6085 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6086 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6087 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6088 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6089 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6090 		   priv->dma_cap.pps_out_num);
6091 	seq_printf(seq, "\tSafety Features: %s\n",
6092 		   priv->dma_cap.asp ? "Y" : "N");
6093 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6094 		   priv->dma_cap.frpsel ? "Y" : "N");
6095 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6096 		   priv->dma_cap.addr64);
6097 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6098 		   priv->dma_cap.rssen ? "Y" : "N");
6099 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6100 		   priv->dma_cap.vlhash ? "Y" : "N");
6101 	seq_printf(seq, "\tSplit Header: %s\n",
6102 		   priv->dma_cap.sphen ? "Y" : "N");
6103 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6104 		   priv->dma_cap.vlins ? "Y" : "N");
6105 	seq_printf(seq, "\tDouble VLAN: %s\n",
6106 		   priv->dma_cap.dvlan ? "Y" : "N");
6107 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6108 		   priv->dma_cap.l3l4fnum);
6109 	seq_printf(seq, "\tARP Offloading: %s\n",
6110 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6111 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6112 		   priv->dma_cap.estsel ? "Y" : "N");
6113 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6114 		   priv->dma_cap.fpesel ? "Y" : "N");
6115 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6116 		   priv->dma_cap.tbssel ? "Y" : "N");
6117 	return 0;
6118 }
6119 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6120 
6121 /* Use network device events to rename debugfs file entries.
6122  */
6123 static int stmmac_device_event(struct notifier_block *unused,
6124 			       unsigned long event, void *ptr)
6125 {
6126 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6127 	struct stmmac_priv *priv = netdev_priv(dev);
6128 
6129 	if (dev->netdev_ops != &stmmac_netdev_ops)
6130 		goto done;
6131 
6132 	switch (event) {
6133 	case NETDEV_CHANGENAME:
6134 		if (priv->dbgfs_dir)
6135 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6136 							 priv->dbgfs_dir,
6137 							 stmmac_fs_dir,
6138 							 dev->name);
6139 		break;
6140 	}
6141 done:
6142 	return NOTIFY_DONE;
6143 }
6144 
6145 static struct notifier_block stmmac_notifier = {
6146 	.notifier_call = stmmac_device_event,
6147 };
6148 
6149 static void stmmac_init_fs(struct net_device *dev)
6150 {
6151 	struct stmmac_priv *priv = netdev_priv(dev);
6152 
6153 	rtnl_lock();
6154 
6155 	/* Create per netdev entries */
6156 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6157 
6158 	/* Entry to report DMA RX/TX rings */
6159 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6160 			    &stmmac_rings_status_fops);
6161 
6162 	/* Entry to report the DMA HW features */
6163 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6164 			    &stmmac_dma_cap_fops);
6165 
6166 	rtnl_unlock();
6167 }
6168 
6169 static void stmmac_exit_fs(struct net_device *dev)
6170 {
6171 	struct stmmac_priv *priv = netdev_priv(dev);
6172 
6173 	debugfs_remove_recursive(priv->dbgfs_dir);
6174 }
6175 #endif /* CONFIG_DEBUG_FS */
6176 
6177 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6178 {
6179 	unsigned char *data = (unsigned char *)&vid_le;
6180 	unsigned char data_byte = 0;
6181 	u32 crc = ~0x0;
6182 	u32 temp = 0;
6183 	int i, bits;
6184 
6185 	bits = get_bitmask_order(VLAN_VID_MASK);
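	/* Bit-serial CRC-32 (reflected polynomial 0xedb88320) over the
	 * 12 valid VID bits, processed LSB first.
	 */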
6186 	for (i = 0; i < bits; i++) {
6187 		if ((i % 8) == 0)
6188 			data_byte = data[i / 8];
6189 
6190 		temp = ((crc & 1) ^ data_byte) & 1;
6191 		crc >>= 1;
6192 		data_byte >>= 1;
6193 
6194 		if (temp)
6195 			crc ^= 0xedb88320;
6196 	}
6197 
6198 	return crc;
6199 }
6200 
6201 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6202 {
6203 	u32 crc, hash = 0;
6204 	__le16 pmatch = 0;
6205 	int count = 0;
6206 	u16 vid = 0;
6207 
6208 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
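		/* Each active VID sets one of 16 hash bits, selected by the
		 * top 4 bits of the bit-reversed, inverted CRC-32 of the VID.
		 */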
6209 		__le16 vid_le = cpu_to_le16(vid);
6210 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6211 		hash |= (1 << crc);
6212 		count++;
6213 	}
6214 
6215 	if (!priv->dma_cap.vlhash) {
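		/* Without VLAN hash filtering, fall back to a single
		 * perfect-match entry; VID 0 always passes, so more than
		 * two active VIDs cannot be supported.
		 */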
6216 		if (count > 2) /* VID = 0 always passes filter */
6217 			return -EOPNOTSUPP;
6218 
6219 		pmatch = cpu_to_le16(vid);
6220 		hash = 0;
6221 	}
6222 
6223 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6224 }
6225 
6226 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6227 {
6228 	struct stmmac_priv *priv = netdev_priv(ndev);
6229 	bool is_double = false;
6230 	int ret;
6231 
6232 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6233 		is_double = true;
6234 
6235 	set_bit(vid, priv->active_vlans);
6236 	ret = stmmac_vlan_update(priv, is_double);
6237 	if (ret) {
6238 		clear_bit(vid, priv->active_vlans);
6239 		return ret;
6240 	}
6241 
6242 	if (priv->hw->num_vlan) {
6243 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6244 		if (ret)
6245 			return ret;
6246 	}
6247 
6248 	return 0;
6249 }
6250 
6251 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6252 {
6253 	struct stmmac_priv *priv = netdev_priv(ndev);
6254 	bool is_double = false;
6255 	int ret;
6256 
6257 	ret = pm_runtime_get_sync(priv->device);
6258 	if (ret < 0) {
6259 		pm_runtime_put_noidle(priv->device);
6260 		return ret;
6261 	}
6262 
6263 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6264 		is_double = true;
6265 
6266 	clear_bit(vid, priv->active_vlans);
6267 
6268 	if (priv->hw->num_vlan) {
6269 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6270 		if (ret)
6271 			goto del_vlan_error;
6272 	}
6273 
6274 	ret = stmmac_vlan_update(priv, is_double);
6275 
6276 del_vlan_error:
6277 	pm_runtime_put(priv->device);
6278 
6279 	return ret;
6280 }
6281 
6282 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6283 {
6284 	struct stmmac_priv *priv = netdev_priv(dev);
6285 
6286 	switch (bpf->command) {
6287 	case XDP_SETUP_PROG:
6288 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6289 	case XDP_SETUP_XSK_POOL:
6290 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6291 					     bpf->xsk.queue_id);
6292 	default:
6293 		return -EOPNOTSUPP;
6294 	}
6295 }
6296 
6297 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6298 			   struct xdp_frame **frames, u32 flags)
6299 {
6300 	struct stmmac_priv *priv = netdev_priv(dev);
6301 	int cpu = smp_processor_id();
6302 	struct netdev_queue *nq;
6303 	int i, nxmit = 0;
6304 	int queue;
6305 
6306 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6307 		return -ENETDOWN;
6308 
6309 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6310 		return -EINVAL;
6311 
6312 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6313 	nq = netdev_get_tx_queue(priv->dev, queue);
6314 
6315 	__netif_tx_lock(nq, cpu);
6316 	/* Avoids TX time-out as we are sharing with slow path */
6317 	txq_trans_cond_update(nq);
6318 
6319 	for (i = 0; i < num_frames; i++) {
6320 		int res;
6321 
6322 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6323 		if (res == STMMAC_XDP_CONSUMED)
6324 			break;
6325 
6326 		nxmit++;
6327 	}
6328 
6329 	if (flags & XDP_XMIT_FLUSH) {
6330 		stmmac_flush_tx_descriptors(priv, queue);
6331 		stmmac_tx_timer_arm(priv, queue);
6332 	}
6333 
6334 	__netif_tx_unlock(nq);
6335 
6336 	return nxmit;
6337 }
6338 
6339 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6340 {
6341 	struct stmmac_channel *ch = &priv->channel[queue];
6342 	unsigned long flags;
6343 
6344 	spin_lock_irqsave(&ch->lock, flags);
6345 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6346 	spin_unlock_irqrestore(&ch->lock, flags);
6347 
6348 	stmmac_stop_rx_dma(priv, queue);
6349 	__free_dma_rx_desc_resources(priv, queue);
6350 }
6351 
6352 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6353 {
6354 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6355 	struct stmmac_channel *ch = &priv->channel[queue];
6356 	unsigned long flags;
6357 	u32 buf_size;
6358 	int ret;
6359 
6360 	ret = __alloc_dma_rx_desc_resources(priv, queue);
6361 	if (ret) {
6362 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6363 		return;
6364 	}
6365 
6366 	ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
6367 	if (ret) {
6368 		__free_dma_rx_desc_resources(priv, queue);
6369 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6370 		return;
6371 	}
6372 
6373 	stmmac_clear_rx_descriptors(priv, queue);
6374 
6375 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6376 			    rx_q->dma_rx_phy, rx_q->queue_index);
6377 
6378 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6379 			     sizeof(struct dma_desc));
6380 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6381 			       rx_q->rx_tail_addr, rx_q->queue_index);
6382 
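	/* Match the DMA buffer size to the XSK pool frame size when an
	 * XDP zero-copy pool is attached, otherwise use the default
	 * driver buffer size.
	 */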
6383 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6384 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6385 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6386 				      buf_size,
6387 				      rx_q->queue_index);
6388 	} else {
6389 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6390 				      priv->dma_buf_sz,
6391 				      rx_q->queue_index);
6392 	}
6393 
6394 	stmmac_start_rx_dma(priv, queue);
6395 
6396 	spin_lock_irqsave(&ch->lock, flags);
6397 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6398 	spin_unlock_irqrestore(&ch->lock, flags);
6399 }
6400 
6401 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6402 {
6403 	struct stmmac_channel *ch = &priv->channel[queue];
6404 	unsigned long flags;
6405 
6406 	spin_lock_irqsave(&ch->lock, flags);
6407 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6408 	spin_unlock_irqrestore(&ch->lock, flags);
6409 
6410 	stmmac_stop_tx_dma(priv, queue);
6411 	__free_dma_tx_desc_resources(priv, queue);
6412 }
6413 
6414 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6415 {
6416 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6417 	struct stmmac_channel *ch = &priv->channel[queue];
6418 	unsigned long flags;
6419 	int ret;
6420 
6421 	ret = __alloc_dma_tx_desc_resources(priv, queue);
6422 	if (ret) {
6423 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6424 		return;
6425 	}
6426 
6427 	ret = __init_dma_tx_desc_rings(priv, queue);
6428 	if (ret) {
6429 		__free_dma_tx_desc_resources(priv, queue);
6430 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6431 		return;
6432 	}
6433 
6434 	stmmac_clear_tx_descriptors(priv, queue);
6435 
6436 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6437 			    tx_q->dma_tx_phy, tx_q->queue_index);
6438 
6439 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6440 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6441 
6442 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6443 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6444 			       tx_q->tx_tail_addr, tx_q->queue_index);
6445 
6446 	stmmac_start_tx_dma(priv, queue);
6447 
6448 	spin_lock_irqsave(&ch->lock, flags);
6449 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6450 	spin_unlock_irqrestore(&ch->lock, flags);
6451 }
6452 
6453 void stmmac_xdp_release(struct net_device *dev)
6454 {
6455 	struct stmmac_priv *priv = netdev_priv(dev);
6456 	u32 chan;
6457 
6458 	/* Disable NAPI process */
6459 	stmmac_disable_all_queues(priv);
6460 
6461 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6462 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6463 
6464 	/* Free the IRQ lines */
6465 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6466 
6467 	/* Stop TX/RX DMA channels */
6468 	stmmac_stop_all_dma(priv);
6469 
6470 	/* Release and free the Rx/Tx resources */
6471 	free_dma_desc_resources(priv);
6472 
6473 	/* Disable the MAC Rx/Tx */
6474 	stmmac_mac_set(priv, priv->ioaddr, false);
6475 
6476 	/* set trans_start so we don't get spurious
6477 	 * watchdogs during reset
6478 	 */
6479 	netif_trans_update(dev);
6480 	netif_carrier_off(dev);
6481 }
6482 
6483 int stmmac_xdp_open(struct net_device *dev)
6484 {
6485 	struct stmmac_priv *priv = netdev_priv(dev);
6486 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6487 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6488 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6489 	struct stmmac_rx_queue *rx_q;
6490 	struct stmmac_tx_queue *tx_q;
6491 	u32 buf_size;
6492 	bool sph_en;
6493 	u32 chan;
6494 	int ret;
6495 
6496 	ret = alloc_dma_desc_resources(priv);
6497 	if (ret < 0) {
6498 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6499 			   __func__);
6500 		goto dma_desc_error;
6501 	}
6502 
6503 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
6504 	if (ret < 0) {
6505 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6506 			   __func__);
6507 		goto init_error;
6508 	}
6509 
6510 	/* DMA CSR Channel configuration */
6511 	for (chan = 0; chan < dma_csr_ch; chan++)
6512 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6513 
6514 	/* Adjust Split header */
6515 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6516 
6517 	/* DMA RX Channel Configuration */
6518 	for (chan = 0; chan < rx_cnt; chan++) {
6519 		rx_q = &priv->rx_queue[chan];
6520 
6521 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6522 				    rx_q->dma_rx_phy, chan);
6523 
6524 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6525 				     (rx_q->buf_alloc_num *
6526 				      sizeof(struct dma_desc));
6527 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6528 				       rx_q->rx_tail_addr, chan);
6529 
6530 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6531 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6532 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6533 					      buf_size,
6534 					      rx_q->queue_index);
6535 		} else {
6536 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6537 					      priv->dma_buf_sz,
6538 					      rx_q->queue_index);
6539 		}
6540 
6541 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6542 	}
6543 
6544 	/* DMA TX Channel Configuration */
6545 	for (chan = 0; chan < tx_cnt; chan++) {
6546 		tx_q = &priv->tx_queue[chan];
6547 
6548 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6549 				    tx_q->dma_tx_phy, chan);
6550 
6551 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6552 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6553 				       tx_q->tx_tail_addr, chan);
6554 
6555 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6556 		tx_q->txtimer.function = stmmac_tx_timer;
6557 	}
6558 
6559 	/* Enable the MAC Rx/Tx */
6560 	stmmac_mac_set(priv, priv->ioaddr, true);
6561 
6562 	/* Start Rx & Tx DMA Channels */
6563 	stmmac_start_all_dma(priv);
6564 
6565 	ret = stmmac_request_irq(dev);
6566 	if (ret)
6567 		goto irq_error;
6568 
6569 	/* Enable NAPI process */
6570 	stmmac_enable_all_queues(priv);
6571 	netif_carrier_on(dev);
6572 	netif_tx_start_all_queues(dev);
6573 
6574 	return 0;
6575 
6576 irq_error:
6577 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6578 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6579 
6580 	stmmac_hw_teardown(dev);
6581 init_error:
6582 	free_dma_desc_resources(priv);
6583 dma_desc_error:
6584 	return ret;
6585 }
6586 
6587 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6588 {
6589 	struct stmmac_priv *priv = netdev_priv(dev);
6590 	struct stmmac_rx_queue *rx_q;
6591 	struct stmmac_tx_queue *tx_q;
6592 	struct stmmac_channel *ch;
6593 
6594 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6595 	    !netif_carrier_ok(priv->dev))
6596 		return -ENETDOWN;
6597 
6598 	if (!stmmac_xdp_is_enabled(priv))
6599 		return -ENXIO;
6600 
6601 	if (queue >= priv->plat->rx_queues_to_use ||
6602 	    queue >= priv->plat->tx_queues_to_use)
6603 		return -EINVAL;
6604 
6605 	rx_q = &priv->rx_queue[queue];
6606 	tx_q = &priv->tx_queue[queue];
6607 	ch = &priv->channel[queue];
6608 
6609 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6610 		return -ENXIO;
6611 
6612 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6613 		/* EQoS does not have per-DMA channel SW interrupt,
6614 		 * so we schedule the RX/TX NAPI straight away.
6615 		 */
6616 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6617 			__napi_schedule(&ch->rxtx_napi);
6618 	}
6619 
6620 	return 0;
6621 }
6622 
6623 static const struct net_device_ops stmmac_netdev_ops = {
6624 	.ndo_open = stmmac_open,
6625 	.ndo_start_xmit = stmmac_xmit,
6626 	.ndo_stop = stmmac_release,
6627 	.ndo_change_mtu = stmmac_change_mtu,
6628 	.ndo_fix_features = stmmac_fix_features,
6629 	.ndo_set_features = stmmac_set_features,
6630 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6631 	.ndo_tx_timeout = stmmac_tx_timeout,
6632 	.ndo_eth_ioctl = stmmac_ioctl,
6633 	.ndo_setup_tc = stmmac_setup_tc,
6634 	.ndo_select_queue = stmmac_select_queue,
6635 #ifdef CONFIG_NET_POLL_CONTROLLER
6636 	.ndo_poll_controller = stmmac_poll_controller,
6637 #endif
6638 	.ndo_set_mac_address = stmmac_set_mac_address,
6639 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6640 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6641 	.ndo_bpf = stmmac_bpf,
6642 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6643 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6644 };
6645 
6646 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6647 {
6648 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6649 		return;
6650 	if (test_bit(STMMAC_DOWN, &priv->state))
6651 		return;
6652 
6653 	netdev_err(priv->dev, "Reset adapter.\n");
6654 
6655 	rtnl_lock();
6656 	netif_trans_update(priv->dev);
6657 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6658 		usleep_range(1000, 2000);
6659 
6660 	set_bit(STMMAC_DOWN, &priv->state);
6661 	dev_close(priv->dev);
6662 	dev_open(priv->dev, NULL);
6663 	clear_bit(STMMAC_DOWN, &priv->state);
6664 	clear_bit(STMMAC_RESETING, &priv->state);
6665 	rtnl_unlock();
6666 }
6667 
6668 static void stmmac_service_task(struct work_struct *work)
6669 {
6670 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6671 			service_task);
6672 
6673 	stmmac_reset_subtask(priv);
6674 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6675 }
6676 
6677 /**
6678  *  stmmac_hw_init - Init the MAC device
6679  *  @priv: driver private structure
6680  *  Description: this function is to configure the MAC device according to
6681  *  some platform parameters or the HW capability register. It prepares the
6682  *  driver to use either ring or chain modes and to setup either enhanced or
6683  *  normal descriptors.
6684  */
6685 static int stmmac_hw_init(struct stmmac_priv *priv)
6686 {
6687 	int ret;
6688 
6689 	/* dwmac-sun8i only works in chain mode */
6690 	if (priv->plat->has_sun8i)
6691 		chain_mode = 1;
6692 	priv->chain_mode = chain_mode;
6693 
6694 	/* Initialize HW Interface */
6695 	ret = stmmac_hwif_init(priv);
6696 	if (ret)
6697 		return ret;
6698 
6699 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
6700 	priv->hw_cap_support = stmmac_get_hw_features(priv);
6701 	if (priv->hw_cap_support) {
6702 		dev_info(priv->device, "DMA HW capability register supported\n");
6703 
6704 		/* We can override some gmac/dma configuration fields
6705 		 * (e.g. enh_desc, tx_coe) that are passed through the
6706 		 * platform with the values from the HW capability
6707 		 * register (if supported).
6708 		 */
6709 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
6710 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6711 				!priv->plat->use_phy_wol;
6712 		priv->hw->pmt = priv->plat->pmt;
6713 		if (priv->dma_cap.hash_tb_sz) {
6714 			priv->hw->multicast_filter_bins =
6715 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
6716 			priv->hw->mcast_bits_log2 =
6717 					ilog2(priv->hw->multicast_filter_bins);
6718 		}
6719 
6720 		/* TXCOE doesn't work in thresh DMA mode */
6721 		if (priv->plat->force_thresh_dma_mode)
6722 			priv->plat->tx_coe = 0;
6723 		else
6724 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
6725 
6726 		/* In case of GMAC4 rx_coe is from HW cap register. */
6727 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
6728 
6729 		if (priv->dma_cap.rx_coe_type2)
6730 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6731 		else if (priv->dma_cap.rx_coe_type1)
6732 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6733 
6734 	} else {
6735 		dev_info(priv->device, "No HW DMA feature register supported\n");
6736 	}
6737 
6738 	if (priv->plat->rx_coe) {
6739 		priv->hw->rx_csum = priv->plat->rx_coe;
6740 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6741 		if (priv->synopsys_id < DWMAC_CORE_4_00)
6742 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6743 	}
6744 	if (priv->plat->tx_coe)
6745 		dev_info(priv->device, "TX Checksum insertion supported\n");
6746 
6747 	if (priv->plat->pmt) {
6748 		dev_info(priv->device, "Wake-Up On Lan supported\n");
6749 		device_set_wakeup_capable(priv->device, 1);
6750 	}
6751 
6752 	if (priv->dma_cap.tsoen)
6753 		dev_info(priv->device, "TSO supported\n");
6754 
6755 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6756 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6757 
6758 	/* Run HW quirks, if any */
6759 	if (priv->hwif_quirks) {
6760 		ret = priv->hwif_quirks(priv);
6761 		if (ret)
6762 			return ret;
6763 	}
6764 
6765 	/* Rx Watchdog is available in the cores newer than 3.40.
6766 	 * In some cases, for example on buggy HW, this feature
6767 	 * has to be disabled; this can be done by passing the
6768 	 * riwt_off field from the platform.
6769 	 */
6770 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6771 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6772 		priv->use_riwt = 1;
6773 		dev_info(priv->device,
6774 			 "Enable RX Mitigation via HW Watchdog Timer\n");
6775 	}
6776 
6777 	return 0;
6778 }
6779 
6780 static void stmmac_napi_add(struct net_device *dev)
6781 {
6782 	struct stmmac_priv *priv = netdev_priv(dev);
6783 	u32 queue, maxq;
6784 
6785 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6786 
6787 	for (queue = 0; queue < maxq; queue++) {
6788 		struct stmmac_channel *ch = &priv->channel[queue];
6789 
6790 		ch->priv_data = priv;
6791 		ch->index = queue;
6792 		spin_lock_init(&ch->lock);
6793 
6794 		if (queue < priv->plat->rx_queues_to_use) {
6795 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6796 				       NAPI_POLL_WEIGHT);
6797 		}
6798 		if (queue < priv->plat->tx_queues_to_use) {
6799 			netif_tx_napi_add(dev, &ch->tx_napi,
6800 					  stmmac_napi_poll_tx,
6801 					  NAPI_POLL_WEIGHT);
6802 		}
6803 		if (queue < priv->plat->rx_queues_to_use &&
6804 		    queue < priv->plat->tx_queues_to_use) {
6805 			netif_napi_add(dev, &ch->rxtx_napi,
6806 				       stmmac_napi_poll_rxtx,
6807 				       NAPI_POLL_WEIGHT);
6808 		}
6809 	}
6810 }
6811 
6812 static void stmmac_napi_del(struct net_device *dev)
6813 {
6814 	struct stmmac_priv *priv = netdev_priv(dev);
6815 	u32 queue, maxq;
6816 
6817 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6818 
6819 	for (queue = 0; queue < maxq; queue++) {
6820 		struct stmmac_channel *ch = &priv->channel[queue];
6821 
6822 		if (queue < priv->plat->rx_queues_to_use)
6823 			netif_napi_del(&ch->rx_napi);
6824 		if (queue < priv->plat->tx_queues_to_use)
6825 			netif_napi_del(&ch->tx_napi);
6826 		if (queue < priv->plat->rx_queues_to_use &&
6827 		    queue < priv->plat->tx_queues_to_use) {
6828 			netif_napi_del(&ch->rxtx_napi);
6829 		}
6830 	}
6831 }
6832 
6833 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6834 {
6835 	struct stmmac_priv *priv = netdev_priv(dev);
6836 	int ret = 0;
6837 
6838 	if (netif_running(dev))
6839 		stmmac_release(dev);
6840 
6841 	stmmac_napi_del(dev);
6842 
6843 	priv->plat->rx_queues_to_use = rx_cnt;
6844 	priv->plat->tx_queues_to_use = tx_cnt;
6845 
6846 	stmmac_napi_add(dev);
6847 
6848 	if (netif_running(dev))
6849 		ret = stmmac_open(dev);
6850 
6851 	return ret;
6852 }
6853 
6854 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6855 {
6856 	struct stmmac_priv *priv = netdev_priv(dev);
6857 	int ret = 0;
6858 
6859 	if (netif_running(dev))
6860 		stmmac_release(dev);
6861 
6862 	priv->dma_rx_size = rx_size;
6863 	priv->dma_tx_size = tx_size;
6864 
6865 	if (netif_running(dev))
6866 		ret = stmmac_open(dev);
6867 
6868 	return ret;
6869 }
6870 
6871 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
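/* FPE link-partner handshake task: for up to 20 x 500 ms iterations keep
 * sending verify mPackets until both the local and link-partner state
 * machines reach ENTERING_ON, then program FPE in the hardware.
 */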
6872 static void stmmac_fpe_lp_task(struct work_struct *work)
6873 {
6874 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6875 						fpe_task);
6876 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6877 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6878 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6879 	bool *hs_enable = &fpe_cfg->hs_enable;
6880 	bool *enable = &fpe_cfg->enable;
6881 	int retries = 20;
6882 
6883 	while (retries-- > 0) {
6884 		/* Bail out immediately if FPE handshake is OFF */
6885 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6886 			break;
6887 
6888 		if (*lo_state == FPE_STATE_ENTERING_ON &&
6889 		    *lp_state == FPE_STATE_ENTERING_ON) {
6890 			stmmac_fpe_configure(priv, priv->ioaddr,
6891 					     priv->plat->tx_queues_to_use,
6892 					     priv->plat->rx_queues_to_use,
6893 					     *enable);
6894 
6895 			netdev_info(priv->dev, "configured FPE\n");
6896 
6897 			*lo_state = FPE_STATE_ON;
6898 			*lp_state = FPE_STATE_ON;
6899 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
6900 			break;
6901 		}
6902 
6903 		if ((*lo_state == FPE_STATE_CAPABLE ||
6904 		     *lo_state == FPE_STATE_ENTERING_ON) &&
6905 		     *lp_state != FPE_STATE_ON) {
6906 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
6907 				    *lo_state, *lp_state);
6908 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6909 						MPACKET_VERIFY);
6910 		}
6911 		/* Sleep then retry */
6912 		msleep(500);
6913 	}
6914 
6915 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
6916 }
6917 
6918 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
6919 {
6920 	if (priv->plat->fpe_cfg->hs_enable != enable) {
6921 		if (enable) {
6922 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6923 						MPACKET_VERIFY);
6924 		} else {
6925 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
6926 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
6927 		}
6928 
6929 		priv->plat->fpe_cfg->hs_enable = enable;
6930 	}
6931 }
6932 
6933 /**
6934  * stmmac_dvr_probe
6935  * @device: device pointer
6936  * @plat_dat: platform data pointer
6937  * @res: stmmac resource pointer
6938  * Description: this is the main probe function used to
6939  * call alloc_etherdev and allocate the priv structure.
6940  * Return:
6941  * returns 0 on success, otherwise errno.
6942  */
6943 int stmmac_dvr_probe(struct device *device,
6944 		     struct plat_stmmacenet_data *plat_dat,
6945 		     struct stmmac_resources *res)
6946 {
6947 	struct net_device *ndev = NULL;
6948 	struct stmmac_priv *priv;
6949 	u32 rxq;
6950 	int i, ret = 0;
6951 
6952 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
6953 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
6954 	if (!ndev)
6955 		return -ENOMEM;
6956 
6957 	SET_NETDEV_DEV(ndev, device);
6958 
6959 	priv = netdev_priv(ndev);
6960 	priv->device = device;
6961 	priv->dev = ndev;
6962 
6963 	stmmac_set_ethtool_ops(ndev);
6964 	priv->pause = pause;
6965 	priv->plat = plat_dat;
6966 	priv->ioaddr = res->addr;
6967 	priv->dev->base_addr = (unsigned long)res->addr;
6968 	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
6969 
6970 	priv->dev->irq = res->irq;
6971 	priv->wol_irq = res->wol_irq;
6972 	priv->lpi_irq = res->lpi_irq;
6973 	priv->sfty_ce_irq = res->sfty_ce_irq;
6974 	priv->sfty_ue_irq = res->sfty_ue_irq;
6975 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
6976 		priv->rx_irq[i] = res->rx_irq[i];
6977 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
6978 		priv->tx_irq[i] = res->tx_irq[i];
6979 
6980 	if (!is_zero_ether_addr(res->mac))
6981 		eth_hw_addr_set(priv->dev, res->mac);
6982 
6983 	dev_set_drvdata(device, priv->dev);
6984 
6985 	/* Verify driver arguments */
6986 	stmmac_verify_args();
6987 
6988 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
6989 	if (!priv->af_xdp_zc_qps)
6990 		return -ENOMEM;
6991 
6992 	/* Allocate workqueue */
6993 	priv->wq = create_singlethread_workqueue("stmmac_wq");
6994 	if (!priv->wq) {
6995 		dev_err(priv->device, "failed to create workqueue\n");
6996 		return -ENOMEM;
6997 	}
6998 
6999 	INIT_WORK(&priv->service_task, stmmac_service_task);
7000 
7001 	/* Initialize Link Partner FPE workqueue */
7002 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7003 
7004 	/* Override with kernel parameters if supplied XXX CRS XXX
7005 	 * this needs to have multiple instances
7006 	 */
7007 	if ((phyaddr >= 0) && (phyaddr <= 31))
7008 		priv->plat->phy_addr = phyaddr;
7009 
7010 	if (priv->plat->stmmac_rst) {
7011 		ret = reset_control_assert(priv->plat->stmmac_rst);
7012 		reset_control_deassert(priv->plat->stmmac_rst);
7013 		/* Some reset controllers have only a reset callback instead
7014 		 * of an assert + deassert callback pair.
7015 		 */
7016 		if (ret == -ENOTSUPP)
7017 			reset_control_reset(priv->plat->stmmac_rst);
7018 	}
7019 
7020 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7021 	if (ret == -ENOTSUPP)
7022 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7023 			ERR_PTR(ret));
7024 
7025 	/* Init MAC and get the capabilities */
7026 	ret = stmmac_hw_init(priv);
7027 	if (ret)
7028 		goto error_hw_init;
7029 
7030 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7031 	 */
7032 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7033 		priv->plat->dma_cfg->dche = false;
7034 
7035 	stmmac_check_ether_addr(priv);
7036 
7037 	ndev->netdev_ops = &stmmac_netdev_ops;
7038 
7039 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7040 			    NETIF_F_RXCSUM;
7041 
7042 	ret = stmmac_tc_init(priv, priv);
7043 	if (!ret) {
7044 		ndev->hw_features |= NETIF_F_HW_TC;
7045 	}
7046 
7047 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
7048 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7049 		if (priv->plat->has_gmac4)
7050 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7051 		priv->tso = true;
7052 		dev_info(priv->device, "TSO feature enabled\n");
7053 	}
7054 
7055 	if (priv->dma_cap.sphen) {
7056 		ndev->hw_features |= NETIF_F_GRO;
7057 		priv->sph_cap = true;
7058 		priv->sph = priv->sph_cap;
7059 		dev_info(priv->device, "SPH feature enabled\n");
7060 	}
7061 
7062 	/* The IP register MAC_HW_Feature1[ADDR64] only defines 32/40/64
7063 	 * bit widths, but some SoCs support other widths: e.g. i.MX8MP
7064 	 * supports 34 bits, which maps to 40 bits in MAC_HW_Feature1[ADDR64].
7065 	 * So overwrite dma_cap.addr64 according to the real HW design.
7066 	 */
7067 	if (priv->plat->addr64)
7068 		priv->dma_cap.addr64 = priv->plat->addr64;
7069 
7070 	if (priv->dma_cap.addr64) {
7071 		ret = dma_set_mask_and_coherent(device,
7072 				DMA_BIT_MASK(priv->dma_cap.addr64));
7073 		if (!ret) {
7074 			dev_info(priv->device, "Using %d bits DMA width\n",
7075 				 priv->dma_cap.addr64);
7076 
7077 			/*
7078 			 * If more than 32 bits can be addressed, make sure to
7079 			 * enable enhanced addressing mode.
7080 			 */
7081 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7082 				priv->plat->dma_cfg->eame = true;
7083 		} else {
7084 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7085 			if (ret) {
7086 				dev_err(priv->device, "Failed to set DMA Mask\n");
7087 				goto error_hw_init;
7088 			}
7089 
7090 			priv->dma_cap.addr64 = 32;
7091 		}
7092 	}
7093 
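	/* Turn on all detected hardware features by default and allow DMA
	 * to/from high memory.
	 */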
7094 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7095 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7096 #ifdef STMMAC_VLAN_TAG_USED
7097 	/* Both mac100 and gmac support receive VLAN tag detection */
7098 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7099 	if (priv->dma_cap.vlhash) {
7100 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7101 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7102 	}
7103 	if (priv->dma_cap.vlins) {
7104 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7105 		if (priv->dma_cap.dvlan)
7106 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7107 	}
7108 #endif
7109 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7110 
7111 	/* Initialize RSS */
7112 	rxq = priv->plat->rx_queues_to_use;
7113 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7114 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7115 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7116 
7117 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7118 		ndev->features |= NETIF_F_RXHASH;
7119 
7120 	/* MTU range: 46 - hw-specific max */
7121 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7122 	if (priv->plat->has_xgmac)
7123 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7124 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7125 		ndev->max_mtu = JUMBO_LEN;
7126 	else
7127 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu is larger than
	 * ndev->max_mtu, or if plat->maxmtu is smaller than ndev->min_mtu,
	 * which is an invalid range.
	 */
7131 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7132 	    (priv->plat->maxmtu >= ndev->min_mtu))
7133 		ndev->max_mtu = priv->plat->maxmtu;
7134 	else if (priv->plat->maxmtu < ndev->min_mtu)
7135 		dev_warn(priv->device,
			 "%s: warning: maxmtu has an invalid value (%d)\n",
7137 			 __func__, priv->plat->maxmtu);
7138 
7139 	if (flow_ctrl)
7140 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7141 
7142 	/* Setup channels NAPI */
7143 	stmmac_napi_add(ndev);
7144 
7145 	mutex_init(&priv->lock);
7146 
	/* If a specific clk_csr value is passed from the platform, the CSR
	 * Clock Range selection cannot be changed at run-time and is fixed.
	 * Otherwise, the driver tries to set the MDC clock dynamically
	 * according to the actual csr clock input.
	 */
7153 	if (priv->plat->clk_csr >= 0)
7154 		priv->clk_csr = priv->plat->clk_csr;
7155 	else
7156 		stmmac_clk_csr_set(priv);
7157 
7158 	stmmac_check_pcs_mode(priv);
7159 
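	/* Keep the device runtime-active for the rest of probe; the
	 * reference is dropped by the pm_runtime_put() below.
	 */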
7160 	pm_runtime_get_noresume(device);
7161 	pm_runtime_set_active(device);
7162 	if (!pm_runtime_enabled(device))
7163 		pm_runtime_enable(device);
7164 
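	/* Register the MDIO bus unless a TBI/RTBI PCS is used */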
7165 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7166 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7167 		/* MDIO bus Registration */
7168 		ret = stmmac_mdio_register(ndev);
7169 		if (ret < 0) {
7170 			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed\n",
7172 				__func__, priv->plat->bus_id);
7173 			goto error_mdio_register;
7174 		}
7175 	}
7176 
7177 	if (priv->plat->speed_mode_2500)
7178 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7179 
7180 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7181 		ret = stmmac_xpcs_setup(priv->mii);
7182 		if (ret)
7183 			goto error_xpcs_setup;
7184 	}
7185 
7186 	ret = stmmac_phy_setup(priv);
7187 	if (ret) {
7188 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7189 		goto error_phy_setup;
7190 	}
7191 
7192 	ret = register_netdev(ndev);
7193 	if (ret) {
7194 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7195 			__func__, ret);
7196 		goto error_netdev_register;
7197 	}
7198 
7199 	if (priv->plat->serdes_powerup) {
7200 		ret = priv->plat->serdes_powerup(ndev,
7201 						 priv->plat->bsp_priv);
7202 
7203 		if (ret < 0)
7204 			goto error_serdes_powerup;
7205 	}
7206 
7207 #ifdef CONFIG_DEBUG_FS
7208 	stmmac_init_fs(ndev);
7209 #endif
7210 
7211 	if (priv->plat->dump_debug_regs)
7212 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7213 
7214 	/* Let pm_runtime_put() disable the clocks.
7215 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7216 	 */
7217 	pm_runtime_put(device);
7218 
7219 	return ret;
7220 
7221 error_serdes_powerup:
7222 	unregister_netdev(ndev);
7223 error_netdev_register:
7224 	phylink_destroy(priv->phylink);
7225 error_xpcs_setup:
7226 error_phy_setup:
7227 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7228 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7229 		stmmac_mdio_unregister(ndev);
7230 error_mdio_register:
7231 	stmmac_napi_del(ndev);
7232 error_hw_init:
7233 	destroy_workqueue(priv->wq);
7234 	bitmap_free(priv->af_xdp_zc_qps);
7235 
7236 	return ret;
7237 }
7238 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7239 
7240 /**
 * stmmac_dvr_remove - remove the driver
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
7245  */
7246 int stmmac_dvr_remove(struct device *dev)
7247 {
7248 	struct net_device *ndev = dev_get_drvdata(dev);
7249 	struct stmmac_priv *priv = netdev_priv(ndev);
7250 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7252 
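	/* Quiesce the hardware: stop all DMA channels and disable the MAC
	 * before unregistering the net device.
	 */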
7253 	stmmac_stop_all_dma(priv);
7254 	stmmac_mac_set(priv, priv->ioaddr, false);
7255 	netif_carrier_off(ndev);
7256 	unregister_netdev(ndev);
7257 
	/* Serdes power down needs to happen after the VLAN filter is
	 * deleted, which is triggered by unregister_netdev().
	 */
7261 	if (priv->plat->serdes_powerdown)
7262 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7263 
7264 #ifdef CONFIG_DEBUG_FS
7265 	stmmac_exit_fs(ndev);
7266 #endif
7267 	phylink_destroy(priv->phylink);
7268 	if (priv->plat->stmmac_rst)
7269 		reset_control_assert(priv->plat->stmmac_rst);
7270 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7271 	pm_runtime_put(dev);
7272 	pm_runtime_disable(dev);
7273 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7274 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7275 		stmmac_mdio_unregister(ndev);
7276 	destroy_workqueue(priv->wq);
7277 	mutex_destroy(&priv->lock);
7278 	bitmap_free(priv->af_xdp_zc_qps);
7279 
7280 	return 0;
7281 }
7282 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7283 
7284 /**
7285  * stmmac_suspend - suspend callback
7286  * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL) and clean up driver resources.
7290  */
7291 int stmmac_suspend(struct device *dev)
7292 {
7293 	struct net_device *ndev = dev_get_drvdata(dev);
7294 	struct stmmac_priv *priv = netdev_priv(ndev);
7295 	u32 chan;
7296 
7297 	if (!ndev || !netif_running(ndev))
7298 		return 0;
7299 
7300 	mutex_lock(&priv->lock);
7301 
7302 	netif_device_detach(ndev);
7303 
7304 	stmmac_disable_all_queues(priv);
7305 
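	/* Cancel the per-queue TX coalescing timers */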
7306 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7307 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
7308 
7309 	if (priv->eee_enabled) {
7310 		priv->tx_path_in_lpi_mode = false;
7311 		del_timer_sync(&priv->eee_ctrl_timer);
7312 	}
7313 
7314 	/* Stop TX/RX DMA */
7315 	stmmac_stop_all_dma(priv);
7316 
7317 	if (priv->plat->serdes_powerdown)
7318 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7319 
7320 	/* Enable Power down mode by programming the PMT regs */
7321 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7322 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7323 		priv->irq_wake = 1;
7324 	} else {
7325 		stmmac_mac_set(priv, priv->ioaddr, false);
7326 		pinctrl_pm_select_sleep_state(priv->device);
7327 	}
7328 
7329 	mutex_unlock(&priv->lock);
7330 
7331 	rtnl_lock();
7332 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7333 		phylink_suspend(priv->phylink, true);
7334 	} else {
7335 		if (device_may_wakeup(priv->device))
7336 			phylink_speed_down(priv->phylink, false);
7337 		phylink_suspend(priv->phylink, false);
7338 	}
7339 	rtnl_unlock();
7340 
7341 	if (priv->dma_cap.fpesel) {
7342 		/* Disable FPE */
7343 		stmmac_fpe_configure(priv, priv->ioaddr,
7344 				     priv->plat->tx_queues_to_use,
7345 				     priv->plat->rx_queues_to_use, false);
7346 
7347 		stmmac_fpe_handshake(priv, false);
7348 		stmmac_fpe_stop_wq(priv);
7349 	}
7350 
7351 	priv->speed = SPEED_UNKNOWN;
7352 	return 0;
7353 }
7354 EXPORT_SYMBOL_GPL(stmmac_suspend);
7355 
7356 /**
7357  * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
7359  */
7360 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7361 {
7362 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7363 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7364 	u32 queue;
7365 
7366 	for (queue = 0; queue < rx_cnt; queue++) {
7367 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
7368 
7369 		rx_q->cur_rx = 0;
7370 		rx_q->dirty_rx = 0;
7371 	}
7372 
7373 	for (queue = 0; queue < tx_cnt; queue++) {
7374 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7375 
7376 		tx_q->cur_tx = 0;
7377 		tx_q->dirty_tx = 0;
7378 		tx_q->mss = 0;
7379 
7380 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7381 	}
7382 }
7383 
7384 /**
7385  * stmmac_resume - resume callback
7386  * @dev: device pointer
 * Description: on resume this function is invoked to set up the DMA and CORE
 * in a usable state.
7389  */
7390 int stmmac_resume(struct device *dev)
7391 {
7392 	struct net_device *ndev = dev_get_drvdata(dev);
7393 	struct stmmac_priv *priv = netdev_priv(ndev);
7394 	int ret;
7395 
7396 	if (!netif_running(ndev))
7397 		return 0;
7398 
	/* The Power Down bit in the PM register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Anyway, it's better to clear this bit manually because it can
	 * cause problems while resuming from another device
	 * (e.g. the serial console).
	 */
7405 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7406 		mutex_lock(&priv->lock);
7407 		stmmac_pmt(priv, priv->hw, 0);
7408 		mutex_unlock(&priv->lock);
7409 		priv->irq_wake = 0;
7410 	} else {
7411 		pinctrl_pm_select_default_state(priv->device);
7412 		/* reset the phy so that it's ready */
7413 		if (priv->mii)
7414 			stmmac_mdio_reset(priv->mii);
7415 	}
7416 
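	/* Power the SerDes back up after the suspend-time power down */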
7417 	if (priv->plat->serdes_powerup) {
7418 		ret = priv->plat->serdes_powerup(ndev,
7419 						 priv->plat->bsp_priv);
7420 
7421 		if (ret < 0)
7422 			return ret;
7423 	}
7424 
7425 	rtnl_lock();
7426 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7427 		phylink_resume(priv->phylink);
7428 	} else {
7429 		phylink_resume(priv->phylink);
7430 		if (device_may_wakeup(priv->device))
7431 			phylink_speed_up(priv->phylink);
7432 	}
7433 	rtnl_unlock();
7434 
7435 	rtnl_lock();
7436 	mutex_lock(&priv->lock);
7437 
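	/* Bring the rings back to a known clean state before re-programming
	 * the hardware.
	 */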
7438 	stmmac_reset_queues_param(priv);
7439 
7440 	stmmac_free_tx_skbufs(priv);
7441 	stmmac_clear_descriptors(priv);
7442 
7443 	stmmac_hw_setup(ndev, false);
7444 	stmmac_init_coalesce(priv);
7445 	stmmac_set_rx_mode(ndev);
7446 
7447 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7448 
7449 	stmmac_enable_all_queues(priv);
7450 
7451 	mutex_unlock(&priv->lock);
7452 	rtnl_unlock();
7453 
7454 	netif_device_attach(ndev);
7455 
7456 	return 0;
7457 }
7458 EXPORT_SYMBOL_GPL(stmmac_resume);
7459 
7460 #ifndef MODULE
7461 static int __init stmmac_cmdline_opt(char *str)
7462 {
7463 	char *opt;
7464 
7465 	if (!str || !*str)
7466 		return -EINVAL;
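	/* Parse a comma-separated list of option:value pairs */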
7467 	while ((opt = strsep(&str, ",")) != NULL) {
7468 		if (!strncmp(opt, "debug:", 6)) {
7469 			if (kstrtoint(opt + 6, 0, &debug))
7470 				goto err;
7471 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7472 			if (kstrtoint(opt + 8, 0, &phyaddr))
7473 				goto err;
7474 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7475 			if (kstrtoint(opt + 7, 0, &buf_sz))
7476 				goto err;
7477 		} else if (!strncmp(opt, "tc:", 3)) {
7478 			if (kstrtoint(opt + 3, 0, &tc))
7479 				goto err;
7480 		} else if (!strncmp(opt, "watchdog:", 9)) {
7481 			if (kstrtoint(opt + 9, 0, &watchdog))
7482 				goto err;
7483 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7484 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7485 				goto err;
7486 		} else if (!strncmp(opt, "pause:", 6)) {
7487 			if (kstrtoint(opt + 6, 0, &pause))
7488 				goto err;
7489 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7490 			if (kstrtoint(opt + 10, 0, &eee_timer))
7491 				goto err;
7492 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7493 			if (kstrtoint(opt + 11, 0, &chain_mode))
7494 				goto err;
7495 		}
7496 	}
7497 	return 0;
7498 
7499 err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7501 	return -EINVAL;
7502 }
7503 
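/* Example boot argument (illustrative values):
 *   stmmaceth=debug:16,phyaddr:1,watchdog:10000
 */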
7504 __setup("stmmaceth=", stmmac_cmdline_opt);
7505 #endif /* MODULE */
7506 
7507 static int __init stmmac_init(void)
7508 {
7509 #ifdef CONFIG_DEBUG_FS
7510 	/* Create debugfs main directory if it doesn't exist yet */
7511 	if (!stmmac_fs_dir)
7512 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7513 	register_netdevice_notifier(&stmmac_notifier);
7514 #endif
7515 
7516 	return 0;
7517 }
7518 
7519 static void __exit stmmac_exit(void)
7520 {
7521 #ifdef CONFIG_DEBUG_FS
7522 	unregister_netdevice_notifier(&stmmac_notifier);
7523 	debugfs_remove_recursive(stmmac_fs_dir);
7524 #endif
7525 }
7526 
7527 module_init(stmmac_init)
7528 module_exit(stmmac_exit)
7529 
7530 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7531 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7532 MODULE_LICENSE("GPL");
7533