1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoid non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and set a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Viceversa the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we can not estimate the proper divider as it is not known
311 	 * the frequency of clk_csr_i. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function is to exit and disable EEE in case of
433  * LPI state is true. This is called by the xmit.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then MAC Transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enable the LPI state and start related
468  *  timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using PCS we cannot dial with the phy registers at this stage
475 	 * so we do not support extra feature like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function will read timestamp from the descriptor & pass it to stack.
534  * and also perform some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read received packet's timestamp from the descriptor
577  * and pass it to stack. It also perform some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing(TX)
614  *  and incoming(RX) packets time stamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1 any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtain the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * Will be rerun after resuming from suspend, case in which the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate default added value:
858 	 * formula is :
859 	 * addend = (2^32)/freq_div_ratio;
860 	 * where, freq_div_ratio = 1e9ns/sec_inc
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 					MPACKET_VERIFY);
969 	} else {
970 		*lo_state = FPE_STATE_OFF;
971 		*lp_state = FPE_STATE_OFF;
972 	}
973 }
974 
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 				 unsigned int mode, phy_interface_t interface)
977 {
978 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979 
980 	stmmac_mac_set(priv, priv->ioaddr, false);
981 	priv->eee_active = false;
982 	priv->tx_lpi_enabled = false;
983 	priv->eee_enabled = stmmac_eee_init(priv);
984 	stmmac_set_eee_pls(priv, priv->hw, false);
985 
986 	if (priv->dma_cap.fpesel)
987 		stmmac_fpe_link_state_handle(priv, false);
988 }
989 
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 			       struct phy_device *phy,
992 			       unsigned int mode, phy_interface_t interface,
993 			       int speed, int duplex,
994 			       bool tx_pause, bool rx_pause)
995 {
996 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 	u32 old_ctrl, ctrl;
998 
999 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 	    priv->plat->serdes_powerup)
1001 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002 
1003 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005 
1006 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 		switch (speed) {
1008 		case SPEED_10000:
1009 			ctrl |= priv->hw->link.xgmii.speed10000;
1010 			break;
1011 		case SPEED_5000:
1012 			ctrl |= priv->hw->link.xgmii.speed5000;
1013 			break;
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.xgmii.speed2500;
1016 			break;
1017 		default:
1018 			return;
1019 		}
1020 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 		switch (speed) {
1022 		case SPEED_100000:
1023 			ctrl |= priv->hw->link.xlgmii.speed100000;
1024 			break;
1025 		case SPEED_50000:
1026 			ctrl |= priv->hw->link.xlgmii.speed50000;
1027 			break;
1028 		case SPEED_40000:
1029 			ctrl |= priv->hw->link.xlgmii.speed40000;
1030 			break;
1031 		case SPEED_25000:
1032 			ctrl |= priv->hw->link.xlgmii.speed25000;
1033 			break;
1034 		case SPEED_10000:
1035 			ctrl |= priv->hw->link.xgmii.speed10000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.speed2500;
1039 			break;
1040 		case SPEED_1000:
1041 			ctrl |= priv->hw->link.speed1000;
1042 			break;
1043 		default:
1044 			return;
1045 		}
1046 	} else {
1047 		switch (speed) {
1048 		case SPEED_2500:
1049 			ctrl |= priv->hw->link.speed2500;
1050 			break;
1051 		case SPEED_1000:
1052 			ctrl |= priv->hw->link.speed1000;
1053 			break;
1054 		case SPEED_100:
1055 			ctrl |= priv->hw->link.speed100;
1056 			break;
1057 		case SPEED_10:
1058 			ctrl |= priv->hw->link.speed10;
1059 			break;
1060 		default:
1061 			return;
1062 		}
1063 	}
1064 
1065 	priv->speed = speed;
1066 
1067 	if (priv->plat->fix_mac_speed)
1068 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069 
1070 	if (!duplex)
1071 		ctrl &= ~priv->hw->link.duplex;
1072 	else
1073 		ctrl |= priv->hw->link.duplex;
1074 
1075 	/* Flow Control operation */
1076 	if (rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_AUTO;
1078 	else if (rx_pause && !tx_pause)
1079 		priv->flow_ctrl = FLOW_RX;
1080 	else if (!rx_pause && tx_pause)
1081 		priv->flow_ctrl = FLOW_TX;
1082 	else
1083 		priv->flow_ctrl = FLOW_OFF;
1084 
1085 	stmmac_mac_flow_ctrl(priv, duplex);
1086 
1087 	if (ctrl != old_ctrl)
1088 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 
1090 	stmmac_mac_set(priv, priv->ioaddr, true);
1091 	if (phy && priv->dma_cap.eee) {
1092 		priv->eee_active =
1093 			phy_init_eee(phy, !(priv->plat->flags &
1094 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 		priv->eee_enabled = stmmac_eee_init(priv);
1096 		priv->tx_lpi_enabled = priv->eee_enabled;
1097 		stmmac_set_eee_pls(priv, priv->hw, true);
1098 	}
1099 
1100 	if (priv->dma_cap.fpesel)
1101 		stmmac_fpe_link_state_handle(priv, true);
1102 
1103 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 		stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106 
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this is to verify if the HW supports the PCS.
1118  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1119  * configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
1166 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1167 	 * manually parse it
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static int stmmac_phy_setup(struct stmmac_priv *priv)
1202 {
1203 	struct stmmac_mdio_bus_data *mdio_bus_data;
1204 	int mode = priv->plat->phy_interface;
1205 	struct fwnode_handle *fwnode;
1206 	struct phylink *phylink;
1207 	int max_speed;
1208 
1209 	priv->phylink_config.dev = &priv->dev->dev;
1210 	priv->phylink_config.type = PHYLINK_NETDEV;
1211 	priv->phylink_config.mac_managed_pm = true;
1212 
1213 	mdio_bus_data = priv->plat->mdio_bus_data;
1214 	if (mdio_bus_data)
1215 		priv->phylink_config.ovr_an_inband =
1216 			mdio_bus_data->xpcs_an_inband;
1217 
1218 	/* Set the platform/firmware specified interface mode. Note, phylink
1219 	 * deals with the PHY interface mode, not the MAC interface mode.
1220 	 */
1221 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1222 
1223 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1224 	if (priv->hw->xpcs)
1225 		xpcs_get_interfaces(priv->hw->xpcs,
1226 				    priv->phylink_config.supported_interfaces);
1227 
1228 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1229 						MAC_10 | MAC_100 | MAC_1000;
1230 
1231 	/* Get the MAC specific capabilities */
1232 	stmmac_mac_phylink_get_caps(priv);
1233 
1234 	max_speed = priv->plat->max_speed;
1235 	if (max_speed)
1236 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1237 
1238 	fwnode = priv->plat->port_node;
1239 	if (!fwnode)
1240 		fwnode = dev_fwnode(priv->device);
1241 
1242 	phylink = phylink_create(&priv->phylink_config, fwnode,
1243 				 mode, &stmmac_phylink_mac_ops);
1244 	if (IS_ERR(phylink))
1245 		return PTR_ERR(phylink);
1246 
1247 	priv->phylink = phylink;
1248 	return 0;
1249 }
1250 
1251 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1252 				    struct stmmac_dma_conf *dma_conf)
1253 {
1254 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1255 	unsigned int desc_size;
1256 	void *head_rx;
1257 	u32 queue;
1258 
1259 	/* Display RX rings */
1260 	for (queue = 0; queue < rx_cnt; queue++) {
1261 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1262 
1263 		pr_info("\tRX Queue %u rings\n", queue);
1264 
1265 		if (priv->extend_desc) {
1266 			head_rx = (void *)rx_q->dma_erx;
1267 			desc_size = sizeof(struct dma_extended_desc);
1268 		} else {
1269 			head_rx = (void *)rx_q->dma_rx;
1270 			desc_size = sizeof(struct dma_desc);
1271 		}
1272 
1273 		/* Display RX ring */
1274 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1275 				    rx_q->dma_rx_phy, desc_size);
1276 	}
1277 }
1278 
1279 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1280 				    struct stmmac_dma_conf *dma_conf)
1281 {
1282 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1283 	unsigned int desc_size;
1284 	void *head_tx;
1285 	u32 queue;
1286 
1287 	/* Display TX rings */
1288 	for (queue = 0; queue < tx_cnt; queue++) {
1289 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1290 
1291 		pr_info("\tTX Queue %d rings\n", queue);
1292 
1293 		if (priv->extend_desc) {
1294 			head_tx = (void *)tx_q->dma_etx;
1295 			desc_size = sizeof(struct dma_extended_desc);
1296 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1297 			head_tx = (void *)tx_q->dma_entx;
1298 			desc_size = sizeof(struct dma_edesc);
1299 		} else {
1300 			head_tx = (void *)tx_q->dma_tx;
1301 			desc_size = sizeof(struct dma_desc);
1302 		}
1303 
1304 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1305 				    tx_q->dma_tx_phy, desc_size);
1306 	}
1307 }
1308 
1309 static void stmmac_display_rings(struct stmmac_priv *priv,
1310 				 struct stmmac_dma_conf *dma_conf)
1311 {
1312 	/* Display RX ring */
1313 	stmmac_display_rx_rings(priv, dma_conf);
1314 
1315 	/* Display TX ring */
1316 	stmmac_display_tx_rings(priv, dma_conf);
1317 }
1318 
1319 static int stmmac_set_bfsize(int mtu, int bufsize)
1320 {
1321 	int ret = bufsize;
1322 
1323 	if (mtu >= BUF_SIZE_8KiB)
1324 		ret = BUF_SIZE_16KiB;
1325 	else if (mtu >= BUF_SIZE_4KiB)
1326 		ret = BUF_SIZE_8KiB;
1327 	else if (mtu >= BUF_SIZE_2KiB)
1328 		ret = BUF_SIZE_4KiB;
1329 	else if (mtu > DEFAULT_BUFSIZE)
1330 		ret = BUF_SIZE_2KiB;
1331 	else
1332 		ret = DEFAULT_BUFSIZE;
1333 
1334 	return ret;
1335 }
1336 
1337 /**
1338  * stmmac_clear_rx_descriptors - clear RX descriptors
1339  * @priv: driver private structure
1340  * @dma_conf: structure to take the dma data
1341  * @queue: RX queue index
1342  * Description: this function is called to clear the RX descriptors
1343  * in case of both basic and extended descriptors are used.
1344  */
1345 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1346 					struct stmmac_dma_conf *dma_conf,
1347 					u32 queue)
1348 {
1349 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1350 	int i;
1351 
1352 	/* Clear the RX descriptors */
1353 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1354 		if (priv->extend_desc)
1355 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1356 					priv->use_riwt, priv->mode,
1357 					(i == dma_conf->dma_rx_size - 1),
1358 					dma_conf->dma_buf_sz);
1359 		else
1360 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1361 					priv->use_riwt, priv->mode,
1362 					(i == dma_conf->dma_rx_size - 1),
1363 					dma_conf->dma_buf_sz);
1364 }
1365 
1366 /**
1367  * stmmac_clear_tx_descriptors - clear tx descriptors
1368  * @priv: driver private structure
1369  * @dma_conf: structure to take the dma data
1370  * @queue: TX queue index.
1371  * Description: this function is called to clear the TX descriptors
1372  * in case of both basic and extended descriptors are used.
1373  */
1374 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1375 					struct stmmac_dma_conf *dma_conf,
1376 					u32 queue)
1377 {
1378 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1379 	int i;
1380 
1381 	/* Clear the TX descriptors */
1382 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1383 		int last = (i == (dma_conf->dma_tx_size - 1));
1384 		struct dma_desc *p;
1385 
1386 		if (priv->extend_desc)
1387 			p = &tx_q->dma_etx[i].basic;
1388 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1389 			p = &tx_q->dma_entx[i].basic;
1390 		else
1391 			p = &tx_q->dma_tx[i];
1392 
1393 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1394 	}
1395 }
1396 
1397 /**
1398  * stmmac_clear_descriptors - clear descriptors
1399  * @priv: driver private structure
1400  * @dma_conf: structure to take the dma data
1401  * Description: this function is called to clear the TX and RX descriptors
1402  * in case of both basic and extended descriptors are used.
1403  */
1404 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1405 				     struct stmmac_dma_conf *dma_conf)
1406 {
1407 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1408 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1409 	u32 queue;
1410 
1411 	/* Clear the RX descriptors */
1412 	for (queue = 0; queue < rx_queue_cnt; queue++)
1413 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1414 
1415 	/* Clear the TX descriptors */
1416 	for (queue = 0; queue < tx_queue_cnt; queue++)
1417 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1418 }
1419 
1420 /**
1421  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1422  * @priv: driver private structure
1423  * @dma_conf: structure to take the dma data
1424  * @p: descriptor pointer
1425  * @i: descriptor index
1426  * @flags: gfp flag
1427  * @queue: RX queue index
1428  * Description: this function is called to allocate a receive buffer, perform
1429  * the DMA mapping and init the descriptor.
1430  */
1431 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1432 				  struct stmmac_dma_conf *dma_conf,
1433 				  struct dma_desc *p,
1434 				  int i, gfp_t flags, u32 queue)
1435 {
1436 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1437 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1438 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1439 
1440 	if (priv->dma_cap.host_dma_width <= 32)
1441 		gfp |= GFP_DMA32;
1442 
1443 	if (!buf->page) {
1444 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1445 		if (!buf->page)
1446 			return -ENOMEM;
1447 		buf->page_offset = stmmac_rx_offset(priv);
1448 	}
1449 
1450 	if (priv->sph && !buf->sec_page) {
1451 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1452 		if (!buf->sec_page)
1453 			return -ENOMEM;
1454 
1455 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1456 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1457 	} else {
1458 		buf->sec_page = NULL;
1459 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1460 	}
1461 
1462 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1463 
1464 	stmmac_set_desc_addr(priv, p, buf->addr);
1465 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1466 		stmmac_init_desc3(priv, p);
1467 
1468 	return 0;
1469 }
1470 
1471 /**
1472  * stmmac_free_rx_buffer - free RX dma buffers
1473  * @priv: private structure
1474  * @rx_q: RX queue
1475  * @i: buffer index.
1476  */
1477 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1478 				  struct stmmac_rx_queue *rx_q,
1479 				  int i)
1480 {
1481 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1482 
1483 	if (buf->page)
1484 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1485 	buf->page = NULL;
1486 
1487 	if (buf->sec_page)
1488 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1489 	buf->sec_page = NULL;
1490 }
1491 
1492 /**
1493  * stmmac_free_tx_buffer - free RX dma buffers
1494  * @priv: private structure
1495  * @dma_conf: structure to take the dma data
1496  * @queue: RX queue index
1497  * @i: buffer index.
1498  */
1499 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1500 				  struct stmmac_dma_conf *dma_conf,
1501 				  u32 queue, int i)
1502 {
1503 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1504 
1505 	if (tx_q->tx_skbuff_dma[i].buf &&
1506 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1507 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1508 			dma_unmap_page(priv->device,
1509 				       tx_q->tx_skbuff_dma[i].buf,
1510 				       tx_q->tx_skbuff_dma[i].len,
1511 				       DMA_TO_DEVICE);
1512 		else
1513 			dma_unmap_single(priv->device,
1514 					 tx_q->tx_skbuff_dma[i].buf,
1515 					 tx_q->tx_skbuff_dma[i].len,
1516 					 DMA_TO_DEVICE);
1517 	}
1518 
1519 	if (tx_q->xdpf[i] &&
1520 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1521 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1522 		xdp_return_frame(tx_q->xdpf[i]);
1523 		tx_q->xdpf[i] = NULL;
1524 	}
1525 
1526 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1527 		tx_q->xsk_frames_done++;
1528 
1529 	if (tx_q->tx_skbuff[i] &&
1530 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1531 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1532 		tx_q->tx_skbuff[i] = NULL;
1533 	}
1534 
1535 	tx_q->tx_skbuff_dma[i].buf = 0;
1536 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1537 }
1538 
1539 /**
1540  * dma_free_rx_skbufs - free RX dma buffers
1541  * @priv: private structure
1542  * @dma_conf: structure to take the dma data
1543  * @queue: RX queue index
1544  */
1545 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1546 			       struct stmmac_dma_conf *dma_conf,
1547 			       u32 queue)
1548 {
1549 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1550 	int i;
1551 
1552 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1553 		stmmac_free_rx_buffer(priv, rx_q, i);
1554 }
1555 
1556 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1557 				   struct stmmac_dma_conf *dma_conf,
1558 				   u32 queue, gfp_t flags)
1559 {
1560 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1561 	int i;
1562 
1563 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1564 		struct dma_desc *p;
1565 		int ret;
1566 
1567 		if (priv->extend_desc)
1568 			p = &((rx_q->dma_erx + i)->basic);
1569 		else
1570 			p = rx_q->dma_rx + i;
1571 
1572 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1573 					     queue);
1574 		if (ret)
1575 			return ret;
1576 
1577 		rx_q->buf_alloc_num++;
1578 	}
1579 
1580 	return 0;
1581 }
1582 
1583 /**
1584  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1585  * @priv: private structure
1586  * @dma_conf: structure to take the dma data
1587  * @queue: RX queue index
1588  */
1589 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1590 				struct stmmac_dma_conf *dma_conf,
1591 				u32 queue)
1592 {
1593 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1594 	int i;
1595 
1596 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1597 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1598 
1599 		if (!buf->xdp)
1600 			continue;
1601 
1602 		xsk_buff_free(buf->xdp);
1603 		buf->xdp = NULL;
1604 	}
1605 }
1606 
1607 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1608 				      struct stmmac_dma_conf *dma_conf,
1609 				      u32 queue)
1610 {
1611 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1612 	int i;
1613 
1614 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1615 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1616 	 * use this macro to make sure no size violations.
1617 	 */
1618 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1619 
1620 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1621 		struct stmmac_rx_buffer *buf;
1622 		dma_addr_t dma_addr;
1623 		struct dma_desc *p;
1624 
1625 		if (priv->extend_desc)
1626 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1627 		else
1628 			p = rx_q->dma_rx + i;
1629 
1630 		buf = &rx_q->buf_pool[i];
1631 
1632 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1633 		if (!buf->xdp)
1634 			return -ENOMEM;
1635 
1636 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1637 		stmmac_set_desc_addr(priv, p, dma_addr);
1638 		rx_q->buf_alloc_num++;
1639 	}
1640 
1641 	return 0;
1642 }
1643 
1644 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1645 {
1646 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1647 		return NULL;
1648 
1649 	return xsk_get_pool_from_qid(priv->dev, queue);
1650 }
1651 
1652 /**
1653  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1654  * @priv: driver private structure
1655  * @dma_conf: structure to take the dma data
1656  * @queue: RX queue index
1657  * @flags: gfp flag.
1658  * Description: this function initializes the DMA RX descriptors
1659  * and allocates the socket buffers. It supports the chained and ring
1660  * modes.
1661  */
1662 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1663 				    struct stmmac_dma_conf *dma_conf,
1664 				    u32 queue, gfp_t flags)
1665 {
1666 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1667 	int ret;
1668 
1669 	netif_dbg(priv, probe, priv->dev,
1670 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1671 		  (u32)rx_q->dma_rx_phy);
1672 
1673 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1674 
1675 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1676 
1677 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1678 
1679 	if (rx_q->xsk_pool) {
1680 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1681 						   MEM_TYPE_XSK_BUFF_POOL,
1682 						   NULL));
1683 		netdev_info(priv->dev,
1684 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1685 			    rx_q->queue_index);
1686 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1687 	} else {
1688 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1689 						   MEM_TYPE_PAGE_POOL,
1690 						   rx_q->page_pool));
1691 		netdev_info(priv->dev,
1692 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1693 			    rx_q->queue_index);
1694 	}
1695 
1696 	if (rx_q->xsk_pool) {
1697 		/* RX XDP ZC buffer pool may not be populated, e.g.
1698 		 * xdpsock TX-only.
1699 		 */
1700 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1701 	} else {
1702 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1703 		if (ret < 0)
1704 			return -ENOMEM;
1705 	}
1706 
1707 	/* Setup the chained descriptor addresses */
1708 	if (priv->mode == STMMAC_CHAIN_MODE) {
1709 		if (priv->extend_desc)
1710 			stmmac_mode_init(priv, rx_q->dma_erx,
1711 					 rx_q->dma_rx_phy,
1712 					 dma_conf->dma_rx_size, 1);
1713 		else
1714 			stmmac_mode_init(priv, rx_q->dma_rx,
1715 					 rx_q->dma_rx_phy,
1716 					 dma_conf->dma_rx_size, 0);
1717 	}
1718 
1719 	return 0;
1720 }
1721 
1722 static int init_dma_rx_desc_rings(struct net_device *dev,
1723 				  struct stmmac_dma_conf *dma_conf,
1724 				  gfp_t flags)
1725 {
1726 	struct stmmac_priv *priv = netdev_priv(dev);
1727 	u32 rx_count = priv->plat->rx_queues_to_use;
1728 	int queue;
1729 	int ret;
1730 
1731 	/* RX INITIALIZATION */
1732 	netif_dbg(priv, probe, priv->dev,
1733 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1734 
1735 	for (queue = 0; queue < rx_count; queue++) {
1736 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1737 		if (ret)
1738 			goto err_init_rx_buffers;
1739 	}
1740 
1741 	return 0;
1742 
1743 err_init_rx_buffers:
1744 	while (queue >= 0) {
1745 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1746 
1747 		if (rx_q->xsk_pool)
1748 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1749 		else
1750 			dma_free_rx_skbufs(priv, dma_conf, queue);
1751 
1752 		rx_q->buf_alloc_num = 0;
1753 		rx_q->xsk_pool = NULL;
1754 
1755 		queue--;
1756 	}
1757 
1758 	return ret;
1759 }
1760 
1761 /**
1762  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1763  * @priv: driver private structure
1764  * @dma_conf: structure to take the dma data
1765  * @queue: TX queue index
1766  * Description: this function initializes the DMA TX descriptors
1767  * and the per-descriptor bookkeeping. It supports the chained and ring
1768  * modes.
1769  */
1770 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1771 				    struct stmmac_dma_conf *dma_conf,
1772 				    u32 queue)
1773 {
1774 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1775 	int i;
1776 
1777 	netif_dbg(priv, probe, priv->dev,
1778 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1779 		  (u32)tx_q->dma_tx_phy);
1780 
1781 	/* Setup the chained descriptor addresses */
1782 	if (priv->mode == STMMAC_CHAIN_MODE) {
1783 		if (priv->extend_desc)
1784 			stmmac_mode_init(priv, tx_q->dma_etx,
1785 					 tx_q->dma_tx_phy,
1786 					 dma_conf->dma_tx_size, 1);
1787 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1788 			stmmac_mode_init(priv, tx_q->dma_tx,
1789 					 tx_q->dma_tx_phy,
1790 					 dma_conf->dma_tx_size, 0);
1791 	}
1792 
1793 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1794 
1795 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1796 		struct dma_desc *p;
1797 
1798 		if (priv->extend_desc)
1799 			p = &((tx_q->dma_etx + i)->basic);
1800 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1801 			p = &((tx_q->dma_entx + i)->basic);
1802 		else
1803 			p = tx_q->dma_tx + i;
1804 
1805 		stmmac_clear_desc(priv, p);
1806 
1807 		tx_q->tx_skbuff_dma[i].buf = 0;
1808 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1809 		tx_q->tx_skbuff_dma[i].len = 0;
1810 		tx_q->tx_skbuff_dma[i].last_segment = false;
1811 		tx_q->tx_skbuff[i] = NULL;
1812 	}
1813 
1814 	return 0;
1815 }
1816 
1817 static int init_dma_tx_desc_rings(struct net_device *dev,
1818 				  struct stmmac_dma_conf *dma_conf)
1819 {
1820 	struct stmmac_priv *priv = netdev_priv(dev);
1821 	u32 tx_queue_cnt;
1822 	u32 queue;
1823 
1824 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1825 
1826 	for (queue = 0; queue < tx_queue_cnt; queue++)
1827 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1828 
1829 	return 0;
1830 }
1831 
1832 /**
1833  * init_dma_desc_rings - init the RX/TX descriptor rings
1834  * @dev: net device structure
1835  * @dma_conf: structure to take the dma data
1836  * @flags: gfp flag.
1837  * Description: this function initializes the DMA RX/TX descriptors
1838  * and allocates the socket buffers. It supports the chained and ring
1839  * modes.
1840  */
1841 static int init_dma_desc_rings(struct net_device *dev,
1842 			       struct stmmac_dma_conf *dma_conf,
1843 			       gfp_t flags)
1844 {
1845 	struct stmmac_priv *priv = netdev_priv(dev);
1846 	int ret;
1847 
1848 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1849 	if (ret)
1850 		return ret;
1851 
1852 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1853 
1854 	stmmac_clear_descriptors(priv, dma_conf);
1855 
1856 	if (netif_msg_hw(priv))
1857 		stmmac_display_rings(priv, dma_conf);
1858 
1859 	return ret;
1860 }
1861 
1862 /**
1863  * dma_free_tx_skbufs - free TX dma buffers
1864  * @priv: private structure
1865  * @dma_conf: structure to take the dma data
1866  * @queue: TX queue index
1867  */
1868 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1869 			       struct stmmac_dma_conf *dma_conf,
1870 			       u32 queue)
1871 {
1872 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1873 	int i;
1874 
1875 	tx_q->xsk_frames_done = 0;
1876 
1877 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1878 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1879 
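	/* Report the XSK frames released in the loop above back to the pool
	 * as completed.
	 */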
1880 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1881 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1882 		tx_q->xsk_frames_done = 0;
1883 		tx_q->xsk_pool = NULL;
1884 	}
1885 }
1886 
1887 /**
1888  * stmmac_free_tx_skbufs - free TX skb buffers
1889  * @priv: private structure
1890  */
1891 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1892 {
1893 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1894 	u32 queue;
1895 
1896 	for (queue = 0; queue < tx_queue_cnt; queue++)
1897 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1898 }
1899 
1900 /**
1901  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1902  * @priv: private structure
1903  * @dma_conf: structure to take the dma data
1904  * @queue: RX queue index
1905  */
1906 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1907 					 struct stmmac_dma_conf *dma_conf,
1908 					 u32 queue)
1909 {
1910 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1911 
1912 	/* Release the DMA RX socket buffers */
1913 	if (rx_q->xsk_pool)
1914 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1915 	else
1916 		dma_free_rx_skbufs(priv, dma_conf, queue);
1917 
1918 	rx_q->buf_alloc_num = 0;
1919 	rx_q->xsk_pool = NULL;
1920 
1921 	/* Free DMA regions of consistent memory previously allocated */
1922 	if (!priv->extend_desc)
1923 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1924 				  sizeof(struct dma_desc),
1925 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1926 	else
1927 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1928 				  sizeof(struct dma_extended_desc),
1929 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1930 
1931 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1932 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1933 
1934 	kfree(rx_q->buf_pool);
1935 	if (rx_q->page_pool)
1936 		page_pool_destroy(rx_q->page_pool);
1937 }
1938 
1939 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1940 				       struct stmmac_dma_conf *dma_conf)
1941 {
1942 	u32 rx_count = priv->plat->rx_queues_to_use;
1943 	u32 queue;
1944 
1945 	/* Free RX queue resources */
1946 	for (queue = 0; queue < rx_count; queue++)
1947 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1948 }
1949 
1950 /**
1951  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1952  * @priv: private structure
1953  * @dma_conf: structure to take the dma data
1954  * @queue: TX queue index
1955  */
1956 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1957 					 struct stmmac_dma_conf *dma_conf,
1958 					 u32 queue)
1959 {
1960 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1961 	size_t size;
1962 	void *addr;
1963 
1964 	/* Release the DMA TX socket buffers */
1965 	dma_free_tx_skbufs(priv, dma_conf, queue);
1966 
1967 	if (priv->extend_desc) {
1968 		size = sizeof(struct dma_extended_desc);
1969 		addr = tx_q->dma_etx;
1970 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1971 		size = sizeof(struct dma_edesc);
1972 		addr = tx_q->dma_entx;
1973 	} else {
1974 		size = sizeof(struct dma_desc);
1975 		addr = tx_q->dma_tx;
1976 	}
1977 
1978 	size *= dma_conf->dma_tx_size;
1979 
1980 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1981 
1982 	kfree(tx_q->tx_skbuff_dma);
1983 	kfree(tx_q->tx_skbuff);
1984 }
1985 
1986 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1987 				       struct stmmac_dma_conf *dma_conf)
1988 {
1989 	u32 tx_count = priv->plat->tx_queues_to_use;
1990 	u32 queue;
1991 
1992 	/* Free TX queue resources */
1993 	for (queue = 0; queue < tx_count; queue++)
1994 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1995 }
1996 
1997 /**
1998  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1999  * @priv: private structure
2000  * @dma_conf: structure to take the dma data
2001  * @queue: RX queue index
2002  * Description: according to which descriptor type is in use (extended or
2003  * basic), this function allocates the RX resources for the given queue:
2004  * the page pool, the buffer pool, the descriptor ring and the XDP RX
2005  * queue info.
2006  */
2007 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2008 					 struct stmmac_dma_conf *dma_conf,
2009 					 u32 queue)
2010 {
2011 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2012 	struct stmmac_channel *ch = &priv->channel[queue];
2013 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2014 	struct page_pool_params pp_params = { 0 };
2015 	unsigned int num_pages;
2016 	unsigned int napi_id;
2017 	int ret;
2018 
2019 	rx_q->queue_index = queue;
2020 	rx_q->priv_data = priv;
2021 
2022 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2023 	pp_params.pool_size = dma_conf->dma_rx_size;
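	/* Use higher-order pages when the RX buffer size exceeds PAGE_SIZE,
	 * so that each RX buffer still fits in a single page pool allocation.
	 */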
2024 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2025 	pp_params.order = ilog2(num_pages);
2026 	pp_params.nid = dev_to_node(priv->device);
2027 	pp_params.dev = priv->device;
2028 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2029 	pp_params.offset = stmmac_rx_offset(priv);
2030 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2031 
2032 	rx_q->page_pool = page_pool_create(&pp_params);
2033 	if (IS_ERR(rx_q->page_pool)) {
2034 		ret = PTR_ERR(rx_q->page_pool);
2035 		rx_q->page_pool = NULL;
2036 		return ret;
2037 	}
2038 
2039 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2040 				 sizeof(*rx_q->buf_pool),
2041 				 GFP_KERNEL);
2042 	if (!rx_q->buf_pool)
2043 		return -ENOMEM;
2044 
2045 	if (priv->extend_desc) {
2046 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2047 						   dma_conf->dma_rx_size *
2048 						   sizeof(struct dma_extended_desc),
2049 						   &rx_q->dma_rx_phy,
2050 						   GFP_KERNEL);
2051 		if (!rx_q->dma_erx)
2052 			return -ENOMEM;
2053 
2054 	} else {
2055 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2056 						  dma_conf->dma_rx_size *
2057 						  sizeof(struct dma_desc),
2058 						  &rx_q->dma_rx_phy,
2059 						  GFP_KERNEL);
2060 		if (!rx_q->dma_rx)
2061 			return -ENOMEM;
2062 	}
2063 
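	/* AF_XDP zero-copy queues are serviced by the combined rx/tx NAPI,
	 * so register the xdp_rxq info against that NAPI id.
	 */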
2064 	if (stmmac_xdp_is_enabled(priv) &&
2065 	    test_bit(queue, priv->af_xdp_zc_qps))
2066 		napi_id = ch->rxtx_napi.napi_id;
2067 	else
2068 		napi_id = ch->rx_napi.napi_id;
2069 
2070 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2071 			       rx_q->queue_index,
2072 			       napi_id);
2073 	if (ret) {
2074 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2075 		return -EINVAL;
2076 	}
2077 
2078 	return 0;
2079 }
2080 
2081 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2082 				       struct stmmac_dma_conf *dma_conf)
2083 {
2084 	u32 rx_count = priv->plat->rx_queues_to_use;
2085 	u32 queue;
2086 	int ret;
2087 
2088 	/* RX queues buffers and DMA */
2089 	for (queue = 0; queue < rx_count; queue++) {
2090 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2091 		if (ret)
2092 			goto err_dma;
2093 	}
2094 
2095 	return 0;
2096 
2097 err_dma:
2098 	free_dma_rx_desc_resources(priv, dma_conf);
2099 
2100 	return ret;
2101 }
2102 
2103 /**
2104  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2105  * @priv: private structure
2106  * @dma_conf: structure to take the dma data
2107  * @queue: TX queue index
2108  * Description: according to which descriptor type is in use (extended,
2109  * enhanced or basic), this function allocates the TX resources for the
2110  * given queue: the tx_skbuff and tx_skbuff_dma arrays and the descriptor
2111  * ring.
2112  */
2113 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2114 					 struct stmmac_dma_conf *dma_conf,
2115 					 u32 queue)
2116 {
2117 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2118 	size_t size;
2119 	void *addr;
2120 
2121 	tx_q->queue_index = queue;
2122 	tx_q->priv_data = priv;
2123 
2124 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2125 				      sizeof(*tx_q->tx_skbuff_dma),
2126 				      GFP_KERNEL);
2127 	if (!tx_q->tx_skbuff_dma)
2128 		return -ENOMEM;
2129 
2130 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2131 				  sizeof(struct sk_buff *),
2132 				  GFP_KERNEL);
2133 	if (!tx_q->tx_skbuff)
2134 		return -ENOMEM;
2135 
2136 	if (priv->extend_desc)
2137 		size = sizeof(struct dma_extended_desc);
2138 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2139 		size = sizeof(struct dma_edesc);
2140 	else
2141 		size = sizeof(struct dma_desc);
2142 
2143 	size *= dma_conf->dma_tx_size;
2144 
2145 	addr = dma_alloc_coherent(priv->device, size,
2146 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2147 	if (!addr)
2148 		return -ENOMEM;
2149 
2150 	if (priv->extend_desc)
2151 		tx_q->dma_etx = addr;
2152 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 		tx_q->dma_entx = addr;
2154 	else
2155 		tx_q->dma_tx = addr;
2156 
2157 	return 0;
2158 }
2159 
2160 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2161 				       struct stmmac_dma_conf *dma_conf)
2162 {
2163 	u32 tx_count = priv->plat->tx_queues_to_use;
2164 	u32 queue;
2165 	int ret;
2166 
2167 	/* TX queues buffers and DMA */
2168 	for (queue = 0; queue < tx_count; queue++) {
2169 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2170 		if (ret)
2171 			goto err_dma;
2172 	}
2173 
2174 	return 0;
2175 
2176 err_dma:
2177 	free_dma_tx_desc_resources(priv, dma_conf);
2178 	return ret;
2179 }
2180 
2181 /**
2182  * alloc_dma_desc_resources - alloc TX/RX resources.
2183  * @priv: private structure
2184  * @dma_conf: structure to take the dma data
2185  * Description: according to which descriptor type is in use (extended or
2186  * basic), this function allocates the resources for the TX and RX paths.
2187  * In case of reception, for example, it pre-allocates the RX buffers in
2188  * order to allow a zero-copy mechanism.
2189  */
2190 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2191 				    struct stmmac_dma_conf *dma_conf)
2192 {
2193 	/* RX Allocation */
2194 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2195 
2196 	if (ret)
2197 		return ret;
2198 
2199 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2200 
2201 	return ret;
2202 }
2203 
2204 /**
2205  * free_dma_desc_resources - free dma desc resources
2206  * @priv: private structure
2207  * @dma_conf: structure to take the dma data
2208  */
2209 static void free_dma_desc_resources(struct stmmac_priv *priv,
2210 				    struct stmmac_dma_conf *dma_conf)
2211 {
2212 	/* Release the DMA TX socket buffers */
2213 	free_dma_tx_desc_resources(priv, dma_conf);
2214 
2215 	/* Release the DMA RX socket buffers later
2216 	 * to ensure all pending XDP_TX buffers are returned.
2217 	 */
2218 	free_dma_rx_desc_resources(priv, dma_conf);
2219 }
2220 
2221 /**
2222  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2223  *  @priv: driver private structure
2224  *  Description: It is used for enabling the rx queues in the MAC
2225  */
2226 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2227 {
2228 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2229 	int queue;
2230 	u8 mode;
2231 
2232 	for (queue = 0; queue < rx_queues_count; queue++) {
2233 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2234 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2235 	}
2236 }
2237 
2238 /**
2239  * stmmac_start_rx_dma - start RX DMA channel
2240  * @priv: driver private structure
2241  * @chan: RX channel index
2242  * Description:
2243  * This starts an RX DMA channel
2244  */
2245 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2246 {
2247 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2248 	stmmac_start_rx(priv, priv->ioaddr, chan);
2249 }
2250 
2251 /**
2252  * stmmac_start_tx_dma - start TX DMA channel
2253  * @priv: driver private structure
2254  * @chan: TX channel index
2255  * Description:
2256  * This starts a TX DMA channel
2257  */
2258 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2259 {
2260 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2261 	stmmac_start_tx(priv, priv->ioaddr, chan);
2262 }
2263 
2264 /**
2265  * stmmac_stop_rx_dma - stop RX DMA channel
2266  * @priv: driver private structure
2267  * @chan: RX channel index
2268  * Description:
2269  * This stops an RX DMA channel
2270  */
2271 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2272 {
2273 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2274 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2275 }
2276 
2277 /**
2278  * stmmac_stop_tx_dma - stop TX DMA channel
2279  * @priv: driver private structure
2280  * @chan: TX channel index
2281  * Description:
2282  * This stops a TX DMA channel
2283  */
2284 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2285 {
2286 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2287 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2288 }
2289 
2290 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2291 {
2292 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2293 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2294 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2295 	u32 chan;
2296 
2297 	for (chan = 0; chan < dma_csr_ch; chan++) {
2298 		struct stmmac_channel *ch = &priv->channel[chan];
2299 		unsigned long flags;
2300 
2301 		spin_lock_irqsave(&ch->lock, flags);
2302 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2303 		spin_unlock_irqrestore(&ch->lock, flags);
2304 	}
2305 }
2306 
2307 /**
2308  * stmmac_start_all_dma - start all RX and TX DMA channels
2309  * @priv: driver private structure
2310  * Description:
2311  * This starts all the RX and TX DMA channels
2312  */
2313 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2314 {
2315 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2316 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2317 	u32 chan = 0;
2318 
2319 	for (chan = 0; chan < rx_channels_count; chan++)
2320 		stmmac_start_rx_dma(priv, chan);
2321 
2322 	for (chan = 0; chan < tx_channels_count; chan++)
2323 		stmmac_start_tx_dma(priv, chan);
2324 }
2325 
2326 /**
2327  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2328  * @priv: driver private structure
2329  * Description:
2330  * This stops the RX and TX DMA channels
2331  */
2332 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2333 {
2334 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2335 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2336 	u32 chan = 0;
2337 
2338 	for (chan = 0; chan < rx_channels_count; chan++)
2339 		stmmac_stop_rx_dma(priv, chan);
2340 
2341 	for (chan = 0; chan < tx_channels_count; chan++)
2342 		stmmac_stop_tx_dma(priv, chan);
2343 }
2344 
2345 /**
2346  *  stmmac_dma_operation_mode - HW DMA operation mode
2347  *  @priv: driver private structure
2348  *  Description: it is used for configuring the DMA operation mode register in
2349  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2350  */
2351 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2352 {
2353 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2354 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2355 	int rxfifosz = priv->plat->rx_fifo_size;
2356 	int txfifosz = priv->plat->tx_fifo_size;
2357 	u32 txmode = 0;
2358 	u32 rxmode = 0;
2359 	u32 chan = 0;
2360 	u8 qmode = 0;
2361 
2362 	if (rxfifosz == 0)
2363 		rxfifosz = priv->dma_cap.rx_fifo_size;
2364 	if (txfifosz == 0)
2365 		txfifosz = priv->dma_cap.tx_fifo_size;
2366 
2367 	/* Adjust for real per queue fifo size */
2368 	rxfifosz /= rx_channels_count;
2369 	txfifosz /= tx_channels_count;
2370 
2371 	if (priv->plat->force_thresh_dma_mode) {
2372 		txmode = tc;
2373 		rxmode = tc;
2374 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2375 		/*
2376 		 * In case of GMAC, Store-and-Forward (SF) mode can be enabled
2377 		 * to perform the TX COE in HW. This depends on:
2378 		 * 1) TX COE being actually supported;
2379 		 * 2) there being no buggy Jumbo frame support
2380 		 *    that requires not inserting the csum in the TDES.
2381 		 */
2382 		txmode = SF_DMA_MODE;
2383 		rxmode = SF_DMA_MODE;
2384 		priv->xstats.threshold = SF_DMA_MODE;
2385 	} else {
2386 		txmode = tc;
2387 		rxmode = SF_DMA_MODE;
2388 	}
2389 
2390 	/* configure all channels */
2391 	for (chan = 0; chan < rx_channels_count; chan++) {
2392 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2393 		u32 buf_size;
2394 
2395 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2396 
2397 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2398 				rxfifosz, qmode);
2399 
2400 		if (rx_q->xsk_pool) {
2401 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2402 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2403 					      buf_size,
2404 					      chan);
2405 		} else {
2406 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2407 					      priv->dma_conf.dma_buf_sz,
2408 					      chan);
2409 		}
2410 	}
2411 
2412 	for (chan = 0; chan < tx_channels_count; chan++) {
2413 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2414 
2415 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2416 				txfifosz, qmode);
2417 	}
2418 }
2419 
2420 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2421 {
2422 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2423 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2424 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2425 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2426 	unsigned int entry = tx_q->cur_tx;
2427 	struct dma_desc *tx_desc = NULL;
2428 	struct xdp_desc xdp_desc;
2429 	bool work_done = true;
2430 	u32 tx_set_ic_bit = 0;
2431 
2432 	/* Avoids TX time-out as we are sharing with slow path */
2433 	txq_trans_cond_update(nq);
2434 
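	/* Never submit more frames than there are free TX descriptors */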
2435 	budget = min(budget, stmmac_tx_avail(priv, queue));
2436 
2437 	while (budget-- > 0) {
2438 		dma_addr_t dma_addr;
2439 		bool set_ic;
2440 
2441 		/* The ring is shared with the slow path, so stop XSK TX desc
2442 		 * submission when available TX descriptors drop below threshold.
2443 		 */
2444 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2445 		    !netif_carrier_ok(priv->dev)) {
2446 			work_done = false;
2447 			break;
2448 		}
2449 
2450 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2451 			break;
2452 
2453 		if (likely(priv->extend_desc))
2454 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2455 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2456 			tx_desc = &tx_q->dma_entx[entry].basic;
2457 		else
2458 			tx_desc = tx_q->dma_tx + entry;
2459 
2460 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2461 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2462 
2463 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2464 
2465 		/* To return the XDP buffer to the XSK pool, we simply call
2466 		 * xsk_tx_completed(), so we don't need to fill up
2467 		 * 'buf' and 'xdpf'.
2468 		 */
2469 		tx_q->tx_skbuff_dma[entry].buf = 0;
2470 		tx_q->xdpf[entry] = NULL;
2471 
2472 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2473 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2474 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2475 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2476 
2477 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2478 
2479 		tx_q->tx_count_frames++;
2480 
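		/* Set the interrupt-on-completion bit only once every
		 * tx_coal_frames descriptors; a zero setting disables IC
		 * on this path.
		 */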
2481 		if (!priv->tx_coal_frames[queue])
2482 			set_ic = false;
2483 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2484 			set_ic = true;
2485 		else
2486 			set_ic = false;
2487 
2488 		if (set_ic) {
2489 			tx_q->tx_count_frames = 0;
2490 			stmmac_set_tx_ic(priv, tx_desc);
2491 			tx_set_ic_bit++;
2492 		}
2493 
2494 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2495 				       true, priv->mode, true, true,
2496 				       xdp_desc.len);
2497 
2498 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2499 
2500 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2501 		entry = tx_q->cur_tx;
2502 	}
2503 	u64_stats_update_begin(&txq_stats->napi_syncp);
2504 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2505 	u64_stats_update_end(&txq_stats->napi_syncp);
2506 
2507 	if (tx_desc) {
2508 		stmmac_flush_tx_descriptors(priv, queue);
2509 		xsk_tx_release(pool);
2510 	}
2511 
2512 	/* Return true only if both conditions are met:
2513 	 *  a) TX budget is still available
2514 	 *  b) work_done is true, i.e. the XSK TX desc peek came back empty
2515 	 *     (no more pending XSK TX frames for transmission)
2516 	 */
2517 	return !!budget && work_done;
2518 }
2519 
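/* When a TX error indicates the current threshold is too low, bump the TX
 * DMA threshold by 64 while its value is still at most 256 and reprogram
 * the channel; nothing is done when operating in Store-and-Forward mode.
 */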
2520 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2521 {
2522 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2523 		tc += 64;
2524 
2525 		if (priv->plat->force_thresh_dma_mode)
2526 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2527 		else
2528 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2529 						      chan);
2530 
2531 		priv->xstats.threshold = tc;
2532 	}
2533 }
2534 
2535 /**
2536  * stmmac_tx_clean - to manage the transmission completion
2537  * @priv: driver private structure
2538  * @budget: napi budget limiting this functions packet handling
2539  * @queue: TX queue index
2540  * Description: it reclaims the transmit resources after transmission completes.
2541  */
2542 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2543 {
2544 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2545 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2546 	unsigned int bytes_compl = 0, pkts_compl = 0;
2547 	unsigned int entry, xmits = 0, count = 0;
2548 	u32 tx_packets = 0, tx_errors = 0;
2549 
2550 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2551 
2552 	tx_q->xsk_frames_done = 0;
2553 
2554 	entry = tx_q->dirty_tx;
2555 
2556 	/* Try to clean all completed TX frames in one shot */
2557 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2558 		struct xdp_frame *xdpf;
2559 		struct sk_buff *skb;
2560 		struct dma_desc *p;
2561 		int status;
2562 
2563 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2564 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2565 			xdpf = tx_q->xdpf[entry];
2566 			skb = NULL;
2567 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2568 			xdpf = NULL;
2569 			skb = tx_q->tx_skbuff[entry];
2570 		} else {
2571 			xdpf = NULL;
2572 			skb = NULL;
2573 		}
2574 
2575 		if (priv->extend_desc)
2576 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2577 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2578 			p = &tx_q->dma_entx[entry].basic;
2579 		else
2580 			p = tx_q->dma_tx + entry;
2581 
2582 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2583 		/* Check if the descriptor is owned by the DMA */
2584 		if (unlikely(status & tx_dma_own))
2585 			break;
2586 
2587 		count++;
2588 
2589 		/* Make sure descriptor fields are read after reading
2590 		 * the own bit.
2591 		 */
2592 		dma_rmb();
2593 
2594 		/* Just consider the last segment and ...*/
2595 		if (likely(!(status & tx_not_ls))) {
2596 			/* ... verify the status error condition */
2597 			if (unlikely(status & tx_err)) {
2598 				tx_errors++;
2599 				if (unlikely(status & tx_err_bump_tc))
2600 					stmmac_bump_dma_threshold(priv, queue);
2601 			} else {
2602 				tx_packets++;
2603 			}
2604 			if (skb)
2605 				stmmac_get_tx_hwtstamp(priv, p, skb);
2606 		}
2607 
2608 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2609 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2610 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2611 				dma_unmap_page(priv->device,
2612 					       tx_q->tx_skbuff_dma[entry].buf,
2613 					       tx_q->tx_skbuff_dma[entry].len,
2614 					       DMA_TO_DEVICE);
2615 			else
2616 				dma_unmap_single(priv->device,
2617 						 tx_q->tx_skbuff_dma[entry].buf,
2618 						 tx_q->tx_skbuff_dma[entry].len,
2619 						 DMA_TO_DEVICE);
2620 			tx_q->tx_skbuff_dma[entry].buf = 0;
2621 			tx_q->tx_skbuff_dma[entry].len = 0;
2622 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2623 		}
2624 
2625 		stmmac_clean_desc3(priv, tx_q, p);
2626 
2627 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2628 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2629 
2630 		if (xdpf &&
2631 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2632 			xdp_return_frame_rx_napi(xdpf);
2633 			tx_q->xdpf[entry] = NULL;
2634 		}
2635 
2636 		if (xdpf &&
2637 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2638 			xdp_return_frame(xdpf);
2639 			tx_q->xdpf[entry] = NULL;
2640 		}
2641 
2642 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2643 			tx_q->xsk_frames_done++;
2644 
2645 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2646 			if (likely(skb)) {
2647 				pkts_compl++;
2648 				bytes_compl += skb->len;
2649 				dev_consume_skb_any(skb);
2650 				tx_q->tx_skbuff[entry] = NULL;
2651 			}
2652 		}
2653 
2654 		stmmac_release_tx_desc(priv, p, priv->mode);
2655 
2656 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2657 	}
2658 	tx_q->dirty_tx = entry;
2659 
2660 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2661 				  pkts_compl, bytes_compl);
2662 
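	/* Wake the TX queue if it was stopped and enough descriptors are
	 * free again.
	 */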
2663 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2664 								queue))) &&
2665 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2666 
2667 		netif_dbg(priv, tx_done, priv->dev,
2668 			  "%s: restart transmit\n", __func__);
2669 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2670 	}
2671 
2672 	if (tx_q->xsk_pool) {
2673 		bool work_done;
2674 
2675 		if (tx_q->xsk_frames_done)
2676 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2677 
2678 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2679 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2680 
2681 		/* For XSK TX, we try to send as many frames as possible.
2682 		 * If the XSK work is done (XSK TX desc ring empty and budget
2683 		 * still available), return "budget - 1" to re-enable the TX IRQ.
2684 		 * Else, return "budget" to make NAPI continue polling.
2685 		 */
2686 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2687 					       STMMAC_XSK_TX_BUDGET_MAX);
2688 		if (work_done)
2689 			xmits = budget - 1;
2690 		else
2691 			xmits = budget;
2692 	}
2693 
2694 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2695 	    priv->eee_sw_timer_en) {
2696 		if (stmmac_enable_eee_mode(priv))
2697 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2698 	}
2699 
2700 	/* We still have pending packets, let's call for a new scheduling */
2701 	if (tx_q->dirty_tx != tx_q->cur_tx)
2702 		stmmac_tx_timer_arm(priv, queue);
2703 
2704 	u64_stats_update_begin(&txq_stats->napi_syncp);
2705 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2706 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2707 	u64_stats_inc(&txq_stats->napi.tx_clean);
2708 	u64_stats_update_end(&txq_stats->napi_syncp);
2709 
2710 	priv->xstats.tx_errors += tx_errors;
2711 
2712 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2713 
2714 	/* Combine decisions from TX clean and XSK TX */
2715 	return max(count, xmits);
2716 }
2717 
2718 /**
2719  * stmmac_tx_err - to manage the tx error
2720  * @priv: driver private structure
2721  * @chan: channel index
2722  * Description: it cleans the descriptors and restarts the transmission
2723  * in case of transmission errors.
2724  */
2725 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2726 {
2727 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2728 
2729 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2730 
2731 	stmmac_stop_tx_dma(priv, chan);
2732 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2733 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2734 	stmmac_reset_tx_queue(priv, chan);
2735 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2736 			    tx_q->dma_tx_phy, chan);
2737 	stmmac_start_tx_dma(priv, chan);
2738 
2739 	priv->xstats.tx_errors++;
2740 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2741 }
2742 
2743 /**
2744  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2745  *  @priv: driver private structure
2746  *  @txmode: TX operating mode
2747  *  @rxmode: RX operating mode
2748  *  @chan: channel index
2749  *  Description: it is used for configuring the DMA operation mode at
2750  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2751  *  mode.
2752  */
2753 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2754 					  u32 rxmode, u32 chan)
2755 {
2756 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2757 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2758 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2759 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2760 	int rxfifosz = priv->plat->rx_fifo_size;
2761 	int txfifosz = priv->plat->tx_fifo_size;
2762 
2763 	if (rxfifosz == 0)
2764 		rxfifosz = priv->dma_cap.rx_fifo_size;
2765 	if (txfifosz == 0)
2766 		txfifosz = priv->dma_cap.tx_fifo_size;
2767 
2768 	/* Adjust for real per queue fifo size */
2769 	rxfifosz /= rx_channels_count;
2770 	txfifosz /= tx_channels_count;
2771 
2772 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2773 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2774 }
2775 
2776 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2777 {
2778 	int ret;
2779 
2780 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2781 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2782 	if (ret && (ret != -EINVAL)) {
2783 		stmmac_global_err(priv);
2784 		return true;
2785 	}
2786 
2787 	return false;
2788 }
2789 
2790 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2791 {
2792 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2793 						 &priv->xstats, chan, dir);
2794 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2795 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2796 	struct stmmac_channel *ch = &priv->channel[chan];
2797 	struct napi_struct *rx_napi;
2798 	struct napi_struct *tx_napi;
2799 	unsigned long flags;
2800 
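	/* Channels with an attached XSK pool are serviced by the combined
	 * rx/tx NAPI instance instead of the separate rx and tx ones.
	 */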
2801 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2802 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2803 
2804 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2805 		if (napi_schedule_prep(rx_napi)) {
2806 			spin_lock_irqsave(&ch->lock, flags);
2807 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2808 			spin_unlock_irqrestore(&ch->lock, flags);
2809 			__napi_schedule(rx_napi);
2810 		}
2811 	}
2812 
2813 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2814 		if (napi_schedule_prep(tx_napi)) {
2815 			spin_lock_irqsave(&ch->lock, flags);
2816 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2817 			spin_unlock_irqrestore(&ch->lock, flags);
2818 			__napi_schedule(tx_napi);
2819 		}
2820 	}
2821 
2822 	return status;
2823 }
2824 
2825 /**
2826  * stmmac_dma_interrupt - DMA ISR
2827  * @priv: driver private structure
2828  * Description: this is the DMA ISR. It is called by the main ISR.
2829  * It calls the dwmac dma routine and schedules the poll method in case
2830  * some work can be done.
2831  */
2832 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2833 {
2834 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2835 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2836 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2837 				tx_channel_count : rx_channel_count;
2838 	u32 chan;
2839 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2840 
2841 	/* Make sure we never check beyond our status buffer. */
2842 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2843 		channels_to_check = ARRAY_SIZE(status);
2844 
2845 	for (chan = 0; chan < channels_to_check; chan++)
2846 		status[chan] = stmmac_napi_check(priv, chan,
2847 						 DMA_DIR_RXTX);
2848 
2849 	for (chan = 0; chan < tx_channel_count; chan++) {
2850 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2851 			/* Try to bump up the dma threshold on this failure */
2852 			stmmac_bump_dma_threshold(priv, chan);
2853 		} else if (unlikely(status[chan] == tx_hard_error)) {
2854 			stmmac_tx_err(priv, chan);
2855 		}
2856 	}
2857 }
2858 
2859 /**
2860  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2861  * @priv: driver private structure
2862  * Description: this masks the MMC irq; the counters are managed in SW.
2863  */
2864 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2865 {
2866 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2867 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2868 
2869 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2870 
2871 	if (priv->dma_cap.rmon) {
2872 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2873 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2874 	} else
2875 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2876 }
2877 
2878 /**
2879  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2880  * @priv: driver private structure
2881  * Description:
2882  *  new GMAC chip generations have a new register to indicate the
2883  *  presence of the optional features/functions.
2884  *  This can also be used to override the values passed through the
2885  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2886  */
2887 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2888 {
2889 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2890 }
2891 
2892 /**
2893  * stmmac_check_ether_addr - check if the MAC addr is valid
2894  * @priv: driver private structure
2895  * Description:
2896  * it verifies that the MAC address is valid; if it is not, a random
2897  * MAC address is generated
2898  */
2899 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2900 {
2901 	u8 addr[ETH_ALEN];
2902 
2903 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2904 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2905 		if (is_valid_ether_addr(addr))
2906 			eth_hw_addr_set(priv->dev, addr);
2907 		else
2908 			eth_hw_addr_random(priv->dev);
2909 		dev_info(priv->device, "device MAC address %pM\n",
2910 			 priv->dev->dev_addr);
2911 	}
2912 }
2913 
2914 /**
2915  * stmmac_init_dma_engine - DMA init.
2916  * @priv: driver private structure
2917  * Description:
2918  * It inits the DMA invoking the specific MAC/GMAC callback.
2919  * Some DMA parameters can be passed from the platform;
2920  * if they are not passed, a default is kept for the MAC or GMAC.
2921  */
2922 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2923 {
2924 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2925 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2926 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2927 	struct stmmac_rx_queue *rx_q;
2928 	struct stmmac_tx_queue *tx_q;
2929 	u32 chan = 0;
2930 	int atds = 0;
2931 	int ret = 0;
2932 
2933 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2934 		dev_err(priv->device, "Invalid DMA configuration\n");
2935 		return -EINVAL;
2936 	}
2937 
2938 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2939 		atds = 1;
2940 
2941 	ret = stmmac_reset(priv, priv->ioaddr);
2942 	if (ret) {
2943 		dev_err(priv->device, "Failed to reset the dma\n");
2944 		return ret;
2945 	}
2946 
2947 	/* DMA Configuration */
2948 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2949 
2950 	if (priv->plat->axi)
2951 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2952 
2953 	/* DMA CSR Channel configuration */
2954 	for (chan = 0; chan < dma_csr_ch; chan++) {
2955 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2956 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2957 	}
2958 
2959 	/* DMA RX Channel Configuration */
2960 	for (chan = 0; chan < rx_channels_count; chan++) {
2961 		rx_q = &priv->dma_conf.rx_queue[chan];
2962 
2963 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2964 				    rx_q->dma_rx_phy, chan);
2965 
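		/* Point the RX tail pointer past the buf_alloc_num
		 * descriptors that have been populated with buffers.
		 */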
2966 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2967 				     (rx_q->buf_alloc_num *
2968 				      sizeof(struct dma_desc));
2969 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2970 				       rx_q->rx_tail_addr, chan);
2971 	}
2972 
2973 	/* DMA TX Channel Configuration */
2974 	for (chan = 0; chan < tx_channels_count; chan++) {
2975 		tx_q = &priv->dma_conf.tx_queue[chan];
2976 
2977 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2978 				    tx_q->dma_tx_phy, chan);
2979 
2980 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2981 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2982 				       tx_q->tx_tail_addr, chan);
2983 	}
2984 
2985 	return ret;
2986 }
2987 
2988 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2989 {
2990 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2991 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
2992 
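	/* A zero coalesce timer means TX mitigation is disabled for this
	 * queue, so don't arm the hrtimer.
	 */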
2993 	if (!tx_coal_timer)
2994 		return;
2995 
2996 	hrtimer_start(&tx_q->txtimer,
2997 		      STMMAC_COAL_TIMER(tx_coal_timer),
2998 		      HRTIMER_MODE_REL);
2999 }
3000 
3001 /**
3002  * stmmac_tx_timer - mitigation sw timer for tx.
3003  * @t: pointer to the hrtimer embedded in the TX queue
3004  * Description:
3005  * This is the timer handler that schedules the TX NAPI to run stmmac_tx_clean.
3006  */
3007 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3008 {
3009 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3010 	struct stmmac_priv *priv = tx_q->priv_data;
3011 	struct stmmac_channel *ch;
3012 	struct napi_struct *napi;
3013 
3014 	ch = &priv->channel[tx_q->queue_index];
3015 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3016 
3017 	if (likely(napi_schedule_prep(napi))) {
3018 		unsigned long flags;
3019 
3020 		spin_lock_irqsave(&ch->lock, flags);
3021 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3022 		spin_unlock_irqrestore(&ch->lock, flags);
3023 		__napi_schedule(napi);
3024 	}
3025 
3026 	return HRTIMER_NORESTART;
3027 }
3028 
3029 /**
3030  * stmmac_init_coalesce - init mitigation options.
3031  * @priv: driver private structure
3032  * Description:
3033  * This inits the coalesce parameters: the timer rate,
3034  * the timer handler and the default frame threshold used for setting
3035  * the interrupt-on-completion bit.
3036  */
3037 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3038 {
3039 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3040 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3041 	u32 chan;
3042 
3043 	for (chan = 0; chan < tx_channel_count; chan++) {
3044 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3045 
3046 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3047 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3048 
3049 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3050 		tx_q->txtimer.function = stmmac_tx_timer;
3051 	}
3052 
3053 	for (chan = 0; chan < rx_channel_count; chan++)
3054 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3055 }
3056 
3057 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3058 {
3059 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3060 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3061 	u32 chan;
3062 
3063 	/* set TX ring length */
3064 	for (chan = 0; chan < tx_channels_count; chan++)
3065 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3066 				       (priv->dma_conf.dma_tx_size - 1), chan);
3067 
3068 	/* set RX ring length */
3069 	for (chan = 0; chan < rx_channels_count; chan++)
3070 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3071 				       (priv->dma_conf.dma_rx_size - 1), chan);
3072 }
3073 
3074 /**
3075  *  stmmac_set_tx_queue_weight - Set TX queue weight
3076  *  @priv: driver private structure
3077  *  Description: It is used for setting the TX queue weights
3078  */
3079 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3080 {
3081 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3082 	u32 weight;
3083 	u32 queue;
3084 
3085 	for (queue = 0; queue < tx_queues_count; queue++) {
3086 		weight = priv->plat->tx_queues_cfg[queue].weight;
3087 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3088 	}
3089 }
3090 
3091 /**
3092  *  stmmac_configure_cbs - Configure CBS in TX queue
3093  *  @priv: driver private structure
3094  *  Description: It is used for configuring CBS in AVB TX queues
3095  */
3096 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3097 {
3098 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3099 	u32 mode_to_use;
3100 	u32 queue;
3101 
3102 	/* queue 0 is reserved for legacy traffic */
3103 	for (queue = 1; queue < tx_queues_count; queue++) {
3104 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3105 		if (mode_to_use == MTL_QUEUE_DCB)
3106 			continue;
3107 
3108 		stmmac_config_cbs(priv, priv->hw,
3109 				priv->plat->tx_queues_cfg[queue].send_slope,
3110 				priv->plat->tx_queues_cfg[queue].idle_slope,
3111 				priv->plat->tx_queues_cfg[queue].high_credit,
3112 				priv->plat->tx_queues_cfg[queue].low_credit,
3113 				queue);
3114 	}
3115 }
3116 
3117 /**
3118  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3119  *  @priv: driver private structure
3120  *  Description: It is used for mapping RX queues to RX dma channels
3121  */
3122 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3123 {
3124 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3125 	u32 queue;
3126 	u32 chan;
3127 
3128 	for (queue = 0; queue < rx_queues_count; queue++) {
3129 		chan = priv->plat->rx_queues_cfg[queue].chan;
3130 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3131 	}
3132 }
3133 
3134 /**
3135  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3136  *  @priv: driver private structure
3137  *  Description: It is used for configuring the RX Queue Priority
3138  */
3139 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3140 {
3141 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3142 	u32 queue;
3143 	u32 prio;
3144 
3145 	for (queue = 0; queue < rx_queues_count; queue++) {
3146 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3147 			continue;
3148 
3149 		prio = priv->plat->rx_queues_cfg[queue].prio;
3150 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3151 	}
3152 }
3153 
3154 /**
3155  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3156  *  @priv: driver private structure
3157  *  Description: It is used for configuring the TX Queue Priority
3158  */
3159 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3160 {
3161 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3162 	u32 queue;
3163 	u32 prio;
3164 
3165 	for (queue = 0; queue < tx_queues_count; queue++) {
3166 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3167 			continue;
3168 
3169 		prio = priv->plat->tx_queues_cfg[queue].prio;
3170 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3171 	}
3172 }
3173 
3174 /**
3175  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3176  *  @priv: driver private structure
3177  *  Description: It is used for configuring the RX queue routing
3178  */
3179 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3180 {
3181 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3182 	u32 queue;
3183 	u8 packet;
3184 
3185 	for (queue = 0; queue < rx_queues_count; queue++) {
3186 		/* no specific packet type routing specified for the queue */
3187 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3188 			continue;
3189 
3190 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3191 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3192 	}
3193 }
3194 
3195 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3196 {
3197 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3198 		priv->rss.enable = false;
3199 		return;
3200 	}
3201 
3202 	if (priv->dev->features & NETIF_F_RXHASH)
3203 		priv->rss.enable = true;
3204 	else
3205 		priv->rss.enable = false;
3206 
3207 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3208 			     priv->plat->rx_queues_to_use);
3209 }
3210 
3211 /**
3212  *  stmmac_mtl_configuration - Configure MTL
3213  *  @priv: driver private structure
3214  *  Description: It is used for configuring MTL
3215  */
3216 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3217 {
3218 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3219 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3220 
3221 	if (tx_queues_count > 1)
3222 		stmmac_set_tx_queue_weight(priv);
3223 
3224 	/* Configure MTL RX algorithms */
3225 	if (rx_queues_count > 1)
3226 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3227 				priv->plat->rx_sched_algorithm);
3228 
3229 	/* Configure MTL TX algorithms */
3230 	if (tx_queues_count > 1)
3231 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3232 				priv->plat->tx_sched_algorithm);
3233 
3234 	/* Configure CBS in AVB TX queues */
3235 	if (tx_queues_count > 1)
3236 		stmmac_configure_cbs(priv);
3237 
3238 	/* Map RX MTL to DMA channels */
3239 	stmmac_rx_queue_dma_chan_map(priv);
3240 
3241 	/* Enable MAC RX Queues */
3242 	stmmac_mac_enable_rx_queues(priv);
3243 
3244 	/* Set RX priorities */
3245 	if (rx_queues_count > 1)
3246 		stmmac_mac_config_rx_queues_prio(priv);
3247 
3248 	/* Set TX priorities */
3249 	if (tx_queues_count > 1)
3250 		stmmac_mac_config_tx_queues_prio(priv);
3251 
3252 	/* Set RX routing */
3253 	if (rx_queues_count > 1)
3254 		stmmac_mac_config_rx_queues_routing(priv);
3255 
3256 	/* Receive Side Scaling */
3257 	if (rx_queues_count > 1)
3258 		stmmac_mac_config_rss(priv);
3259 }
3260 
3261 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3262 {
3263 	if (priv->dma_cap.asp) {
3264 		netdev_info(priv->dev, "Enabling Safety Features\n");
3265 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3266 					  priv->plat->safety_feat_cfg);
3267 	} else {
3268 		netdev_info(priv->dev, "No Safety Features support found\n");
3269 	}
3270 }
3271 
3272 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3273 {
3274 	char *name;
3275 
3276 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3277 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3278 
3279 	name = priv->wq_name;
3280 	sprintf(name, "%s-fpe", priv->dev->name);
3281 
3282 	priv->fpe_wq = create_singlethread_workqueue(name);
3283 	if (!priv->fpe_wq) {
3284 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3285 
3286 		return -ENOMEM;
3287 	}
3288 	netdev_info(priv->dev, "FPE workqueue start");
3289 
3290 	return 0;
3291 }
3292 
3293 /**
3294  * stmmac_hw_setup - setup mac in a usable state.
3295  *  @dev: pointer to the device structure.
3296  *  @ptp_register: register PTP if set
3297  *  Description:
3298  *  this is the main function to setup the HW in a usable state: the DMA
3299  *  engine is reset, the core registers are configured (e.g. AXI,
3300  *  Checksum features, timers) and the DMA is ready to start receiving
3301  *  and transmitting.
3302  *  Return value:
3303  *  0 on success and an appropriate negative error code (as defined in
3304  *  errno.h) on failure.
3305  */
3306 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3307 {
3308 	struct stmmac_priv *priv = netdev_priv(dev);
3309 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3310 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3311 	bool sph_en;
3312 	u32 chan;
3313 	int ret;
3314 
3315 	/* DMA initialization and SW reset */
3316 	ret = stmmac_init_dma_engine(priv);
3317 	if (ret < 0) {
3318 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3319 			   __func__);
3320 		return ret;
3321 	}
3322 
3323 	/* Copy the MAC addr into the HW  */
3324 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3325 
3326 	/* PS and related bits will be programmed according to the speed */
3327 	if (priv->hw->pcs) {
3328 		int speed = priv->plat->mac_port_sel_speed;
3329 
3330 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3331 		    (speed == SPEED_1000)) {
3332 			priv->hw->ps = speed;
3333 		} else {
3334 			dev_warn(priv->device, "invalid port speed\n");
3335 			priv->hw->ps = 0;
3336 		}
3337 	}
3338 
3339 	/* Initialize the MAC Core */
3340 	stmmac_core_init(priv, priv->hw, dev);
3341 
3342 	/* Initialize MTL */
3343 	stmmac_mtl_configuration(priv);
3344 
3345 	/* Initialize Safety Features */
3346 	stmmac_safety_feat_configuration(priv);
3347 
3348 	ret = stmmac_rx_ipc(priv, priv->hw);
3349 	if (!ret) {
3350 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3351 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3352 		priv->hw->rx_csum = 0;
3353 	}
3354 
3355 	/* Enable the MAC Rx/Tx */
3356 	stmmac_mac_set(priv, priv->ioaddr, true);
3357 
3358 	/* Set the HW DMA mode and the COE */
3359 	stmmac_dma_operation_mode(priv);
3360 
3361 	stmmac_mmc_setup(priv);
3362 
3363 	if (ptp_register) {
3364 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3365 		if (ret < 0)
3366 			netdev_warn(priv->dev,
3367 				    "failed to enable PTP reference clock: %pe\n",
3368 				    ERR_PTR(ret));
3369 	}
3370 
3371 	ret = stmmac_init_ptp(priv);
3372 	if (ret == -EOPNOTSUPP)
3373 		netdev_info(priv->dev, "PTP not supported by HW\n");
3374 	else if (ret)
3375 		netdev_warn(priv->dev, "PTP init failed\n");
3376 	else if (ptp_register)
3377 		stmmac_ptp_register(priv);
3378 
3379 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3380 
3381 	/* Convert the timer from msec to usec */
3382 	if (!priv->tx_lpi_timer)
3383 		priv->tx_lpi_timer = eee_timer * 1000;
3384 
3385 	if (priv->use_riwt) {
3386 		u32 queue;
3387 
3388 		for (queue = 0; queue < rx_cnt; queue++) {
3389 			if (!priv->rx_riwt[queue])
3390 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3391 
3392 			stmmac_rx_watchdog(priv, priv->ioaddr,
3393 					   priv->rx_riwt[queue], queue);
3394 		}
3395 	}
3396 
3397 	if (priv->hw->pcs)
3398 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3399 
3400 	/* set TX and RX rings length */
3401 	stmmac_set_rings_length(priv);
3402 
3403 	/* Enable TSO */
3404 	if (priv->tso) {
3405 		for (chan = 0; chan < tx_cnt; chan++) {
3406 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3407 
3408 			/* TSO and TBS cannot co-exist */
3409 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3410 				continue;
3411 
3412 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3413 		}
3414 	}
3415 
3416 	/* Enable Split Header */
3417 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3418 	for (chan = 0; chan < rx_cnt; chan++)
3419 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3420 
3422 	/* VLAN Tag Insertion */
3423 	if (priv->dma_cap.vlins)
3424 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3425 
3426 	/* TBS */
3427 	for (chan = 0; chan < tx_cnt; chan++) {
3428 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3429 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3430 
3431 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3432 	}
3433 
3434 	/* Configure real RX and TX queues */
3435 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3436 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3437 
3438 	/* Start the ball rolling... */
3439 	stmmac_start_all_dma(priv);
3440 
3441 	if (priv->dma_cap.fpesel) {
3442 		stmmac_fpe_start_wq(priv);
3443 
3444 		if (priv->plat->fpe_cfg->enable)
3445 			stmmac_fpe_handshake(priv, true);
3446 	}
3447 
3448 	return 0;
3449 }
3450 
3451 static void stmmac_hw_teardown(struct net_device *dev)
3452 {
3453 	struct stmmac_priv *priv = netdev_priv(dev);
3454 
3455 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3456 }
3457 
3458 static void stmmac_free_irq(struct net_device *dev,
3459 			    enum request_irq_err irq_err, int irq_idx)
3460 {
3461 	struct stmmac_priv *priv = netdev_priv(dev);
3462 	int j;
3463 
3464 	switch (irq_err) {
3465 	case REQ_IRQ_ERR_ALL:
3466 		irq_idx = priv->plat->tx_queues_to_use;
3467 		fallthrough;
3468 	case REQ_IRQ_ERR_TX:
3469 		for (j = irq_idx - 1; j >= 0; j--) {
3470 			if (priv->tx_irq[j] > 0) {
3471 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3472 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3473 			}
3474 		}
3475 		irq_idx = priv->plat->rx_queues_to_use;
3476 		fallthrough;
3477 	case REQ_IRQ_ERR_RX:
3478 		for (j = irq_idx - 1; j >= 0; j--) {
3479 			if (priv->rx_irq[j] > 0) {
3480 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3481 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3482 			}
3483 		}
3484 
3485 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3486 			free_irq(priv->sfty_ue_irq, dev);
3487 		fallthrough;
3488 	case REQ_IRQ_ERR_SFTY_UE:
3489 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3490 			free_irq(priv->sfty_ce_irq, dev);
3491 		fallthrough;
3492 	case REQ_IRQ_ERR_SFTY_CE:
3493 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3494 			free_irq(priv->lpi_irq, dev);
3495 		fallthrough;
3496 	case REQ_IRQ_ERR_LPI:
3497 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3498 			free_irq(priv->wol_irq, dev);
3499 		fallthrough;
3500 	case REQ_IRQ_ERR_WOL:
3501 		free_irq(dev->irq, dev);
3502 		fallthrough;
3503 	case REQ_IRQ_ERR_MAC:
3504 	case REQ_IRQ_ERR_NO:
3505 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3506 		break;
3507 	}
3508 }
3509 
3510 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3511 {
3512 	struct stmmac_priv *priv = netdev_priv(dev);
3513 	enum request_irq_err irq_err;
3514 	cpumask_t cpu_mask;
3515 	int irq_idx = 0;
3516 	char *int_name;
3517 	int ret;
3518 	int i;
3519 
3520 	/* For common interrupt */
3521 	int_name = priv->int_name_mac;
3522 	sprintf(int_name, "%s:%s", dev->name, "mac");
3523 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3524 			  0, int_name, dev);
3525 	if (unlikely(ret < 0)) {
3526 		netdev_err(priv->dev,
3527 			   "%s: alloc mac MSI %d (error: %d)\n",
3528 			   __func__, dev->irq, ret);
3529 		irq_err = REQ_IRQ_ERR_MAC;
3530 		goto irq_error;
3531 	}
3532 
3533 	/* Request the Wake IRQ in case another line
3534 	 * is used for WoL
3535 	 */
3536 	priv->wol_irq_disabled = true;
3537 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3538 		int_name = priv->int_name_wol;
3539 		sprintf(int_name, "%s:%s", dev->name, "wol");
3540 		ret = request_irq(priv->wol_irq,
3541 				  stmmac_mac_interrupt,
3542 				  0, int_name, dev);
3543 		if (unlikely(ret < 0)) {
3544 			netdev_err(priv->dev,
3545 				   "%s: alloc wol MSI %d (error: %d)\n",
3546 				   __func__, priv->wol_irq, ret);
3547 			irq_err = REQ_IRQ_ERR_WOL;
3548 			goto irq_error;
3549 		}
3550 	}
3551 
3552 	/* Request the LPI IRQ in case another line
3553 	 * is used for LPI
3554 	 */
3555 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3556 		int_name = priv->int_name_lpi;
3557 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3558 		ret = request_irq(priv->lpi_irq,
3559 				  stmmac_mac_interrupt,
3560 				  0, int_name, dev);
3561 		if (unlikely(ret < 0)) {
3562 			netdev_err(priv->dev,
3563 				   "%s: alloc lpi MSI %d (error: %d)\n",
3564 				   __func__, priv->lpi_irq, ret);
3565 			irq_err = REQ_IRQ_ERR_LPI;
3566 			goto irq_error;
3567 		}
3568 	}
3569 
3570 	/* Request the Safety Feature Correctable Error line in
3571 	 * case another line is used for it
3572 	 */
3573 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3574 		int_name = priv->int_name_sfty_ce;
3575 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3576 		ret = request_irq(priv->sfty_ce_irq,
3577 				  stmmac_safety_interrupt,
3578 				  0, int_name, dev);
3579 		if (unlikely(ret < 0)) {
3580 			netdev_err(priv->dev,
3581 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3582 				   __func__, priv->sfty_ce_irq, ret);
3583 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3584 			goto irq_error;
3585 		}
3586 	}
3587 
3588 	/* Request the Safety Feature Uncorrectable Error line in
3589 	 * case another line is used for it
3590 	 */
3591 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3592 		int_name = priv->int_name_sfty_ue;
3593 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3594 		ret = request_irq(priv->sfty_ue_irq,
3595 				  stmmac_safety_interrupt,
3596 				  0, int_name, dev);
3597 		if (unlikely(ret < 0)) {
3598 			netdev_err(priv->dev,
3599 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3600 				   __func__, priv->sfty_ue_irq, ret);
3601 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3602 			goto irq_error;
3603 		}
3604 	}
3605 
3606 	/* Request Rx MSI irq */
3607 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3608 		if (i >= MTL_MAX_RX_QUEUES)
3609 			break;
3610 		if (priv->rx_irq[i] == 0)
3611 			continue;
3612 
3613 		int_name = priv->int_name_rx_irq[i];
3614 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3615 		ret = request_irq(priv->rx_irq[i],
3616 				  stmmac_msi_intr_rx,
3617 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3618 		if (unlikely(ret < 0)) {
3619 			netdev_err(priv->dev,
3620 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3621 				   __func__, i, priv->rx_irq[i], ret);
3622 			irq_err = REQ_IRQ_ERR_RX;
3623 			irq_idx = i;
3624 			goto irq_error;
3625 		}
3626 		cpumask_clear(&cpu_mask);
3627 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3628 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3629 	}
3630 
3631 	/* Request Tx MSI irq */
3632 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3633 		if (i >= MTL_MAX_TX_QUEUES)
3634 			break;
3635 		if (priv->tx_irq[i] == 0)
3636 			continue;
3637 
3638 		int_name = priv->int_name_tx_irq[i];
3639 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3640 		ret = request_irq(priv->tx_irq[i],
3641 				  stmmac_msi_intr_tx,
3642 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3643 		if (unlikely(ret < 0)) {
3644 			netdev_err(priv->dev,
3645 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3646 				   __func__, i, priv->tx_irq[i], ret);
3647 			irq_err = REQ_IRQ_ERR_TX;
3648 			irq_idx = i;
3649 			goto irq_error;
3650 		}
3651 		cpumask_clear(&cpu_mask);
3652 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3653 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3654 	}
3655 
3656 	return 0;
3657 
3658 irq_error:
3659 	stmmac_free_irq(dev, irq_err, irq_idx);
3660 	return ret;
3661 }
3662 
3663 static int stmmac_request_irq_single(struct net_device *dev)
3664 {
3665 	struct stmmac_priv *priv = netdev_priv(dev);
3666 	enum request_irq_err irq_err;
3667 	int ret;
3668 
3669 	ret = request_irq(dev->irq, stmmac_interrupt,
3670 			  IRQF_SHARED, dev->name, dev);
3671 	if (unlikely(ret < 0)) {
3672 		netdev_err(priv->dev,
3673 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3674 			   __func__, dev->irq, ret);
3675 		irq_err = REQ_IRQ_ERR_MAC;
3676 		goto irq_error;
3677 	}
3678 
3679 	/* Request the Wake IRQ in case another line
3680 	 * is used for WoL
3681 	 */
3682 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3683 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3684 				  IRQF_SHARED, dev->name, dev);
3685 		if (unlikely(ret < 0)) {
3686 			netdev_err(priv->dev,
3687 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3688 				   __func__, priv->wol_irq, ret);
3689 			irq_err = REQ_IRQ_ERR_WOL;
3690 			goto irq_error;
3691 		}
3692 	}
3693 
3694 	/* Request the LPI IRQ in case another line is used for LPI */
3695 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3696 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3697 				  IRQF_SHARED, dev->name, dev);
3698 		if (unlikely(ret < 0)) {
3699 			netdev_err(priv->dev,
3700 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3701 				   __func__, priv->lpi_irq, ret);
3702 			irq_err = REQ_IRQ_ERR_LPI;
3703 			goto irq_error;
3704 		}
3705 	}
3706 
3707 	return 0;
3708 
3709 irq_error:
3710 	stmmac_free_irq(dev, irq_err, 0);
3711 	return ret;
3712 }
3713 
3714 static int stmmac_request_irq(struct net_device *dev)
3715 {
3716 	struct stmmac_priv *priv = netdev_priv(dev);
3717 	int ret;
3718 
3719 	/* Request the IRQ lines */
3720 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3721 		ret = stmmac_request_irq_multi_msi(dev);
3722 	else
3723 		ret = stmmac_request_irq_single(dev);
3724 
3725 	return ret;
3726 }
3727 
3728 /**
3729  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3730  *  @priv: driver private structure
3731  *  @mtu: MTU to setup the dma queue and buf with
3732  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3733  *  Allocate the Tx/Rx DMA queues and initialize them.
3734  *  Return value:
3735  *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3736  */
3737 static struct stmmac_dma_conf *
3738 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3739 {
3740 	struct stmmac_dma_conf *dma_conf;
3741 	int chan, bfsize, ret;
3742 
3743 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3744 	if (!dma_conf) {
3745 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3746 			   __func__);
3747 		return ERR_PTR(-ENOMEM);
3748 	}
3749 
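	/* Work out the RX DMA buffer size required for this MTU */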
3750 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3751 	if (bfsize < 0)
3752 		bfsize = 0;
3753 
3754 	if (bfsize < BUF_SIZE_16KiB)
3755 		bfsize = stmmac_set_bfsize(mtu, 0);
3756 
3757 	dma_conf->dma_buf_sz = bfsize;
3758 	/* Choose the Tx/Rx ring sizes from the ones already defined in
3759 	 * the priv struct, if any.
3760 	 */
3761 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3762 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3763 
3764 	if (!dma_conf->dma_tx_size)
3765 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3766 	if (!dma_conf->dma_rx_size)
3767 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3768 
3769 	/* Earlier check for TBS */
3770 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3771 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3772 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3773 
3774 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3775 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3776 	}
3777 
3778 	ret = alloc_dma_desc_resources(priv, dma_conf);
3779 	if (ret < 0) {
3780 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3781 			   __func__);
3782 		goto alloc_error;
3783 	}
3784 
3785 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3786 	if (ret < 0) {
3787 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3788 			   __func__);
3789 		goto init_error;
3790 	}
3791 
3792 	return dma_conf;
3793 
3794 init_error:
3795 	free_dma_desc_resources(priv, dma_conf);
3796 alloc_error:
3797 	kfree(dma_conf);
3798 	return ERR_PTR(ret);
3799 }
3800 
3801 /**
3802  *  __stmmac_open - open entry point of the driver
3803  *  @dev : pointer to the device structure.
3804  *  @dma_conf: structure holding the DMA configuration and queue data
3805  *  Description:
3806  *  This function is the open entry point of the driver.
3807  *  Return value:
3808  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3809  *  file on failure.
3810  */
3811 static int __stmmac_open(struct net_device *dev,
3812 			 struct stmmac_dma_conf *dma_conf)
3813 {
3814 	struct stmmac_priv *priv = netdev_priv(dev);
3815 	int mode = priv->plat->phy_interface;
3816 	u32 chan;
3817 	int ret;
3818 
3819 	ret = pm_runtime_resume_and_get(priv->device);
3820 	if (ret < 0)
3821 		return ret;
3822 
3823 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3824 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3825 	    (!priv->hw->xpcs ||
3826 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3827 	    !priv->hw->lynx_pcs) {
3828 		ret = stmmac_init_phy(dev);
3829 		if (ret) {
3830 			netdev_err(priv->dev,
3831 				   "%s: Cannot attach to PHY (error: %d)\n",
3832 				   __func__, ret);
3833 			goto init_phy_error;
3834 		}
3835 	}
3836 
3837 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3838 
3839 	buf_sz = dma_conf->dma_buf_sz;
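	/* Carry the per-queue TBS enable state over to the new DMA configuration */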
3840 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3841 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3842 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3843 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3844 
3845 	stmmac_reset_queues_param(priv);
3846 
3847 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3848 	    priv->plat->serdes_powerup) {
3849 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3850 		if (ret < 0) {
3851 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3852 				   __func__);
3853 			goto init_error;
3854 		}
3855 	}
3856 
3857 	ret = stmmac_hw_setup(dev, true);
3858 	if (ret < 0) {
3859 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3860 		goto init_error;
3861 	}
3862 
3863 	stmmac_init_coalesce(priv);
3864 
3865 	phylink_start(priv->phylink);
3866 	/* We may have called phylink_speed_down before */
3867 	phylink_speed_up(priv->phylink);
3868 
3869 	ret = stmmac_request_irq(dev);
3870 	if (ret)
3871 		goto irq_error;
3872 
3873 	stmmac_enable_all_queues(priv);
3874 	netif_tx_start_all_queues(priv->dev);
3875 	stmmac_enable_all_dma_irq(priv);
3876 
3877 	return 0;
3878 
3879 irq_error:
3880 	phylink_stop(priv->phylink);
3881 
3882 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3883 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3884 
3885 	stmmac_hw_teardown(dev);
3886 init_error:
3887 	phylink_disconnect_phy(priv->phylink);
3888 init_phy_error:
3889 	pm_runtime_put(priv->device);
3890 	return ret;
3891 }
3892 
3893 static int stmmac_open(struct net_device *dev)
3894 {
3895 	struct stmmac_priv *priv = netdev_priv(dev);
3896 	struct stmmac_dma_conf *dma_conf;
3897 	int ret;
3898 
3899 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3900 	if (IS_ERR(dma_conf))
3901 		return PTR_ERR(dma_conf);
3902 
3903 	ret = __stmmac_open(dev, dma_conf);
3904 	if (ret)
3905 		free_dma_desc_resources(priv, dma_conf);
3906 
3907 	kfree(dma_conf);
3908 	return ret;
3909 }
3910 
3911 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3912 {
3913 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3914 
3915 	if (priv->fpe_wq) {
3916 		destroy_workqueue(priv->fpe_wq);
3917 		priv->fpe_wq = NULL;
3918 	}
3919 
3920 	netdev_info(priv->dev, "FPE workqueue stop");
3921 }
3922 
3923 /**
3924  *  stmmac_release - close entry point of the driver
3925  *  @dev : device pointer.
3926  *  Description:
3927  *  This is the close entry point of the driver.
3928  */
3929 static int stmmac_release(struct net_device *dev)
3930 {
3931 	struct stmmac_priv *priv = netdev_priv(dev);
3932 	u32 chan;
3933 
3934 	if (device_may_wakeup(priv->device))
3935 		phylink_speed_down(priv->phylink, false);
3936 	/* Stop and disconnect the PHY */
3937 	phylink_stop(priv->phylink);
3938 	phylink_disconnect_phy(priv->phylink);
3939 
3940 	stmmac_disable_all_queues(priv);
3941 
3942 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3943 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3944 
3945 	netif_tx_disable(dev);
3946 
3947 	/* Free the IRQ lines */
3948 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3949 
3950 	if (priv->eee_enabled) {
3951 		priv->tx_path_in_lpi_mode = false;
3952 		del_timer_sync(&priv->eee_ctrl_timer);
3953 	}
3954 
3955 	/* Stop TX/RX DMA and clear the descriptors */
3956 	stmmac_stop_all_dma(priv);
3957 
3958 	/* Release and free the Rx/Tx resources */
3959 	free_dma_desc_resources(priv, &priv->dma_conf);
3960 
3961 	/* Disable the MAC Rx/Tx */
3962 	stmmac_mac_set(priv, priv->ioaddr, false);
3963 
3964 	/* Power down the SerDes if present */
3965 	if (priv->plat->serdes_powerdown)
3966 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3967 
3968 	netif_carrier_off(dev);
3969 
3970 	stmmac_release_ptp(priv);
3971 
3972 	pm_runtime_put(priv->device);
3973 
3974 	if (priv->dma_cap.fpesel)
3975 		stmmac_fpe_stop_wq(priv);
3976 
3977 	return 0;
3978 }
3979 
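/* Use a Tx context descriptor to make the hardware insert the VLAN tag of
 * @skb. Returns true if a descriptor was set up for the tag, false otherwise.
 */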
3980 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3981 			       struct stmmac_tx_queue *tx_q)
3982 {
3983 	u16 tag = 0x0, inner_tag = 0x0;
3984 	u32 inner_type = 0x0;
3985 	struct dma_desc *p;
3986 
3987 	if (!priv->dma_cap.vlins)
3988 		return false;
3989 	if (!skb_vlan_tag_present(skb))
3990 		return false;
3991 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3992 		inner_tag = skb_vlan_tag_get(skb);
3993 		inner_type = STMMAC_VLAN_INSERT;
3994 	}
3995 
3996 	tag = skb_vlan_tag_get(skb);
3997 
3998 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3999 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4000 	else
4001 		p = &tx_q->dma_tx[tx_q->cur_tx];
4002 
4003 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4004 		return false;
4005 
4006 	stmmac_set_tx_owner(priv, p);
4007 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4008 	return true;
4009 }
4010 
4011 /**
4012  *  stmmac_tso_allocator - allocate Tx descriptors for a TSO buffer
4013  *  @priv: driver private structure
4014  *  @des: buffer start address
4015  *  @total_len: total length to fill in descriptors
4016  *  @last_segment: condition for the last descriptor
4017  *  @queue: TX queue index
4018  *  Description:
4019  *  This function fills the Tx descriptors with the given buffer, requesting
4020  *  new descriptors as needed to cover the whole buffer length.
4021  */
4022 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4023 				 int total_len, bool last_segment, u32 queue)
4024 {
4025 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4026 	struct dma_desc *desc;
4027 	u32 buff_size;
4028 	int tmp_len;
4029 
4030 	tmp_len = total_len;
4031 
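	/* Split the payload into chunks of at most TSO_MAX_BUFF_SIZE bytes,
	 * using one descriptor per chunk.
	 */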
4032 	while (tmp_len > 0) {
4033 		dma_addr_t curr_addr;
4034 
4035 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4036 						priv->dma_conf.dma_tx_size);
4037 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4038 
4039 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4040 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4041 		else
4042 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4043 
4044 		curr_addr = des + (total_len - tmp_len);
4045 		if (priv->dma_cap.addr64 <= 32)
4046 			desc->des0 = cpu_to_le32(curr_addr);
4047 		else
4048 			stmmac_set_desc_addr(priv, desc, curr_addr);
4049 
4050 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4051 			    TSO_MAX_BUFF_SIZE : tmp_len;
4052 
4053 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4054 				0, 1,
4055 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4056 				0, 0);
4057 
4058 		tmp_len -= TSO_MAX_BUFF_SIZE;
4059 	}
4060 }
4061 
4062 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4063 {
4064 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4065 	int desc_size;
4066 
4067 	if (likely(priv->extend_desc))
4068 		desc_size = sizeof(struct dma_extended_desc);
4069 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4070 		desc_size = sizeof(struct dma_edesc);
4071 	else
4072 		desc_size = sizeof(struct dma_desc);
4073 
4074 	/* The own bit must be the last thing written when preparing the
4075 	 * descriptor, and a barrier is then needed to make sure everything
4076 	 * is coherent before handing the descriptor to the DMA engine.
4077 	 */
4078 	wmb();
4079 
4080 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4081 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4082 }
4083 
4084 /**
4085  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4086  *  @skb : the socket buffer
4087  *  @dev : device pointer
4088  *  Description: this is the transmit function that is called on TSO frames
4089  *  (support available on GMAC4 and newer chips).
4090  *  The diagram below shows the ring programming in case of TSO frames:
4091  *
4092  *  First Descriptor
4093  *   --------
4094  *   | DES0 |---> buffer1 = L2/L3/L4 header
4095  *   | DES1 |---> TCP Payload (can continue on next descr...)
4096  *   | DES2 |---> buffer 1 and 2 len
4097  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4098  *   --------
4099  *	|
4100  *     ...
4101  *	|
4102  *   --------
4103  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4104  *   | DES1 | --|
4105  *   | DES2 | --> buffer 1 and 2 len
4106  *   | DES3 |
4107  *   --------
4108  *
4109  * The MSS is fixed while TSO is enabled, so the TDES3 context field need not be reprogrammed for every frame.
4110  */
4111 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4112 {
4113 	struct dma_desc *desc, *first, *mss_desc = NULL;
4114 	struct stmmac_priv *priv = netdev_priv(dev);
4115 	int nfrags = skb_shinfo(skb)->nr_frags;
4116 	u32 queue = skb_get_queue_mapping(skb);
4117 	unsigned int first_entry, tx_packets;
4118 	struct stmmac_txq_stats *txq_stats;
4119 	int tmp_pay_len = 0, first_tx;
4120 	struct stmmac_tx_queue *tx_q;
4121 	bool has_vlan, set_ic;
4122 	u8 proto_hdr_len, hdr;
4123 	u32 pay_len, mss;
4124 	dma_addr_t des;
4125 	int i;
4126 
4127 	tx_q = &priv->dma_conf.tx_queue[queue];
4128 	txq_stats = &priv->xstats.txq_stats[queue];
4129 	first_tx = tx_q->cur_tx;
4130 
4131 	/* Compute header lengths */
4132 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4133 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4134 		hdr = sizeof(struct udphdr);
4135 	} else {
4136 		proto_hdr_len = skb_tcp_all_headers(skb);
4137 		hdr = tcp_hdrlen(skb);
4138 	}
4139 
4140 	/* Descriptor availability based on the threshold should be safe enough */
4141 	if (unlikely(stmmac_tx_avail(priv, queue) <
4142 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4143 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4144 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4145 								queue));
4146 			/* This is a hard error, log it. */
4147 			netdev_err(priv->dev,
4148 				   "%s: Tx Ring full when queue awake\n",
4149 				   __func__);
4150 		}
4151 		return NETDEV_TX_BUSY;
4152 	}
4153 
4154 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4155 
4156 	mss = skb_shinfo(skb)->gso_size;
4157 
4158 	/* set new MSS value if needed */
4159 	if (mss != tx_q->mss) {
4160 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4161 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4162 		else
4163 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4164 
4165 		stmmac_set_mss(priv, mss_desc, mss);
4166 		tx_q->mss = mss;
4167 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4168 						priv->dma_conf.dma_tx_size);
4169 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4170 	}
4171 
4172 	if (netif_msg_tx_queued(priv)) {
4173 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4174 			__func__, hdr, proto_hdr_len, pay_len, mss);
4175 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4176 			skb->data_len);
4177 	}
4178 
4179 	/* Check if VLAN can be inserted by HW */
4180 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4181 
4182 	first_entry = tx_q->cur_tx;
4183 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4184 
4185 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4186 		desc = &tx_q->dma_entx[first_entry].basic;
4187 	else
4188 		desc = &tx_q->dma_tx[first_entry];
4189 	first = desc;
4190 
4191 	if (has_vlan)
4192 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4193 
4194 	/* first descriptor: fill Headers on Buf1 */
4195 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4196 			     DMA_TO_DEVICE);
4197 	if (dma_mapping_error(priv->device, des))
4198 		goto dma_map_err;
4199 
4200 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4201 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4202 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4203 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4204 
4205 	if (priv->dma_cap.addr64 <= 32) {
4206 		first->des0 = cpu_to_le32(des);
4207 
4208 		/* Fill start of payload in buff2 of first descriptor */
4209 		if (pay_len)
4210 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4211 
4212 		/* If needed take extra descriptors to fill the remaining payload */
4213 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4214 	} else {
4215 		stmmac_set_desc_addr(priv, first, des);
4216 		tmp_pay_len = pay_len;
4217 		des += proto_hdr_len;
4218 		pay_len = 0;
4219 	}
4220 
4221 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4222 
4223 	/* Prepare fragments */
4224 	for (i = 0; i < nfrags; i++) {
4225 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4226 
4227 		des = skb_frag_dma_map(priv->device, frag, 0,
4228 				       skb_frag_size(frag),
4229 				       DMA_TO_DEVICE);
4230 		if (dma_mapping_error(priv->device, des))
4231 			goto dma_map_err;
4232 
4233 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4234 				     (i == nfrags - 1), queue);
4235 
4236 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4237 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4238 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4239 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4240 	}
4241 
4242 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4243 
4244 	/* Only the last descriptor gets to point to the skb. */
4245 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4246 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4247 
4248 	/* Manage Tx mitigation: decide whether to request a completion interrupt */
4249 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4250 	tx_q->tx_count_frames += tx_packets;
4251 
4252 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4253 		set_ic = true;
4254 	else if (!priv->tx_coal_frames[queue])
4255 		set_ic = false;
4256 	else if (tx_packets > priv->tx_coal_frames[queue])
4257 		set_ic = true;
4258 	else if ((tx_q->tx_count_frames %
4259 		  priv->tx_coal_frames[queue]) < tx_packets)
4260 		set_ic = true;
4261 	else
4262 		set_ic = false;
4263 
4264 	if (set_ic) {
4265 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4266 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4267 		else
4268 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4269 
4270 		tx_q->tx_count_frames = 0;
4271 		stmmac_set_tx_ic(priv, desc);
4272 	}
4273 
4274 	/* We've used all descriptors we need for this skb, however,
4275 	 * advance cur_tx so that it references a fresh descriptor.
4276 	 * ndo_start_xmit will fill this descriptor the next time it's
4277 	 * called and stmmac_tx_clean may clean up to this descriptor.
4278 	 */
4279 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4280 
4281 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4282 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4283 			  __func__);
4284 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4285 	}
4286 
4287 	u64_stats_update_begin(&txq_stats->q_syncp);
4288 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4289 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4290 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4291 	if (set_ic)
4292 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4293 	u64_stats_update_end(&txq_stats->q_syncp);
4294 
4295 	if (priv->sarc_type)
4296 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4297 
4298 	skb_tx_timestamp(skb);
4299 
4300 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4301 		     priv->hwts_tx_en)) {
4302 		/* declare that device is doing timestamping */
4303 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4304 		stmmac_enable_tx_timestamp(priv, first);
4305 	}
4306 
4307 	/* Complete the first descriptor before granting the DMA */
4308 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4309 			proto_hdr_len,
4310 			pay_len,
4311 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4312 			hdr / 4, (skb->len - proto_hdr_len));
4313 
4314 	/* If context desc is used to change MSS */
4315 	if (mss_desc) {
4316 		/* Make sure that first descriptor has been completely
4317 		 * written, including its own bit. This is because MSS is
4318 		 * actually before first descriptor, so we need to make
4319 		 * sure that MSS's own bit is the last thing written.
4320 		 */
4321 		dma_wmb();
4322 		stmmac_set_tx_owner(priv, mss_desc);
4323 	}
4324 
4325 	if (netif_msg_pktdata(priv)) {
4326 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4327 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4328 			tx_q->cur_tx, first, nfrags);
4329 		pr_info(">>> frame to be transmitted: ");
4330 		print_pkt(skb->data, skb_headlen(skb));
4331 	}
4332 
4333 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4334 
4335 	stmmac_flush_tx_descriptors(priv, queue);
4336 	stmmac_tx_timer_arm(priv, queue);
4337 
4338 	return NETDEV_TX_OK;
4339 
4340 dma_map_err:
4341 	dev_err(priv->device, "Tx dma map failed\n");
4342 	dev_kfree_skb(skb);
4343 	priv->xstats.tx_dropped++;
4344 	return NETDEV_TX_OK;
4345 }
4346 
4347 /**
4348  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4349  * @skb: socket buffer to check
4350  *
4351  * Check if a packet has an ethertype that will trigger the IP header checks
4352  * and IP/TCP checksum engine of the stmmac core.
4353  *
4354  * Return: true if the ethertype can trigger the checksum engine, false
4355  * otherwise
4356  */
4357 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4358 {
4359 	int depth = 0;
4360 	__be16 proto;
4361 
4362 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4363 				    &depth);
4364 
4365 	return (depth <= ETH_HLEN) &&
4366 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4367 }
4368 
4369 /**
4370  *  stmmac_xmit - Tx entry point of the driver
4371  *  @skb : the socket buffer
4372  *  @dev : device pointer
4373  *  Description : this is the tx entry point of the driver.
4374  *  It programs the chain or the ring and supports oversized frames
4375  *  and the SG feature.
4376  */
4377 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4378 {
4379 	unsigned int first_entry, tx_packets, enh_desc;
4380 	struct stmmac_priv *priv = netdev_priv(dev);
4381 	unsigned int nopaged_len = skb_headlen(skb);
4382 	int i, csum_insertion = 0, is_jumbo = 0;
4383 	u32 queue = skb_get_queue_mapping(skb);
4384 	int nfrags = skb_shinfo(skb)->nr_frags;
4385 	int gso = skb_shinfo(skb)->gso_type;
4386 	struct stmmac_txq_stats *txq_stats;
4387 	struct dma_edesc *tbs_desc = NULL;
4388 	struct dma_desc *desc, *first;
4389 	struct stmmac_tx_queue *tx_q;
4390 	bool has_vlan, set_ic;
4391 	int entry, first_tx;
4392 	dma_addr_t des;
4393 
4394 	tx_q = &priv->dma_conf.tx_queue[queue];
4395 	txq_stats = &priv->xstats.txq_stats[queue];
4396 	first_tx = tx_q->cur_tx;
4397 
4398 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4399 		stmmac_disable_eee_mode(priv);
4400 
4401 	/* Manage oversized TCP frames for GMAC4 device */
4402 	if (skb_is_gso(skb) && priv->tso) {
4403 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4404 			return stmmac_tso_xmit(skb, dev);
4405 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4406 			return stmmac_tso_xmit(skb, dev);
4407 	}
4408 
4409 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4410 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4411 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4412 								queue));
4413 			/* This is a hard error, log it. */
4414 			netdev_err(priv->dev,
4415 				   "%s: Tx Ring full when queue awake\n",
4416 				   __func__);
4417 		}
4418 		return NETDEV_TX_BUSY;
4419 	}
4420 
4421 	/* Check if VLAN can be inserted by HW */
4422 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4423 
4424 	entry = tx_q->cur_tx;
4425 	first_entry = entry;
4426 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4427 
4428 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4429 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4430 	 * queues. In that case, checksum offloading for those queues that don't
4431 	 * support tx coe needs to fall back to software checksum calculation.
4432 	 *
4433 	 * Packets that won't trigger the COE, e.g. most DSA-tagged packets, will
4434 	 * also have to be checksummed in software.
4435 	 */
4436 	if (csum_insertion &&
4437 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4438 	     !stmmac_has_ip_ethertype(skb))) {
4439 		if (unlikely(skb_checksum_help(skb)))
4440 			goto dma_map_err;
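		/* The checksum has been computed in software, so do not ask
		 * the hardware to insert it.
		 */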
4441 		csum_insertion = !csum_insertion;
4442 	}
4443 
4444 	if (likely(priv->extend_desc))
4445 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4446 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4447 		desc = &tx_q->dma_entx[entry].basic;
4448 	else
4449 		desc = tx_q->dma_tx + entry;
4450 
4451 	first = desc;
4452 
4453 	if (has_vlan)
4454 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4455 
4456 	enh_desc = priv->plat->enh_desc;
4457 	/* To program the descriptors according to the size of the frame */
4458 	if (enh_desc)
4459 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4460 
4461 	if (unlikely(is_jumbo)) {
4462 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4463 		if (unlikely(entry < 0) && (entry != -EINVAL))
4464 			goto dma_map_err;
4465 	}
4466 
4467 	for (i = 0; i < nfrags; i++) {
4468 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4469 		int len = skb_frag_size(frag);
4470 		bool last_segment = (i == (nfrags - 1));
4471 
4472 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4473 		WARN_ON(tx_q->tx_skbuff[entry]);
4474 
4475 		if (likely(priv->extend_desc))
4476 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4477 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4478 			desc = &tx_q->dma_entx[entry].basic;
4479 		else
4480 			desc = tx_q->dma_tx + entry;
4481 
4482 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4483 				       DMA_TO_DEVICE);
4484 		if (dma_mapping_error(priv->device, des))
4485 			goto dma_map_err; /* should reuse desc w/o issues */
4486 
4487 		tx_q->tx_skbuff_dma[entry].buf = des;
4488 
4489 		stmmac_set_desc_addr(priv, desc, des);
4490 
4491 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4492 		tx_q->tx_skbuff_dma[entry].len = len;
4493 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4494 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4495 
4496 		/* Prepare the descriptor and set the own bit too */
4497 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4498 				priv->mode, 1, last_segment, skb->len);
4499 	}
4500 
4501 	/* Only the last descriptor gets to point to the skb. */
4502 	tx_q->tx_skbuff[entry] = skb;
4503 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4504 
4505 	/* According to the coalesce parameter, the IC bit for the latest
4506 	 * segment is reset and the timer is re-started to clean the tx status.
4507 	 * This approach takes care of the fragments: desc is the first
4508 	 * element in case of no SG.
4509 	 */
4510 	tx_packets = (entry + 1) - first_tx;
4511 	tx_q->tx_count_frames += tx_packets;
4512 
4513 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4514 		set_ic = true;
4515 	else if (!priv->tx_coal_frames[queue])
4516 		set_ic = false;
4517 	else if (tx_packets > priv->tx_coal_frames[queue])
4518 		set_ic = true;
4519 	else if ((tx_q->tx_count_frames %
4520 		  priv->tx_coal_frames[queue]) < tx_packets)
4521 		set_ic = true;
4522 	else
4523 		set_ic = false;
4524 
4525 	if (set_ic) {
4526 		if (likely(priv->extend_desc))
4527 			desc = &tx_q->dma_etx[entry].basic;
4528 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4529 			desc = &tx_q->dma_entx[entry].basic;
4530 		else
4531 			desc = &tx_q->dma_tx[entry];
4532 
4533 		tx_q->tx_count_frames = 0;
4534 		stmmac_set_tx_ic(priv, desc);
4535 	}
4536 
4537 	/* We've used all descriptors we need for this skb, however,
4538 	 * advance cur_tx so that it references a fresh descriptor.
4539 	 * ndo_start_xmit will fill this descriptor the next time it's
4540 	 * called and stmmac_tx_clean may clean up to this descriptor.
4541 	 */
4542 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4543 	tx_q->cur_tx = entry;
4544 
4545 	if (netif_msg_pktdata(priv)) {
4546 		netdev_dbg(priv->dev,
4547 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4548 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4549 			   entry, first, nfrags);
4550 
4551 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4552 		print_pkt(skb->data, skb->len);
4553 	}
4554 
4555 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4556 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4557 			  __func__);
4558 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4559 	}
4560 
4561 	u64_stats_update_begin(&txq_stats->q_syncp);
4562 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4563 	if (set_ic)
4564 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4565 	u64_stats_update_end(&txq_stats->q_syncp);
4566 
4567 	if (priv->sarc_type)
4568 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4569 
4570 	skb_tx_timestamp(skb);
4571 
4572 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4573 	 * problems because all the descriptors are actually ready to be
4574 	 * passed to the DMA engine.
4575 	 */
4576 	if (likely(!is_jumbo)) {
4577 		bool last_segment = (nfrags == 0);
4578 
4579 		des = dma_map_single(priv->device, skb->data,
4580 				     nopaged_len, DMA_TO_DEVICE);
4581 		if (dma_mapping_error(priv->device, des))
4582 			goto dma_map_err;
4583 
4584 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4585 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4586 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4587 
4588 		stmmac_set_desc_addr(priv, first, des);
4589 
4590 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4591 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4592 
4593 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4594 			     priv->hwts_tx_en)) {
4595 			/* declare that device is doing timestamping */
4596 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4597 			stmmac_enable_tx_timestamp(priv, first);
4598 		}
4599 
4600 		/* Prepare the first descriptor setting the OWN bit too */
4601 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4602 				csum_insertion, priv->mode, 0, last_segment,
4603 				skb->len);
4604 	}
4605 
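	/* With TBS enabled, program the per-packet launch time from skb->tstamp */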
4606 	if (tx_q->tbs & STMMAC_TBS_EN) {
4607 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4608 
4609 		tbs_desc = &tx_q->dma_entx[first_entry];
4610 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4611 	}
4612 
4613 	stmmac_set_tx_owner(priv, first);
4614 
4615 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4616 
4617 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4618 
4619 	stmmac_flush_tx_descriptors(priv, queue);
4620 	stmmac_tx_timer_arm(priv, queue);
4621 
4622 	return NETDEV_TX_OK;
4623 
4624 dma_map_err:
4625 	netdev_err(priv->dev, "Tx DMA map failed\n");
4626 	dev_kfree_skb(skb);
4627 	priv->xstats.tx_dropped++;
4628 	return NETDEV_TX_OK;
4629 }
4630 
4631 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4632 {
4633 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4634 	__be16 vlan_proto = veth->h_vlan_proto;
4635 	u16 vlanid;
4636 
4637 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4638 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4639 	    (vlan_proto == htons(ETH_P_8021AD) &&
4640 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4641 		/* pop the vlan tag */
4642 		vlanid = ntohs(veth->h_vlan_TCI);
4643 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4644 		skb_pull(skb, VLAN_HLEN);
4645 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4646 	}
4647 }
4648 
4649 /**
4650  * stmmac_rx_refill - refill the used RX buffers
4651  * @priv: driver private structure
4652  * @queue: RX queue index
4653  * Description : this reallocates the RX buffers (taken from the page pool)
4654  * used by the zero-copy reception process.
4655  */
4656 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4657 {
4658 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4659 	int dirty = stmmac_rx_dirty(priv, queue);
4660 	unsigned int entry = rx_q->dirty_rx;
4661 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4662 
4663 	if (priv->dma_cap.host_dma_width <= 32)
4664 		gfp |= GFP_DMA32;
4665 
4666 	while (dirty-- > 0) {
4667 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4668 		struct dma_desc *p;
4669 		bool use_rx_wd;
4670 
4671 		if (priv->extend_desc)
4672 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4673 		else
4674 			p = rx_q->dma_rx + entry;
4675 
4676 		if (!buf->page) {
4677 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4678 			if (!buf->page)
4679 				break;
4680 		}
4681 
4682 		if (priv->sph && !buf->sec_page) {
4683 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4684 			if (!buf->sec_page)
4685 				break;
4686 
4687 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4688 		}
4689 
4690 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4691 
4692 		stmmac_set_desc_addr(priv, p, buf->addr);
4693 		if (priv->sph)
4694 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4695 		else
4696 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4697 		stmmac_refill_desc3(priv, rx_q, p);
4698 
4699 		rx_q->rx_count_frames++;
4700 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4701 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4702 			rx_q->rx_count_frames = 0;
4703 
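		/* When RIWT is in use, the RX watchdog handles interrupt
		 * mitigation, so the per-descriptor interrupt-on-completion
		 * bit can usually be skipped (use_rx_wd set).
		 */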
4704 		use_rx_wd = !priv->rx_coal_frames[queue];
4705 		use_rx_wd |= rx_q->rx_count_frames > 0;
4706 		if (!priv->use_riwt)
4707 			use_rx_wd = false;
4708 
4709 		dma_wmb();
4710 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4711 
4712 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4713 	}
4714 	rx_q->dirty_rx = entry;
4715 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4716 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4717 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4718 }
4719 
4720 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4721 				       struct dma_desc *p,
4722 				       int status, unsigned int len)
4723 {
4724 	unsigned int plen = 0, hlen = 0;
4725 	int coe = priv->hw->rx_csum;
4726 
4727 	/* Not the first descriptor: with split header enabled, buffer1 carries no data */
4728 	if (priv->sph && len)
4729 		return 0;
4730 
4731 	/* First descriptor, get split header length */
4732 	stmmac_get_rx_header_len(priv, p, &hlen);
4733 	if (priv->sph && hlen) {
4734 		priv->xstats.rx_split_hdr_pkt_n++;
4735 		return hlen;
4736 	}
4737 
4738 	/* First descriptor, not last descriptor and not split header */
4739 	if (status & rx_not_ls)
4740 		return priv->dma_conf.dma_buf_sz;
4741 
4742 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4743 
4744 	/* First descriptor and last descriptor and not split header */
4745 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4746 }
4747 
4748 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4749 				       struct dma_desc *p,
4750 				       int status, unsigned int len)
4751 {
4752 	int coe = priv->hw->rx_csum;
4753 	unsigned int plen = 0;
4754 
4755 	/* Without split header, buffer2 is not used */
4756 	if (!priv->sph)
4757 		return 0;
4758 
4759 	/* Not last descriptor */
4760 	if (status & rx_not_ls)
4761 		return priv->dma_conf.dma_buf_sz;
4762 
4763 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4764 
4765 	/* Last descriptor */
4766 	return plen - len;
4767 }
4768 
4769 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4770 				struct xdp_frame *xdpf, bool dma_map)
4771 {
4772 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4773 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4774 	unsigned int entry = tx_q->cur_tx;
4775 	struct dma_desc *tx_desc;
4776 	dma_addr_t dma_addr;
4777 	bool set_ic;
4778 
4779 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4780 		return STMMAC_XDP_CONSUMED;
4781 
4782 	if (likely(priv->extend_desc))
4783 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4784 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4785 		tx_desc = &tx_q->dma_entx[entry].basic;
4786 	else
4787 		tx_desc = tx_q->dma_tx + entry;
4788 
4789 	if (dma_map) {
4790 		dma_addr = dma_map_single(priv->device, xdpf->data,
4791 					  xdpf->len, DMA_TO_DEVICE);
4792 		if (dma_mapping_error(priv->device, dma_addr))
4793 			return STMMAC_XDP_CONSUMED;
4794 
4795 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4796 	} else {
4797 		struct page *page = virt_to_page(xdpf->data);
4798 
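		/* XDP_TX: the frame still lives in the page pool RX buffer, so
		 * derive the payload DMA address from the page's DMA address,
		 * the xdp_frame header and the headroom, then sync it for the
		 * device instead of creating a new mapping.
		 */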
4799 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4800 			   xdpf->headroom;
4801 		dma_sync_single_for_device(priv->device, dma_addr,
4802 					   xdpf->len, DMA_BIDIRECTIONAL);
4803 
4804 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4805 	}
4806 
4807 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4808 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4809 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4810 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4811 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4812 
4813 	tx_q->xdpf[entry] = xdpf;
4814 
4815 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4816 
4817 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4818 			       true, priv->mode, true, true,
4819 			       xdpf->len);
4820 
4821 	tx_q->tx_count_frames++;
4822 
4823 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4824 		set_ic = true;
4825 	else
4826 		set_ic = false;
4827 
4828 	if (set_ic) {
4829 		tx_q->tx_count_frames = 0;
4830 		stmmac_set_tx_ic(priv, tx_desc);
4831 		u64_stats_update_begin(&txq_stats->q_syncp);
4832 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4833 		u64_stats_update_end(&txq_stats->q_syncp);
4834 	}
4835 
4836 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4837 
4838 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4839 	tx_q->cur_tx = entry;
4840 
4841 	return STMMAC_XDP_TX;
4842 }
4843 
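/* Pick the Tx queue used for XDP transmission on this CPU, wrapping around
 * when there are more CPUs than Tx queues.
 */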
4844 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4845 				   int cpu)
4846 {
4847 	int index = cpu;
4848 
4849 	if (unlikely(index < 0))
4850 		index = 0;
4851 
4852 	while (index >= priv->plat->tx_queues_to_use)
4853 		index -= priv->plat->tx_queues_to_use;
4854 
4855 	return index;
4856 }
4857 
4858 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4859 				struct xdp_buff *xdp)
4860 {
4861 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4862 	int cpu = smp_processor_id();
4863 	struct netdev_queue *nq;
4864 	int queue;
4865 	int res;
4866 
4867 	if (unlikely(!xdpf))
4868 		return STMMAC_XDP_CONSUMED;
4869 
4870 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4871 	nq = netdev_get_tx_queue(priv->dev, queue);
4872 
4873 	__netif_tx_lock(nq, cpu);
4874 	/* Avoids TX time-out as we are sharing with slow path */
4875 	txq_trans_cond_update(nq);
4876 
4877 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4878 	if (res == STMMAC_XDP_TX)
4879 		stmmac_flush_tx_descriptors(priv, queue);
4880 
4881 	__netif_tx_unlock(nq);
4882 
4883 	return res;
4884 }
4885 
4886 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4887 				 struct bpf_prog *prog,
4888 				 struct xdp_buff *xdp)
4889 {
4890 	u32 act;
4891 	int res;
4892 
4893 	act = bpf_prog_run_xdp(prog, xdp);
4894 	switch (act) {
4895 	case XDP_PASS:
4896 		res = STMMAC_XDP_PASS;
4897 		break;
4898 	case XDP_TX:
4899 		res = stmmac_xdp_xmit_back(priv, xdp);
4900 		break;
4901 	case XDP_REDIRECT:
4902 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4903 			res = STMMAC_XDP_CONSUMED;
4904 		else
4905 			res = STMMAC_XDP_REDIRECT;
4906 		break;
4907 	default:
4908 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4909 		fallthrough;
4910 	case XDP_ABORTED:
4911 		trace_xdp_exception(priv->dev, prog, act);
4912 		fallthrough;
4913 	case XDP_DROP:
4914 		res = STMMAC_XDP_CONSUMED;
4915 		break;
4916 	}
4917 
4918 	return res;
4919 }
4920 
4921 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4922 					   struct xdp_buff *xdp)
4923 {
4924 	struct bpf_prog *prog;
4925 	int res;
4926 
4927 	prog = READ_ONCE(priv->xdp_prog);
4928 	if (!prog) {
4929 		res = STMMAC_XDP_PASS;
4930 		goto out;
4931 	}
4932 
4933 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4934 out:
4935 	return ERR_PTR(-res);
4936 }
4937 
4938 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4939 				   int xdp_status)
4940 {
4941 	int cpu = smp_processor_id();
4942 	int queue;
4943 
4944 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4945 
4946 	if (xdp_status & STMMAC_XDP_TX)
4947 		stmmac_tx_timer_arm(priv, queue);
4948 
4949 	if (xdp_status & STMMAC_XDP_REDIRECT)
4950 		xdp_do_flush();
4951 }
4952 
4953 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4954 					       struct xdp_buff *xdp)
4955 {
4956 	unsigned int metasize = xdp->data - xdp->data_meta;
4957 	unsigned int datasize = xdp->data_end - xdp->data;
4958 	struct sk_buff *skb;
4959 
4960 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4961 			       xdp->data_end - xdp->data_hard_start,
4962 			       GFP_ATOMIC | __GFP_NOWARN);
4963 	if (unlikely(!skb))
4964 		return NULL;
4965 
4966 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4967 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4968 	if (metasize)
4969 		skb_metadata_set(skb, metasize);
4970 
4971 	return skb;
4972 }
4973 
4974 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4975 				   struct dma_desc *p, struct dma_desc *np,
4976 				   struct xdp_buff *xdp)
4977 {
4978 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4979 	struct stmmac_channel *ch = &priv->channel[queue];
4980 	unsigned int len = xdp->data_end - xdp->data;
4981 	enum pkt_hash_types hash_type;
4982 	int coe = priv->hw->rx_csum;
4983 	struct sk_buff *skb;
4984 	u32 hash;
4985 
4986 	skb = stmmac_construct_skb_zc(ch, xdp);
4987 	if (!skb) {
4988 		priv->xstats.rx_dropped++;
4989 		return;
4990 	}
4991 
4992 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4993 	stmmac_rx_vlan(priv->dev, skb);
4994 	skb->protocol = eth_type_trans(skb, priv->dev);
4995 
4996 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
4997 		skb_checksum_none_assert(skb);
4998 	else
4999 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5000 
5001 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5002 		skb_set_hash(skb, hash, hash_type);
5003 
5004 	skb_record_rx_queue(skb, queue);
5005 	napi_gro_receive(&ch->rxtx_napi, skb);
5006 
5007 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5008 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5009 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5010 	u64_stats_update_end(&rxq_stats->napi_syncp);
5011 }
5012 
5013 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5014 {
5015 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5016 	unsigned int entry = rx_q->dirty_rx;
5017 	struct dma_desc *rx_desc = NULL;
5018 	bool ret = true;
5019 
5020 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5021 
5022 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5023 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5024 		dma_addr_t dma_addr;
5025 		bool use_rx_wd;
5026 
5027 		if (!buf->xdp) {
5028 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5029 			if (!buf->xdp) {
5030 				ret = false;
5031 				break;
5032 			}
5033 		}
5034 
5035 		if (priv->extend_desc)
5036 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5037 		else
5038 			rx_desc = rx_q->dma_rx + entry;
5039 
5040 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5041 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5042 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5043 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5044 
5045 		rx_q->rx_count_frames++;
5046 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5047 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5048 			rx_q->rx_count_frames = 0;
5049 
5050 		use_rx_wd = !priv->rx_coal_frames[queue];
5051 		use_rx_wd |= rx_q->rx_count_frames > 0;
5052 		if (!priv->use_riwt)
5053 			use_rx_wd = false;
5054 
5055 		dma_wmb();
5056 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5057 
5058 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5059 	}
5060 
5061 	if (rx_desc) {
5062 		rx_q->dirty_rx = entry;
5063 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5064 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5065 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5066 	}
5067 
5068 	return ret;
5069 }
5070 
5071 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5072 {
5073 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5074 	 * to represent incoming packet, whereas cb field in the same structure
5075 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5076 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5077 	 */
5078 	return (struct stmmac_xdp_buff *)xdp;
5079 }
5080 
5081 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5082 {
5083 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5084 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5085 	unsigned int count = 0, error = 0, len = 0;
5086 	int dirty = stmmac_rx_dirty(priv, queue);
5087 	unsigned int next_entry = rx_q->cur_rx;
5088 	u32 rx_errors = 0, rx_dropped = 0;
5089 	unsigned int desc_size;
5090 	struct bpf_prog *prog;
5091 	bool failure = false;
5092 	int xdp_status = 0;
5093 	int status = 0;
5094 
5095 	if (netif_msg_rx_status(priv)) {
5096 		void *rx_head;
5097 
5098 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5099 		if (priv->extend_desc) {
5100 			rx_head = (void *)rx_q->dma_erx;
5101 			desc_size = sizeof(struct dma_extended_desc);
5102 		} else {
5103 			rx_head = (void *)rx_q->dma_rx;
5104 			desc_size = sizeof(struct dma_desc);
5105 		}
5106 
5107 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5108 				    rx_q->dma_rx_phy, desc_size);
5109 	}
5110 	while (count < limit) {
5111 		struct stmmac_rx_buffer *buf;
5112 		struct stmmac_xdp_buff *ctx;
5113 		unsigned int buf1_len = 0;
5114 		struct dma_desc *np, *p;
5115 		int entry;
5116 		int res;
5117 
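		/* Resume a frame that spans multiple descriptors if the
		 * previous NAPI run stopped in the middle of it.
		 */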
5118 		if (!count && rx_q->state_saved) {
5119 			error = rx_q->state.error;
5120 			len = rx_q->state.len;
5121 		} else {
5122 			rx_q->state_saved = false;
5123 			error = 0;
5124 			len = 0;
5125 		}
5126 
5127 		if (count >= limit)
5128 			break;
5129 
5130 read_again:
5131 		buf1_len = 0;
5132 		entry = next_entry;
5133 		buf = &rx_q->buf_pool[entry];
5134 
5135 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5136 			failure = failure ||
5137 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5138 			dirty = 0;
5139 		}
5140 
5141 		if (priv->extend_desc)
5142 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5143 		else
5144 			p = rx_q->dma_rx + entry;
5145 
5146 		/* read the status of the incoming frame */
5147 		status = stmmac_rx_status(priv, &priv->xstats, p);
5148 		/* stop if the descriptor is still owned by the DMA, otherwise go ahead */
5149 		if (unlikely(status & dma_own))
5150 			break;
5151 
5152 		/* Prefetch the next RX descriptor */
5153 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5154 						priv->dma_conf.dma_rx_size);
5155 		next_entry = rx_q->cur_rx;
5156 
5157 		if (priv->extend_desc)
5158 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5159 		else
5160 			np = rx_q->dma_rx + next_entry;
5161 
5162 		prefetch(np);
5163 
5164 		/* Ensure a valid XSK buffer before proceed */
5165 		if (!buf->xdp)
5166 			break;
5167 
5168 		if (priv->extend_desc)
5169 			stmmac_rx_extended_status(priv, &priv->xstats,
5170 						  rx_q->dma_erx + entry);
5171 		if (unlikely(status == discard_frame)) {
5172 			xsk_buff_free(buf->xdp);
5173 			buf->xdp = NULL;
5174 			dirty++;
5175 			error = 1;
5176 			if (!priv->hwts_rx_en)
5177 				rx_errors++;
5178 		}
5179 
5180 		if (unlikely(error && (status & rx_not_ls)))
5181 			goto read_again;
5182 		if (unlikely(error)) {
5183 			count++;
5184 			continue;
5185 		}
5186 
5187 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5188 		if (likely(status & rx_not_ls)) {
5189 			xsk_buff_free(buf->xdp);
5190 			buf->xdp = NULL;
5191 			dirty++;
5192 			count++;
5193 			goto read_again;
5194 		}
5195 
5196 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5197 		ctx->priv = priv;
5198 		ctx->desc = p;
5199 		ctx->ndesc = np;
5200 
5201 		/* XDP ZC frames only support primary buffers for now */
5202 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5203 		len += buf1_len;
5204 
5205 		/* ACS is disabled; strip manually. */
5206 		if (likely(!(status & rx_not_ls))) {
5207 			buf1_len -= ETH_FCS_LEN;
5208 			len -= ETH_FCS_LEN;
5209 		}
5210 
5211 		/* RX buffer is good and fit into a XSK pool buffer */
5212 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5213 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5214 
5215 		prog = READ_ONCE(priv->xdp_prog);
5216 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5217 
5218 		switch (res) {
5219 		case STMMAC_XDP_PASS:
5220 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5221 			xsk_buff_free(buf->xdp);
5222 			break;
5223 		case STMMAC_XDP_CONSUMED:
5224 			xsk_buff_free(buf->xdp);
5225 			rx_dropped++;
5226 			break;
5227 		case STMMAC_XDP_TX:
5228 		case STMMAC_XDP_REDIRECT:
5229 			xdp_status |= res;
5230 			break;
5231 		}
5232 
5233 		buf->xdp = NULL;
5234 		dirty++;
5235 		count++;
5236 	}
5237 
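	/* The loop stopped in the middle of a multi-descriptor frame:
	 * save the partial state so the next NAPI poll can resume it.
	 */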
5238 	if (status & rx_not_ls) {
5239 		rx_q->state_saved = true;
5240 		rx_q->state.error = error;
5241 		rx_q->state.len = len;
5242 	}
5243 
5244 	stmmac_finalize_xdp_rx(priv, xdp_status);
5245 
5246 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5247 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5248 	u64_stats_update_end(&rxq_stats->napi_syncp);
5249 
5250 	priv->xstats.rx_dropped += rx_dropped;
5251 	priv->xstats.rx_errors += rx_errors;
5252 
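	/* With the need_wakeup feature, tell user space whether it has to
	 * kick the driver again to complete the RX ring refill.
	 */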
5253 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5254 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5255 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5256 		else
5257 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5258 
5259 		return (int)count;
5260 	}
5261 
5262 	return failure ? limit : (int)count;
5263 }
5264 
5265 /**
5266  * stmmac_rx - manage the receive process
5267  * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index.
 * Description: this is the function called by the NAPI poll method.
5271  * It gets all the frames inside the ring.
5272  */
5273 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5274 {
5275 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5276 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5277 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5278 	struct stmmac_channel *ch = &priv->channel[queue];
5279 	unsigned int count = 0, error = 0, len = 0;
5280 	int status = 0, coe = priv->hw->rx_csum;
5281 	unsigned int next_entry = rx_q->cur_rx;
5282 	enum dma_data_direction dma_dir;
5283 	unsigned int desc_size;
5284 	struct sk_buff *skb = NULL;
5285 	struct stmmac_xdp_buff ctx;
5286 	int xdp_status = 0;
5287 	int buf_sz;
5288 
5289 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
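	/* Round the XDP buffer size up to whole pages and never let the
	 * NAPI budget exceed the ring size.
	 */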
5290 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5291 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5292 
5293 	if (netif_msg_rx_status(priv)) {
5294 		void *rx_head;
5295 
5296 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5297 		if (priv->extend_desc) {
5298 			rx_head = (void *)rx_q->dma_erx;
5299 			desc_size = sizeof(struct dma_extended_desc);
5300 		} else {
5301 			rx_head = (void *)rx_q->dma_rx;
5302 			desc_size = sizeof(struct dma_desc);
5303 		}
5304 
5305 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5306 				    rx_q->dma_rx_phy, desc_size);
5307 	}
5308 	while (count < limit) {
5309 		unsigned int buf1_len = 0, buf2_len = 0;
5310 		enum pkt_hash_types hash_type;
5311 		struct stmmac_rx_buffer *buf;
5312 		struct dma_desc *np, *p;
5313 		int entry;
5314 		u32 hash;
5315 
5316 		if (!count && rx_q->state_saved) {
5317 			skb = rx_q->state.skb;
5318 			error = rx_q->state.error;
5319 			len = rx_q->state.len;
5320 		} else {
5321 			rx_q->state_saved = false;
5322 			skb = NULL;
5323 			error = 0;
5324 			len = 0;
5325 		}
5326 
5327 read_again:
5328 		if (count >= limit)
5329 			break;
5330 
5331 		buf1_len = 0;
5332 		buf2_len = 0;
5333 		entry = next_entry;
5334 		buf = &rx_q->buf_pool[entry];
5335 
5336 		if (priv->extend_desc)
5337 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5338 		else
5339 			p = rx_q->dma_rx + entry;
5340 
5341 		/* read the status of the incoming frame */
5342 		status = stmmac_rx_status(priv, &priv->xstats, p);
		/* stop if the descriptor is still owned by the DMA, otherwise go ahead */
5344 		if (unlikely(status & dma_own))
5345 			break;
5346 
5347 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5348 						priv->dma_conf.dma_rx_size);
5349 		next_entry = rx_q->cur_rx;
5350 
5351 		if (priv->extend_desc)
5352 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5353 		else
5354 			np = rx_q->dma_rx + next_entry;
5355 
5356 		prefetch(np);
5357 
5358 		if (priv->extend_desc)
5359 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5360 		if (unlikely(status == discard_frame)) {
5361 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5362 			buf->page = NULL;
5363 			error = 1;
5364 			if (!priv->hwts_rx_en)
5365 				rx_errors++;
5366 		}
5367 
5368 		if (unlikely(error && (status & rx_not_ls)))
5369 			goto read_again;
5370 		if (unlikely(error)) {
5371 			dev_kfree_skb(skb);
5372 			skb = NULL;
5373 			count++;
5374 			continue;
5375 		}
5376 
5377 		/* Buffer is good. Go on. */
5378 
5379 		prefetch(page_address(buf->page) + buf->page_offset);
5380 		if (buf->sec_page)
5381 			prefetch(page_address(buf->sec_page));
5382 
5383 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5384 		len += buf1_len;
5385 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5386 		len += buf2_len;
5387 
5388 		/* ACS is disabled; strip manually. */
5389 		if (likely(!(status & rx_not_ls))) {
5390 			if (buf2_len) {
5391 				buf2_len -= ETH_FCS_LEN;
5392 				len -= ETH_FCS_LEN;
5393 			} else if (buf1_len) {
5394 				buf1_len -= ETH_FCS_LEN;
5395 				len -= ETH_FCS_LEN;
5396 			}
5397 		}
5398 
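		/* No skb yet: this is the first buffer of the frame, so sync
		 * it for the CPU and give the XDP program a chance to see it
		 * before an skb is built.
		 */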
5399 		if (!skb) {
5400 			unsigned int pre_len, sync_len;
5401 
5402 			dma_sync_single_for_cpu(priv->device, buf->addr,
5403 						buf1_len, dma_dir);
5404 
5405 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5406 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5407 					 buf->page_offset, buf1_len, true);
5408 
5409 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5410 				  buf->page_offset;
5411 
5412 			ctx.priv = priv;
5413 			ctx.desc = p;
5414 			ctx.ndesc = np;
5415 
5416 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
			/* Due to xdp_adjust_tail(): the for_device DMA sync
			 * must cover the maximum length the CPU touched
			 */
5420 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5421 				   buf->page_offset;
5422 			sync_len = max(sync_len, pre_len);
5423 
			/* Handle verdicts other than XDP_PASS */
5425 			if (IS_ERR(skb)) {
5426 				unsigned int xdp_res = -PTR_ERR(skb);
5427 
5428 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5429 					page_pool_put_page(rx_q->page_pool,
5430 							   virt_to_head_page(ctx.xdp.data),
5431 							   sync_len, true);
5432 					buf->page = NULL;
5433 					rx_dropped++;
5434 
					/* Clear skb since it only carried
					 * the XDP verdict status.
					 */
5438 					skb = NULL;
5439 
5440 					if (unlikely((status & rx_not_ls)))
5441 						goto read_again;
5442 
5443 					count++;
5444 					continue;
5445 				} else if (xdp_res & (STMMAC_XDP_TX |
5446 						      STMMAC_XDP_REDIRECT)) {
5447 					xdp_status |= xdp_res;
5448 					buf->page = NULL;
5449 					skb = NULL;
5450 					count++;
5451 					continue;
5452 				}
5453 			}
5454 		}
5455 
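		/* XDP verdict was PASS (or no program was attached): copy the
		 * possibly adjusted linear data into a new skb so the page
		 * can be recycled right away.
		 */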
5456 		if (!skb) {
5457 			/* XDP program may expand or reduce tail */
5458 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5459 
5460 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5461 			if (!skb) {
5462 				rx_dropped++;
5463 				count++;
5464 				goto drain_data;
5465 			}
5466 
5467 			/* XDP program may adjust header */
5468 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5469 			skb_put(skb, buf1_len);
5470 
5471 			/* Data payload copied into SKB, page ready for recycle */
5472 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5473 			buf->page = NULL;
5474 		} else if (buf1_len) {
5475 			dma_sync_single_for_cpu(priv->device, buf->addr,
5476 						buf1_len, dma_dir);
5477 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5478 					buf->page, buf->page_offset, buf1_len,
5479 					priv->dma_conf.dma_buf_sz);
5480 
5481 			/* Data payload appended into SKB */
5482 			skb_mark_for_recycle(skb);
5483 			buf->page = NULL;
5484 		}
5485 
5486 		if (buf2_len) {
5487 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5488 						buf2_len, dma_dir);
5489 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5490 					buf->sec_page, 0, buf2_len,
5491 					priv->dma_conf.dma_buf_sz);
5492 
5493 			/* Data payload appended into SKB */
5494 			skb_mark_for_recycle(skb);
5495 			buf->sec_page = NULL;
5496 		}
5497 
5498 drain_data:
5499 		if (likely(status & rx_not_ls))
5500 			goto read_again;
5501 		if (!skb)
5502 			continue;
5503 
5504 		/* Got entire packet into SKB. Finish it. */
5505 
5506 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5507 		stmmac_rx_vlan(priv->dev, skb);
5508 		skb->protocol = eth_type_trans(skb, priv->dev);
5509 
5510 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5511 			skb_checksum_none_assert(skb);
5512 		else
5513 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5514 
5515 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5516 			skb_set_hash(skb, hash, hash_type);
5517 
5518 		skb_record_rx_queue(skb, queue);
5519 		napi_gro_receive(&ch->rx_napi, skb);
5520 		skb = NULL;
5521 
5522 		rx_packets++;
5523 		rx_bytes += len;
5524 		count++;
5525 	}
5526 
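	/* A frame is still being assembled across descriptors: save the
	 * partial skb and accounting so the next NAPI poll can continue it.
	 */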
5527 	if (status & rx_not_ls || skb) {
5528 		rx_q->state_saved = true;
5529 		rx_q->state.skb = skb;
5530 		rx_q->state.error = error;
5531 		rx_q->state.len = len;
5532 	}
5533 
5534 	stmmac_finalize_xdp_rx(priv, xdp_status);
5535 
5536 	stmmac_rx_refill(priv, queue);
5537 
5538 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5539 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5540 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5541 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5542 	u64_stats_update_end(&rxq_stats->napi_syncp);
5543 
5544 	priv->xstats.rx_dropped += rx_dropped;
5545 	priv->xstats.rx_errors += rx_errors;
5546 
5547 	return count;
5548 }
5549 
5550 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5551 {
5552 	struct stmmac_channel *ch =
5553 		container_of(napi, struct stmmac_channel, rx_napi);
5554 	struct stmmac_priv *priv = ch->priv_data;
5555 	struct stmmac_rxq_stats *rxq_stats;
5556 	u32 chan = ch->index;
5557 	int work_done;
5558 
5559 	rxq_stats = &priv->xstats.rxq_stats[chan];
5560 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5561 	u64_stats_inc(&rxq_stats->napi.poll);
5562 	u64_stats_update_end(&rxq_stats->napi_syncp);
5563 
5564 	work_done = stmmac_rx(priv, budget, chan);
5565 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5566 		unsigned long flags;
5567 
5568 		spin_lock_irqsave(&ch->lock, flags);
5569 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5570 		spin_unlock_irqrestore(&ch->lock, flags);
5571 	}
5572 
5573 	return work_done;
5574 }
5575 
5576 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5577 {
5578 	struct stmmac_channel *ch =
5579 		container_of(napi, struct stmmac_channel, tx_napi);
5580 	struct stmmac_priv *priv = ch->priv_data;
5581 	struct stmmac_txq_stats *txq_stats;
5582 	u32 chan = ch->index;
5583 	int work_done;
5584 
5585 	txq_stats = &priv->xstats.txq_stats[chan];
5586 	u64_stats_update_begin(&txq_stats->napi_syncp);
5587 	u64_stats_inc(&txq_stats->napi.poll);
5588 	u64_stats_update_end(&txq_stats->napi_syncp);
5589 
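	/* Clamp in case TX cleaning handled more entries than the budget */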
5590 	work_done = stmmac_tx_clean(priv, budget, chan);
5591 	work_done = min(work_done, budget);
5592 
5593 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5594 		unsigned long flags;
5595 
5596 		spin_lock_irqsave(&ch->lock, flags);
5597 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5598 		spin_unlock_irqrestore(&ch->lock, flags);
5599 	}
5600 
5601 	return work_done;
5602 }
5603 
5604 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5605 {
5606 	struct stmmac_channel *ch =
5607 		container_of(napi, struct stmmac_channel, rxtx_napi);
5608 	struct stmmac_priv *priv = ch->priv_data;
5609 	int rx_done, tx_done, rxtx_done;
5610 	struct stmmac_rxq_stats *rxq_stats;
5611 	struct stmmac_txq_stats *txq_stats;
5612 	u32 chan = ch->index;
5613 
5614 	rxq_stats = &priv->xstats.rxq_stats[chan];
5615 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5616 	u64_stats_inc(&rxq_stats->napi.poll);
5617 	u64_stats_update_end(&rxq_stats->napi_syncp);
5618 
5619 	txq_stats = &priv->xstats.txq_stats[chan];
5620 	u64_stats_update_begin(&txq_stats->napi_syncp);
5621 	u64_stats_inc(&txq_stats->napi.poll);
5622 	u64_stats_update_end(&txq_stats->napi_syncp);
5623 
5624 	tx_done = stmmac_tx_clean(priv, budget, chan);
5625 	tx_done = min(tx_done, budget);
5626 
5627 	rx_done = stmmac_rx_zc(priv, budget, chan);
5628 
5629 	rxtx_done = max(tx_done, rx_done);
5630 
	/* If either TX or RX work is not complete, return budget
	 * and keep polling
	 */
5634 	if (rxtx_done >= budget)
5635 		return budget;
5636 
5637 	/* all work done, exit the polling mode */
5638 	if (napi_complete_done(napi, rxtx_done)) {
5639 		unsigned long flags;
5640 
5641 		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work are complete,
		 * so enable both RX & TX IRQs.
		 */
5645 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5646 		spin_unlock_irqrestore(&ch->lock, flags);
5647 	}
5648 
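	/* Never report the full budget once completion was attempted */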
5649 	return min(rxtx_done, budget - 1);
5650 }
5651 
5652 /**
5653  *  stmmac_tx_timeout
5654  *  @dev : Pointer to net device structure
5655  *  @txqueue: the index of the hanging transmit queue
5656  *  Description: this function is called when a packet transmission fails to
5657  *   complete within a reasonable time. The driver will mark the error in the
5658  *   netdev structure and arrange for the device to be reset to a sane state
5659  *   in order to transmit a new packet.
5660  */
5661 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5662 {
5663 	struct stmmac_priv *priv = netdev_priv(dev);
5664 
5665 	stmmac_global_err(priv);
5666 }
5667 
5668 /**
5669  *  stmmac_set_rx_mode - entry point for multicast addressing
5670  *  @dev : pointer to the device structure
5671  *  Description:
5672  *  This function is a driver entry point which gets called by the kernel
5673  *  whenever multicast addresses must be enabled/disabled.
5674  *  Return value:
5675  *  void.
5676  */
5677 static void stmmac_set_rx_mode(struct net_device *dev)
5678 {
5679 	struct stmmac_priv *priv = netdev_priv(dev);
5680 
5681 	stmmac_set_filter(priv, priv->hw, dev);
5682 }
5683 
5684 /**
5685  *  stmmac_change_mtu - entry point to change MTU size for the device.
5686  *  @dev : device pointer.
5687  *  @new_mtu : the new MTU size for the device.
5688  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5689  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5690  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5691  *  Return value:
5692  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5693  *  file on failure.
5694  */
5695 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5696 {
5697 	struct stmmac_priv *priv = netdev_priv(dev);
5698 	int txfifosz = priv->plat->tx_fifo_size;
5699 	struct stmmac_dma_conf *dma_conf;
5700 	const int mtu = new_mtu;
5701 	int ret;
5702 
5703 	if (txfifosz == 0)
5704 		txfifosz = priv->dma_cap.tx_fifo_size;
5705 
5706 	txfifosz /= priv->plat->tx_queues_to_use;
5707 
5708 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5709 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5710 		return -EINVAL;
5711 	}
5712 
5713 	new_mtu = STMMAC_ALIGN(new_mtu);
5714 
5715 	/* If condition true, FIFO is too small or MTU too large */
5716 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5717 		return -EINVAL;
5718 
5719 	if (netif_running(dev)) {
5720 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5721 		/* Try to allocate the new DMA conf with the new mtu */
5722 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5723 		if (IS_ERR(dma_conf)) {
5724 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5725 				   mtu);
5726 			return PTR_ERR(dma_conf);
5727 		}
5728 
5729 		stmmac_release(dev);
5730 
5731 		ret = __stmmac_open(dev, dma_conf);
5732 		if (ret) {
5733 			free_dma_desc_resources(priv, dma_conf);
5734 			kfree(dma_conf);
5735 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5736 			return ret;
5737 		}
5738 
5739 		kfree(dma_conf);
5740 
5741 		stmmac_set_rx_mode(dev);
5742 	}
5743 
5744 	dev->mtu = mtu;
5745 	netdev_update_features(dev);
5746 
5747 	return 0;
5748 }
5749 
5750 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5751 					     netdev_features_t features)
5752 {
5753 	struct stmmac_priv *priv = netdev_priv(dev);
5754 
5755 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5756 		features &= ~NETIF_F_RXCSUM;
5757 
5758 	if (!priv->plat->tx_coe)
5759 		features &= ~NETIF_F_CSUM_MASK;
5760 
	/* Some GMAC devices have buggy Jumbo frame support that
	 * requires the Tx COE to be disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * TX csum insertion in the TDES and do not use SF.
	 */
5766 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5767 		features &= ~NETIF_F_CSUM_MASK;
5768 
	/* Disable TSO if requested by ethtool */
5770 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5771 		if (features & NETIF_F_TSO)
5772 			priv->tso = true;
5773 		else
5774 			priv->tso = false;
5775 	}
5776 
5777 	return features;
5778 }
5779 
5780 static int stmmac_set_features(struct net_device *netdev,
5781 			       netdev_features_t features)
5782 {
5783 	struct stmmac_priv *priv = netdev_priv(netdev);
5784 
	/* Keep the COE type only if RX csum offloading is supported */
5786 	if (features & NETIF_F_RXCSUM)
5787 		priv->hw->rx_csum = priv->plat->rx_coe;
5788 	else
5789 		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and will be
	 * fixed if there is an issue.
	 */
5793 	stmmac_rx_ipc(priv, priv->hw);
5794 
5795 	if (priv->sph_cap) {
5796 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5797 		u32 chan;
5798 
5799 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5800 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5801 	}
5802 
5803 	return 0;
5804 }
5805 
5806 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5807 {
5808 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5809 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5810 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5811 	bool *hs_enable = &fpe_cfg->hs_enable;
5812 
5813 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5814 		return;
5815 
5816 	/* If LP has sent verify mPacket, LP is FPE capable */
5817 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5818 		if (*lp_state < FPE_STATE_CAPABLE)
5819 			*lp_state = FPE_STATE_CAPABLE;
5820 
		/* If the user has requested FPE enable, respond quickly */
5822 		if (*hs_enable)
5823 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5824 						fpe_cfg,
5825 						MPACKET_RESPONSE);
5826 	}
5827 
5828 	/* If Local has sent verify mPacket, Local is FPE capable */
5829 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5830 		if (*lo_state < FPE_STATE_CAPABLE)
5831 			*lo_state = FPE_STATE_CAPABLE;
5832 	}
5833 
5834 	/* If LP has sent response mPacket, LP is entering FPE ON */
5835 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5836 		*lp_state = FPE_STATE_ENTERING_ON;
5837 
5838 	/* If Local has sent response mPacket, Local is entering FPE ON */
5839 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5840 		*lo_state = FPE_STATE_ENTERING_ON;
5841 
5842 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5843 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5844 	    priv->fpe_wq) {
5845 		queue_work(priv->fpe_wq, &priv->fpe_task);
5846 	}
5847 }
5848 
5849 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5850 {
5851 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5852 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5853 	u32 queues_count;
5854 	u32 queue;
5855 	bool xmac;
5856 
5857 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5858 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5859 
5860 	if (priv->irq_wake)
5861 		pm_wakeup_event(priv->device, 0);
5862 
5863 	if (priv->dma_cap.estsel)
5864 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5865 				      &priv->xstats, tx_cnt);
5866 
5867 	if (priv->dma_cap.fpesel) {
5868 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5869 						   priv->dev);
5870 
5871 		stmmac_fpe_event_status(priv, status);
5872 	}
5873 
	/* To handle the GMAC's own interrupts */
5875 	if ((priv->plat->has_gmac) || xmac) {
5876 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5877 
5878 		if (unlikely(status)) {
5879 			/* For LPI we need to save the tx status */
5880 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5881 				priv->tx_path_in_lpi_mode = true;
5882 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5883 				priv->tx_path_in_lpi_mode = false;
5884 		}
5885 
5886 		for (queue = 0; queue < queues_count; queue++) {
5887 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5888 							    queue);
5889 		}
5890 
5891 		/* PCS link status */
5892 		if (priv->hw->pcs &&
5893 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5894 			if (priv->xstats.pcs_link)
5895 				netif_carrier_on(priv->dev);
5896 			else
5897 				netif_carrier_off(priv->dev);
5898 		}
5899 
5900 		stmmac_timestamp_interrupt(priv, priv);
5901 	}
5902 }
5903 
5904 /**
5905  *  stmmac_interrupt - main ISR
5906  *  @irq: interrupt number.
5907  *  @dev_id: to pass the net device pointer.
5908  *  Description: this is the main driver interrupt service routine.
5909  *  It can call:
5910  *  o DMA service routine (to manage incoming frame reception and transmission
5911  *    status)
5912  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5913  *    interrupts.
5914  */
5915 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5916 {
5917 	struct net_device *dev = (struct net_device *)dev_id;
5918 	struct stmmac_priv *priv = netdev_priv(dev);
5919 
5920 	/* Check if adapter is up */
5921 	if (test_bit(STMMAC_DOWN, &priv->state))
5922 		return IRQ_HANDLED;
5923 
5924 	/* Check if a fatal error happened */
5925 	if (stmmac_safety_feat_interrupt(priv))
5926 		return IRQ_HANDLED;
5927 
5928 	/* To handle Common interrupts */
5929 	stmmac_common_interrupt(priv);
5930 
5931 	/* To handle DMA interrupts */
5932 	stmmac_dma_interrupt(priv);
5933 
5934 	return IRQ_HANDLED;
5935 }
5936 
5937 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5938 {
5939 	struct net_device *dev = (struct net_device *)dev_id;
5940 	struct stmmac_priv *priv = netdev_priv(dev);
5941 
5942 	/* Check if adapter is up */
5943 	if (test_bit(STMMAC_DOWN, &priv->state))
5944 		return IRQ_HANDLED;
5945 
5946 	/* To handle Common interrupts */
5947 	stmmac_common_interrupt(priv);
5948 
5949 	return IRQ_HANDLED;
5950 }
5951 
5952 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5953 {
5954 	struct net_device *dev = (struct net_device *)dev_id;
5955 	struct stmmac_priv *priv = netdev_priv(dev);
5956 
5957 	/* Check if adapter is up */
5958 	if (test_bit(STMMAC_DOWN, &priv->state))
5959 		return IRQ_HANDLED;
5960 
5961 	/* Check if a fatal error happened */
5962 	stmmac_safety_feat_interrupt(priv);
5963 
5964 	return IRQ_HANDLED;
5965 }
5966 
5967 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5968 {
5969 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5970 	struct stmmac_dma_conf *dma_conf;
5971 	int chan = tx_q->queue_index;
5972 	struct stmmac_priv *priv;
5973 	int status;
5974 
5975 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5976 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5977 
5978 	/* Check if adapter is up */
5979 	if (test_bit(STMMAC_DOWN, &priv->state))
5980 		return IRQ_HANDLED;
5981 
5982 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5983 
5984 	if (unlikely(status & tx_hard_error_bump_tc)) {
5985 		/* Try to bump up the dma threshold on this failure */
5986 		stmmac_bump_dma_threshold(priv, chan);
5987 	} else if (unlikely(status == tx_hard_error)) {
5988 		stmmac_tx_err(priv, chan);
5989 	}
5990 
5991 	return IRQ_HANDLED;
5992 }
5993 
5994 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5995 {
5996 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5997 	struct stmmac_dma_conf *dma_conf;
5998 	int chan = rx_q->queue_index;
5999 	struct stmmac_priv *priv;
6000 
6001 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6002 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6003 
6004 	/* Check if adapter is up */
6005 	if (test_bit(STMMAC_DOWN, &priv->state))
6006 		return IRQ_HANDLED;
6007 
6008 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6009 
6010 	return IRQ_HANDLED;
6011 }
6012 
6013 /**
6014  *  stmmac_ioctl - Entry point for the Ioctl
6015  *  @dev: Device pointer.
 *  @rq: An IOCTL-specific structure that can contain a pointer to
6017  *  a proprietary structure used to pass information to the driver.
6018  *  @cmd: IOCTL command
6019  *  Description:
6020  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6021  */
6022 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6023 {
	struct stmmac_priv *priv = netdev_priv(dev);
6025 	int ret = -EOPNOTSUPP;
6026 
6027 	if (!netif_running(dev))
6028 		return -EINVAL;
6029 
6030 	switch (cmd) {
6031 	case SIOCGMIIPHY:
6032 	case SIOCGMIIREG:
6033 	case SIOCSMIIREG:
6034 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6035 		break;
6036 	case SIOCSHWTSTAMP:
6037 		ret = stmmac_hwtstamp_set(dev, rq);
6038 		break;
6039 	case SIOCGHWTSTAMP:
6040 		ret = stmmac_hwtstamp_get(dev, rq);
6041 		break;
6042 	default:
6043 		break;
6044 	}
6045 
6046 	return ret;
6047 }
6048 
6049 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6050 				    void *cb_priv)
6051 {
6052 	struct stmmac_priv *priv = cb_priv;
6053 	int ret = -EOPNOTSUPP;
6054 
6055 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6056 		return ret;
6057 
6058 	__stmmac_disable_all_queues(priv);
6059 
6060 	switch (type) {
6061 	case TC_SETUP_CLSU32:
6062 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6063 		break;
6064 	case TC_SETUP_CLSFLOWER:
6065 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6066 		break;
6067 	default:
6068 		break;
6069 	}
6070 
6071 	stmmac_enable_all_queues(priv);
6072 	return ret;
6073 }
6074 
6075 static LIST_HEAD(stmmac_block_cb_list);
6076 
6077 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6078 			   void *type_data)
6079 {
6080 	struct stmmac_priv *priv = netdev_priv(ndev);
6081 
6082 	switch (type) {
6083 	case TC_QUERY_CAPS:
6084 		return stmmac_tc_query_caps(priv, priv, type_data);
6085 	case TC_SETUP_BLOCK:
6086 		return flow_block_cb_setup_simple(type_data,
6087 						  &stmmac_block_cb_list,
6088 						  stmmac_setup_tc_block_cb,
6089 						  priv, priv, true);
6090 	case TC_SETUP_QDISC_CBS:
6091 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6092 	case TC_SETUP_QDISC_TAPRIO:
6093 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6094 	case TC_SETUP_QDISC_ETF:
6095 		return stmmac_tc_setup_etf(priv, priv, type_data);
6096 	default:
6097 		return -EOPNOTSUPP;
6098 	}
6099 }
6100 
6101 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6102 			       struct net_device *sb_dev)
6103 {
6104 	int gso = skb_shinfo(skb)->gso_type;
6105 
6106 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6107 		/*
6108 		 * There is no way to determine the number of TSO/USO
6109 		 * capable Queues. Let's use always the Queue 0
6110 		 * because if TSO/USO is supported then at least this
6111 		 * one will be capable.
6112 		 */
6113 		return 0;
6114 	}
6115 
6116 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6117 }
6118 
6119 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6120 {
6121 	struct stmmac_priv *priv = netdev_priv(ndev);
6122 	int ret = 0;
6123 
6124 	ret = pm_runtime_resume_and_get(priv->device);
6125 	if (ret < 0)
6126 		return ret;
6127 
6128 	ret = eth_mac_addr(ndev, addr);
6129 	if (ret)
6130 		goto set_mac_error;
6131 
6132 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6133 
6134 set_mac_error:
6135 	pm_runtime_put(priv->device);
6136 
6137 	return ret;
6138 }
6139 
6140 #ifdef CONFIG_DEBUG_FS
6141 static struct dentry *stmmac_fs_dir;
6142 
6143 static void sysfs_display_ring(void *head, int size, int extend_desc,
6144 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6145 {
6146 	int i;
6147 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6148 	struct dma_desc *p = (struct dma_desc *)head;
6149 	dma_addr_t dma_addr;
6150 
6151 	for (i = 0; i < size; i++) {
6152 		if (extend_desc) {
6153 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6154 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6155 				   i, &dma_addr,
6156 				   le32_to_cpu(ep->basic.des0),
6157 				   le32_to_cpu(ep->basic.des1),
6158 				   le32_to_cpu(ep->basic.des2),
6159 				   le32_to_cpu(ep->basic.des3));
6160 			ep++;
6161 		} else {
6162 			dma_addr = dma_phy_addr + i * sizeof(*p);
6163 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6164 				   i, &dma_addr,
6165 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6166 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6167 			p++;
6168 		}
6169 		seq_printf(seq, "\n");
6170 	}
6171 }
6172 
6173 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6174 {
6175 	struct net_device *dev = seq->private;
6176 	struct stmmac_priv *priv = netdev_priv(dev);
6177 	u32 rx_count = priv->plat->rx_queues_to_use;
6178 	u32 tx_count = priv->plat->tx_queues_to_use;
6179 	u32 queue;
6180 
6181 	if ((dev->flags & IFF_UP) == 0)
6182 		return 0;
6183 
6184 	for (queue = 0; queue < rx_count; queue++) {
6185 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6186 
6187 		seq_printf(seq, "RX Queue %d:\n", queue);
6188 
6189 		if (priv->extend_desc) {
6190 			seq_printf(seq, "Extended descriptor ring:\n");
6191 			sysfs_display_ring((void *)rx_q->dma_erx,
6192 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6193 		} else {
6194 			seq_printf(seq, "Descriptor ring:\n");
6195 			sysfs_display_ring((void *)rx_q->dma_rx,
6196 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6197 		}
6198 	}
6199 
6200 	for (queue = 0; queue < tx_count; queue++) {
6201 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6202 
6203 		seq_printf(seq, "TX Queue %d:\n", queue);
6204 
6205 		if (priv->extend_desc) {
6206 			seq_printf(seq, "Extended descriptor ring:\n");
6207 			sysfs_display_ring((void *)tx_q->dma_etx,
6208 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6209 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6210 			seq_printf(seq, "Descriptor ring:\n");
6211 			sysfs_display_ring((void *)tx_q->dma_tx,
6212 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6213 		}
6214 	}
6215 
6216 	return 0;
6217 }
6218 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6219 
6220 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6221 {
6222 	static const char * const dwxgmac_timestamp_source[] = {
6223 		"None",
6224 		"Internal",
6225 		"External",
6226 		"Both",
6227 	};
6228 	static const char * const dwxgmac_safety_feature_desc[] = {
6229 		"No",
6230 		"All Safety Features with ECC and Parity",
6231 		"All Safety Features without ECC or Parity",
6232 		"All Safety Features with Parity Only",
6233 		"ECC Only",
6234 		"UNDEFINED",
6235 		"UNDEFINED",
6236 		"UNDEFINED",
6237 	};
6238 	struct net_device *dev = seq->private;
6239 	struct stmmac_priv *priv = netdev_priv(dev);
6240 
6241 	if (!priv->hw_cap_support) {
6242 		seq_printf(seq, "DMA HW features not supported\n");
6243 		return 0;
6244 	}
6245 
6246 	seq_printf(seq, "==============================\n");
6247 	seq_printf(seq, "\tDMA HW features\n");
6248 	seq_printf(seq, "==============================\n");
6249 
6250 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6251 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6252 	seq_printf(seq, "\t1000 Mbps: %s\n",
6253 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6254 	seq_printf(seq, "\tHalf duplex: %s\n",
6255 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6256 	if (priv->plat->has_xgmac) {
6257 		seq_printf(seq,
6258 			   "\tNumber of Additional MAC address registers: %d\n",
6259 			   priv->dma_cap.multi_addr);
6260 	} else {
6261 		seq_printf(seq, "\tHash Filter: %s\n",
6262 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6263 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6264 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6265 	}
6266 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6267 		   (priv->dma_cap.pcs) ? "Y" : "N");
6268 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6269 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6270 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6271 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6272 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6273 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6274 	seq_printf(seq, "\tRMON module: %s\n",
6275 		   (priv->dma_cap.rmon) ? "Y" : "N");
6276 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6277 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6278 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6279 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6280 	if (priv->plat->has_xgmac)
6281 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6282 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6283 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6284 		   (priv->dma_cap.eee) ? "Y" : "N");
6285 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6286 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6287 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6288 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6289 	    priv->plat->has_xgmac) {
6290 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6291 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6292 	} else {
6293 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6294 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6295 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6296 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6297 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6298 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6299 	}
6300 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6301 		   priv->dma_cap.number_rx_channel);
6302 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6303 		   priv->dma_cap.number_tx_channel);
6304 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6305 		   priv->dma_cap.number_rx_queues);
6306 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6307 		   priv->dma_cap.number_tx_queues);
6308 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6309 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6310 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6311 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6312 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6313 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6314 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6315 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6316 		   priv->dma_cap.pps_out_num);
6317 	seq_printf(seq, "\tSafety Features: %s\n",
6318 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6319 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6320 		   priv->dma_cap.frpsel ? "Y" : "N");
6321 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6322 		   priv->dma_cap.host_dma_width);
6323 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6324 		   priv->dma_cap.rssen ? "Y" : "N");
6325 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6326 		   priv->dma_cap.vlhash ? "Y" : "N");
6327 	seq_printf(seq, "\tSplit Header: %s\n",
6328 		   priv->dma_cap.sphen ? "Y" : "N");
6329 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6330 		   priv->dma_cap.vlins ? "Y" : "N");
6331 	seq_printf(seq, "\tDouble VLAN: %s\n",
6332 		   priv->dma_cap.dvlan ? "Y" : "N");
6333 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6334 		   priv->dma_cap.l3l4fnum);
6335 	seq_printf(seq, "\tARP Offloading: %s\n",
6336 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6337 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6338 		   priv->dma_cap.estsel ? "Y" : "N");
6339 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6340 		   priv->dma_cap.fpesel ? "Y" : "N");
6341 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6342 		   priv->dma_cap.tbssel ? "Y" : "N");
6343 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6344 		   priv->dma_cap.tbs_ch_num);
6345 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6346 		   priv->dma_cap.sgfsel ? "Y" : "N");
6347 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6348 		   BIT(priv->dma_cap.ttsfd) >> 1);
6349 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6350 		   priv->dma_cap.numtc);
6351 	seq_printf(seq, "\tDCB Feature: %s\n",
6352 		   priv->dma_cap.dcben ? "Y" : "N");
6353 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6354 		   priv->dma_cap.advthword ? "Y" : "N");
6355 	seq_printf(seq, "\tPTP Offload: %s\n",
6356 		   priv->dma_cap.ptoen ? "Y" : "N");
6357 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6358 		   priv->dma_cap.osten ? "Y" : "N");
6359 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6360 		   priv->dma_cap.pfcen ? "Y" : "N");
6361 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6362 		   BIT(priv->dma_cap.frpes) << 6);
6363 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6364 		   BIT(priv->dma_cap.frpbs) << 6);
6365 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6366 		   priv->dma_cap.frppipe_num);
6367 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6368 		   priv->dma_cap.nrvf_num ?
6369 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6370 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6371 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6372 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6373 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6374 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6375 		   priv->dma_cap.cbtisel ? "Y" : "N");
6376 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6377 		   priv->dma_cap.aux_snapshot_n);
6378 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6379 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6380 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6381 		   priv->dma_cap.edma ? "Y" : "N");
6382 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6383 		   priv->dma_cap.ediffc ? "Y" : "N");
6384 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6385 		   priv->dma_cap.vxn ? "Y" : "N");
6386 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6387 		   priv->dma_cap.dbgmem ? "Y" : "N");
6388 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6389 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6390 	return 0;
6391 }
6392 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6393 
6394 /* Use network device events to rename debugfs file entries.
6395  */
6396 static int stmmac_device_event(struct notifier_block *unused,
6397 			       unsigned long event, void *ptr)
6398 {
6399 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6400 	struct stmmac_priv *priv = netdev_priv(dev);
6401 
6402 	if (dev->netdev_ops != &stmmac_netdev_ops)
6403 		goto done;
6404 
6405 	switch (event) {
6406 	case NETDEV_CHANGENAME:
6407 		if (priv->dbgfs_dir)
6408 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6409 							 priv->dbgfs_dir,
6410 							 stmmac_fs_dir,
6411 							 dev->name);
6412 		break;
6413 	}
6414 done:
6415 	return NOTIFY_DONE;
6416 }
6417 
6418 static struct notifier_block stmmac_notifier = {
6419 	.notifier_call = stmmac_device_event,
6420 };
6421 
6422 static void stmmac_init_fs(struct net_device *dev)
6423 {
6424 	struct stmmac_priv *priv = netdev_priv(dev);
6425 
6426 	rtnl_lock();
6427 
6428 	/* Create per netdev entries */
6429 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6430 
6431 	/* Entry to report DMA RX/TX rings */
6432 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6433 			    &stmmac_rings_status_fops);
6434 
6435 	/* Entry to report the DMA HW features */
6436 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6437 			    &stmmac_dma_cap_fops);
6438 
6439 	rtnl_unlock();
6440 }
6441 
6442 static void stmmac_exit_fs(struct net_device *dev)
6443 {
6444 	struct stmmac_priv *priv = netdev_priv(dev);
6445 
6446 	debugfs_remove_recursive(priv->dbgfs_dir);
6447 }
6448 #endif /* CONFIG_DEBUG_FS */
6449 
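/* Bit-serial CRC32 (reflected polynomial) over the 12 valid VLAN ID bits,
 * used to build the MAC's VLAN hash filter value.
 */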
6450 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6451 {
6452 	unsigned char *data = (unsigned char *)&vid_le;
6453 	unsigned char data_byte = 0;
6454 	u32 crc = ~0x0;
6455 	u32 temp = 0;
6456 	int i, bits;
6457 
6458 	bits = get_bitmask_order(VLAN_VID_MASK);
6459 	for (i = 0; i < bits; i++) {
6460 		if ((i % 8) == 0)
6461 			data_byte = data[i / 8];
6462 
6463 		temp = ((crc & 1) ^ data_byte) & 1;
6464 		crc >>= 1;
6465 		data_byte >>= 1;
6466 
6467 		if (temp)
6468 			crc ^= 0xedb88320;
6469 	}
6470 
6471 	return crc;
6472 }
6473 
6474 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6475 {
6476 	u32 crc, hash = 0;
6477 	__le16 pmatch = 0;
6478 	int count = 0;
6479 	u16 vid = 0;
6480 
6481 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6482 		__le16 vid_le = cpu_to_le16(vid);
6483 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6484 		hash |= (1 << crc);
6485 		count++;
6486 	}
6487 
6488 	if (!priv->dma_cap.vlhash) {
6489 		if (count > 2) /* VID = 0 always passes filter */
6490 			return -EOPNOTSUPP;
6491 
6492 		pmatch = cpu_to_le16(vid);
6493 		hash = 0;
6494 	}
6495 
6496 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6497 }
6498 
6499 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6500 {
6501 	struct stmmac_priv *priv = netdev_priv(ndev);
6502 	bool is_double = false;
6503 	int ret;
6504 
6505 	ret = pm_runtime_resume_and_get(priv->device);
6506 	if (ret < 0)
6507 		return ret;
6508 
6509 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6510 		is_double = true;
6511 
6512 	set_bit(vid, priv->active_vlans);
6513 	ret = stmmac_vlan_update(priv, is_double);
6514 	if (ret) {
6515 		clear_bit(vid, priv->active_vlans);
6516 		goto err_pm_put;
6517 	}
6518 
6519 	if (priv->hw->num_vlan) {
6520 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6521 		if (ret)
6522 			goto err_pm_put;
6523 	}
6524 err_pm_put:
6525 	pm_runtime_put(priv->device);
6526 
6527 	return ret;
6528 }
6529 
6530 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6531 {
6532 	struct stmmac_priv *priv = netdev_priv(ndev);
6533 	bool is_double = false;
6534 	int ret;
6535 
6536 	ret = pm_runtime_resume_and_get(priv->device);
6537 	if (ret < 0)
6538 		return ret;
6539 
6540 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6541 		is_double = true;
6542 
6543 	clear_bit(vid, priv->active_vlans);
6544 
6545 	if (priv->hw->num_vlan) {
6546 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6547 		if (ret)
6548 			goto del_vlan_error;
6549 	}
6550 
6551 	ret = stmmac_vlan_update(priv, is_double);
6552 
6553 del_vlan_error:
6554 	pm_runtime_put(priv->device);
6555 
6556 	return ret;
6557 }
6558 
6559 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6560 {
6561 	struct stmmac_priv *priv = netdev_priv(dev);
6562 
6563 	switch (bpf->command) {
6564 	case XDP_SETUP_PROG:
6565 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6566 	case XDP_SETUP_XSK_POOL:
6567 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6568 					     bpf->xsk.queue_id);
6569 	default:
6570 		return -EOPNOTSUPP;
6571 	}
6572 }
6573 
6574 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6575 			   struct xdp_frame **frames, u32 flags)
6576 {
6577 	struct stmmac_priv *priv = netdev_priv(dev);
6578 	int cpu = smp_processor_id();
6579 	struct netdev_queue *nq;
6580 	int i, nxmit = 0;
6581 	int queue;
6582 
6583 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6584 		return -ENETDOWN;
6585 
6586 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6587 		return -EINVAL;
6588 
6589 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6590 	nq = netdev_get_tx_queue(priv->dev, queue);
6591 
6592 	__netif_tx_lock(nq, cpu);
	/* Avoid a TX timeout as we are sharing the queue with the slow path */
6594 	txq_trans_cond_update(nq);
6595 
6596 	for (i = 0; i < num_frames; i++) {
6597 		int res;
6598 
6599 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6600 		if (res == STMMAC_XDP_CONSUMED)
6601 			break;
6602 
6603 		nxmit++;
6604 	}
6605 
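	/* On XDP_XMIT_FLUSH, push the tail pointer so the DMA sees the new
	 * descriptors and arm the TX coalescing timer.
	 */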
6606 	if (flags & XDP_XMIT_FLUSH) {
6607 		stmmac_flush_tx_descriptors(priv, queue);
6608 		stmmac_tx_timer_arm(priv, queue);
6609 	}
6610 
6611 	__netif_tx_unlock(nq);
6612 
6613 	return nxmit;
6614 }
6615 
6616 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6617 {
6618 	struct stmmac_channel *ch = &priv->channel[queue];
6619 	unsigned long flags;
6620 
6621 	spin_lock_irqsave(&ch->lock, flags);
6622 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6623 	spin_unlock_irqrestore(&ch->lock, flags);
6624 
6625 	stmmac_stop_rx_dma(priv, queue);
6626 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6627 }
6628 
6629 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6630 {
6631 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6632 	struct stmmac_channel *ch = &priv->channel[queue];
6633 	unsigned long flags;
6634 	u32 buf_size;
6635 	int ret;
6636 
6637 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6638 	if (ret) {
6639 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6640 		return;
6641 	}
6642 
6643 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6644 	if (ret) {
6645 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6646 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6647 		return;
6648 	}
6649 
6650 	stmmac_reset_rx_queue(priv, queue);
6651 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6652 
6653 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6654 			    rx_q->dma_rx_phy, rx_q->queue_index);
6655 
6656 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6657 			     sizeof(struct dma_desc));
6658 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6659 			       rx_q->rx_tail_addr, rx_q->queue_index);
6660 
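	/* Program the DMA buffer size: the XSK pool frame size when a
	 * zero-copy pool is attached, the default buffer size otherwise.
	 */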
6661 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6662 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6663 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6664 				      buf_size,
6665 				      rx_q->queue_index);
6666 	} else {
6667 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6668 				      priv->dma_conf.dma_buf_sz,
6669 				      rx_q->queue_index);
6670 	}
6671 
6672 	stmmac_start_rx_dma(priv, queue);
6673 
6674 	spin_lock_irqsave(&ch->lock, flags);
6675 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6676 	spin_unlock_irqrestore(&ch->lock, flags);
6677 }
6678 
6679 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6680 {
6681 	struct stmmac_channel *ch = &priv->channel[queue];
6682 	unsigned long flags;
6683 
6684 	spin_lock_irqsave(&ch->lock, flags);
6685 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6686 	spin_unlock_irqrestore(&ch->lock, flags);
6687 
6688 	stmmac_stop_tx_dma(priv, queue);
6689 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6690 }
6691 
6692 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6693 {
6694 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6695 	struct stmmac_channel *ch = &priv->channel[queue];
6696 	unsigned long flags;
6697 	int ret;
6698 
6699 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6700 	if (ret) {
6701 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6702 		return;
6703 	}
6704 
	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6706 	if (ret) {
6707 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6708 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6709 		return;
6710 	}
6711 
6712 	stmmac_reset_tx_queue(priv, queue);
6713 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6714 
6715 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6716 			    tx_q->dma_tx_phy, tx_q->queue_index);
6717 
6718 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6719 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6720 
6721 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6722 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6723 			       tx_q->tx_tail_addr, tx_q->queue_index);
6724 
6725 	stmmac_start_tx_dma(priv, queue);
6726 
6727 	spin_lock_irqsave(&ch->lock, flags);
6728 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6729 	spin_unlock_irqrestore(&ch->lock, flags);
6730 }
6731 
6732 void stmmac_xdp_release(struct net_device *dev)
6733 {
6734 	struct stmmac_priv *priv = netdev_priv(dev);
6735 	u32 chan;
6736 
6737 	/* Ensure tx function is not running */
6738 	netif_tx_disable(dev);
6739 
6740 	/* Disable NAPI process */
6741 	stmmac_disable_all_queues(priv);
6742 
6743 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6744 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6745 
6746 	/* Free the IRQ lines */
6747 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6748 
6749 	/* Stop TX/RX DMA channels */
6750 	stmmac_stop_all_dma(priv);
6751 
6752 	/* Release and free the Rx/Tx resources */
6753 	free_dma_desc_resources(priv, &priv->dma_conf);
6754 
6755 	/* Disable the MAC Rx/Tx */
6756 	stmmac_mac_set(priv, priv->ioaddr, false);
6757 
6758 	/* set trans_start so we don't get spurious
6759 	 * watchdogs during reset
6760 	 */
6761 	netif_trans_update(dev);
6762 	netif_carrier_off(dev);
6763 }
6764 
6765 int stmmac_xdp_open(struct net_device *dev)
6766 {
6767 	struct stmmac_priv *priv = netdev_priv(dev);
6768 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6769 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6770 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6771 	struct stmmac_rx_queue *rx_q;
6772 	struct stmmac_tx_queue *tx_q;
6773 	u32 buf_size;
6774 	bool sph_en;
6775 	u32 chan;
6776 	int ret;
6777 
6778 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6779 	if (ret < 0) {
6780 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6781 			   __func__);
6782 		goto dma_desc_error;
6783 	}
6784 
6785 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6786 	if (ret < 0) {
6787 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6788 			   __func__);
6789 		goto init_error;
6790 	}
6791 
6792 	stmmac_reset_queues_param(priv);
6793 
6794 	/* DMA CSR Channel configuration */
6795 	for (chan = 0; chan < dma_csr_ch; chan++) {
6796 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6797 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6798 	}
6799 
6800 	/* Adjust Split header */
6801 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6802 
6803 	/* DMA RX Channel Configuration */
6804 	for (chan = 0; chan < rx_cnt; chan++) {
6805 		rx_q = &priv->dma_conf.rx_queue[chan];
6806 
6807 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6808 				    rx_q->dma_rx_phy, chan);
6809 
6810 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6811 				     (rx_q->buf_alloc_num *
6812 				      sizeof(struct dma_desc));
6813 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6814 				       rx_q->rx_tail_addr, chan);
6815 
6816 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6817 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6818 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6819 					      buf_size,
6820 					      rx_q->queue_index);
6821 		} else {
6822 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6823 					      priv->dma_conf.dma_buf_sz,
6824 					      rx_q->queue_index);
6825 		}
6826 
6827 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6828 	}
6829 
6830 	/* DMA TX Channel Configuration */
6831 	for (chan = 0; chan < tx_cnt; chan++) {
6832 		tx_q = &priv->dma_conf.tx_queue[chan];
6833 
6834 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6835 				    tx_q->dma_tx_phy, chan);
6836 
6837 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6838 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6839 				       tx_q->tx_tail_addr, chan);
6840 
6841 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6842 		tx_q->txtimer.function = stmmac_tx_timer;
6843 	}
6844 
6845 	/* Enable the MAC Rx/Tx */
6846 	stmmac_mac_set(priv, priv->ioaddr, true);
6847 
6848 	/* Start Rx & Tx DMA Channels */
6849 	stmmac_start_all_dma(priv);
6850 
6851 	ret = stmmac_request_irq(dev);
6852 	if (ret)
6853 		goto irq_error;
6854 
	/* Enable NAPI process */
6856 	stmmac_enable_all_queues(priv);
6857 	netif_carrier_on(dev);
6858 	netif_tx_start_all_queues(dev);
6859 	stmmac_enable_all_dma_irq(priv);
6860 
6861 	return 0;
6862 
6863 irq_error:
6864 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6865 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6866 
6867 	stmmac_hw_teardown(dev);
6868 init_error:
6869 	free_dma_desc_resources(priv, &priv->dma_conf);
6870 dma_desc_error:
6871 	return ret;
6872 }
6873 
6874 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6875 {
6876 	struct stmmac_priv *priv = netdev_priv(dev);
6877 	struct stmmac_rx_queue *rx_q;
6878 	struct stmmac_tx_queue *tx_q;
6879 	struct stmmac_channel *ch;
6880 
6881 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6882 	    !netif_carrier_ok(priv->dev))
6883 		return -ENETDOWN;
6884 
6885 	if (!stmmac_xdp_is_enabled(priv))
6886 		return -EINVAL;
6887 
6888 	if (queue >= priv->plat->rx_queues_to_use ||
6889 	    queue >= priv->plat->tx_queues_to_use)
6890 		return -EINVAL;
6891 
6892 	rx_q = &priv->dma_conf.rx_queue[queue];
6893 	tx_q = &priv->dma_conf.tx_queue[queue];
6894 	ch = &priv->channel[queue];
6895 
6896 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6897 		return -EINVAL;
6898 
6899 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have a per-DMA channel SW interrupt,
		 * so we schedule NAPI straight away.
		 */
6903 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6904 			__napi_schedule(&ch->rxtx_napi);
6905 	}
6906 
6907 	return 0;
6908 }
6909 
6910 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6911 {
6912 	struct stmmac_priv *priv = netdev_priv(dev);
6913 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6914 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6915 	unsigned int start;
6916 	int q;
6917 
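	/* Fold the per-queue 64-bit counters into the netdev stats under
	 * their u64_stats sync points so readers never see torn values.
	 */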
6918 	for (q = 0; q < tx_cnt; q++) {
6919 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6920 		u64 tx_packets;
6921 		u64 tx_bytes;
6922 
6923 		do {
6924 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
6925 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
6926 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
6927 		do {
6928 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
6929 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
6930 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
6931 
6932 		stats->tx_packets += tx_packets;
6933 		stats->tx_bytes += tx_bytes;
6934 	}
6935 
6936 	for (q = 0; q < rx_cnt; q++) {
6937 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6938 		u64 rx_packets;
6939 		u64 rx_bytes;
6940 
6941 		do {
6942 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
6943 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
6944 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
6945 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
6946 
6947 		stats->rx_packets += rx_packets;
6948 		stats->rx_bytes += rx_bytes;
6949 	}
6950 
6951 	stats->rx_dropped = priv->xstats.rx_dropped;
6952 	stats->rx_errors = priv->xstats.rx_errors;
6953 	stats->tx_dropped = priv->xstats.tx_dropped;
6954 	stats->tx_errors = priv->xstats.tx_errors;
6955 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6956 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6957 	stats->rx_length_errors = priv->xstats.rx_length;
6958 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6959 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6960 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6961 }
6962 
6963 static const struct net_device_ops stmmac_netdev_ops = {
6964 	.ndo_open = stmmac_open,
6965 	.ndo_start_xmit = stmmac_xmit,
6966 	.ndo_stop = stmmac_release,
6967 	.ndo_change_mtu = stmmac_change_mtu,
6968 	.ndo_fix_features = stmmac_fix_features,
6969 	.ndo_set_features = stmmac_set_features,
6970 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6971 	.ndo_tx_timeout = stmmac_tx_timeout,
6972 	.ndo_eth_ioctl = stmmac_ioctl,
6973 	.ndo_get_stats64 = stmmac_get_stats64,
6974 	.ndo_setup_tc = stmmac_setup_tc,
6975 	.ndo_select_queue = stmmac_select_queue,
6976 	.ndo_set_mac_address = stmmac_set_mac_address,
6977 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6978 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6979 	.ndo_bpf = stmmac_bpf,
6980 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6981 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6982 };
6983 
6984 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6985 {
6986 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6987 		return;
6988 	if (test_bit(STMMAC_DOWN, &priv->state))
6989 		return;
6990 
6991 	netdev_err(priv->dev, "Reset adapter.\n");
6992 
6993 	rtnl_lock();
6994 	netif_trans_update(priv->dev);
6995 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6996 		usleep_range(1000, 2000);
6997 
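	/* Recover by fully closing and reopening the interface under RTNL;
	 * STMMAC_DOWN marks the device as down for the duration of the
	 * reinit.
	 */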
6998 	set_bit(STMMAC_DOWN, &priv->state);
6999 	dev_close(priv->dev);
7000 	dev_open(priv->dev, NULL);
7001 	clear_bit(STMMAC_DOWN, &priv->state);
7002 	clear_bit(STMMAC_RESETING, &priv->state);
7003 	rtnl_unlock();
7004 }
7005 
7006 static void stmmac_service_task(struct work_struct *work)
7007 {
7008 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7009 			service_task);
7010 
7011 	stmmac_reset_subtask(priv);
7012 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7013 }
7014 
7015 /**
7016  *  stmmac_hw_init - Init the MAC device
7017  *  @priv: driver private structure
 *  Description: this function configures the MAC device according to
 *  some platform parameters or the HW capability register. It prepares
 *  the driver to use either ring or chain mode and either enhanced or
 *  normal descriptors.
7022  */
7023 static int stmmac_hw_init(struct stmmac_priv *priv)
7024 {
7025 	int ret;
7026 
	/* dwmac-sun8i only works in chain mode */
7028 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7029 		chain_mode = 1;
7030 	priv->chain_mode = chain_mode;
7031 
7032 	/* Initialize HW Interface */
7033 	ret = stmmac_hwif_init(priv);
7034 	if (ret)
7035 		return ret;
7036 
	/* Get the HW capabilities (GMAC cores newer than 3.50a) */
7038 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7039 	if (priv->hw_cap_support) {
7040 		dev_info(priv->device, "DMA HW capability register supported\n");
7041 
		/* Override some gmac/dma configuration fields passed through
		 * the platform (e.g. enh_desc, tx_coe) with the values from
		 * the HW capability register, when supported.
		 */
7047 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7048 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7049 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7050 		priv->hw->pmt = priv->plat->pmt;
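		/* The capability field encodes the hash table size as a
		 * power of two: BIT(hash_tb_sz) << 5 typically gives 64,
		 * 128 or 256 multicast filter bins.
		 */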
7051 		if (priv->dma_cap.hash_tb_sz) {
7052 			priv->hw->multicast_filter_bins =
7053 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7054 			priv->hw->mcast_bits_log2 =
7055 					ilog2(priv->hw->multicast_filter_bins);
7056 		}
7057 
7058 		/* TXCOE doesn't work in thresh DMA mode */
7059 		if (priv->plat->force_thresh_dma_mode)
7060 			priv->plat->tx_coe = 0;
7061 		else
7062 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7063 
		/* On GMAC4, rx_coe comes from the HW capability register. */
7065 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7066 
7067 		if (priv->dma_cap.rx_coe_type2)
7068 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7069 		else if (priv->dma_cap.rx_coe_type1)
7070 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7071 
7072 	} else {
7073 		dev_info(priv->device, "No HW DMA feature register supported\n");
7074 	}
7075 
7076 	if (priv->plat->rx_coe) {
7077 		priv->hw->rx_csum = priv->plat->rx_coe;
7078 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7079 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7080 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7081 	}
7082 	if (priv->plat->tx_coe)
7083 		dev_info(priv->device, "TX Checksum insertion supported\n");
7084 
7085 	if (priv->plat->pmt) {
7086 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7087 		device_set_wakeup_capable(priv->device, 1);
7088 	}
7089 
7090 	if (priv->dma_cap.tsoen)
7091 		dev_info(priv->device, "TSO supported\n");
7092 
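	/* Optionally steer packets failing the VLAN filter to a dedicated
	 * RX queue, when the platform requests it.
	 */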
7093 	priv->hw->vlan_fail_q_en =
7094 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7095 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7096 
7097 	/* Run HW quirks, if any */
7098 	if (priv->hwif_quirks) {
7099 		ret = priv->hwif_quirks(priv);
7100 		if (ret)
7101 			return ret;
7102 	}
7103 
	/* Rx Watchdog is available in cores newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by passing the
	 * riwt_off field from the platform.
	 */
7109 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7110 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7111 		priv->use_riwt = 1;
7112 		dev_info(priv->device,
7113 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7114 	}
7115 
7116 	return 0;
7117 }
7118 
7119 static void stmmac_napi_add(struct net_device *dev)
7120 {
7121 	struct stmmac_priv *priv = netdev_priv(dev);
7122 	u32 queue, maxq;
7123 
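	/* Each channel hosts the RX and/or TX NAPI context for its queue
	 * index, so iterate over the larger of the two queue counts.
	 */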
7124 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7125 
7126 	for (queue = 0; queue < maxq; queue++) {
7127 		struct stmmac_channel *ch = &priv->channel[queue];
7128 
7129 		ch->priv_data = priv;
7130 		ch->index = queue;
7131 		spin_lock_init(&ch->lock);
7132 
		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_add_tx(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx);
7140 		if (queue < priv->plat->rx_queues_to_use &&
7141 		    queue < priv->plat->tx_queues_to_use) {
7142 			netif_napi_add(dev, &ch->rxtx_napi,
7143 				       stmmac_napi_poll_rxtx);
7144 		}
7145 	}
7146 }
7147 
7148 static void stmmac_napi_del(struct net_device *dev)
7149 {
7150 	struct stmmac_priv *priv = netdev_priv(dev);
7151 	u32 queue, maxq;
7152 
7153 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7154 
7155 	for (queue = 0; queue < maxq; queue++) {
7156 		struct stmmac_channel *ch = &priv->channel[queue];
7157 
7158 		if (queue < priv->plat->rx_queues_to_use)
7159 			netif_napi_del(&ch->rx_napi);
7160 		if (queue < priv->plat->tx_queues_to_use)
7161 			netif_napi_del(&ch->tx_napi);
7162 		if (queue < priv->plat->rx_queues_to_use &&
7163 		    queue < priv->plat->tx_queues_to_use) {
7164 			netif_napi_del(&ch->rxtx_napi);
7165 		}
7166 	}
7167 }
7168 
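/* Reconfigure the number of RX/TX queues at run-time (e.g. from the ethtool
 * channels path): tear the interface down if it is running, rebuild the NAPI
 * contexts for the new counts and reopen the interface.
 */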
7169 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7170 {
7171 	struct stmmac_priv *priv = netdev_priv(dev);
7172 	int ret = 0, i;
7173 	int max_speed;
7174 
7175 	if (netif_running(dev))
7176 		stmmac_release(dev);
7177 
7178 	stmmac_napi_del(dev);
7179 
7180 	priv->plat->rx_queues_to_use = rx_cnt;
7181 	priv->plat->tx_queues_to_use = tx_cnt;
7182 	if (!netif_is_rxfh_configured(dev))
7183 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7184 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7185 									rx_cnt);
7186 
7187 	stmmac_mac_phylink_get_caps(priv);
7188 
7189 	max_speed = priv->plat->max_speed;
7190 	if (max_speed)
7191 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
7192 
7193 	stmmac_napi_add(dev);
7194 
7195 	if (netif_running(dev))
7196 		ret = stmmac_open(dev);
7197 
7198 	return ret;
7199 }
7200 
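/* Resize the RX/TX descriptor rings at run-time (e.g. from the ethtool
 * ringparam path) by bouncing the interface when it is up.
 */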
7201 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7202 {
7203 	struct stmmac_priv *priv = netdev_priv(dev);
7204 	int ret = 0;
7205 
7206 	if (netif_running(dev))
7207 		stmmac_release(dev);
7208 
7209 	priv->dma_conf.dma_rx_size = rx_size;
7210 	priv->dma_conf.dma_tx_size = tx_size;
7211 
7212 	if (netif_running(dev))
7213 		ret = stmmac_open(dev);
7214 
7215 	return ret;
7216 }
7217 
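/* Frame Preemption (FPE) verify handshake: poll for up to 20 x 500ms waiting
 * for both the local and the link-partner state machines to reach the ON
 * state, re-sending verify mPackets while the partner has not responded.
 */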
#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7219 static void stmmac_fpe_lp_task(struct work_struct *work)
7220 {
7221 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7222 						fpe_task);
7223 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7224 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7225 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7226 	bool *hs_enable = &fpe_cfg->hs_enable;
7227 	bool *enable = &fpe_cfg->enable;
7228 	int retries = 20;
7229 
7230 	while (retries-- > 0) {
7231 		/* Bail out immediately if FPE handshake is OFF */
7232 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7233 			break;
7234 
7235 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7236 		    *lp_state == FPE_STATE_ENTERING_ON) {
7237 			stmmac_fpe_configure(priv, priv->ioaddr,
7238 					     fpe_cfg,
7239 					     priv->plat->tx_queues_to_use,
7240 					     priv->plat->rx_queues_to_use,
7241 					     *enable);
7242 
7243 			netdev_info(priv->dev, "configured FPE\n");
7244 
7245 			*lo_state = FPE_STATE_ON;
7246 			*lp_state = FPE_STATE_ON;
7247 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7248 			break;
7249 		}
7250 
7251 		if ((*lo_state == FPE_STATE_CAPABLE ||
7252 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7253 		     *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7255 				    *lo_state, *lp_state);
7256 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7257 						fpe_cfg,
7258 						MPACKET_VERIFY);
7259 		}
7260 		/* Sleep then retry */
7261 		msleep(500);
7262 	}
7263 
7264 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7265 }
7266 
7267 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7268 {
7269 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7270 		if (enable) {
7271 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7272 						priv->plat->fpe_cfg,
7273 						MPACKET_VERIFY);
7274 		} else {
7275 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7276 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7277 		}
7278 
7279 		priv->plat->fpe_cfg->hs_enable = enable;
7280 	}
7281 }
7282 
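/* XDP RX metadata hook: lets an XDP program retrieve the HW RX timestamp of
 * the current frame via the bpf_xdp_metadata_rx_timestamp() kfunc.
 */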
7283 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7284 {
7285 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7286 	struct dma_desc *desc_contains_ts = ctx->desc;
7287 	struct stmmac_priv *priv = ctx->priv;
7288 	struct dma_desc *ndesc = ctx->ndesc;
7289 	struct dma_desc *desc = ctx->desc;
7290 	u64 ns = 0;
7291 
7292 	if (!priv->hwts_rx_en)
7293 		return -ENODATA;
7294 
7295 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7296 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7297 		desc_contains_ts = ndesc;
7298 
7299 	/* Check if timestamp is available */
7300 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7301 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7302 		ns -= priv->plat->cdc_error_adj;
7303 		*timestamp = ns_to_ktime(ns);
7304 		return 0;
7305 	}
7306 
7307 	return -ENODATA;
7308 }
7309 
7310 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7311 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7312 };
7313 
7314 /**
7315  * stmmac_dvr_probe
7316  * @device: device pointer
7317  * @plat_dat: platform data pointer
7318  * @res: stmmac resource pointer
 * Description: this is the main probe function: it allocates the net_device
 * via alloc_etherdev and sets up the driver private structure.
 * Return:
 * 0 on success, otherwise a negative errno.
7323  */
7324 int stmmac_dvr_probe(struct device *device,
7325 		     struct plat_stmmacenet_data *plat_dat,
7326 		     struct stmmac_resources *res)
7327 {
7328 	struct net_device *ndev = NULL;
7329 	struct stmmac_priv *priv;
7330 	u32 rxq;
7331 	int i, ret = 0;
7332 
7333 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7334 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7335 	if (!ndev)
7336 		return -ENOMEM;
7337 
7338 	SET_NETDEV_DEV(ndev, device);
7339 
7340 	priv = netdev_priv(ndev);
7341 	priv->device = device;
7342 	priv->dev = ndev;
7343 
7344 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7345 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7346 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7347 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7348 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7349 	}
7350 
7351 	priv->xstats.pcpu_stats =
7352 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7353 	if (!priv->xstats.pcpu_stats)
7354 		return -ENOMEM;
7355 
7356 	stmmac_set_ethtool_ops(ndev);
7357 	priv->pause = pause;
7358 	priv->plat = plat_dat;
7359 	priv->ioaddr = res->addr;
7360 	priv->dev->base_addr = (unsigned long)res->addr;
7361 	priv->plat->dma_cfg->multi_msi_en =
7362 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7363 
7364 	priv->dev->irq = res->irq;
7365 	priv->wol_irq = res->wol_irq;
7366 	priv->lpi_irq = res->lpi_irq;
7367 	priv->sfty_ce_irq = res->sfty_ce_irq;
7368 	priv->sfty_ue_irq = res->sfty_ue_irq;
7369 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7370 		priv->rx_irq[i] = res->rx_irq[i];
7371 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7372 		priv->tx_irq[i] = res->tx_irq[i];
7373 
7374 	if (!is_zero_ether_addr(res->mac))
7375 		eth_hw_addr_set(priv->dev, res->mac);
7376 
7377 	dev_set_drvdata(device, priv->dev);
7378 
7379 	/* Verify driver arguments */
7380 	stmmac_verify_args();
7381 
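	/* Bitmap of queues that currently have an AF_XDP zero-copy pool
	 * attached.
	 */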
7382 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7383 	if (!priv->af_xdp_zc_qps)
7384 		return -ENOMEM;
7385 
7386 	/* Allocate workqueue */
7387 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7388 	if (!priv->wq) {
7389 		dev_err(priv->device, "failed to create workqueue\n");
7390 		ret = -ENOMEM;
7391 		goto error_wq_init;
7392 	}
7393 
7394 	INIT_WORK(&priv->service_task, stmmac_service_task);
7395 
7396 	/* Initialize Link Partner FPE workqueue */
7397 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7398 
	/* Override with kernel parameters if supplied. XXX CRS XXX:
	 * this needs to have multiple instances.
	 */
7402 	if ((phyaddr >= 0) && (phyaddr <= 31))
7403 		priv->plat->phy_addr = phyaddr;
7404 
7405 	if (priv->plat->stmmac_rst) {
7406 		ret = reset_control_assert(priv->plat->stmmac_rst);
7407 		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only a reset callback instead
		 * of an assert + deassert callback pair.
		 */
7411 		if (ret == -ENOTSUPP)
7412 			reset_control_reset(priv->plat->stmmac_rst);
7413 	}
7414 
7415 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7416 	if (ret == -ENOTSUPP)
7417 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7418 			ERR_PTR(ret));
7419 
7420 	/* Wait a bit for the reset to take effect */
7421 	udelay(10);
7422 
7423 	/* Init MAC and get the capabilities */
7424 	ret = stmmac_hw_init(priv);
7425 	if (ret)
7426 		goto error_hw_init;
7427 
7428 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7429 	 */
7430 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7431 		priv->plat->dma_cfg->dche = false;
7432 
7433 	stmmac_check_ether_addr(priv);
7434 
7435 	ndev->netdev_ops = &stmmac_netdev_ops;
7436 
7437 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7438 
7439 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7440 			    NETIF_F_RXCSUM;
7441 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7442 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7443 
	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;
7448 
7449 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7450 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7451 		if (priv->plat->has_gmac4)
7452 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7453 		priv->tso = true;
7454 		dev_info(priv->device, "TSO feature enabled\n");
7455 	}
7456 
7457 	if (priv->dma_cap.sphen &&
7458 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7459 		ndev->hw_features |= NETIF_F_GRO;
7460 		priv->sph_cap = true;
7461 		priv->sph = priv->sph_cap;
7462 		dev_info(priv->device, "SPH feature enabled\n");
7463 	}
7464 
7465 	/* Ideally our host DMA address width is the same as for the
7466 	 * device. However, it may differ and then we have to use our
7467 	 * host DMA width for allocation and the device DMA width for
7468 	 * register handling.
7469 	 */
7470 	if (priv->plat->host_dma_width)
7471 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7472 	else
7473 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7474 
7475 	if (priv->dma_cap.host_dma_width) {
7476 		ret = dma_set_mask_and_coherent(device,
7477 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7478 		if (!ret) {
7479 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7480 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7481 
7482 			/*
7483 			 * If more than 32 bits can be addressed, make sure to
7484 			 * enable enhanced addressing mode.
7485 			 */
7486 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7487 				priv->plat->dma_cfg->eame = true;
7488 		} else {
7489 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7490 			if (ret) {
7491 				dev_err(priv->device, "Failed to set DMA Mask\n");
7492 				goto error_hw_init;
7493 			}
7494 
7495 			priv->dma_cap.host_dma_width = 32;
7496 		}
7497 	}
7498 
7499 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7500 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7501 #ifdef STMMAC_VLAN_TAG_USED
7502 	/* Both mac100 and gmac support receive VLAN tag detection */
7503 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7504 	if (priv->dma_cap.vlhash) {
7505 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7506 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7507 	}
7508 	if (priv->dma_cap.vlins) {
7509 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7510 		if (priv->dma_cap.dvlan)
7511 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7512 	}
7513 #endif
7514 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7515 
7516 	priv->xstats.threshold = tc;
7517 
7518 	/* Initialize RSS */
7519 	rxq = priv->plat->rx_queues_to_use;
7520 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7521 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7522 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7523 
7524 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7525 		ndev->features |= NETIF_F_RXHASH;
7526 
7527 	ndev->vlan_features |= ndev->features;
7528 	/* TSO doesn't work on VLANs yet */
7529 	ndev->vlan_features &= ~NETIF_F_TSO;
7530 
7531 	/* MTU range: 46 - hw-specific max */
7532 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7533 	if (priv->plat->has_xgmac)
7534 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7535 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7536 		ndev->max_mtu = JUMBO_LEN;
7537 	else
7538 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu is larger than
	 * ndev->max_mtu or smaller than ndev->min_mtu, which is an invalid
	 * range.
	 */
7542 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7543 	    (priv->plat->maxmtu >= ndev->min_mtu))
7544 		ndev->max_mtu = priv->plat->maxmtu;
7545 	else if (priv->plat->maxmtu < ndev->min_mtu)
7546 		dev_warn(priv->device,
7547 			 "%s: warning: maxmtu having invalid value (%d)\n",
7548 			 __func__, priv->plat->maxmtu);
7549 
7550 	if (flow_ctrl)
7551 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7552 
7553 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7554 
7555 	/* Setup channels NAPI */
7556 	stmmac_napi_add(ndev);
7557 
7558 	mutex_init(&priv->lock);
7559 
	/* If a specific clk_csr value is passed from the platform, the CSR
	 * Clock Range selection is fixed and cannot be changed at run-time.
	 * Otherwise, the driver tries to set the MDC clock dynamically
	 * according to the actual csr clock input.
	 */
7566 	if (priv->plat->clk_csr >= 0)
7567 		priv->clk_csr = priv->plat->clk_csr;
7568 	else
7569 		stmmac_clk_csr_set(priv);
7570 
7571 	stmmac_check_pcs_mode(priv);
7572 
7573 	pm_runtime_get_noresume(device);
7574 	pm_runtime_set_active(device);
7575 	if (!pm_runtime_enabled(device))
7576 		pm_runtime_enable(device);
7577 
7578 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7579 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7580 		/* MDIO bus Registration */
7581 		ret = stmmac_mdio_register(ndev);
7582 		if (ret < 0) {
7583 			dev_err_probe(priv->device, ret,
7584 				      "%s: MDIO bus (id: %d) registration failed\n",
7585 				      __func__, priv->plat->bus_id);
7586 			goto error_mdio_register;
7587 		}
7588 	}
7589 
7590 	if (priv->plat->speed_mode_2500)
7591 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7592 
7593 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7594 		ret = stmmac_xpcs_setup(priv->mii);
7595 		if (ret)
7596 			goto error_xpcs_setup;
7597 	}
7598 
7599 	ret = stmmac_phy_setup(priv);
7600 	if (ret) {
7601 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7602 		goto error_phy_setup;
7603 	}
7604 
7605 	ret = register_netdev(ndev);
7606 	if (ret) {
7607 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7608 			__func__, ret);
7609 		goto error_netdev_register;
7610 	}
7611 
7612 #ifdef CONFIG_DEBUG_FS
7613 	stmmac_init_fs(ndev);
7614 #endif
7615 
7616 	if (priv->plat->dump_debug_regs)
7617 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7618 
7619 	/* Let pm_runtime_put() disable the clocks.
7620 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7621 	 */
7622 	pm_runtime_put(device);
7623 
7624 	return ret;
7625 
7626 error_netdev_register:
7627 	phylink_destroy(priv->phylink);
7628 error_xpcs_setup:
7629 error_phy_setup:
7630 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7631 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7632 		stmmac_mdio_unregister(ndev);
7633 error_mdio_register:
7634 	stmmac_napi_del(ndev);
7635 error_hw_init:
7636 	destroy_workqueue(priv->wq);
7637 error_wq_init:
7638 	bitmap_free(priv->af_xdp_zc_qps);
7639 
7640 	return ret;
7641 }
7642 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7643 
7644 /**
7645  * stmmac_dvr_remove
7646  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
7649  */
7650 void stmmac_dvr_remove(struct device *dev)
7651 {
7652 	struct net_device *ndev = dev_get_drvdata(dev);
7653 	struct stmmac_priv *priv = netdev_priv(ndev);
7654 
7655 	netdev_info(priv->dev, "%s: removing driver", __func__);
7656 
7657 	pm_runtime_get_sync(dev);
7658 
7659 	stmmac_stop_all_dma(priv);
7660 	stmmac_mac_set(priv, priv->ioaddr, false);
7661 	netif_carrier_off(ndev);
7662 	unregister_netdev(ndev);
7663 
7664 #ifdef CONFIG_DEBUG_FS
7665 	stmmac_exit_fs(ndev);
7666 #endif
7667 	phylink_destroy(priv->phylink);
7668 	if (priv->plat->stmmac_rst)
7669 		reset_control_assert(priv->plat->stmmac_rst);
7670 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7671 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7672 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7673 		stmmac_mdio_unregister(ndev);
7674 	destroy_workqueue(priv->wq);
7675 	mutex_destroy(&priv->lock);
7676 	bitmap_free(priv->af_xdp_zc_qps);
7677 
7678 	pm_runtime_disable(dev);
7679 	pm_runtime_put_noidle(dev);
7680 }
7681 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7682 
7683 /**
7684  * stmmac_suspend - suspend callback
7685  * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queues, program the PMT register
 * (for WoL) and clean up and release the driver resources.
7689  */
7690 int stmmac_suspend(struct device *dev)
7691 {
7692 	struct net_device *ndev = dev_get_drvdata(dev);
7693 	struct stmmac_priv *priv = netdev_priv(ndev);
7694 	u32 chan;
7695 
7696 	if (!ndev || !netif_running(ndev))
7697 		return 0;
7698 
7699 	mutex_lock(&priv->lock);
7700 
7701 	netif_device_detach(ndev);
7702 
7703 	stmmac_disable_all_queues(priv);
7704 
7705 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7706 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7707 
7708 	if (priv->eee_enabled) {
7709 		priv->tx_path_in_lpi_mode = false;
7710 		del_timer_sync(&priv->eee_ctrl_timer);
7711 	}
7712 
7713 	/* Stop TX/RX DMA */
7714 	stmmac_stop_all_dma(priv);
7715 
7716 	if (priv->plat->serdes_powerdown)
7717 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7718 
7719 	/* Enable Power down mode by programming the PMT regs */
7720 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7721 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7722 		priv->irq_wake = 1;
7723 	} else {
7724 		stmmac_mac_set(priv, priv->ioaddr, false);
7725 		pinctrl_pm_select_sleep_state(priv->device);
7726 	}
7727 
7728 	mutex_unlock(&priv->lock);
7729 
7730 	rtnl_lock();
7731 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7732 		phylink_suspend(priv->phylink, true);
7733 	} else {
7734 		if (device_may_wakeup(priv->device))
7735 			phylink_speed_down(priv->phylink, false);
7736 		phylink_suspend(priv->phylink, false);
7737 	}
7738 	rtnl_unlock();
7739 
7740 	if (priv->dma_cap.fpesel) {
7741 		/* Disable FPE */
7742 		stmmac_fpe_configure(priv, priv->ioaddr,
7743 				     priv->plat->fpe_cfg,
7744 				     priv->plat->tx_queues_to_use,
7745 				     priv->plat->rx_queues_to_use, false);
7746 
7747 		stmmac_fpe_handshake(priv, false);
7748 		stmmac_fpe_stop_wq(priv);
7749 	}
7750 
7751 	priv->speed = SPEED_UNKNOWN;
7752 	return 0;
7753 }
7754 EXPORT_SYMBOL_GPL(stmmac_suspend);
7755 
7756 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7757 {
7758 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7759 
7760 	rx_q->cur_rx = 0;
7761 	rx_q->dirty_rx = 0;
7762 }
7763 
7764 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7765 {
7766 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7767 
7768 	tx_q->cur_tx = 0;
7769 	tx_q->dirty_tx = 0;
7770 	tx_q->mss = 0;
7771 
7772 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7773 }
7774 
7775 /**
7776  * stmmac_reset_queues_param - reset queue parameters
7777  * @priv: device pointer
7778  */
7779 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7780 {
7781 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7782 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7783 	u32 queue;
7784 
7785 	for (queue = 0; queue < rx_cnt; queue++)
7786 		stmmac_reset_rx_queue(priv, queue);
7787 
7788 	for (queue = 0; queue < tx_cnt; queue++)
7789 		stmmac_reset_tx_queue(priv, queue);
7790 }
7791 
7792 /**
7793  * stmmac_resume - resume callback
7794  * @dev: device pointer
 * Description: on resume this function is invoked to set up the DMA and the
 * core in a usable state.
7797  */
7798 int stmmac_resume(struct device *dev)
7799 {
7800 	struct net_device *ndev = dev_get_drvdata(dev);
7801 	struct stmmac_priv *priv = netdev_priv(ndev);
7802 	int ret;
7803 
7804 	if (!netif_running(ndev))
7805 		return 0;
7806 
	/* The Power Down bit in the PM register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * It is still better to clear this bit manually because it can
	 * cause problems when resuming from another device
	 * (e.g. a serial console).
	 */
7813 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7814 		mutex_lock(&priv->lock);
7815 		stmmac_pmt(priv, priv->hw, 0);
7816 		mutex_unlock(&priv->lock);
7817 		priv->irq_wake = 0;
7818 	} else {
7819 		pinctrl_pm_select_default_state(priv->device);
7820 		/* reset the phy so that it's ready */
7821 		if (priv->mii)
7822 			stmmac_mdio_reset(priv->mii);
7823 	}
7824 
7825 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7826 	    priv->plat->serdes_powerup) {
7827 		ret = priv->plat->serdes_powerup(ndev,
7828 						 priv->plat->bsp_priv);
7829 
7830 		if (ret < 0)
7831 			return ret;
7832 	}
7833 
7834 	rtnl_lock();
7835 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7836 		phylink_resume(priv->phylink);
7837 	} else {
7838 		phylink_resume(priv->phylink);
7839 		if (device_may_wakeup(priv->device))
7840 			phylink_speed_up(priv->phylink);
7841 	}
7842 	rtnl_unlock();
7843 
7844 	rtnl_lock();
7845 	mutex_lock(&priv->lock);
7846 
7847 	stmmac_reset_queues_param(priv);
7848 
7849 	stmmac_free_tx_skbufs(priv);
7850 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7851 
7852 	stmmac_hw_setup(ndev, false);
7853 	stmmac_init_coalesce(priv);
7854 	stmmac_set_rx_mode(ndev);
7855 
7856 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7857 
7858 	stmmac_enable_all_queues(priv);
7859 	stmmac_enable_all_dma_irq(priv);
7860 
7861 	mutex_unlock(&priv->lock);
7862 	rtnl_unlock();
7863 
7864 	netif_device_attach(ndev);
7865 
7866 	return 0;
7867 }
7868 EXPORT_SYMBOL_GPL(stmmac_resume);
7869 
7870 #ifndef MODULE
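/* Parse the built-in "stmmaceth=" boot parameter: a comma-separated list of
 * "option:value" pairs, for example (illustrative values only):
 *
 *	stmmaceth=debug:16,watchdog:4000,phyaddr:1
 */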
7871 static int __init stmmac_cmdline_opt(char *str)
7872 {
7873 	char *opt;
7874 
7875 	if (!str || !*str)
7876 		return 1;
7877 	while ((opt = strsep(&str, ",")) != NULL) {
7878 		if (!strncmp(opt, "debug:", 6)) {
7879 			if (kstrtoint(opt + 6, 0, &debug))
7880 				goto err;
7881 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7882 			if (kstrtoint(opt + 8, 0, &phyaddr))
7883 				goto err;
7884 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7885 			if (kstrtoint(opt + 7, 0, &buf_sz))
7886 				goto err;
7887 		} else if (!strncmp(opt, "tc:", 3)) {
7888 			if (kstrtoint(opt + 3, 0, &tc))
7889 				goto err;
7890 		} else if (!strncmp(opt, "watchdog:", 9)) {
7891 			if (kstrtoint(opt + 9, 0, &watchdog))
7892 				goto err;
7893 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7894 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7895 				goto err;
7896 		} else if (!strncmp(opt, "pause:", 6)) {
7897 			if (kstrtoint(opt + 6, 0, &pause))
7898 				goto err;
7899 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7900 			if (kstrtoint(opt + 10, 0, &eee_timer))
7901 				goto err;
7902 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7903 			if (kstrtoint(opt + 11, 0, &chain_mode))
7904 				goto err;
7905 		}
7906 	}
7907 	return 1;
7908 
7909 err:
7910 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7911 	return 1;
7912 }
7913 
7914 __setup("stmmaceth=", stmmac_cmdline_opt);
7915 #endif /* MODULE */
7916 
7917 static int __init stmmac_init(void)
7918 {
7919 #ifdef CONFIG_DEBUG_FS
7920 	/* Create debugfs main directory if it doesn't exist yet */
7921 	if (!stmmac_fs_dir)
7922 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7923 	register_netdevice_notifier(&stmmac_notifier);
7924 #endif
7925 
7926 	return 0;
7927 }
7928 
7929 static void __exit stmmac_exit(void)
7930 {
7931 #ifdef CONFIG_DEBUG_FS
7932 	unregister_netdevice_notifier(&stmmac_notifier);
7933 	debugfs_remove_recursive(stmmac_fs_dir);
7934 #endif
7935 }
7936 
7937 module_init(stmmac_init)
7938 module_exit(stmmac_exit)
7939 
7940 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7941 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7942 MODULE_LICENSE("GPL");
7943