1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
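/* Round a size up to the SMP cache line size and then to a 16 byte boundary. */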
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver uses the ring mode to manage the TX and RX descriptors,
122  * but the user can force use of the chain mode instead of the ring mode.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
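/**
 * stmmac_bus_clks_config - enable/disable the stmmac bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: prepare/enable or disable/unprepare the stmmac_clk and pclk
 * bus clocks, and invoke the optional platform clks_config() callback for
 * any extra platform specific clocks.
 */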
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
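/**
 * __stmmac_disable_all_queues - Disable NAPI on all RX/TX channels
 * @priv: driver private structure
 * Description: queues running in AF_XDP zero-copy mode use the combined
 * rxtx NAPI instance, all other queues use the separate RX and TX ones.
 */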
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
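/* Schedule the deferred service task unless the interface is going down or
 * the task has already been scheduled.
 */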
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
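/* Take the carrier down and let the service task perform a full reset. */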
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid for
308 	 * all cases except the ones mentioned below.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we cannot estimate the proper divider as the frequency of
311 	 * clk_csr_i is not known. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
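/**
 * stmmac_tx_avail - Get the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: one descriptor is always kept unused so that a full ring can
 * be distinguished from an empty one when cur_tx wraps around to dirty_tx.
 */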
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
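/* Select between the HW LPI entry timer and the SW EEE timer: when @en is
 * true the MAC enters LPI on its own after the programmed TX LPI timeout,
 * otherwise the driver falls back to the eee_ctrl_timer based SW scheme.
 */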
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter LPI mode
405  * @priv: driver private structure
406  * Description: this function verifies and enters LPI mode when
407  * EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the
433  * LPI state is true. It is called by the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then the MAC Transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using PCS we cannot deal with the phy registers at this stage,
475 	 * so we do not support extra features like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function will read the timestamp from the descriptor and pass it to
534  * the stack. It also performs some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read the received packet's timestamp from the descriptor
577  * and pass it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet time stamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* do not time stamp any incoming packet */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * Will be rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate the default addend value:
858 	 * formula is:
859 	 * addend = (2^32)/freq_div_ratio;
860 	 * where freq_div_ratio = 1e9ns/sec_inc
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
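/* Report the XPCS or Lynx PCS instance to phylink when the platform glue has
 * set one up; return NULL when the MAC has no separate PCS to drive.
 */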
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 					MPACKET_VERIFY);
969 	} else {
970 		*lo_state = FPE_STATE_OFF;
971 		*lp_state = FPE_STATE_OFF;
972 	}
973 }
974 
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 				 unsigned int mode, phy_interface_t interface)
977 {
978 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979 
980 	stmmac_mac_set(priv, priv->ioaddr, false);
981 	priv->eee_active = false;
982 	priv->tx_lpi_enabled = false;
983 	priv->eee_enabled = stmmac_eee_init(priv);
984 	stmmac_set_eee_pls(priv, priv->hw, false);
985 
986 	if (priv->dma_cap.fpesel)
987 		stmmac_fpe_link_state_handle(priv, false);
988 }
989 
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 			       struct phy_device *phy,
992 			       unsigned int mode, phy_interface_t interface,
993 			       int speed, int duplex,
994 			       bool tx_pause, bool rx_pause)
995 {
996 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 	u32 old_ctrl, ctrl;
998 
999 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 	    priv->plat->serdes_powerup)
1001 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002 
1003 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005 
1006 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 		switch (speed) {
1008 		case SPEED_10000:
1009 			ctrl |= priv->hw->link.xgmii.speed10000;
1010 			break;
1011 		case SPEED_5000:
1012 			ctrl |= priv->hw->link.xgmii.speed5000;
1013 			break;
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.xgmii.speed2500;
1016 			break;
1017 		default:
1018 			return;
1019 		}
1020 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 		switch (speed) {
1022 		case SPEED_100000:
1023 			ctrl |= priv->hw->link.xlgmii.speed100000;
1024 			break;
1025 		case SPEED_50000:
1026 			ctrl |= priv->hw->link.xlgmii.speed50000;
1027 			break;
1028 		case SPEED_40000:
1029 			ctrl |= priv->hw->link.xlgmii.speed40000;
1030 			break;
1031 		case SPEED_25000:
1032 			ctrl |= priv->hw->link.xlgmii.speed25000;
1033 			break;
1034 		case SPEED_10000:
1035 			ctrl |= priv->hw->link.xgmii.speed10000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.speed2500;
1039 			break;
1040 		case SPEED_1000:
1041 			ctrl |= priv->hw->link.speed1000;
1042 			break;
1043 		default:
1044 			return;
1045 		}
1046 	} else {
1047 		switch (speed) {
1048 		case SPEED_2500:
1049 			ctrl |= priv->hw->link.speed2500;
1050 			break;
1051 		case SPEED_1000:
1052 			ctrl |= priv->hw->link.speed1000;
1053 			break;
1054 		case SPEED_100:
1055 			ctrl |= priv->hw->link.speed100;
1056 			break;
1057 		case SPEED_10:
1058 			ctrl |= priv->hw->link.speed10;
1059 			break;
1060 		default:
1061 			return;
1062 		}
1063 	}
1064 
1065 	priv->speed = speed;
1066 
1067 	if (priv->plat->fix_mac_speed)
1068 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069 
1070 	if (!duplex)
1071 		ctrl &= ~priv->hw->link.duplex;
1072 	else
1073 		ctrl |= priv->hw->link.duplex;
1074 
1075 	/* Flow Control operation */
1076 	if (rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_AUTO;
1078 	else if (rx_pause && !tx_pause)
1079 		priv->flow_ctrl = FLOW_RX;
1080 	else if (!rx_pause && tx_pause)
1081 		priv->flow_ctrl = FLOW_TX;
1082 	else
1083 		priv->flow_ctrl = FLOW_OFF;
1084 
1085 	stmmac_mac_flow_ctrl(priv, duplex);
1086 
1087 	if (ctrl != old_ctrl)
1088 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 
1090 	stmmac_mac_set(priv, priv->ioaddr, true);
1091 	if (phy && priv->dma_cap.eee) {
1092 		priv->eee_active =
1093 			phy_init_eee(phy, !(priv->plat->flags &
1094 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 		priv->eee_enabled = stmmac_eee_init(priv);
1096 		priv->tx_lpi_enabled = priv->eee_enabled;
1097 		stmmac_set_eee_pls(priv, priv->hw, true);
1098 	}
1099 
1100 	if (priv->dma_cap.fpesel)
1101 		stmmac_fpe_link_state_handle(priv, true);
1102 
1103 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 		stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106 
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this is to verify if the HW supports the PCS.
1118  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1119  * configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
1166 	/* Some DT bindings do not set up the PHY handle. Let's try to
1167 	 * parse it manually.
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
1203 	/* Half-duplex can only work with a single TX queue */
1204 	if (priv->plat->tx_queues_to_use > 1)
1205 		priv->phylink_config.mac_capabilities &=
1206 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 	else
1208 		priv->phylink_config.mac_capabilities |=
1209 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211 
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 	struct stmmac_mdio_bus_data *mdio_bus_data;
1215 	int mode = priv->plat->phy_interface;
1216 	struct fwnode_handle *fwnode;
1217 	struct phylink *phylink;
1218 	int max_speed;
1219 
1220 	priv->phylink_config.dev = &priv->dev->dev;
1221 	priv->phylink_config.type = PHYLINK_NETDEV;
1222 	priv->phylink_config.mac_managed_pm = true;
1223 
1224 	mdio_bus_data = priv->plat->mdio_bus_data;
1225 	if (mdio_bus_data)
1226 		priv->phylink_config.ovr_an_inband =
1227 			mdio_bus_data->xpcs_an_inband;
1228 
1229 	/* Set the platform/firmware specified interface mode. Note, phylink
1230 	 * deals with the PHY interface mode, not the MAC interface mode.
1231 	 */
1232 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1233 
1234 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 	if (priv->hw->xpcs)
1236 		xpcs_get_interfaces(priv->hw->xpcs,
1237 				    priv->phylink_config.supported_interfaces);
1238 
1239 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 						MAC_10FD | MAC_100FD |
1241 						MAC_1000FD;
1242 
1243 	stmmac_set_half_duplex(priv);
1244 
1245 	/* Get the MAC specific capabilities */
1246 	stmmac_mac_phylink_get_caps(priv);
1247 
1248 	max_speed = priv->plat->max_speed;
1249 	if (max_speed)
1250 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251 
1252 	fwnode = priv->plat->port_node;
1253 	if (!fwnode)
1254 		fwnode = dev_fwnode(priv->device);
1255 
1256 	phylink = phylink_create(&priv->phylink_config, fwnode,
1257 				 mode, &stmmac_phylink_mac_ops);
1258 	if (IS_ERR(phylink))
1259 		return PTR_ERR(phylink);
1260 
1261 	priv->phylink = phylink;
1262 	return 0;
1263 }
1264 
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 				    struct stmmac_dma_conf *dma_conf)
1267 {
1268 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 	unsigned int desc_size;
1270 	void *head_rx;
1271 	u32 queue;
1272 
1273 	/* Display RX rings */
1274 	for (queue = 0; queue < rx_cnt; queue++) {
1275 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276 
1277 		pr_info("\tRX Queue %u rings\n", queue);
1278 
1279 		if (priv->extend_desc) {
1280 			head_rx = (void *)rx_q->dma_erx;
1281 			desc_size = sizeof(struct dma_extended_desc);
1282 		} else {
1283 			head_rx = (void *)rx_q->dma_rx;
1284 			desc_size = sizeof(struct dma_desc);
1285 		}
1286 
1287 		/* Display RX ring */
1288 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 				    rx_q->dma_rx_phy, desc_size);
1290 	}
1291 }
1292 
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 				    struct stmmac_dma_conf *dma_conf)
1295 {
1296 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 	unsigned int desc_size;
1298 	void *head_tx;
1299 	u32 queue;
1300 
1301 	/* Display TX rings */
1302 	for (queue = 0; queue < tx_cnt; queue++) {
1303 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304 
1305 		pr_info("\tTX Queue %d rings\n", queue);
1306 
1307 		if (priv->extend_desc) {
1308 			head_tx = (void *)tx_q->dma_etx;
1309 			desc_size = sizeof(struct dma_extended_desc);
1310 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 			head_tx = (void *)tx_q->dma_entx;
1312 			desc_size = sizeof(struct dma_edesc);
1313 		} else {
1314 			head_tx = (void *)tx_q->dma_tx;
1315 			desc_size = sizeof(struct dma_desc);
1316 		}
1317 
1318 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 				    tx_q->dma_tx_phy, desc_size);
1320 	}
1321 }
1322 
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 				 struct stmmac_dma_conf *dma_conf)
1325 {
1326 	/* Display RX ring */
1327 	stmmac_display_rx_rings(priv, dma_conf);
1328 
1329 	/* Display TX ring */
1330 	stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332 
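/* Map the MTU to the smallest DMA buffer size bucket (2K/4K/8K/16K) able to
 * hold it, falling back to DEFAULT_BUFSIZE for standard sized frames.
 */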
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 	int ret = bufsize;
1336 
1337 	if (mtu >= BUF_SIZE_8KiB)
1338 		ret = BUF_SIZE_16KiB;
1339 	else if (mtu >= BUF_SIZE_4KiB)
1340 		ret = BUF_SIZE_8KiB;
1341 	else if (mtu >= BUF_SIZE_2KiB)
1342 		ret = BUF_SIZE_4KiB;
1343 	else if (mtu > DEFAULT_BUFSIZE)
1344 		ret = BUF_SIZE_2KiB;
1345 	else
1346 		ret = DEFAULT_BUFSIZE;
1347 
1348 	return ret;
1349 }
1350 
1351 /**
1352  * stmmac_clear_rx_descriptors - clear RX descriptors
1353  * @priv: driver private structure
1354  * @dma_conf: structure to take the dma data
1355  * @queue: RX queue index
1356  * Description: this function is called to clear the RX descriptors
1357  * whether basic or extended descriptors are used.
1358  */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 					struct stmmac_dma_conf *dma_conf,
1361 					u32 queue)
1362 {
1363 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 	int i;
1365 
1366 	/* Clear the RX descriptors */
1367 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 		if (priv->extend_desc)
1369 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 					priv->use_riwt, priv->mode,
1371 					(i == dma_conf->dma_rx_size - 1),
1372 					dma_conf->dma_buf_sz);
1373 		else
1374 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 					priv->use_riwt, priv->mode,
1376 					(i == dma_conf->dma_rx_size - 1),
1377 					dma_conf->dma_buf_sz);
1378 }
1379 
1380 /**
1381  * stmmac_clear_tx_descriptors - clear tx descriptors
1382  * @priv: driver private structure
1383  * @dma_conf: structure to take the dma data
1384  * @queue: TX queue index.
1385  * Description: this function is called to clear the TX descriptors
1386  * whether basic or extended descriptors are used.
1387  */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 					struct stmmac_dma_conf *dma_conf,
1390 					u32 queue)
1391 {
1392 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 	int i;
1394 
1395 	/* Clear the TX descriptors */
1396 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 		int last = (i == (dma_conf->dma_tx_size - 1));
1398 		struct dma_desc *p;
1399 
1400 		if (priv->extend_desc)
1401 			p = &tx_q->dma_etx[i].basic;
1402 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 			p = &tx_q->dma_entx[i].basic;
1404 		else
1405 			p = &tx_q->dma_tx[i];
1406 
1407 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 	}
1409 }
1410 
1411 /**
1412  * stmmac_clear_descriptors - clear descriptors
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
1415  * Description: this function is called to clear the TX and RX descriptors
1416  * whether basic or extended descriptors are used.
1417  */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 				     struct stmmac_dma_conf *dma_conf)
1420 {
1421 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 	u32 queue;
1424 
1425 	/* Clear the RX descriptors */
1426 	for (queue = 0; queue < rx_queue_cnt; queue++)
1427 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428 
1429 	/* Clear the TX descriptors */
1430 	for (queue = 0; queue < tx_queue_cnt; queue++)
1431 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433 
1434 /**
1435  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436  * @priv: driver private structure
1437  * @dma_conf: structure to take the dma data
1438  * @p: descriptor pointer
1439  * @i: descriptor index
1440  * @flags: gfp flag
1441  * @queue: RX queue index
1442  * Description: this function is called to allocate a receive buffer, perform
1443  * the DMA mapping and init the descriptor.
1444  */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 				  struct stmmac_dma_conf *dma_conf,
1447 				  struct dma_desc *p,
1448 				  int i, gfp_t flags, u32 queue)
1449 {
1450 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453 
1454 	if (priv->dma_cap.host_dma_width <= 32)
1455 		gfp |= GFP_DMA32;
1456 
1457 	if (!buf->page) {
1458 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 		if (!buf->page)
1460 			return -ENOMEM;
1461 		buf->page_offset = stmmac_rx_offset(priv);
1462 	}
1463 
1464 	if (priv->sph && !buf->sec_page) {
1465 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 		if (!buf->sec_page)
1467 			return -ENOMEM;
1468 
1469 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 	} else {
1472 		buf->sec_page = NULL;
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 	}
1475 
1476 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477 
1478 	stmmac_set_desc_addr(priv, p, buf->addr);
1479 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 		stmmac_init_desc3(priv, p);
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * stmmac_free_rx_buffer - free an RX DMA buffer
1487  * @priv: private structure
1488  * @rx_q: RX queue
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 				  struct stmmac_rx_queue *rx_q,
1493 				  int i)
1494 {
1495 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496 
1497 	if (buf->page)
1498 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 	buf->page = NULL;
1500 
1501 	if (buf->sec_page)
1502 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 	buf->sec_page = NULL;
1504 }
1505 
1506 /**
1507  * stmmac_free_tx_buffer - free a TX DMA buffer
1508  * @priv: private structure
1509  * @dma_conf: structure to take the dma data
1510  * @queue: TX queue index
1511  * @i: buffer index.
1512  */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 				  struct stmmac_dma_conf *dma_conf,
1515 				  u32 queue, int i)
1516 {
1517 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518 
1519 	if (tx_q->tx_skbuff_dma[i].buf &&
1520 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 			dma_unmap_page(priv->device,
1523 				       tx_q->tx_skbuff_dma[i].buf,
1524 				       tx_q->tx_skbuff_dma[i].len,
1525 				       DMA_TO_DEVICE);
1526 		else
1527 			dma_unmap_single(priv->device,
1528 					 tx_q->tx_skbuff_dma[i].buf,
1529 					 tx_q->tx_skbuff_dma[i].len,
1530 					 DMA_TO_DEVICE);
1531 	}
1532 
1533 	if (tx_q->xdpf[i] &&
1534 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 		xdp_return_frame(tx_q->xdpf[i]);
1537 		tx_q->xdpf[i] = NULL;
1538 	}
1539 
1540 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 		tx_q->xsk_frames_done++;
1542 
1543 	if (tx_q->tx_skbuff[i] &&
1544 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 		tx_q->tx_skbuff[i] = NULL;
1547 	}
1548 
1549 	tx_q->tx_skbuff_dma[i].buf = 0;
1550 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552 
1553 /**
1554  * dma_free_rx_skbufs - free RX dma buffers
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: RX queue index
1558  */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 			       struct stmmac_dma_conf *dma_conf,
1561 			       u32 queue)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 		stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569 
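/**
 * stmmac_alloc_rx_buffers - Allocate page pool backed buffers for an RX ring
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag
 */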
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 				   struct stmmac_dma_conf *dma_conf,
1572 				   u32 queue, gfp_t flags)
1573 {
1574 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 	int i;
1576 
1577 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 		struct dma_desc *p;
1579 		int ret;
1580 
1581 		if (priv->extend_desc)
1582 			p = &((rx_q->dma_erx + i)->basic);
1583 		else
1584 			p = rx_q->dma_rx + i;
1585 
1586 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 					     queue);
1588 		if (ret)
1589 			return ret;
1590 
1591 		rx_q->buf_alloc_num++;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599  * @priv: private structure
1600  * @dma_conf: structure to take the dma data
1601  * @queue: RX queue index
1602  */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 				struct stmmac_dma_conf *dma_conf,
1605 				u32 queue)
1606 {
1607 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 	int i;
1609 
1610 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612 
1613 		if (!buf->xdp)
1614 			continue;
1615 
1616 		xsk_buff_free(buf->xdp);
1617 		buf->xdp = NULL;
1618 	}
1619 }
1620 
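/**
 * stmmac_alloc_rx_buffers_zc - Allocate zero-copy RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */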
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 				      struct stmmac_dma_conf *dma_conf,
1623 				      u32 queue)
1624 {
1625 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 	int i;
1627 
1628 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1629 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1630 	 * use this macro to make sure there are no size violations.
1631 	 */
1632 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633 
1634 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 		struct stmmac_rx_buffer *buf;
1636 		dma_addr_t dma_addr;
1637 		struct dma_desc *p;
1638 
1639 		if (priv->extend_desc)
1640 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 		else
1642 			p = rx_q->dma_rx + i;
1643 
1644 		buf = &rx_q->buf_pool[i];
1645 
1646 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 		if (!buf->xdp)
1648 			return -ENOMEM;
1649 
1650 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 		stmmac_set_desc_addr(priv, p, dma_addr);
1652 		rx_q->buf_alloc_num++;
1653 	}
1654 
1655 	return 0;
1656 }
1657 
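/* Return the XSK buffer pool bound to @queue when the queue runs in AF_XDP
 * zero-copy mode, or NULL otherwise.
 */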
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 		return NULL;
1662 
1663 	return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665 
1666 /**
1667  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668  * @priv: driver private structure
1669  * @dma_conf: structure to take the dma data
1670  * @queue: RX queue index
1671  * @flags: gfp flag.
1672  * Description: this function initializes the DMA RX descriptors
1673  * and allocates the socket buffers. It supports the chained and ring
1674  * modes.
1675  */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 				    struct stmmac_dma_conf *dma_conf,
1678 				    u32 queue, gfp_t flags)
1679 {
1680 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 	int ret;
1682 
1683 	netif_dbg(priv, probe, priv->dev,
1684 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 		  (u32)rx_q->dma_rx_phy);
1686 
1687 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688 
1689 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690 
1691 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692 
1693 	if (rx_q->xsk_pool) {
1694 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 						   MEM_TYPE_XSK_BUFF_POOL,
1696 						   NULL));
1697 		netdev_info(priv->dev,
1698 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 			    rx_q->queue_index);
1700 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 	} else {
1702 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 						   MEM_TYPE_PAGE_POOL,
1704 						   rx_q->page_pool));
1705 		netdev_info(priv->dev,
1706 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 			    rx_q->queue_index);
1708 	}
1709 
1710 	if (rx_q->xsk_pool) {
1711 		/* RX XDP ZC buffer pool may not be populated, e.g.
1712 		 * xdpsock TX-only.
1713 		 */
1714 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 	} else {
1716 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 		if (ret < 0)
1718 			return -ENOMEM;
1719 	}
1720 
1721 	/* Setup the chained descriptor addresses */
1722 	if (priv->mode == STMMAC_CHAIN_MODE) {
1723 		if (priv->extend_desc)
1724 			stmmac_mode_init(priv, rx_q->dma_erx,
1725 					 rx_q->dma_rx_phy,
1726 					 dma_conf->dma_rx_size, 1);
1727 		else
1728 			stmmac_mode_init(priv, rx_q->dma_rx,
1729 					 rx_q->dma_rx_phy,
1730 					 dma_conf->dma_rx_size, 0);
1731 	}
1732 
1733 	return 0;
1734 }
1735 
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 				  struct stmmac_dma_conf *dma_conf,
1738 				  gfp_t flags)
1739 {
1740 	struct stmmac_priv *priv = netdev_priv(dev);
1741 	u32 rx_count = priv->plat->rx_queues_to_use;
1742 	int queue;
1743 	int ret;
1744 
1745 	/* RX INITIALIZATION */
1746 	netif_dbg(priv, probe, priv->dev,
1747 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748 
1749 	for (queue = 0; queue < rx_count; queue++) {
1750 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 		if (ret)
1752 			goto err_init_rx_buffers;
1753 	}
1754 
1755 	return 0;
1756 
1757 err_init_rx_buffers:
1758 	while (queue >= 0) {
1759 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760 
1761 		if (rx_q->xsk_pool)
1762 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 		else
1764 			dma_free_rx_skbufs(priv, dma_conf, queue);
1765 
1766 		rx_q->buf_alloc_num = 0;
1767 		rx_q->xsk_pool = NULL;
1768 
1769 		queue--;
1770 	}
1771 
1772 	return ret;
1773 }
1774 
1775 /**
1776  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777  * @priv: driver private structure
1778  * @dma_conf: structure to take the dma data
1779  * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors and the
 * per-descriptor bookkeeping. It supports the chained and ring modes.
1783  */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 				    struct stmmac_dma_conf *dma_conf,
1786 				    u32 queue)
1787 {
1788 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 	int i;
1790 
1791 	netif_dbg(priv, probe, priv->dev,
1792 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 		  (u32)tx_q->dma_tx_phy);
1794 
1795 	/* Setup the chained descriptor addresses */
1796 	if (priv->mode == STMMAC_CHAIN_MODE) {
1797 		if (priv->extend_desc)
1798 			stmmac_mode_init(priv, tx_q->dma_etx,
1799 					 tx_q->dma_tx_phy,
1800 					 dma_conf->dma_tx_size, 1);
1801 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 			stmmac_mode_init(priv, tx_q->dma_tx,
1803 					 tx_q->dma_tx_phy,
1804 					 dma_conf->dma_tx_size, 0);
1805 	}
1806 
1807 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808 
1809 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 		struct dma_desc *p;
1811 
1812 		if (priv->extend_desc)
1813 			p = &((tx_q->dma_etx + i)->basic);
1814 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 			p = &((tx_q->dma_entx + i)->basic);
1816 		else
1817 			p = tx_q->dma_tx + i;
1818 
1819 		stmmac_clear_desc(priv, p);
1820 
1821 		tx_q->tx_skbuff_dma[i].buf = 0;
1822 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 		tx_q->tx_skbuff_dma[i].len = 0;
1824 		tx_q->tx_skbuff_dma[i].last_segment = false;
1825 		tx_q->tx_skbuff[i] = NULL;
1826 	}
1827 
1828 	return 0;
1829 }
1830 
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 				  struct stmmac_dma_conf *dma_conf)
1833 {
1834 	struct stmmac_priv *priv = netdev_priv(dev);
1835 	u32 tx_queue_cnt;
1836 	u32 queue;
1837 
1838 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1839 
1840 	for (queue = 0; queue < tx_queue_cnt; queue++)
1841 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1842 
1843 	return 0;
1844 }
1845 
1846 /**
1847  * init_dma_desc_rings - init the RX/TX descriptor rings
1848  * @dev: net device structure
1849  * @dma_conf: structure to take the dma data
1850  * @flags: gfp flag.
1851  * Description: this function initializes the DMA RX/TX descriptors
1852  * and allocates the socket buffers. It supports the chained and ring
1853  * modes.
1854  */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 			       struct stmmac_dma_conf *dma_conf,
1857 			       gfp_t flags)
1858 {
1859 	struct stmmac_priv *priv = netdev_priv(dev);
1860 	int ret;
1861 
1862 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 	if (ret)
1864 		return ret;
1865 
1866 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1867 
1868 	stmmac_clear_descriptors(priv, dma_conf);
1869 
1870 	if (netif_msg_hw(priv))
1871 		stmmac_display_rings(priv, dma_conf);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * dma_free_tx_skbufs - free TX dma buffers
1878  * @priv: private structure
1879  * @dma_conf: structure to take the dma data
1880  * @queue: TX queue index
1881  */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 			       struct stmmac_dma_conf *dma_conf,
1884 			       u32 queue)
1885 {
1886 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 	int i;
1888 
1889 	tx_q->xsk_frames_done = 0;
1890 
1891 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893 
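	/* Freeing the buffers below may complete XSK TX frames: return them
	 * to the pool before dropping the reference to it.
	 */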
1894 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 		tx_q->xsk_frames_done = 0;
1897 		tx_q->xsk_pool = NULL;
1898 	}
1899 }
1900 
1901 /**
1902  * stmmac_free_tx_skbufs - free TX skb buffers
1903  * @priv: private structure
1904  */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 	u32 queue;
1909 
1910 	for (queue = 0; queue < tx_queue_cnt; queue++)
1911 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913 
1914 /**
1915  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916  * @priv: private structure
1917  * @dma_conf: structure to take the dma data
1918  * @queue: RX queue index
1919  */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 					 struct stmmac_dma_conf *dma_conf,
1922 					 u32 queue)
1923 {
1924 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925 
1926 	/* Release the DMA RX socket buffers */
1927 	if (rx_q->xsk_pool)
1928 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 	else
1930 		dma_free_rx_skbufs(priv, dma_conf, queue);
1931 
1932 	rx_q->buf_alloc_num = 0;
1933 	rx_q->xsk_pool = NULL;
1934 
1935 	/* Free DMA regions of consistent memory previously allocated */
1936 	if (!priv->extend_desc)
1937 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 				  sizeof(struct dma_desc),
1939 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1940 	else
1941 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 				  sizeof(struct dma_extended_desc),
1943 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1944 
1945 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947 
1948 	kfree(rx_q->buf_pool);
1949 	if (rx_q->page_pool)
1950 		page_pool_destroy(rx_q->page_pool);
1951 }
1952 
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 				       struct stmmac_dma_conf *dma_conf)
1955 {
1956 	u32 rx_count = priv->plat->rx_queues_to_use;
1957 	u32 queue;
1958 
1959 	/* Free RX queue resources */
1960 	for (queue = 0; queue < rx_count; queue++)
1961 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963 
1964 /**
1965  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 					 struct stmmac_dma_conf *dma_conf,
1972 					 u32 queue)
1973 {
1974 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 	size_t size;
1976 	void *addr;
1977 
1978 	/* Release the DMA TX socket buffers */
1979 	dma_free_tx_skbufs(priv, dma_conf, queue);
1980 
1981 	if (priv->extend_desc) {
1982 		size = sizeof(struct dma_extended_desc);
1983 		addr = tx_q->dma_etx;
1984 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 		size = sizeof(struct dma_edesc);
1986 		addr = tx_q->dma_entx;
1987 	} else {
1988 		size = sizeof(struct dma_desc);
1989 		addr = tx_q->dma_tx;
1990 	}
1991 
1992 	size *= dma_conf->dma_tx_size;
1993 
1994 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995 
1996 	kfree(tx_q->tx_skbuff_dma);
1997 	kfree(tx_q->tx_skbuff);
1998 }
1999 
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 				       struct stmmac_dma_conf *dma_conf)
2002 {
2003 	u32 tx_count = priv->plat->tx_queues_to_use;
2004 	u32 queue;
2005 
2006 	/* Free TX queue resources */
2007 	for (queue = 0; queue < tx_count; queue++)
2008 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010 
2011 /**
2012  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: RX queue index
 * Description: this function allocates the RX resources for a single queue:
 * the page pool that provides the receive buffers, the buffer bookkeeping
 * array, the descriptor ring (extended or basic) and the XDP RX queue info.
2020  */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 					 struct stmmac_dma_conf *dma_conf,
2023 					 u32 queue)
2024 {
2025 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 	struct stmmac_channel *ch = &priv->channel[queue];
2027 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 	struct page_pool_params pp_params = { 0 };
2029 	unsigned int num_pages;
2030 	unsigned int napi_id;
2031 	int ret;
2032 
2033 	rx_q->queue_index = queue;
2034 	rx_q->priv_data = priv;
2035 
2036 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 	pp_params.pool_size = dma_conf->dma_rx_size;
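	/* A receive buffer may span several pages when dma_buf_sz exceeds
	 * PAGE_SIZE, so size the page_pool page order to cover a full buffer.
	 */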
2038 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 	pp_params.order = ilog2(num_pages);
2040 	pp_params.nid = dev_to_node(priv->device);
2041 	pp_params.dev = priv->device;
2042 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 	pp_params.offset = stmmac_rx_offset(priv);
2044 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045 
2046 	rx_q->page_pool = page_pool_create(&pp_params);
2047 	if (IS_ERR(rx_q->page_pool)) {
2048 		ret = PTR_ERR(rx_q->page_pool);
2049 		rx_q->page_pool = NULL;
2050 		return ret;
2051 	}
2052 
2053 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 				 sizeof(*rx_q->buf_pool),
2055 				 GFP_KERNEL);
2056 	if (!rx_q->buf_pool)
2057 		return -ENOMEM;
2058 
2059 	if (priv->extend_desc) {
2060 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 						   dma_conf->dma_rx_size *
2062 						   sizeof(struct dma_extended_desc),
2063 						   &rx_q->dma_rx_phy,
2064 						   GFP_KERNEL);
2065 		if (!rx_q->dma_erx)
2066 			return -ENOMEM;
2067 
2068 	} else {
2069 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 						  dma_conf->dma_rx_size *
2071 						  sizeof(struct dma_desc),
2072 						  &rx_q->dma_rx_phy,
2073 						  GFP_KERNEL);
2074 		if (!rx_q->dma_rx)
2075 			return -ENOMEM;
2076 	}
2077 
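	/* AF_XDP zero-copy queues are serviced by the combined rx/tx NAPI,
	 * so register the XDP RX queue info with that napi_id; other queues
	 * use the dedicated RX NAPI.
	 */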
2078 	if (stmmac_xdp_is_enabled(priv) &&
2079 	    test_bit(queue, priv->af_xdp_zc_qps))
2080 		napi_id = ch->rxtx_napi.napi_id;
2081 	else
2082 		napi_id = ch->rx_napi.napi_id;
2083 
2084 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 			       rx_q->queue_index,
2086 			       napi_id);
2087 	if (ret) {
2088 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 		return -EINVAL;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 				       struct stmmac_dma_conf *dma_conf)
2097 {
2098 	u32 rx_count = priv->plat->rx_queues_to_use;
2099 	u32 queue;
2100 	int ret;
2101 
2102 	/* RX queues buffers and DMA */
2103 	for (queue = 0; queue < rx_count; queue++) {
2104 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 		if (ret)
2106 			goto err_dma;
2107 	}
2108 
2109 	return 0;
2110 
2111 err_dma:
2112 	free_dma_rx_desc_resources(priv, dma_conf);
2113 
2114 	return ret;
2115 }
2116 
2117 /**
2118  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
 * Description: this function allocates the TX resources for a single queue:
 * the tx_skbuff/tx_skbuff_dma bookkeeping arrays and the descriptor ring
 * (extended, enhanced for TBS, or basic).
2126  */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 					 struct stmmac_dma_conf *dma_conf,
2129 					 u32 queue)
2130 {
2131 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 	size_t size;
2133 	void *addr;
2134 
2135 	tx_q->queue_index = queue;
2136 	tx_q->priv_data = priv;
2137 
2138 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 				      sizeof(*tx_q->tx_skbuff_dma),
2140 				      GFP_KERNEL);
2141 	if (!tx_q->tx_skbuff_dma)
2142 		return -ENOMEM;
2143 
2144 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 				  sizeof(struct sk_buff *),
2146 				  GFP_KERNEL);
2147 	if (!tx_q->tx_skbuff)
2148 		return -ENOMEM;
2149 
2150 	if (priv->extend_desc)
2151 		size = sizeof(struct dma_extended_desc);
2152 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 		size = sizeof(struct dma_edesc);
2154 	else
2155 		size = sizeof(struct dma_desc);
2156 
2157 	size *= dma_conf->dma_tx_size;
2158 
2159 	addr = dma_alloc_coherent(priv->device, size,
2160 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2161 	if (!addr)
2162 		return -ENOMEM;
2163 
2164 	if (priv->extend_desc)
2165 		tx_q->dma_etx = addr;
2166 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 		tx_q->dma_entx = addr;
2168 	else
2169 		tx_q->dma_tx = addr;
2170 
2171 	return 0;
2172 }
2173 
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 				       struct stmmac_dma_conf *dma_conf)
2176 {
2177 	u32 tx_count = priv->plat->tx_queues_to_use;
2178 	u32 queue;
2179 	int ret;
2180 
2181 	/* TX queues buffers and DMA */
2182 	for (queue = 0; queue < tx_count; queue++) {
2183 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 		if (ret)
2185 			goto err_dma;
2186 	}
2187 
2188 	return 0;
2189 
2190 err_dma:
2191 	free_dma_tx_desc_resources(priv, dma_conf);
2192 	return ret;
2193 }
2194 
2195 /**
2196  * alloc_dma_desc_resources - alloc TX/RX resources.
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the TX and RX resources for all queues: descriptor
 * rings, buffer pools and the related bookkeeping arrays.
2203  */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 				    struct stmmac_dma_conf *dma_conf)
2206 {
2207 	/* RX Allocation */
2208 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209 
2210 	if (ret)
2211 		return ret;
2212 
2213 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214 
2215 	return ret;
2216 }
2217 
2218 /**
2219  * free_dma_desc_resources - free dma desc resources
2220  * @priv: private structure
2221  * @dma_conf: structure to take the dma data
2222  */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 				    struct stmmac_dma_conf *dma_conf)
2225 {
2226 	/* Release the DMA TX socket buffers */
2227 	free_dma_tx_desc_resources(priv, dma_conf);
2228 
2229 	/* Release the DMA RX socket buffers later
2230 	 * to ensure all pending XDP_TX buffers are returned.
2231 	 */
2232 	free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234 
2235 /**
2236  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237  *  @priv: driver private structure
2238  *  Description: It is used for enabling the rx queues in the MAC
2239  */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 	int queue;
2244 	u8 mode;
2245 
2246 	for (queue = 0; queue < rx_queues_count; queue++) {
2247 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 	}
2250 }
2251 
2252 /**
2253  * stmmac_start_rx_dma - start RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
 * This starts an RX DMA channel
2258  */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 	stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264 
2265 /**
2266  * stmmac_start_tx_dma - start TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This starts a TX DMA channel
2271  */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 	stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277 
2278 /**
2279  * stmmac_stop_rx_dma - stop RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
 * This stops an RX DMA channel
2284  */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290 
2291 /**
2292  * stmmac_stop_tx_dma - stop TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This stops a TX DMA channel
2297  */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303 
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 	u32 chan;
2310 
2311 	for (chan = 0; chan < dma_csr_ch; chan++) {
2312 		struct stmmac_channel *ch = &priv->channel[chan];
2313 		unsigned long flags;
2314 
2315 		spin_lock_irqsave(&ch->lock, flags);
2316 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 		spin_unlock_irqrestore(&ch->lock, flags);
2318 	}
2319 }
2320 
2321 /**
2322  * stmmac_start_all_dma - start all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This starts all the RX and TX DMA channels
2326  */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 	u32 chan = 0;
2332 
2333 	for (chan = 0; chan < rx_channels_count; chan++)
2334 		stmmac_start_rx_dma(priv, chan);
2335 
2336 	for (chan = 0; chan < tx_channels_count; chan++)
2337 		stmmac_start_tx_dma(priv, chan);
2338 }
2339 
2340 /**
2341  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342  * @priv: driver private structure
2343  * Description:
2344  * This stops the RX and TX DMA channels
2345  */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 	u32 chan = 0;
2351 
2352 	for (chan = 0; chan < rx_channels_count; chan++)
2353 		stmmac_stop_rx_dma(priv, chan);
2354 
2355 	for (chan = 0; chan < tx_channels_count; chan++)
2356 		stmmac_stop_tx_dma(priv, chan);
2357 }
2358 
2359 /**
2360  *  stmmac_dma_operation_mode - HW DMA operation mode
2361  *  @priv: driver private structure
2362  *  Description: it is used for configuring the DMA operation mode register in
2363  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364  */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 	int rxfifosz = priv->plat->rx_fifo_size;
2370 	int txfifosz = priv->plat->tx_fifo_size;
2371 	u32 txmode = 0;
2372 	u32 rxmode = 0;
2373 	u32 chan = 0;
2374 	u8 qmode = 0;
2375 
2376 	if (rxfifosz == 0)
2377 		rxfifosz = priv->dma_cap.rx_fifo_size;
2378 	if (txfifosz == 0)
2379 		txfifosz = priv->dma_cap.tx_fifo_size;
2380 
2381 	/* Adjust for real per queue fifo size */
2382 	rxfifosz /= rx_channels_count;
2383 	txfifosz /= tx_channels_count;
2384 
2385 	if (priv->plat->force_thresh_dma_mode) {
2386 		txmode = tc;
2387 		rxmode = tc;
2388 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported;
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
		 */
2396 		txmode = SF_DMA_MODE;
2397 		rxmode = SF_DMA_MODE;
2398 		priv->xstats.threshold = SF_DMA_MODE;
2399 	} else {
2400 		txmode = tc;
2401 		rxmode = SF_DMA_MODE;
2402 	}
2403 
2404 	/* configure all channels */
2405 	for (chan = 0; chan < rx_channels_count; chan++) {
2406 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 		u32 buf_size;
2408 
2409 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410 
2411 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 				rxfifosz, qmode);
2413 
2414 		if (rx_q->xsk_pool) {
2415 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 					      buf_size,
2418 					      chan);
2419 		} else {
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      priv->dma_conf.dma_buf_sz,
2422 					      chan);
2423 		}
2424 	}
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++) {
2427 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428 
2429 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 				txfifosz, qmode);
2431 	}
2432 }
2433 
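/* Transmit pending AF_XDP descriptors on @queue, sending at most @budget
 * frames while leaving headroom for the slow path. Returns true when the
 * XSK descriptors were drained without exhausting the budget.
 */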
2434 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2435 {
2436 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2437 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2438 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2439 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2440 	unsigned int entry = tx_q->cur_tx;
2441 	struct dma_desc *tx_desc = NULL;
2442 	struct xdp_desc xdp_desc;
2443 	bool work_done = true;
2444 	u32 tx_set_ic_bit = 0;
2445 
2446 	/* Avoids TX time-out as we are sharing with slow path */
2447 	txq_trans_cond_update(nq);
2448 
2449 	budget = min(budget, stmmac_tx_avail(priv, queue));
2450 
2451 	while (budget-- > 0) {
2452 		dma_addr_t dma_addr;
2453 		bool set_ic;
2454 
		/* We are sharing the TX ring with the slow path, so stop XSK TX
		 * descriptor submission when the available TX ring space drops
		 * below the threshold.
		 */
2458 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2459 		    !netif_carrier_ok(priv->dev)) {
2460 			work_done = false;
2461 			break;
2462 		}
2463 
2464 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2465 			break;
2466 
2467 		if (likely(priv->extend_desc))
2468 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2469 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2470 			tx_desc = &tx_q->dma_entx[entry].basic;
2471 		else
2472 			tx_desc = tx_q->dma_tx + entry;
2473 
2474 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2475 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2476 
2477 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2478 
		/* To return the XDP buffer to the XSK pool, we simply call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
2483 		tx_q->tx_skbuff_dma[entry].buf = 0;
2484 		tx_q->xdpf[entry] = NULL;
2485 
2486 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2487 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2488 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2489 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2490 
2491 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2492 
2493 		tx_q->tx_count_frames++;
2494 
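		/* Request a completion interrupt only every tx_coal_frames
		 * descriptors to keep the TX IRQ rate down.
		 */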
2495 		if (!priv->tx_coal_frames[queue])
2496 			set_ic = false;
2497 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2498 			set_ic = true;
2499 		else
2500 			set_ic = false;
2501 
2502 		if (set_ic) {
2503 			tx_q->tx_count_frames = 0;
2504 			stmmac_set_tx_ic(priv, tx_desc);
2505 			tx_set_ic_bit++;
2506 		}
2507 
2508 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2509 				       true, priv->mode, true, true,
2510 				       xdp_desc.len);
2511 
2512 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2513 
2514 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2515 		entry = tx_q->cur_tx;
2516 	}
2517 	u64_stats_update_begin(&txq_stats->napi_syncp);
2518 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2519 	u64_stats_update_end(&txq_stats->napi_syncp);
2520 
2521 	if (tx_desc) {
2522 		stmmac_flush_tx_descriptors(priv, queue);
2523 		xsk_tx_release(pool);
2524 	}
2525 
	/* Return true only if both conditions are met:
	 *  a) TX budget is still available
	 *  b) work_done = true when the XSK TX desc peek is empty (no more
	 *     pending XSK TX for transmission)
	 */
2531 	return !!budget && work_done;
2532 }
2533 
2534 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2535 {
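	/* When the DMA reports an error that a larger threshold can cure,
	 * bump the TX threshold in steps of 64 (capped at 256), unless the
	 * channel already operates in store-and-forward mode.
	 */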
2536 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2537 		tc += 64;
2538 
2539 		if (priv->plat->force_thresh_dma_mode)
2540 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2541 		else
2542 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2543 						      chan);
2544 
2545 		priv->xstats.threshold = tc;
2546 	}
2547 }
2548 
2549 /**
2550  * stmmac_tx_clean - to manage the transmission completion
2551  * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
2553  * @queue: TX queue index
2554  * Description: it reclaims the transmit resources after transmission completes.
2555  */
2556 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2557 {
2558 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2559 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2560 	unsigned int bytes_compl = 0, pkts_compl = 0;
2561 	unsigned int entry, xmits = 0, count = 0;
2562 	u32 tx_packets = 0, tx_errors = 0;
2563 
2564 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2565 
2566 	tx_q->xsk_frames_done = 0;
2567 
2568 	entry = tx_q->dirty_tx;
2569 
	/* Try to clean all completed TX frames in one shot */
2571 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2572 		struct xdp_frame *xdpf;
2573 		struct sk_buff *skb;
2574 		struct dma_desc *p;
2575 		int status;
2576 
2577 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2578 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2579 			xdpf = tx_q->xdpf[entry];
2580 			skb = NULL;
2581 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2582 			xdpf = NULL;
2583 			skb = tx_q->tx_skbuff[entry];
2584 		} else {
2585 			xdpf = NULL;
2586 			skb = NULL;
2587 		}
2588 
2589 		if (priv->extend_desc)
2590 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2591 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2592 			p = &tx_q->dma_entx[entry].basic;
2593 		else
2594 			p = tx_q->dma_tx + entry;
2595 
2596 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2597 		/* Check if the descriptor is owned by the DMA */
2598 		if (unlikely(status & tx_dma_own))
2599 			break;
2600 
2601 		count++;
2602 
2603 		/* Make sure descriptor fields are read after reading
2604 		 * the own bit.
2605 		 */
2606 		dma_rmb();
2607 
2608 		/* Just consider the last segment and ...*/
2609 		if (likely(!(status & tx_not_ls))) {
2610 			/* ... verify the status error condition */
2611 			if (unlikely(status & tx_err)) {
2612 				tx_errors++;
2613 				if (unlikely(status & tx_err_bump_tc))
2614 					stmmac_bump_dma_threshold(priv, queue);
2615 			} else {
2616 				tx_packets++;
2617 			}
2618 			if (skb)
2619 				stmmac_get_tx_hwtstamp(priv, p, skb);
2620 		}
2621 
2622 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2623 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2624 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2625 				dma_unmap_page(priv->device,
2626 					       tx_q->tx_skbuff_dma[entry].buf,
2627 					       tx_q->tx_skbuff_dma[entry].len,
2628 					       DMA_TO_DEVICE);
2629 			else
2630 				dma_unmap_single(priv->device,
2631 						 tx_q->tx_skbuff_dma[entry].buf,
2632 						 tx_q->tx_skbuff_dma[entry].len,
2633 						 DMA_TO_DEVICE);
2634 			tx_q->tx_skbuff_dma[entry].buf = 0;
2635 			tx_q->tx_skbuff_dma[entry].len = 0;
2636 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2637 		}
2638 
2639 		stmmac_clean_desc3(priv, tx_q, p);
2640 
2641 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2642 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2643 
2644 		if (xdpf &&
2645 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2646 			xdp_return_frame_rx_napi(xdpf);
2647 			tx_q->xdpf[entry] = NULL;
2648 		}
2649 
2650 		if (xdpf &&
2651 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2652 			xdp_return_frame(xdpf);
2653 			tx_q->xdpf[entry] = NULL;
2654 		}
2655 
2656 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2657 			tx_q->xsk_frames_done++;
2658 
2659 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2660 			if (likely(skb)) {
2661 				pkts_compl++;
2662 				bytes_compl += skb->len;
2663 				dev_consume_skb_any(skb);
2664 				tx_q->tx_skbuff[entry] = NULL;
2665 			}
2666 		}
2667 
2668 		stmmac_release_tx_desc(priv, p, priv->mode);
2669 
2670 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2671 	}
2672 	tx_q->dirty_tx = entry;
2673 
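	/* Report the completed packets and bytes to the BQL layer */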
2674 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2675 				  pkts_compl, bytes_compl);
2676 
2677 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2678 								queue))) &&
2679 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2680 
2681 		netif_dbg(priv, tx_done, priv->dev,
2682 			  "%s: restart transmit\n", __func__);
2683 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2684 	}
2685 
2686 	if (tx_q->xsk_pool) {
2687 		bool work_done;
2688 
2689 		if (tx_q->xsk_frames_done)
2690 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2691 
2692 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2693 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2694 
2695 		/* For XSK TX, we try to send as many as possible.
2696 		 * If XSK work done (XSK TX desc empty and budget still
2697 		 * available), return "budget - 1" to reenable TX IRQ.
2698 		 * Else, return "budget" to make NAPI continue polling.
2699 		 */
2700 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2701 					       STMMAC_XSK_TX_BUDGET_MAX);
2702 		if (work_done)
2703 			xmits = budget - 1;
2704 		else
2705 			xmits = budget;
2706 	}
2707 
2708 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2709 	    priv->eee_sw_timer_en) {
2710 		if (stmmac_enable_eee_mode(priv))
2711 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2712 	}
2713 
	/* We still have pending packets: re-arm the TX coalescing timer */
2715 	if (tx_q->dirty_tx != tx_q->cur_tx)
2716 		stmmac_tx_timer_arm(priv, queue);
2717 
2718 	u64_stats_update_begin(&txq_stats->napi_syncp);
2719 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2720 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2721 	u64_stats_inc(&txq_stats->napi.tx_clean);
2722 	u64_stats_update_end(&txq_stats->napi_syncp);
2723 
2724 	priv->xstats.tx_errors += tx_errors;
2725 
2726 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2727 
2728 	/* Combine decisions from TX clean and XSK TX */
2729 	return max(count, xmits);
2730 }
2731 
2732 /**
2733  * stmmac_tx_err - to manage the tx error
2734  * @priv: driver private structure
2735  * @chan: channel index
2736  * Description: it cleans the descriptors and restarts the transmission
2737  * in case of transmission errors.
2738  */
2739 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2740 {
2741 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2742 
2743 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2744 
2745 	stmmac_stop_tx_dma(priv, chan);
2746 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2747 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2748 	stmmac_reset_tx_queue(priv, chan);
2749 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2750 			    tx_q->dma_tx_phy, chan);
2751 	stmmac_start_tx_dma(priv, chan);
2752 
2753 	priv->xstats.tx_errors++;
2754 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2755 }
2756 
2757 /**
2758  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2759  *  @priv: driver private structure
2760  *  @txmode: TX operating mode
2761  *  @rxmode: RX operating mode
2762  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
2766  */
2767 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2768 					  u32 rxmode, u32 chan)
2769 {
2770 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2771 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2772 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2773 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2774 	int rxfifosz = priv->plat->rx_fifo_size;
2775 	int txfifosz = priv->plat->tx_fifo_size;
2776 
2777 	if (rxfifosz == 0)
2778 		rxfifosz = priv->dma_cap.rx_fifo_size;
2779 	if (txfifosz == 0)
2780 		txfifosz = priv->dma_cap.tx_fifo_size;
2781 
2782 	/* Adjust for real per queue fifo size */
2783 	rxfifosz /= rx_channels_count;
2784 	txfifosz /= tx_channels_count;
2785 
2786 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2787 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2788 }
2789 
2790 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2791 {
2792 	int ret;
2793 
2794 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2795 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2796 	if (ret && (ret != -EINVAL)) {
2797 		stmmac_global_err(priv);
2798 		return true;
2799 	}
2800 
2801 	return false;
2802 }
2803 
2804 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2805 {
2806 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2807 						 &priv->xstats, chan, dir);
2808 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2809 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2810 	struct stmmac_channel *ch = &priv->channel[chan];
2811 	struct napi_struct *rx_napi;
2812 	struct napi_struct *tx_napi;
2813 	unsigned long flags;
2814 
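	/* Queues bound to an XSK pool are serviced by the combined rx/tx NAPI */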
2815 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2816 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2817 
2818 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2819 		if (napi_schedule_prep(rx_napi)) {
2820 			spin_lock_irqsave(&ch->lock, flags);
2821 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2822 			spin_unlock_irqrestore(&ch->lock, flags);
2823 			__napi_schedule(rx_napi);
2824 		}
2825 	}
2826 
2827 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2828 		if (napi_schedule_prep(tx_napi)) {
2829 			spin_lock_irqsave(&ch->lock, flags);
2830 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2831 			spin_unlock_irqrestore(&ch->lock, flags);
2832 			__napi_schedule(tx_napi);
2833 		}
2834 	}
2835 
2836 	return status;
2837 }
2838 
2839 /**
2840  * stmmac_dma_interrupt - DMA ISR
2841  * @priv: driver private structure
2842  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case some
 * work can be done.
2845  */
2846 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2847 {
2848 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2849 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2850 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2851 				tx_channel_count : rx_channel_count;
2852 	u32 chan;
2853 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2854 
2855 	/* Make sure we never check beyond our status buffer. */
2856 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2857 		channels_to_check = ARRAY_SIZE(status);
2858 
2859 	for (chan = 0; chan < channels_to_check; chan++)
2860 		status[chan] = stmmac_napi_check(priv, chan,
2861 						 DMA_DIR_RXTX);
2862 
2863 	for (chan = 0; chan < tx_channel_count; chan++) {
2864 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2865 			/* Try to bump up the dma threshold on this failure */
2866 			stmmac_bump_dma_threshold(priv, chan);
2867 		} else if (unlikely(status[chan] == tx_hard_error)) {
2868 			stmmac_tx_err(priv, chan);
2869 		}
2870 	}
2871 }
2872 
2873 /**
2874  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2875  * @priv: driver private structure
 * Description: this masks the MMC irq since the counters are managed in SW.
2877  */
2878 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2879 {
2880 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2881 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2882 
2883 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2884 
2885 	if (priv->dma_cap.rmon) {
2886 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2887 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else {
		netdev_info(priv->dev, "No MAC Management Counters available\n");
	}
2890 }
2891 
2892 /**
2893  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2894  * @priv: driver private structure
2895  * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional features/functions.
 *  This can also be used to override the values passed through the
 *  platform; the platform values remain necessary for old MAC10/100 and
 *  GMAC chips.
2900  */
2901 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2902 {
2903 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2904 }
2905 
2906 /**
2907  * stmmac_check_ether_addr - check if the MAC addr is valid
2908  * @priv: driver private structure
2909  * Description:
 * it verifies that the MAC address is valid; if it is not, a valid address
 * is read from the HW or, failing that, a random MAC address is generated.
2912  */
2913 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2914 {
2915 	u8 addr[ETH_ALEN];
2916 
2917 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2918 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2919 		if (is_valid_ether_addr(addr))
2920 			eth_hw_addr_set(priv->dev, addr);
2921 		else
2922 			eth_hw_addr_random(priv->dev);
2923 		dev_info(priv->device, "device MAC address %pM\n",
2924 			 priv->dev->dev_addr);
2925 	}
2926 }
2927 
2928 /**
2929  * stmmac_init_dma_engine - DMA init.
2930  * @priv: driver private structure
2931  * Description:
2932  * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
2935  */
2936 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2937 {
2938 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2939 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2940 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2941 	struct stmmac_rx_queue *rx_q;
2942 	struct stmmac_tx_queue *tx_q;
2943 	u32 chan = 0;
2944 	int atds = 0;
2945 	int ret = 0;
2946 
2947 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2948 		dev_err(priv->device, "Invalid DMA configuration\n");
2949 		return -EINVAL;
2950 	}
2951 
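	/* Extended descriptors in ring mode require the alternate descriptor
	 * size (ATDS) to be programmed in the DMA.
	 */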
2952 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2953 		atds = 1;
2954 
2955 	ret = stmmac_reset(priv, priv->ioaddr);
2956 	if (ret) {
2957 		dev_err(priv->device, "Failed to reset the dma\n");
2958 		return ret;
2959 	}
2960 
2961 	/* DMA Configuration */
2962 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2963 
2964 	if (priv->plat->axi)
2965 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2966 
2967 	/* DMA CSR Channel configuration */
2968 	for (chan = 0; chan < dma_csr_ch; chan++) {
2969 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2970 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2971 	}
2972 
2973 	/* DMA RX Channel Configuration */
2974 	for (chan = 0; chan < rx_channels_count; chan++) {
2975 		rx_q = &priv->dma_conf.rx_queue[chan];
2976 
2977 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2978 				    rx_q->dma_rx_phy, chan);
2979 
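		/* Point the RX tail pointer past the descriptors that already
		 * own a buffer (buf_alloc_num of them).
		 */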
2980 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2981 				     (rx_q->buf_alloc_num *
2982 				      sizeof(struct dma_desc));
2983 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2984 				       rx_q->rx_tail_addr, chan);
2985 	}
2986 
2987 	/* DMA TX Channel Configuration */
2988 	for (chan = 0; chan < tx_channels_count; chan++) {
2989 		tx_q = &priv->dma_conf.tx_queue[chan];
2990 
2991 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2992 				    tx_q->dma_tx_phy, chan);
2993 
2994 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2995 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2996 				       tx_q->tx_tail_addr, chan);
2997 	}
2998 
2999 	return ret;
3000 }
3001 
3002 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3003 {
3004 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3005 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3006 
3007 	if (!tx_coal_timer)
3008 		return;
3009 
3010 	hrtimer_start(&tx_q->txtimer,
3011 		      STMMAC_COAL_TIMER(tx_coal_timer),
3012 		      HRTIMER_MODE_REL);
3013 }
3014 
3015 /**
3016  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the expiring hrtimer
 * Description:
 * This is the timer handler that schedules the NAPI context which in turn
 * invokes stmmac_tx_clean.
3020  */
3021 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3022 {
3023 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3024 	struct stmmac_priv *priv = tx_q->priv_data;
3025 	struct stmmac_channel *ch;
3026 	struct napi_struct *napi;
3027 
3028 	ch = &priv->channel[tx_q->queue_index];
3029 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3030 
3031 	if (likely(napi_schedule_prep(napi))) {
3032 		unsigned long flags;
3033 
3034 		spin_lock_irqsave(&ch->lock, flags);
3035 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3036 		spin_unlock_irqrestore(&ch->lock, flags);
3037 		__napi_schedule(napi);
3038 	}
3039 
3040 	return HRTIMER_NORESTART;
3041 }
3042 
3043 /**
3044  * stmmac_init_coalesce - init mitigation options.
3045  * @priv: driver private structure
3046  * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and the default frame threshold used for setting the
 * interrupt-on-completion bit.
3050  */
3051 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3052 {
3053 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3054 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3055 	u32 chan;
3056 
3057 	for (chan = 0; chan < tx_channel_count; chan++) {
3058 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3059 
3060 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3061 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3062 
3063 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3064 		tx_q->txtimer.function = stmmac_tx_timer;
3065 	}
3066 
3067 	for (chan = 0; chan < rx_channel_count; chan++)
3068 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3069 }
3070 
3071 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3072 {
3073 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3074 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3075 	u32 chan;
3076 
3077 	/* set TX ring length */
3078 	for (chan = 0; chan < tx_channels_count; chan++)
3079 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3080 				       (priv->dma_conf.dma_tx_size - 1), chan);
3081 
3082 	/* set RX ring length */
3083 	for (chan = 0; chan < rx_channels_count; chan++)
3084 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3085 				       (priv->dma_conf.dma_rx_size - 1), chan);
3086 }
3087 
3088 /**
3089  *  stmmac_set_tx_queue_weight - Set TX queue weight
3090  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
3092  */
3093 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3094 {
3095 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3096 	u32 weight;
3097 	u32 queue;
3098 
3099 	for (queue = 0; queue < tx_queues_count; queue++) {
3100 		weight = priv->plat->tx_queues_cfg[queue].weight;
3101 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3102 	}
3103 }
3104 
3105 /**
3106  *  stmmac_configure_cbs - Configure CBS in TX queue
3107  *  @priv: driver private structure
3108  *  Description: It is used for configuring CBS in AVB TX queues
3109  */
3110 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3111 {
3112 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3113 	u32 mode_to_use;
3114 	u32 queue;
3115 
3116 	/* queue 0 is reserved for legacy traffic */
3117 	for (queue = 1; queue < tx_queues_count; queue++) {
3118 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3119 		if (mode_to_use == MTL_QUEUE_DCB)
3120 			continue;
3121 
3122 		stmmac_config_cbs(priv, priv->hw,
3123 				priv->plat->tx_queues_cfg[queue].send_slope,
3124 				priv->plat->tx_queues_cfg[queue].idle_slope,
3125 				priv->plat->tx_queues_cfg[queue].high_credit,
3126 				priv->plat->tx_queues_cfg[queue].low_credit,
3127 				queue);
3128 	}
3129 }
3130 
3131 /**
3132  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3133  *  @priv: driver private structure
3134  *  Description: It is used for mapping RX queues to RX dma channels
3135  */
3136 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3137 {
3138 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3139 	u32 queue;
3140 	u32 chan;
3141 
3142 	for (queue = 0; queue < rx_queues_count; queue++) {
3143 		chan = priv->plat->rx_queues_cfg[queue].chan;
3144 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3145 	}
3146 }
3147 
3148 /**
3149  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3150  *  @priv: driver private structure
3151  *  Description: It is used for configuring the RX Queue Priority
3152  */
3153 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3154 {
3155 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3156 	u32 queue;
3157 	u32 prio;
3158 
3159 	for (queue = 0; queue < rx_queues_count; queue++) {
3160 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3161 			continue;
3162 
3163 		prio = priv->plat->rx_queues_cfg[queue].prio;
3164 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3165 	}
3166 }
3167 
3168 /**
3169  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3170  *  @priv: driver private structure
3171  *  Description: It is used for configuring the TX Queue Priority
3172  */
3173 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3174 {
3175 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3176 	u32 queue;
3177 	u32 prio;
3178 
3179 	for (queue = 0; queue < tx_queues_count; queue++) {
3180 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3181 			continue;
3182 
3183 		prio = priv->plat->tx_queues_cfg[queue].prio;
3184 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3185 	}
3186 }
3187 
3188 /**
3189  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3190  *  @priv: driver private structure
3191  *  Description: It is used for configuring the RX queue routing
3192  */
3193 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3194 {
3195 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3196 	u32 queue;
3197 	u8 packet;
3198 
3199 	for (queue = 0; queue < rx_queues_count; queue++) {
3200 		/* no specific packet type routing specified for the queue */
3201 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3202 			continue;
3203 
3204 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3205 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3206 	}
3207 }
3208 
3209 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3210 {
3211 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3212 		priv->rss.enable = false;
3213 		return;
3214 	}
3215 
3216 	if (priv->dev->features & NETIF_F_RXHASH)
3217 		priv->rss.enable = true;
3218 	else
3219 		priv->rss.enable = false;
3220 
3221 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3222 			     priv->plat->rx_queues_to_use);
3223 }
3224 
3225 /**
3226  *  stmmac_mtl_configuration - Configure MTL
3227  *  @priv: driver private structure
 *  Description: It is used for configuring the MTL
3229  */
3230 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3231 {
3232 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3233 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3234 
3235 	if (tx_queues_count > 1)
3236 		stmmac_set_tx_queue_weight(priv);
3237 
3238 	/* Configure MTL RX algorithms */
3239 	if (rx_queues_count > 1)
3240 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3241 				priv->plat->rx_sched_algorithm);
3242 
3243 	/* Configure MTL TX algorithms */
3244 	if (tx_queues_count > 1)
3245 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3246 				priv->plat->tx_sched_algorithm);
3247 
3248 	/* Configure CBS in AVB TX queues */
3249 	if (tx_queues_count > 1)
3250 		stmmac_configure_cbs(priv);
3251 
3252 	/* Map RX MTL to DMA channels */
3253 	stmmac_rx_queue_dma_chan_map(priv);
3254 
3255 	/* Enable MAC RX Queues */
3256 	stmmac_mac_enable_rx_queues(priv);
3257 
3258 	/* Set RX priorities */
3259 	if (rx_queues_count > 1)
3260 		stmmac_mac_config_rx_queues_prio(priv);
3261 
3262 	/* Set TX priorities */
3263 	if (tx_queues_count > 1)
3264 		stmmac_mac_config_tx_queues_prio(priv);
3265 
3266 	/* Set RX routing */
3267 	if (rx_queues_count > 1)
3268 		stmmac_mac_config_rx_queues_routing(priv);
3269 
3270 	/* Receive Side Scaling */
3271 	if (rx_queues_count > 1)
3272 		stmmac_mac_config_rss(priv);
3273 }
3274 
3275 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3276 {
3277 	if (priv->dma_cap.asp) {
3278 		netdev_info(priv->dev, "Enabling Safety Features\n");
3279 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3280 					  priv->plat->safety_feat_cfg);
3281 	} else {
3282 		netdev_info(priv->dev, "No Safety Features support found\n");
3283 	}
3284 }
3285 
3286 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3287 {
3288 	char *name;
3289 
3290 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3291 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3292 
3293 	name = priv->wq_name;
3294 	sprintf(name, "%s-fpe", priv->dev->name);
3295 
3296 	priv->fpe_wq = create_singlethread_workqueue(name);
3297 	if (!priv->fpe_wq) {
3298 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3299 
3300 		return -ENOMEM;
3301 	}
	netdev_info(priv->dev, "FPE workqueue started\n");
3303 
3304 	return 0;
3305 }
3306 
3307 /**
3308  * stmmac_hw_setup - setup mac in a usable state.
3309  *  @dev : pointer to the device structure.
3310  *  @ptp_register: register PTP if set
3311  *  Description:
 *  this is the main function to setup the HW in a usable state: the DMA
 *  engine is reset and the core registers are configured (e.g. AXI,
 *  checksum features, timers). The DMA is then ready to start receiving
 *  and transmitting.
 *  Return value:
 *  0 on success and an appropriate negative error code as defined in
 *  errno.h on failure.
3319  */
3320 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3321 {
3322 	struct stmmac_priv *priv = netdev_priv(dev);
3323 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3324 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3325 	bool sph_en;
3326 	u32 chan;
3327 	int ret;
3328 
3329 	/* DMA initialization and SW reset */
3330 	ret = stmmac_init_dma_engine(priv);
3331 	if (ret < 0) {
3332 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3333 			   __func__);
3334 		return ret;
3335 	}
3336 
3337 	/* Copy the MAC addr into the HW  */
3338 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3339 
3340 	/* PS and related bits will be programmed according to the speed */
3341 	if (priv->hw->pcs) {
3342 		int speed = priv->plat->mac_port_sel_speed;
3343 
3344 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3345 		    (speed == SPEED_1000)) {
3346 			priv->hw->ps = speed;
3347 		} else {
3348 			dev_warn(priv->device, "invalid port speed\n");
3349 			priv->hw->ps = 0;
3350 		}
3351 	}
3352 
3353 	/* Initialize the MAC Core */
3354 	stmmac_core_init(priv, priv->hw, dev);
3355 
	/* Initialize MTL */
3357 	stmmac_mtl_configuration(priv);
3358 
3359 	/* Initialize Safety Features */
3360 	stmmac_safety_feat_configuration(priv);
3361 
3362 	ret = stmmac_rx_ipc(priv, priv->hw);
3363 	if (!ret) {
3364 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3365 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3366 		priv->hw->rx_csum = 0;
3367 	}
3368 
3369 	/* Enable the MAC Rx/Tx */
3370 	stmmac_mac_set(priv, priv->ioaddr, true);
3371 
3372 	/* Set the HW DMA mode and the COE */
3373 	stmmac_dma_operation_mode(priv);
3374 
3375 	stmmac_mmc_setup(priv);
3376 
3377 	if (ptp_register) {
3378 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3379 		if (ret < 0)
3380 			netdev_warn(priv->dev,
3381 				    "failed to enable PTP reference clock: %pe\n",
3382 				    ERR_PTR(ret));
3383 	}
3384 
3385 	ret = stmmac_init_ptp(priv);
3386 	if (ret == -EOPNOTSUPP)
3387 		netdev_info(priv->dev, "PTP not supported by HW\n");
3388 	else if (ret)
3389 		netdev_warn(priv->dev, "PTP init failed\n");
3390 	else if (ptp_register)
3391 		stmmac_ptp_register(priv);
3392 
3393 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3394 
3395 	/* Convert the timer from msec to usec */
3396 	if (!priv->tx_lpi_timer)
3397 		priv->tx_lpi_timer = eee_timer * 1000;
3398 
3399 	if (priv->use_riwt) {
3400 		u32 queue;
3401 
3402 		for (queue = 0; queue < rx_cnt; queue++) {
3403 			if (!priv->rx_riwt[queue])
3404 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3405 
3406 			stmmac_rx_watchdog(priv, priv->ioaddr,
3407 					   priv->rx_riwt[queue], queue);
3408 		}
3409 	}
3410 
3411 	if (priv->hw->pcs)
3412 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3413 
3414 	/* set TX and RX rings length */
3415 	stmmac_set_rings_length(priv);
3416 
3417 	/* Enable TSO */
3418 	if (priv->tso) {
3419 		for (chan = 0; chan < tx_cnt; chan++) {
3420 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3421 
3422 			/* TSO and TBS cannot co-exist */
3423 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3424 				continue;
3425 
3426 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3427 		}
3428 	}
3429 
3430 	/* Enable Split Header */
3431 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3432 	for (chan = 0; chan < rx_cnt; chan++)
3433 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3434 
3436 	/* VLAN Tag Insertion */
3437 	if (priv->dma_cap.vlins)
3438 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3439 
3440 	/* TBS */
3441 	for (chan = 0; chan < tx_cnt; chan++) {
3442 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3443 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3444 
3445 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3446 	}
3447 
3448 	/* Configure real RX and TX queues */
3449 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3450 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3451 
3452 	/* Start the ball rolling... */
3453 	stmmac_start_all_dma(priv);
3454 
3455 	if (priv->dma_cap.fpesel) {
3456 		stmmac_fpe_start_wq(priv);
3457 
3458 		if (priv->plat->fpe_cfg->enable)
3459 			stmmac_fpe_handshake(priv, true);
3460 	}
3461 
3462 	return 0;
3463 }
3464 
3465 static void stmmac_hw_teardown(struct net_device *dev)
3466 {
3467 	struct stmmac_priv *priv = netdev_priv(dev);
3468 
3469 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3470 }
3471 
3472 static void stmmac_free_irq(struct net_device *dev,
3473 			    enum request_irq_err irq_err, int irq_idx)
3474 {
3475 	struct stmmac_priv *priv = netdev_priv(dev);
3476 	int j;
3477 
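	/* Teardown mirrors the request order: start from the stage that failed
	 * and fall through to release every IRQ requested before it.
	 */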
3478 	switch (irq_err) {
3479 	case REQ_IRQ_ERR_ALL:
3480 		irq_idx = priv->plat->tx_queues_to_use;
3481 		fallthrough;
3482 	case REQ_IRQ_ERR_TX:
3483 		for (j = irq_idx - 1; j >= 0; j--) {
3484 			if (priv->tx_irq[j] > 0) {
3485 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3486 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3487 			}
3488 		}
3489 		irq_idx = priv->plat->rx_queues_to_use;
3490 		fallthrough;
3491 	case REQ_IRQ_ERR_RX:
3492 		for (j = irq_idx - 1; j >= 0; j--) {
3493 			if (priv->rx_irq[j] > 0) {
3494 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3495 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3496 			}
3497 		}
3498 
3499 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3500 			free_irq(priv->sfty_ue_irq, dev);
3501 		fallthrough;
3502 	case REQ_IRQ_ERR_SFTY_UE:
3503 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3504 			free_irq(priv->sfty_ce_irq, dev);
3505 		fallthrough;
3506 	case REQ_IRQ_ERR_SFTY_CE:
3507 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3508 			free_irq(priv->lpi_irq, dev);
3509 		fallthrough;
3510 	case REQ_IRQ_ERR_LPI:
3511 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3512 			free_irq(priv->wol_irq, dev);
3513 		fallthrough;
3514 	case REQ_IRQ_ERR_WOL:
3515 		free_irq(dev->irq, dev);
3516 		fallthrough;
3517 	case REQ_IRQ_ERR_MAC:
3518 	case REQ_IRQ_ERR_NO:
		/* If the MAC IRQ request failed, there are no more IRQs to free */
3520 		break;
3521 	}
3522 }
3523 
3524 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3525 {
3526 	struct stmmac_priv *priv = netdev_priv(dev);
3527 	enum request_irq_err irq_err;
3528 	cpumask_t cpu_mask;
3529 	int irq_idx = 0;
3530 	char *int_name;
3531 	int ret;
3532 	int i;
3533 
3534 	/* For common interrupt */
3535 	int_name = priv->int_name_mac;
3536 	sprintf(int_name, "%s:%s", dev->name, "mac");
3537 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3538 			  0, int_name, dev);
3539 	if (unlikely(ret < 0)) {
3540 		netdev_err(priv->dev,
3541 			   "%s: alloc mac MSI %d (error: %d)\n",
3542 			   __func__, dev->irq, ret);
3543 		irq_err = REQ_IRQ_ERR_MAC;
3544 		goto irq_error;
3545 	}
3546 
	/* Request the Wake-on-LAN IRQ in case a separate line
	 * is used for WoL
	 */
3550 	priv->wol_irq_disabled = true;
3551 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3552 		int_name = priv->int_name_wol;
3553 		sprintf(int_name, "%s:%s", dev->name, "wol");
3554 		ret = request_irq(priv->wol_irq,
3555 				  stmmac_mac_interrupt,
3556 				  0, int_name, dev);
3557 		if (unlikely(ret < 0)) {
3558 			netdev_err(priv->dev,
3559 				   "%s: alloc wol MSI %d (error: %d)\n",
3560 				   __func__, priv->wol_irq, ret);
3561 			irq_err = REQ_IRQ_ERR_WOL;
3562 			goto irq_error;
3563 		}
3564 	}
3565 
	/* Request the LPI IRQ in case a separate line
	 * is used for LPI
	 */
3569 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3570 		int_name = priv->int_name_lpi;
3571 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3572 		ret = request_irq(priv->lpi_irq,
3573 				  stmmac_mac_interrupt,
3574 				  0, int_name, dev);
3575 		if (unlikely(ret < 0)) {
3576 			netdev_err(priv->dev,
3577 				   "%s: alloc lpi MSI %d (error: %d)\n",
3578 				   __func__, priv->lpi_irq, ret);
3579 			irq_err = REQ_IRQ_ERR_LPI;
3580 			goto irq_error;
3581 		}
3582 	}
3583 
	/* Request the Safety Feature Correctable Error IRQ in
	 * case a separate line is used for it
	 */
3587 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3588 		int_name = priv->int_name_sfty_ce;
3589 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3590 		ret = request_irq(priv->sfty_ce_irq,
3591 				  stmmac_safety_interrupt,
3592 				  0, int_name, dev);
3593 		if (unlikely(ret < 0)) {
3594 			netdev_err(priv->dev,
3595 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3596 				   __func__, priv->sfty_ce_irq, ret);
3597 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3598 			goto irq_error;
3599 		}
3600 	}
3601 
	/* Request the Safety Feature Uncorrectable Error IRQ in
	 * case a separate line is used for it
	 */
3605 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3606 		int_name = priv->int_name_sfty_ue;
3607 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3608 		ret = request_irq(priv->sfty_ue_irq,
3609 				  stmmac_safety_interrupt,
3610 				  0, int_name, dev);
3611 		if (unlikely(ret < 0)) {
3612 			netdev_err(priv->dev,
3613 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3614 				   __func__, priv->sfty_ue_irq, ret);
3615 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3616 			goto irq_error;
3617 		}
3618 	}
3619 
3620 	/* Request Rx MSI irq */
3621 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3622 		if (i >= MTL_MAX_RX_QUEUES)
3623 			break;
3624 		if (priv->rx_irq[i] == 0)
3625 			continue;
3626 
3627 		int_name = priv->int_name_rx_irq[i];
3628 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3629 		ret = request_irq(priv->rx_irq[i],
3630 				  stmmac_msi_intr_rx,
3631 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3632 		if (unlikely(ret < 0)) {
3633 			netdev_err(priv->dev,
3634 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3635 				   __func__, i, priv->rx_irq[i], ret);
3636 			irq_err = REQ_IRQ_ERR_RX;
3637 			irq_idx = i;
3638 			goto irq_error;
3639 		}
3640 		cpumask_clear(&cpu_mask);
3641 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3642 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3643 	}
3644 
3645 	/* Request Tx MSI irq */
3646 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3647 		if (i >= MTL_MAX_TX_QUEUES)
3648 			break;
3649 		if (priv->tx_irq[i] == 0)
3650 			continue;
3651 
3652 		int_name = priv->int_name_tx_irq[i];
3653 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3654 		ret = request_irq(priv->tx_irq[i],
3655 				  stmmac_msi_intr_tx,
3656 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3657 		if (unlikely(ret < 0)) {
3658 			netdev_err(priv->dev,
3659 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3660 				   __func__, i, priv->tx_irq[i], ret);
3661 			irq_err = REQ_IRQ_ERR_TX;
3662 			irq_idx = i;
3663 			goto irq_error;
3664 		}
3665 		cpumask_clear(&cpu_mask);
3666 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3667 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3668 	}
3669 
3670 	return 0;
3671 
3672 irq_error:
3673 	stmmac_free_irq(dev, irq_err, irq_idx);
3674 	return ret;
3675 }
3676 
3677 static int stmmac_request_irq_single(struct net_device *dev)
3678 {
3679 	struct stmmac_priv *priv = netdev_priv(dev);
3680 	enum request_irq_err irq_err;
3681 	int ret;
3682 
3683 	ret = request_irq(dev->irq, stmmac_interrupt,
3684 			  IRQF_SHARED, dev->name, dev);
3685 	if (unlikely(ret < 0)) {
3686 		netdev_err(priv->dev,
3687 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3688 			   __func__, dev->irq, ret);
3689 		irq_err = REQ_IRQ_ERR_MAC;
3690 		goto irq_error;
3691 	}
3692 
	/* Request the Wake-on-LAN IRQ in case a separate line
	 * is used for WoL
	 */
3696 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3697 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3698 				  IRQF_SHARED, dev->name, dev);
3699 		if (unlikely(ret < 0)) {
3700 			netdev_err(priv->dev,
3701 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3702 				   __func__, priv->wol_irq, ret);
3703 			irq_err = REQ_IRQ_ERR_WOL;
3704 			goto irq_error;
3705 		}
3706 	}
3707 
	/* Request the LPI IRQ in case a separate line is used for LPI */
3709 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3710 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3711 				  IRQF_SHARED, dev->name, dev);
3712 		if (unlikely(ret < 0)) {
3713 			netdev_err(priv->dev,
3714 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3715 				   __func__, priv->lpi_irq, ret);
3716 			irq_err = REQ_IRQ_ERR_LPI;
3717 			goto irq_error;
3718 		}
3719 	}
3720 
3721 	return 0;
3722 
3723 irq_error:
3724 	stmmac_free_irq(dev, irq_err, 0);
3725 	return ret;
3726 }
3727 
3728 static int stmmac_request_irq(struct net_device *dev)
3729 {
3730 	struct stmmac_priv *priv = netdev_priv(dev);
3731 	int ret;
3732 
3733 	/* Request the IRQ lines */
3734 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3735 		ret = stmmac_request_irq_multi_msi(dev);
3736 	else
3737 		ret = stmmac_request_irq_single(dev);
3738 
3739 	return ret;
3740 }
3741 
3742 /**
3743  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3744  *  @priv: driver private structure
3745  *  @mtu: MTU to setup the dma queue and buf with
3746  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3747  *  Allocate the Tx/Rx DMA queue and init them.
3748  *  Return value:
3749  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3750  */
3751 static struct stmmac_dma_conf *
3752 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3753 {
3754 	struct stmmac_dma_conf *dma_conf;
3755 	int chan, bfsize, ret;
3756 
3757 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3758 	if (!dma_conf) {
3759 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3760 			   __func__);
3761 		return ERR_PTR(-ENOMEM);
3762 	}
3763 
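	/* Select the RX buffer size: use a 16KiB buffer when the MTU requires
	 * it and the core supports it, otherwise derive the size from the MTU.
	 */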
3764 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3765 	if (bfsize < 0)
3766 		bfsize = 0;
3767 
3768 	if (bfsize < BUF_SIZE_16KiB)
3769 		bfsize = stmmac_set_bfsize(mtu, 0);
3770 
3771 	dma_conf->dma_buf_sz = bfsize;
	/* Choose the Tx/Rx ring sizes from the ones already defined in the
	 * priv struct, if any.
	 */
3775 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3776 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3777 
3778 	if (!dma_conf->dma_tx_size)
3779 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3780 	if (!dma_conf->dma_rx_size)
3781 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3782 
3783 	/* Earlier check for TBS */
3784 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3785 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3786 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3787 
3788 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3789 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3790 	}
3791 
3792 	ret = alloc_dma_desc_resources(priv, dma_conf);
3793 	if (ret < 0) {
3794 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3795 			   __func__);
3796 		goto alloc_error;
3797 	}
3798 
3799 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3800 	if (ret < 0) {
3801 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3802 			   __func__);
3803 		goto init_error;
3804 	}
3805 
3806 	return dma_conf;
3807 
3808 init_error:
3809 	free_dma_desc_resources(priv, dma_conf);
3810 alloc_error:
3811 	kfree(dma_conf);
3812 	return ERR_PTR(ret);
3813 }
3814 
3815 /**
3816  *  __stmmac_open - open entry point of the driver
3817  *  @dev : pointer to the device structure.
3818  *  @dma_conf :  structure to take the dma data
3819  *  Description:
3820  *  This function is the open entry point of the driver.
3821  *  Return value:
3822  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3823  *  file on failure.
3824  */
3825 static int __stmmac_open(struct net_device *dev,
3826 			 struct stmmac_dma_conf *dma_conf)
3827 {
3828 	struct stmmac_priv *priv = netdev_priv(dev);
3829 	int mode = priv->plat->phy_interface;
3830 	u32 chan;
3831 	int ret;
3832 
3833 	ret = pm_runtime_resume_and_get(priv->device);
3834 	if (ret < 0)
3835 		return ret;
3836 
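	/* Attach a PHY through phylink unless the link is handled entirely by
	 * an internal TBI/RTBI PCS, a C73-mode XPCS or a Lynx PCS.
	 */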
3837 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3838 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3839 	    (!priv->hw->xpcs ||
3840 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3841 	    !priv->hw->lynx_pcs) {
3842 		ret = stmmac_init_phy(dev);
3843 		if (ret) {
3844 			netdev_err(priv->dev,
3845 				   "%s: Cannot attach to PHY (error: %d)\n",
3846 				   __func__, ret);
3847 			goto init_phy_error;
3848 		}
3849 	}
3850 
3851 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3852 
3853 	buf_sz = dma_conf->dma_buf_sz;
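	/* Preserve the per-queue TBS enable state before priv->dma_conf is
	 * overwritten with the new configuration.
	 */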
3854 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3855 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3856 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3857 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3858 
3859 	stmmac_reset_queues_param(priv);
3860 
3861 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3862 	    priv->plat->serdes_powerup) {
3863 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3864 		if (ret < 0) {
3865 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3866 				   __func__);
3867 			goto init_error;
3868 		}
3869 	}
3870 
3871 	ret = stmmac_hw_setup(dev, true);
3872 	if (ret < 0) {
3873 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3874 		goto init_error;
3875 	}
3876 
3877 	stmmac_init_coalesce(priv);
3878 
3879 	phylink_start(priv->phylink);
3880 	/* We may have called phylink_speed_down before */
3881 	phylink_speed_up(priv->phylink);
3882 
3883 	ret = stmmac_request_irq(dev);
3884 	if (ret)
3885 		goto irq_error;
3886 
3887 	stmmac_enable_all_queues(priv);
3888 	netif_tx_start_all_queues(priv->dev);
3889 	stmmac_enable_all_dma_irq(priv);
3890 
3891 	return 0;
3892 
3893 irq_error:
3894 	phylink_stop(priv->phylink);
3895 
3896 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3897 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3898 
3899 	stmmac_hw_teardown(dev);
3900 init_error:
3901 	phylink_disconnect_phy(priv->phylink);
3902 init_phy_error:
3903 	pm_runtime_put(priv->device);
3904 	return ret;
3905 }
3906 
3907 static int stmmac_open(struct net_device *dev)
3908 {
3909 	struct stmmac_priv *priv = netdev_priv(dev);
3910 	struct stmmac_dma_conf *dma_conf;
3911 	int ret;
3912 
3913 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3914 	if (IS_ERR(dma_conf))
3915 		return PTR_ERR(dma_conf);
3916 
3917 	ret = __stmmac_open(dev, dma_conf);
3918 	if (ret)
3919 		free_dma_desc_resources(priv, dma_conf);
3920 
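	/* On success the dma_conf contents were copied into priv->dma_conf,
	 * and on failure its resources were freed above, so the temporary
	 * wrapper can always be released here.
	 */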
3921 	kfree(dma_conf);
3922 	return ret;
3923 }
3924 
3925 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3926 {
3927 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3928 
3929 	if (priv->fpe_wq)
3930 		destroy_workqueue(priv->fpe_wq);
3931 
	netdev_info(priv->dev, "FPE workqueue stopped\n");
3933 }
3934 
3935 /**
3936  *  stmmac_release - close entry point of the driver
3937  *  @dev : device pointer.
3938  *  Description:
3939  *  This is the stop entry point of the driver.
3940  */
3941 static int stmmac_release(struct net_device *dev)
3942 {
3943 	struct stmmac_priv *priv = netdev_priv(dev);
3944 	u32 chan;
3945 
3946 	if (device_may_wakeup(priv->device))
3947 		phylink_speed_down(priv->phylink, false);
3948 	/* Stop and disconnect the PHY */
3949 	phylink_stop(priv->phylink);
3950 	phylink_disconnect_phy(priv->phylink);
3951 
3952 	stmmac_disable_all_queues(priv);
3953 
3954 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3955 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3956 
3957 	netif_tx_disable(dev);
3958 
3959 	/* Free the IRQ lines */
3960 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3961 
3962 	if (priv->eee_enabled) {
3963 		priv->tx_path_in_lpi_mode = false;
3964 		del_timer_sync(&priv->eee_ctrl_timer);
3965 	}
3966 
3967 	/* Stop TX/RX DMA and clear the descriptors */
3968 	stmmac_stop_all_dma(priv);
3969 
3970 	/* Release and free the Rx/Tx resources */
3971 	free_dma_desc_resources(priv, &priv->dma_conf);
3972 
3973 	/* Disable the MAC Rx/Tx */
3974 	stmmac_mac_set(priv, priv->ioaddr, false);
3975 
	/* Power down the SerDes if present */
3977 	if (priv->plat->serdes_powerdown)
3978 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3979 
3980 	netif_carrier_off(dev);
3981 
3982 	stmmac_release_ptp(priv);
3983 
3984 	pm_runtime_put(priv->device);
3985 
3986 	if (priv->dma_cap.fpesel)
3987 		stmmac_fpe_stop_wq(priv);
3988 
3989 	return 0;
3990 }
3991 
3992 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3993 			       struct stmmac_tx_queue *tx_q)
3994 {
3995 	u16 tag = 0x0, inner_tag = 0x0;
3996 	u32 inner_type = 0x0;
3997 	struct dma_desc *p;
3998 
3999 	if (!priv->dma_cap.vlins)
4000 		return false;
4001 	if (!skb_vlan_tag_present(skb))
4002 		return false;
4003 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4004 		inner_tag = skb_vlan_tag_get(skb);
4005 		inner_type = STMMAC_VLAN_INSERT;
4006 	}
4007 
4008 	tag = skb_vlan_tag_get(skb);
4009 
4010 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4011 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4012 	else
4013 		p = &tx_q->dma_tx[tx_q->cur_tx];
4014 
4015 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4016 		return false;
4017 
4018 	stmmac_set_tx_owner(priv, p);
4019 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4020 	return true;
4021 }
4022 
4023 /**
 *  stmmac_tso_allocator - allocate TX descriptors for a TSO payload
4025  *  @priv: driver private structure
4026  *  @des: buffer start address
4027  *  @total_len: total length to fill in descriptors
4028  *  @last_segment: condition for the last descriptor
4029  *  @queue: TX queue index
4030  *  Description:
 *  This function fills the descriptors and requests new descriptors
 *  according to the remaining buffer length to fill.
4033  */
4034 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4035 				 int total_len, bool last_segment, u32 queue)
4036 {
4037 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4038 	struct dma_desc *desc;
4039 	u32 buff_size;
4040 	int tmp_len;
4041 
4042 	tmp_len = total_len;
4043 
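	/* Fill one descriptor per chunk of at most TSO_MAX_BUFF_SIZE bytes */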
4044 	while (tmp_len > 0) {
4045 		dma_addr_t curr_addr;
4046 
4047 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4048 						priv->dma_conf.dma_tx_size);
4049 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4050 
4051 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4052 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4053 		else
4054 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4055 
4056 		curr_addr = des + (total_len - tmp_len);
4057 		if (priv->dma_cap.addr64 <= 32)
4058 			desc->des0 = cpu_to_le32(curr_addr);
4059 		else
4060 			stmmac_set_desc_addr(priv, desc, curr_addr);
4061 
4062 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4063 			    TSO_MAX_BUFF_SIZE : tmp_len;
4064 
4065 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4066 				0, 1,
4067 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4068 				0, 0);
4069 
4070 		tmp_len -= TSO_MAX_BUFF_SIZE;
4071 	}
4072 }
4073 
4074 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4075 {
4076 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4077 	int desc_size;
4078 
4079 	if (likely(priv->extend_desc))
4080 		desc_size = sizeof(struct dma_extended_desc);
4081 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4082 		desc_size = sizeof(struct dma_edesc);
4083 	else
4084 		desc_size = sizeof(struct dma_desc);
4085 
	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and then a barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
4089 	 */
4090 	wmb();
4091 
4092 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4093 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4094 }
4095 
4096 /**
4097  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4098  *  @skb : the socket buffer
4099  *  @dev : device pointer
4100  *  Description: this is the transmit function that is called on TSO frames
4101  *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming for TSO frames:
4103  *
4104  *  First Descriptor
4105  *   --------
4106  *   | DES0 |---> buffer1 = L2/L3/L4 header
4107  *   | DES1 |---> TCP Payload (can continue on next descr...)
4108  *   | DES2 |---> buffer 1 and 2 len
4109  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4110  *   --------
4111  *	|
4112  *     ...
4113  *	|
4114  *   --------
4115  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4116  *   | DES1 | --|
4117  *   | DES2 | --> buffer 1 and 2 len
4118  *   | DES3 |
4119  *   --------
4120  *
 * The MSS is fixed while TSO is enabled, so there is no need to program the
 * TDES3 context field for every frame.
4122  */
4123 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4124 {
4125 	struct dma_desc *desc, *first, *mss_desc = NULL;
4126 	struct stmmac_priv *priv = netdev_priv(dev);
4127 	int nfrags = skb_shinfo(skb)->nr_frags;
4128 	u32 queue = skb_get_queue_mapping(skb);
4129 	unsigned int first_entry, tx_packets;
4130 	struct stmmac_txq_stats *txq_stats;
4131 	int tmp_pay_len = 0, first_tx;
4132 	struct stmmac_tx_queue *tx_q;
4133 	bool has_vlan, set_ic;
4134 	u8 proto_hdr_len, hdr;
4135 	u32 pay_len, mss;
4136 	dma_addr_t des;
4137 	int i;
4138 
4139 	tx_q = &priv->dma_conf.tx_queue[queue];
4140 	txq_stats = &priv->xstats.txq_stats[queue];
4141 	first_tx = tx_q->cur_tx;
4142 
4143 	/* Compute header lengths */
4144 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4145 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4146 		hdr = sizeof(struct udphdr);
4147 	} else {
4148 		proto_hdr_len = skb_tcp_all_headers(skb);
4149 		hdr = tcp_hdrlen(skb);
4150 	}
4151 
	/* Desc availability based on threshold should be safe enough */
4153 	if (unlikely(stmmac_tx_avail(priv, queue) <
4154 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4155 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4156 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4157 								queue));
4158 			/* This is a hard error, log it. */
4159 			netdev_err(priv->dev,
4160 				   "%s: Tx Ring full when queue awake\n",
4161 				   __func__);
4162 		}
4163 		return NETDEV_TX_BUSY;
4164 	}
4165 
4166 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4167 
4168 	mss = skb_shinfo(skb)->gso_size;
4169 
4170 	/* set new MSS value if needed */
4171 	if (mss != tx_q->mss) {
4172 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4173 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4174 		else
4175 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4176 
4177 		stmmac_set_mss(priv, mss_desc, mss);
4178 		tx_q->mss = mss;
4179 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4180 						priv->dma_conf.dma_tx_size);
4181 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4182 	}
4183 
4184 	if (netif_msg_tx_queued(priv)) {
4185 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4186 			__func__, hdr, proto_hdr_len, pay_len, mss);
4187 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4188 			skb->data_len);
4189 	}
4190 
4191 	/* Check if VLAN can be inserted by HW */
4192 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4193 
4194 	first_entry = tx_q->cur_tx;
4195 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4196 
4197 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4198 		desc = &tx_q->dma_entx[first_entry].basic;
4199 	else
4200 		desc = &tx_q->dma_tx[first_entry];
4201 	first = desc;
4202 
4203 	if (has_vlan)
4204 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4205 
4206 	/* first descriptor: fill Headers on Buf1 */
4207 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4208 			     DMA_TO_DEVICE);
4209 	if (dma_mapping_error(priv->device, des))
4210 		goto dma_map_err;
4211 
4212 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4213 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4214 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4215 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4216 
4217 	if (priv->dma_cap.addr64 <= 32) {
4218 		first->des0 = cpu_to_le32(des);
4219 
4220 		/* Fill start of payload in buff2 of first descriptor */
4221 		if (pay_len)
4222 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4223 
4224 		/* If needed take extra descriptors to fill the remaining payload */
4225 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4226 	} else {
4227 		stmmac_set_desc_addr(priv, first, des);
4228 		tmp_pay_len = pay_len;
4229 		des += proto_hdr_len;
4230 		pay_len = 0;
4231 	}
4232 
4233 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4234 
4235 	/* Prepare fragments */
4236 	for (i = 0; i < nfrags; i++) {
4237 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4238 
4239 		des = skb_frag_dma_map(priv->device, frag, 0,
4240 				       skb_frag_size(frag),
4241 				       DMA_TO_DEVICE);
4242 		if (dma_mapping_error(priv->device, des))
4243 			goto dma_map_err;
4244 
4245 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4246 				     (i == nfrags - 1), queue);
4247 
4248 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4249 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4250 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4251 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4252 	}
4253 
4254 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4255 
4256 	/* Only the last descriptor gets to point to the skb. */
4257 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4258 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4259 
4260 	/* Manage tx mitigation */
4261 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4262 	tx_q->tx_count_frames += tx_packets;
4263 
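	/* Request a completion interrupt when a HW timestamp is pending or the
	 * frame coalescing threshold has been crossed; otherwise rely on the
	 * TX coalesce timer.
	 */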
4264 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4265 		set_ic = true;
4266 	else if (!priv->tx_coal_frames[queue])
4267 		set_ic = false;
4268 	else if (tx_packets > priv->tx_coal_frames[queue])
4269 		set_ic = true;
4270 	else if ((tx_q->tx_count_frames %
4271 		  priv->tx_coal_frames[queue]) < tx_packets)
4272 		set_ic = true;
4273 	else
4274 		set_ic = false;
4275 
4276 	if (set_ic) {
4277 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4278 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4279 		else
4280 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4281 
4282 		tx_q->tx_count_frames = 0;
4283 		stmmac_set_tx_ic(priv, desc);
4284 	}
4285 
4286 	/* We've used all descriptors we need for this skb, however,
4287 	 * advance cur_tx so that it references a fresh descriptor.
4288 	 * ndo_start_xmit will fill this descriptor the next time it's
4289 	 * called and stmmac_tx_clean may clean up to this descriptor.
4290 	 */
4291 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4292 
4293 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4294 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4295 			  __func__);
4296 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4297 	}
4298 
4299 	u64_stats_update_begin(&txq_stats->q_syncp);
4300 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4301 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4302 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4303 	if (set_ic)
4304 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4305 	u64_stats_update_end(&txq_stats->q_syncp);
4306 
4307 	if (priv->sarc_type)
4308 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4309 
4310 	skb_tx_timestamp(skb);
4311 
4312 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4313 		     priv->hwts_tx_en)) {
4314 		/* declare that device is doing timestamping */
4315 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4316 		stmmac_enable_tx_timestamp(priv, first);
4317 	}
4318 
4319 	/* Complete the first descriptor before granting the DMA */
4320 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4321 			proto_hdr_len,
4322 			pay_len,
4323 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4324 			hdr / 4, (skb->len - proto_hdr_len));
4325 
4326 	/* If context desc is used to change MSS */
4327 	if (mss_desc) {
4328 		/* Make sure that first descriptor has been completely
4329 		 * written, including its own bit. This is because MSS is
4330 		 * actually before first descriptor, so we need to make
4331 		 * sure that MSS's own bit is the last thing written.
4332 		 */
4333 		dma_wmb();
4334 		stmmac_set_tx_owner(priv, mss_desc);
4335 	}
4336 
4337 	if (netif_msg_pktdata(priv)) {
4338 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4339 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4340 			tx_q->cur_tx, first, nfrags);
4341 		pr_info(">>> frame to be transmitted: ");
4342 		print_pkt(skb->data, skb_headlen(skb));
4343 	}
4344 
4345 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4346 
4347 	stmmac_flush_tx_descriptors(priv, queue);
4348 	stmmac_tx_timer_arm(priv, queue);
4349 
4350 	return NETDEV_TX_OK;
4351 
4352 dma_map_err:
4353 	dev_err(priv->device, "Tx dma map failed\n");
4354 	dev_kfree_skb(skb);
4355 	priv->xstats.tx_dropped++;
4356 	return NETDEV_TX_OK;
4357 }
4358 
4359 /**
4360  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4361  * @skb: socket buffer to check
4362  *
4363  * Check if a packet has an ethertype that will trigger the IP header checks
4364  * and IP/TCP checksum engine of the stmmac core.
4365  *
4366  * Return: true if the ethertype can trigger the checksum engine, false
4367  * otherwise
4368  */
4369 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4370 {
4371 	int depth = 0;
4372 	__be16 proto;
4373 
4374 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4375 				    &depth);
4376 
4377 	return (depth <= ETH_HLEN) &&
4378 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4379 }
4380 
4381 /**
4382  *  stmmac_xmit - Tx entry point of the driver
4383  *  @skb : the socket buffer
4384  *  @dev : device pointer
4385  *  Description : this is the tx entry point of the driver.
4386  *  It programs the chain or the ring and supports oversized frames
4387  *  and SG feature.
4388  */
4389 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4390 {
4391 	unsigned int first_entry, tx_packets, enh_desc;
4392 	struct stmmac_priv *priv = netdev_priv(dev);
4393 	unsigned int nopaged_len = skb_headlen(skb);
4394 	int i, csum_insertion = 0, is_jumbo = 0;
4395 	u32 queue = skb_get_queue_mapping(skb);
4396 	int nfrags = skb_shinfo(skb)->nr_frags;
4397 	int gso = skb_shinfo(skb)->gso_type;
4398 	struct stmmac_txq_stats *txq_stats;
4399 	struct dma_edesc *tbs_desc = NULL;
4400 	struct dma_desc *desc, *first;
4401 	struct stmmac_tx_queue *tx_q;
4402 	bool has_vlan, set_ic;
4403 	int entry, first_tx;
4404 	dma_addr_t des;
4405 
4406 	tx_q = &priv->dma_conf.tx_queue[queue];
4407 	txq_stats = &priv->xstats.txq_stats[queue];
4408 	first_tx = tx_q->cur_tx;
4409 
4410 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4411 		stmmac_disable_eee_mode(priv);
4412 
4413 	/* Manage oversized TCP frames for GMAC4 device */
4414 	if (skb_is_gso(skb) && priv->tso) {
4415 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4416 			return stmmac_tso_xmit(skb, dev);
4417 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4418 			return stmmac_tso_xmit(skb, dev);
4419 	}
4420 
4421 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4422 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4423 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4424 								queue));
4425 			/* This is a hard error, log it. */
4426 			netdev_err(priv->dev,
4427 				   "%s: Tx Ring full when queue awake\n",
4428 				   __func__);
4429 		}
4430 		return NETDEV_TX_BUSY;
4431 	}
4432 
4433 	/* Check if VLAN can be inserted by HW */
4434 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4435 
4436 	entry = tx_q->cur_tx;
4437 	first_entry = entry;
4438 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4439 
4440 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4441 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4442 	 * queues. In that case, checksum offloading for those queues that don't
	 * support tx coe needs to fall back to software checksum calculation.
4444 	 *
4445 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4446 	 * also have to be checksummed in software.
4447 	 */
4448 	if (csum_insertion &&
4449 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4450 	     !stmmac_has_ip_ethertype(skb))) {
4451 		if (unlikely(skb_checksum_help(skb)))
4452 			goto dma_map_err;
4453 		csum_insertion = !csum_insertion;
4454 	}
4455 
4456 	if (likely(priv->extend_desc))
4457 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4458 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4459 		desc = &tx_q->dma_entx[entry].basic;
4460 	else
4461 		desc = tx_q->dma_tx + entry;
4462 
4463 	first = desc;
4464 
4465 	if (has_vlan)
4466 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4467 
4468 	enh_desc = priv->plat->enh_desc;
4469 	/* To program the descriptors according to the size of the frame */
4470 	if (enh_desc)
4471 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4472 
4473 	if (unlikely(is_jumbo)) {
4474 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4475 		if (unlikely(entry < 0) && (entry != -EINVAL))
4476 			goto dma_map_err;
4477 	}
4478 
4479 	for (i = 0; i < nfrags; i++) {
4480 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4481 		int len = skb_frag_size(frag);
4482 		bool last_segment = (i == (nfrags - 1));
4483 
4484 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4485 		WARN_ON(tx_q->tx_skbuff[entry]);
4486 
4487 		if (likely(priv->extend_desc))
4488 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4489 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4490 			desc = &tx_q->dma_entx[entry].basic;
4491 		else
4492 			desc = tx_q->dma_tx + entry;
4493 
4494 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4495 				       DMA_TO_DEVICE);
4496 		if (dma_mapping_error(priv->device, des))
4497 			goto dma_map_err; /* should reuse desc w/o issues */
4498 
4499 		tx_q->tx_skbuff_dma[entry].buf = des;
4500 
4501 		stmmac_set_desc_addr(priv, desc, des);
4502 
4503 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4504 		tx_q->tx_skbuff_dma[entry].len = len;
4505 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4506 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4507 
4508 		/* Prepare the descriptor and set the own bit too */
4509 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4510 				priv->mode, 1, last_segment, skb->len);
4511 	}
4512 
4513 	/* Only the last descriptor gets to point to the skb. */
4514 	tx_q->tx_skbuff[entry] = skb;
4515 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4516 
4517 	/* According to the coalesce parameter the IC bit for the latest
4518 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
4520 	 * element in case of no SG.
4521 	 */
4522 	tx_packets = (entry + 1) - first_tx;
4523 	tx_q->tx_count_frames += tx_packets;
4524 
4525 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4526 		set_ic = true;
4527 	else if (!priv->tx_coal_frames[queue])
4528 		set_ic = false;
4529 	else if (tx_packets > priv->tx_coal_frames[queue])
4530 		set_ic = true;
4531 	else if ((tx_q->tx_count_frames %
4532 		  priv->tx_coal_frames[queue]) < tx_packets)
4533 		set_ic = true;
4534 	else
4535 		set_ic = false;
4536 
4537 	if (set_ic) {
4538 		if (likely(priv->extend_desc))
4539 			desc = &tx_q->dma_etx[entry].basic;
4540 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4541 			desc = &tx_q->dma_entx[entry].basic;
4542 		else
4543 			desc = &tx_q->dma_tx[entry];
4544 
4545 		tx_q->tx_count_frames = 0;
4546 		stmmac_set_tx_ic(priv, desc);
4547 	}
4548 
4549 	/* We've used all descriptors we need for this skb, however,
4550 	 * advance cur_tx so that it references a fresh descriptor.
4551 	 * ndo_start_xmit will fill this descriptor the next time it's
4552 	 * called and stmmac_tx_clean may clean up to this descriptor.
4553 	 */
4554 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4555 	tx_q->cur_tx = entry;
4556 
4557 	if (netif_msg_pktdata(priv)) {
4558 		netdev_dbg(priv->dev,
4559 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4560 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4561 			   entry, first, nfrags);
4562 
4563 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4564 		print_pkt(skb->data, skb->len);
4565 	}
4566 
4567 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4568 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4569 			  __func__);
4570 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4571 	}
4572 
4573 	u64_stats_update_begin(&txq_stats->q_syncp);
4574 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4575 	if (set_ic)
4576 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4577 	u64_stats_update_end(&txq_stats->q_syncp);
4578 
4579 	if (priv->sarc_type)
4580 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4581 
4582 	skb_tx_timestamp(skb);
4583 
4584 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4585 	 * problems because all the descriptors are actually ready to be
4586 	 * passed to the DMA engine.
4587 	 */
4588 	if (likely(!is_jumbo)) {
4589 		bool last_segment = (nfrags == 0);
4590 
4591 		des = dma_map_single(priv->device, skb->data,
4592 				     nopaged_len, DMA_TO_DEVICE);
4593 		if (dma_mapping_error(priv->device, des))
4594 			goto dma_map_err;
4595 
4596 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4597 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4598 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4599 
4600 		stmmac_set_desc_addr(priv, first, des);
4601 
4602 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4603 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4604 
4605 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4606 			     priv->hwts_tx_en)) {
4607 			/* declare that device is doing timestamping */
4608 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4609 			stmmac_enable_tx_timestamp(priv, first);
4610 		}
4611 
4612 		/* Prepare the first descriptor setting the OWN bit too */
4613 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4614 				csum_insertion, priv->mode, 0, last_segment,
4615 				skb->len);
4616 	}
4617 
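	/* With TBS enabled on this queue, program the launch time taken from
	 * skb->tstamp into the enhanced descriptor.
	 */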
4618 	if (tx_q->tbs & STMMAC_TBS_EN) {
4619 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4620 
4621 		tbs_desc = &tx_q->dma_entx[first_entry];
4622 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4623 	}
4624 
4625 	stmmac_set_tx_owner(priv, first);
4626 
4627 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4628 
4629 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4630 
4631 	stmmac_flush_tx_descriptors(priv, queue);
4632 	stmmac_tx_timer_arm(priv, queue);
4633 
4634 	return NETDEV_TX_OK;
4635 
4636 dma_map_err:
4637 	netdev_err(priv->dev, "Tx DMA map failed\n");
4638 	dev_kfree_skb(skb);
4639 	priv->xstats.tx_dropped++;
4640 	return NETDEV_TX_OK;
4641 }
4642 
4643 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4644 {
4645 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4646 	__be16 vlan_proto = veth->h_vlan_proto;
4647 	u16 vlanid;
4648 
4649 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4650 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4651 	    (vlan_proto == htons(ETH_P_8021AD) &&
4652 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4653 		/* pop the vlan tag */
4654 		vlanid = ntohs(veth->h_vlan_TCI);
4655 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4656 		skb_pull(skb, VLAN_HLEN);
4657 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4658 	}
4659 }
4660 
4661 /**
 * stmmac_rx_refill - refill the RX ring with fresh page pool buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the page pool buffers used by the
 * zero-copy reception process.
4667  */
4668 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4669 {
4670 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4671 	int dirty = stmmac_rx_dirty(priv, queue);
4672 	unsigned int entry = rx_q->dirty_rx;
4673 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4674 
4675 	if (priv->dma_cap.host_dma_width <= 32)
4676 		gfp |= GFP_DMA32;
4677 
4678 	while (dirty-- > 0) {
4679 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4680 		struct dma_desc *p;
4681 		bool use_rx_wd;
4682 
4683 		if (priv->extend_desc)
4684 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4685 		else
4686 			p = rx_q->dma_rx + entry;
4687 
4688 		if (!buf->page) {
4689 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4690 			if (!buf->page)
4691 				break;
4692 		}
4693 
4694 		if (priv->sph && !buf->sec_page) {
4695 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4696 			if (!buf->sec_page)
4697 				break;
4698 
4699 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4700 		}
4701 
4702 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4703 
4704 		stmmac_set_desc_addr(priv, p, buf->addr);
4705 		if (priv->sph)
4706 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4707 		else
4708 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4709 		stmmac_refill_desc3(priv, rx_q, p);
4710 
4711 		rx_q->rx_count_frames++;
4712 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4713 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4714 			rx_q->rx_count_frames = 0;
4715 
4716 		use_rx_wd = !priv->rx_coal_frames[queue];
4717 		use_rx_wd |= rx_q->rx_count_frames > 0;
4718 		if (!priv->use_riwt)
4719 			use_rx_wd = false;
4720 
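		/* Descriptor writes must complete before ownership is handed
		 * back to the DMA.
		 */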
4721 		dma_wmb();
4722 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4723 
4724 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4725 	}
4726 	rx_q->dirty_rx = entry;
4727 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4728 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4729 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4730 }
4731 
4732 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4733 				       struct dma_desc *p,
4734 				       int status, unsigned int len)
4735 {
4736 	unsigned int plen = 0, hlen = 0;
4737 	int coe = priv->hw->rx_csum;
4738 
4739 	/* Not first descriptor, buffer is always zero */
4740 	if (priv->sph && len)
4741 		return 0;
4742 
4743 	/* First descriptor, get split header length */
4744 	stmmac_get_rx_header_len(priv, p, &hlen);
4745 	if (priv->sph && hlen) {
4746 		priv->xstats.rx_split_hdr_pkt_n++;
4747 		return hlen;
4748 	}
4749 
4750 	/* First descriptor, not last descriptor and not split header */
4751 	if (status & rx_not_ls)
4752 		return priv->dma_conf.dma_buf_sz;
4753 
4754 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4755 
4756 	/* First descriptor and last descriptor and not split header */
4757 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4758 }
4759 
4760 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4761 				       struct dma_desc *p,
4762 				       int status, unsigned int len)
4763 {
4764 	int coe = priv->hw->rx_csum;
4765 	unsigned int plen = 0;
4766 
4767 	/* Not split header, buffer is not available */
4768 	if (!priv->sph)
4769 		return 0;
4770 
4771 	/* Not last descriptor */
4772 	if (status & rx_not_ls)
4773 		return priv->dma_conf.dma_buf_sz;
4774 
4775 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4776 
4777 	/* Last descriptor */
4778 	return plen - len;
4779 }
4780 
4781 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4782 				struct xdp_frame *xdpf, bool dma_map)
4783 {
4784 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4785 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4786 	unsigned int entry = tx_q->cur_tx;
4787 	struct dma_desc *tx_desc;
4788 	dma_addr_t dma_addr;
4789 	bool set_ic;
4790 
4791 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4792 		return STMMAC_XDP_CONSUMED;
4793 
4794 	if (likely(priv->extend_desc))
4795 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4796 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4797 		tx_desc = &tx_q->dma_entx[entry].basic;
4798 	else
4799 		tx_desc = tx_q->dma_tx + entry;
4800 
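	/* ndo_xdp_xmit frames (dma_map == true) must be mapped here, while
	 * XDP_TX frames already live in the RX page pool and only need a
	 * device sync.
	 */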
4801 	if (dma_map) {
4802 		dma_addr = dma_map_single(priv->device, xdpf->data,
4803 					  xdpf->len, DMA_TO_DEVICE);
4804 		if (dma_mapping_error(priv->device, dma_addr))
4805 			return STMMAC_XDP_CONSUMED;
4806 
4807 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4808 	} else {
4809 		struct page *page = virt_to_page(xdpf->data);
4810 
4811 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4812 			   xdpf->headroom;
4813 		dma_sync_single_for_device(priv->device, dma_addr,
4814 					   xdpf->len, DMA_BIDIRECTIONAL);
4815 
4816 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4817 	}
4818 
4819 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4820 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4821 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4822 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4823 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4824 
4825 	tx_q->xdpf[entry] = xdpf;
4826 
4827 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4828 
4829 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4830 			       true, priv->mode, true, true,
4831 			       xdpf->len);
4832 
4833 	tx_q->tx_count_frames++;
4834 
4835 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4836 		set_ic = true;
4837 	else
4838 		set_ic = false;
4839 
4840 	if (set_ic) {
4841 		tx_q->tx_count_frames = 0;
4842 		stmmac_set_tx_ic(priv, tx_desc);
4843 		u64_stats_update_begin(&txq_stats->q_syncp);
4844 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4845 		u64_stats_update_end(&txq_stats->q_syncp);
4846 	}
4847 
4848 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4849 
4850 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4851 	tx_q->cur_tx = entry;
4852 
4853 	return STMMAC_XDP_TX;
4854 }
4855 
4856 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4857 				   int cpu)
4858 {
4859 	int index = cpu;
4860 
4861 	if (unlikely(index < 0))
4862 		index = 0;
4863 
4864 	while (index >= priv->plat->tx_queues_to_use)
4865 		index -= priv->plat->tx_queues_to_use;
4866 
4867 	return index;
4868 }
4869 
4870 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4871 				struct xdp_buff *xdp)
4872 {
4873 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4874 	int cpu = smp_processor_id();
4875 	struct netdev_queue *nq;
4876 	int queue;
4877 	int res;
4878 
4879 	if (unlikely(!xdpf))
4880 		return STMMAC_XDP_CONSUMED;
4881 
4882 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4883 	nq = netdev_get_tx_queue(priv->dev, queue);
4884 
4885 	__netif_tx_lock(nq, cpu);
4886 	/* Avoids TX time-out as we are sharing with slow path */
4887 	txq_trans_cond_update(nq);
4888 
4889 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4890 	if (res == STMMAC_XDP_TX)
4891 		stmmac_flush_tx_descriptors(priv, queue);
4892 
4893 	__netif_tx_unlock(nq);
4894 
4895 	return res;
4896 }
4897 
4898 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4899 				 struct bpf_prog *prog,
4900 				 struct xdp_buff *xdp)
4901 {
4902 	u32 act;
4903 	int res;
4904 
4905 	act = bpf_prog_run_xdp(prog, xdp);
4906 	switch (act) {
4907 	case XDP_PASS:
4908 		res = STMMAC_XDP_PASS;
4909 		break;
4910 	case XDP_TX:
4911 		res = stmmac_xdp_xmit_back(priv, xdp);
4912 		break;
4913 	case XDP_REDIRECT:
4914 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4915 			res = STMMAC_XDP_CONSUMED;
4916 		else
4917 			res = STMMAC_XDP_REDIRECT;
4918 		break;
4919 	default:
4920 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4921 		fallthrough;
4922 	case XDP_ABORTED:
4923 		trace_xdp_exception(priv->dev, prog, act);
4924 		fallthrough;
4925 	case XDP_DROP:
4926 		res = STMMAC_XDP_CONSUMED;
4927 		break;
4928 	}
4929 
4930 	return res;
4931 }
4932 
4933 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4934 					   struct xdp_buff *xdp)
4935 {
4936 	struct bpf_prog *prog;
4937 	int res;
4938 
4939 	prog = READ_ONCE(priv->xdp_prog);
4940 	if (!prog) {
4941 		res = STMMAC_XDP_PASS;
4942 		goto out;
4943 	}
4944 
4945 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4946 out:
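	/* The XDP verdict is returned encoded in an ERR_PTR so that callers
	 * can tell it apart from a real skb pointer.
	 */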
4947 	return ERR_PTR(-res);
4948 }
4949 
4950 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4951 				   int xdp_status)
4952 {
4953 	int cpu = smp_processor_id();
4954 	int queue;
4955 
4956 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4957 
4958 	if (xdp_status & STMMAC_XDP_TX)
4959 		stmmac_tx_timer_arm(priv, queue);
4960 
4961 	if (xdp_status & STMMAC_XDP_REDIRECT)
4962 		xdp_do_flush();
4963 }
4964 
4965 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4966 					       struct xdp_buff *xdp)
4967 {
4968 	unsigned int metasize = xdp->data - xdp->data_meta;
4969 	unsigned int datasize = xdp->data_end - xdp->data;
4970 	struct sk_buff *skb;
4971 
4972 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4973 			       xdp->data_end - xdp->data_hard_start,
4974 			       GFP_ATOMIC | __GFP_NOWARN);
4975 	if (unlikely(!skb))
4976 		return NULL;
4977 
4978 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4979 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4980 	if (metasize)
4981 		skb_metadata_set(skb, metasize);
4982 
4983 	return skb;
4984 }
4985 
4986 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4987 				   struct dma_desc *p, struct dma_desc *np,
4988 				   struct xdp_buff *xdp)
4989 {
4990 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4991 	struct stmmac_channel *ch = &priv->channel[queue];
4992 	unsigned int len = xdp->data_end - xdp->data;
4993 	enum pkt_hash_types hash_type;
4994 	int coe = priv->hw->rx_csum;
4995 	struct sk_buff *skb;
4996 	u32 hash;
4997 
4998 	skb = stmmac_construct_skb_zc(ch, xdp);
4999 	if (!skb) {
5000 		priv->xstats.rx_dropped++;
5001 		return;
5002 	}
5003 
5004 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5005 	stmmac_rx_vlan(priv->dev, skb);
5006 	skb->protocol = eth_type_trans(skb, priv->dev);
5007 
5008 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5009 		skb_checksum_none_assert(skb);
5010 	else
5011 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5012 
5013 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5014 		skb_set_hash(skb, hash, hash_type);
5015 
5016 	skb_record_rx_queue(skb, queue);
5017 	napi_gro_receive(&ch->rxtx_napi, skb);
5018 
5019 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5020 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5021 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5022 	u64_stats_update_end(&rxq_stats->napi_syncp);
5023 }
5024 
5025 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5026 {
5027 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5028 	unsigned int entry = rx_q->dirty_rx;
5029 	struct dma_desc *rx_desc = NULL;
5030 	bool ret = true;
5031 
5032 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5033 
5034 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5035 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5036 		dma_addr_t dma_addr;
5037 		bool use_rx_wd;
5038 
5039 		if (!buf->xdp) {
5040 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5041 			if (!buf->xdp) {
5042 				ret = false;
5043 				break;
5044 			}
5045 		}
5046 
5047 		if (priv->extend_desc)
5048 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5049 		else
5050 			rx_desc = rx_q->dma_rx + entry;
5051 
5052 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5053 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5054 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5055 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5056 
5057 		rx_q->rx_count_frames++;
5058 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5059 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5060 			rx_q->rx_count_frames = 0;
5061 
5062 		use_rx_wd = !priv->rx_coal_frames[queue];
5063 		use_rx_wd |= rx_q->rx_count_frames > 0;
5064 		if (!priv->use_riwt)
5065 			use_rx_wd = false;
5066 
5067 		dma_wmb();
5068 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5069 
5070 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5071 	}
5072 
5073 	if (rx_desc) {
5074 		rx_q->dirty_rx = entry;
5075 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5076 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5077 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5078 	}
5079 
5080 	return ret;
5081 }
5082 
5083 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5084 {
5085 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5086 	 * to represent incoming packet, whereas cb field in the same structure
5087 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5088 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5089 	 */
5090 	return (struct stmmac_xdp_buff *)xdp;
5091 }
5092 
5093 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5094 {
5095 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5096 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5097 	unsigned int count = 0, error = 0, len = 0;
5098 	int dirty = stmmac_rx_dirty(priv, queue);
5099 	unsigned int next_entry = rx_q->cur_rx;
5100 	u32 rx_errors = 0, rx_dropped = 0;
5101 	unsigned int desc_size;
5102 	struct bpf_prog *prog;
5103 	bool failure = false;
5104 	int xdp_status = 0;
5105 	int status = 0;
5106 
5107 	if (netif_msg_rx_status(priv)) {
5108 		void *rx_head;
5109 
5110 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5111 		if (priv->extend_desc) {
5112 			rx_head = (void *)rx_q->dma_erx;
5113 			desc_size = sizeof(struct dma_extended_desc);
5114 		} else {
5115 			rx_head = (void *)rx_q->dma_rx;
5116 			desc_size = sizeof(struct dma_desc);
5117 		}
5118 
5119 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5120 				    rx_q->dma_rx_phy, desc_size);
5121 	}
5122 	while (count < limit) {
5123 		struct stmmac_rx_buffer *buf;
5124 		struct stmmac_xdp_buff *ctx;
5125 		unsigned int buf1_len = 0;
5126 		struct dma_desc *np, *p;
5127 		int entry;
5128 		int res;
5129 
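		/* Restore the state of a frame that spanned the previous NAPI
		 * poll, if any.
		 */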
5130 		if (!count && rx_q->state_saved) {
5131 			error = rx_q->state.error;
5132 			len = rx_q->state.len;
5133 		} else {
5134 			rx_q->state_saved = false;
5135 			error = 0;
5136 			len = 0;
5137 		}
5138 
5139 		if (count >= limit)
5140 			break;
5141 
5142 read_again:
5143 		buf1_len = 0;
5144 		entry = next_entry;
5145 		buf = &rx_q->buf_pool[entry];
5146 
5147 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5148 			failure = failure ||
5149 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5150 			dirty = 0;
5151 		}
5152 
5153 		if (priv->extend_desc)
5154 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5155 		else
5156 			p = rx_q->dma_rx + entry;
5157 
5158 		/* read the status of the incoming frame */
5159 		status = stmmac_rx_status(priv, &priv->xstats, p);
5160 		/* check if managed by the DMA otherwise go ahead */
5161 		if (unlikely(status & dma_own))
5162 			break;
5163 
5164 		/* Prefetch the next RX descriptor */
5165 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5166 						priv->dma_conf.dma_rx_size);
5167 		next_entry = rx_q->cur_rx;
5168 
5169 		if (priv->extend_desc)
5170 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5171 		else
5172 			np = rx_q->dma_rx + next_entry;
5173 
5174 		prefetch(np);
5175 
		/* Ensure a valid XSK buffer before proceeding */
5177 		if (!buf->xdp)
5178 			break;
5179 
5180 		if (priv->extend_desc)
5181 			stmmac_rx_extended_status(priv, &priv->xstats,
5182 						  rx_q->dma_erx + entry);
5183 		if (unlikely(status == discard_frame)) {
5184 			xsk_buff_free(buf->xdp);
5185 			buf->xdp = NULL;
5186 			dirty++;
5187 			error = 1;
5188 			if (!priv->hwts_rx_en)
5189 				rx_errors++;
5190 		}
5191 
5192 		if (unlikely(error && (status & rx_not_ls)))
5193 			goto read_again;
5194 		if (unlikely(error)) {
5195 			count++;
5196 			continue;
5197 		}
5198 
5199 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5200 		if (likely(status & rx_not_ls)) {
5201 			xsk_buff_free(buf->xdp);
5202 			buf->xdp = NULL;
5203 			dirty++;
5204 			count++;
5205 			goto read_again;
5206 		}
5207 
5208 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5209 		ctx->priv = priv;
5210 		ctx->desc = p;
5211 		ctx->ndesc = np;
5212 
		/* XDP ZC frames only support primary buffers for now */
5214 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5215 		len += buf1_len;
5216 
5217 		/* ACS is disabled; strip manually. */
5218 		if (likely(!(status & rx_not_ls))) {
5219 			buf1_len -= ETH_FCS_LEN;
5220 			len -= ETH_FCS_LEN;
5221 		}
5222 
5223 		/* RX buffer is good and fits into an XSK pool buffer */
5224 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5225 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5226 
5227 		prog = READ_ONCE(priv->xdp_prog);
5228 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5229 
5230 		switch (res) {
5231 		case STMMAC_XDP_PASS:
5232 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5233 			xsk_buff_free(buf->xdp);
5234 			break;
5235 		case STMMAC_XDP_CONSUMED:
5236 			xsk_buff_free(buf->xdp);
5237 			rx_dropped++;
5238 			break;
5239 		case STMMAC_XDP_TX:
5240 		case STMMAC_XDP_REDIRECT:
5241 			xdp_status |= res;
5242 			break;
5243 		}
5244 
5245 		buf->xdp = NULL;
5246 		dirty++;
5247 		count++;
5248 	}
5249 
5250 	if (status & rx_not_ls) {
5251 		rx_q->state_saved = true;
5252 		rx_q->state.error = error;
5253 		rx_q->state.len = len;
5254 	}
5255 
5256 	stmmac_finalize_xdp_rx(priv, xdp_status);
5257 
5258 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5259 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5260 	u64_stats_update_end(&rxq_stats->napi_syncp);
5261 
5262 	priv->xstats.rx_dropped += rx_dropped;
5263 	priv->xstats.rx_errors += rx_errors;
5264 
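	/* If the XSK pool uses the need_wakeup flag, ask user space to kick
	 * us again when buffers ran short or a refill failed; otherwise a
	 * failed refill returns the full budget so NAPI keeps polling.
	 */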
5265 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5266 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5267 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5268 		else
5269 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5270 
5271 		return (int)count;
5272 	}
5273 
5274 	return failure ? limit : (int)count;
5275 }
5276 
5277 /**
5278  * stmmac_rx - manage the receive process
5279  * @priv: driver private structure
5280  * @limit: napi budget
5281  * @queue: RX queue index.
5282  * Description :  this is the function called by the napi poll method.
5283  * It gets all the frames inside the ring.
5284  */
5285 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5286 {
5287 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5288 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5289 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5290 	struct stmmac_channel *ch = &priv->channel[queue];
5291 	unsigned int count = 0, error = 0, len = 0;
5292 	int status = 0, coe = priv->hw->rx_csum;
5293 	unsigned int next_entry = rx_q->cur_rx;
5294 	enum dma_data_direction dma_dir;
5295 	unsigned int desc_size;
5296 	struct sk_buff *skb = NULL;
5297 	struct stmmac_xdp_buff ctx;
5298 	int xdp_status = 0;
5299 	int buf_sz;
5300 
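	/* Round the XDP frame size up to whole pages and never process more
	 * descriptors than the ring holds in one NAPI run.
	 */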
5301 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5302 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5303 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5304 
5305 	if (netif_msg_rx_status(priv)) {
5306 		void *rx_head;
5307 
5308 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5309 		if (priv->extend_desc) {
5310 			rx_head = (void *)rx_q->dma_erx;
5311 			desc_size = sizeof(struct dma_extended_desc);
5312 		} else {
5313 			rx_head = (void *)rx_q->dma_rx;
5314 			desc_size = sizeof(struct dma_desc);
5315 		}
5316 
5317 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5318 				    rx_q->dma_rx_phy, desc_size);
5319 	}
5320 	while (count < limit) {
5321 		unsigned int buf1_len = 0, buf2_len = 0;
5322 		enum pkt_hash_types hash_type;
5323 		struct stmmac_rx_buffer *buf;
5324 		struct dma_desc *np, *p;
5325 		int entry;
5326 		u32 hash;
5327 
5328 		if (!count && rx_q->state_saved) {
5329 			skb = rx_q->state.skb;
5330 			error = rx_q->state.error;
5331 			len = rx_q->state.len;
5332 		} else {
5333 			rx_q->state_saved = false;
5334 			skb = NULL;
5335 			error = 0;
5336 			len = 0;
5337 		}
5338 
5339 read_again:
5340 		if (count >= limit)
5341 			break;
5342 
5343 		buf1_len = 0;
5344 		buf2_len = 0;
5345 		entry = next_entry;
5346 		buf = &rx_q->buf_pool[entry];
5347 
5348 		if (priv->extend_desc)
5349 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5350 		else
5351 			p = rx_q->dma_rx + entry;
5352 
5353 		/* read the status of the incoming frame */
5354 		status = stmmac_rx_status(priv, &priv->xstats, p);
5355 		/* check if still owned by the DMA; otherwise go ahead */
5356 		if (unlikely(status & dma_own))
5357 			break;
5358 
5359 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5360 						priv->dma_conf.dma_rx_size);
5361 		next_entry = rx_q->cur_rx;
5362 
5363 		if (priv->extend_desc)
5364 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5365 		else
5366 			np = rx_q->dma_rx + next_entry;
5367 
5368 		prefetch(np);
5369 
5370 		if (priv->extend_desc)
5371 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5372 		if (unlikely(status == discard_frame)) {
5373 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5374 			buf->page = NULL;
5375 			error = 1;
5376 			if (!priv->hwts_rx_en)
5377 				rx_errors++;
5378 		}
5379 
5380 		if (unlikely(error && (status & rx_not_ls)))
5381 			goto read_again;
5382 		if (unlikely(error)) {
5383 			dev_kfree_skb(skb);
5384 			skb = NULL;
5385 			count++;
5386 			continue;
5387 		}
5388 
5389 		/* Buffer is good. Go on. */
5390 
5391 		prefetch(page_address(buf->page) + buf->page_offset);
5392 		if (buf->sec_page)
5393 			prefetch(page_address(buf->sec_page));
5394 
5395 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5396 		len += buf1_len;
5397 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5398 		len += buf2_len;
5399 
5400 		/* ACS is disabled; strip manually. */
5401 		if (likely(!(status & rx_not_ls))) {
5402 			if (buf2_len) {
5403 				buf2_len -= ETH_FCS_LEN;
5404 				len -= ETH_FCS_LEN;
5405 			} else if (buf1_len) {
5406 				buf1_len -= ETH_FCS_LEN;
5407 				len -= ETH_FCS_LEN;
5408 			}
5409 		}
5410 
5411 		if (!skb) {
5412 			unsigned int pre_len, sync_len;
5413 
5414 			dma_sync_single_for_cpu(priv->device, buf->addr,
5415 						buf1_len, dma_dir);
5416 
5417 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5418 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5419 					 buf->page_offset, buf1_len, true);
5420 
5421 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5422 				  buf->page_offset;
5423 
5424 			ctx.priv = priv;
5425 			ctx.desc = p;
5426 			ctx.ndesc = np;
5427 
5428 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5429 			/* Due to xdp_adjust_tail: make the DMA sync
5430 			 * for_device cover the max length the CPU touched
5431 			 */
5432 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5433 				   buf->page_offset;
5434 			sync_len = max(sync_len, pre_len);
5435 
5436 			/* For Not XDP_PASS verdict */
5437 			if (IS_ERR(skb)) {
5438 				unsigned int xdp_res = -PTR_ERR(skb);
5439 
5440 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5441 					page_pool_put_page(rx_q->page_pool,
5442 							   virt_to_head_page(ctx.xdp.data),
5443 							   sync_len, true);
5444 					buf->page = NULL;
5445 					rx_dropped++;
5446 
5447 					/* Clear skb as it was set as an
5448 					 * error status by the XDP program.
5449 					 */
5450 					skb = NULL;
5451 
5452 					if (unlikely((status & rx_not_ls)))
5453 						goto read_again;
5454 
5455 					count++;
5456 					continue;
5457 				} else if (xdp_res & (STMMAC_XDP_TX |
5458 						      STMMAC_XDP_REDIRECT)) {
5459 					xdp_status |= xdp_res;
5460 					buf->page = NULL;
5461 					skb = NULL;
5462 					count++;
5463 					continue;
5464 				}
5465 			}
5466 		}
5467 
5468 		if (!skb) {
5469 			/* XDP program may expand or reduce tail */
5470 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5471 
5472 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5473 			if (!skb) {
5474 				rx_dropped++;
5475 				count++;
5476 				goto drain_data;
5477 			}
5478 
5479 			/* XDP program may adjust header */
5480 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5481 			skb_put(skb, buf1_len);
5482 
5483 			/* Data payload copied into SKB, page ready for recycle */
5484 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5485 			buf->page = NULL;
5486 		} else if (buf1_len) {
5487 			dma_sync_single_for_cpu(priv->device, buf->addr,
5488 						buf1_len, dma_dir);
5489 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5490 					buf->page, buf->page_offset, buf1_len,
5491 					priv->dma_conf.dma_buf_sz);
5492 
5493 			/* Data payload appended into SKB */
5494 			skb_mark_for_recycle(skb);
5495 			buf->page = NULL;
5496 		}
5497 
5498 		if (buf2_len) {
5499 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5500 						buf2_len, dma_dir);
5501 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5502 					buf->sec_page, 0, buf2_len,
5503 					priv->dma_conf.dma_buf_sz);
5504 
5505 			/* Data payload appended into SKB */
5506 			skb_mark_for_recycle(skb);
5507 			buf->sec_page = NULL;
5508 		}
5509 
5510 drain_data:
5511 		if (likely(status & rx_not_ls))
5512 			goto read_again;
5513 		if (!skb)
5514 			continue;
5515 
5516 		/* Got entire packet into SKB. Finish it. */
5517 
5518 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5519 		stmmac_rx_vlan(priv->dev, skb);
5520 		skb->protocol = eth_type_trans(skb, priv->dev);
5521 
5522 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5523 			skb_checksum_none_assert(skb);
5524 		else
5525 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5526 
5527 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5528 			skb_set_hash(skb, hash, hash_type);
5529 
5530 		skb_record_rx_queue(skb, queue);
5531 		napi_gro_receive(&ch->rx_napi, skb);
5532 		skb = NULL;
5533 
5534 		rx_packets++;
5535 		rx_bytes += len;
5536 		count++;
5537 	}
5538 
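	/* The last frame was not fully received (or its SKB is still being
	 * built): save the partial state so the next NAPI poll can resume it.
	 */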
5539 	if (status & rx_not_ls || skb) {
5540 		rx_q->state_saved = true;
5541 		rx_q->state.skb = skb;
5542 		rx_q->state.error = error;
5543 		rx_q->state.len = len;
5544 	}
5545 
5546 	stmmac_finalize_xdp_rx(priv, xdp_status);
5547 
5548 	stmmac_rx_refill(priv, queue);
5549 
5550 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5551 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5552 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5553 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5554 	u64_stats_update_end(&rxq_stats->napi_syncp);
5555 
5556 	priv->xstats.rx_dropped += rx_dropped;
5557 	priv->xstats.rx_errors += rx_errors;
5558 
5559 	return count;
5560 }
5561 
5562 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5563 {
5564 	struct stmmac_channel *ch =
5565 		container_of(napi, struct stmmac_channel, rx_napi);
5566 	struct stmmac_priv *priv = ch->priv_data;
5567 	struct stmmac_rxq_stats *rxq_stats;
5568 	u32 chan = ch->index;
5569 	int work_done;
5570 
5571 	rxq_stats = &priv->xstats.rxq_stats[chan];
5572 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5573 	u64_stats_inc(&rxq_stats->napi.poll);
5574 	u64_stats_update_end(&rxq_stats->napi_syncp);
5575 
5576 	work_done = stmmac_rx(priv, budget, chan);
5577 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5578 		unsigned long flags;
5579 
5580 		spin_lock_irqsave(&ch->lock, flags);
5581 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5582 		spin_unlock_irqrestore(&ch->lock, flags);
5583 	}
5584 
5585 	return work_done;
5586 }
5587 
5588 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5589 {
5590 	struct stmmac_channel *ch =
5591 		container_of(napi, struct stmmac_channel, tx_napi);
5592 	struct stmmac_priv *priv = ch->priv_data;
5593 	struct stmmac_txq_stats *txq_stats;
5594 	u32 chan = ch->index;
5595 	int work_done;
5596 
5597 	txq_stats = &priv->xstats.txq_stats[chan];
5598 	u64_stats_update_begin(&txq_stats->napi_syncp);
5599 	u64_stats_inc(&txq_stats->napi.poll);
5600 	u64_stats_update_end(&txq_stats->napi_syncp);
5601 
5602 	work_done = stmmac_tx_clean(priv, budget, chan);
5603 	work_done = min(work_done, budget);
5604 
5605 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5606 		unsigned long flags;
5607 
5608 		spin_lock_irqsave(&ch->lock, flags);
5609 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5610 		spin_unlock_irqrestore(&ch->lock, flags);
5611 	}
5612 
5613 	return work_done;
5614 }
5615 
5616 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5617 {
5618 	struct stmmac_channel *ch =
5619 		container_of(napi, struct stmmac_channel, rxtx_napi);
5620 	struct stmmac_priv *priv = ch->priv_data;
5621 	int rx_done, tx_done, rxtx_done;
5622 	struct stmmac_rxq_stats *rxq_stats;
5623 	struct stmmac_txq_stats *txq_stats;
5624 	u32 chan = ch->index;
5625 
5626 	rxq_stats = &priv->xstats.rxq_stats[chan];
5627 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5628 	u64_stats_inc(&rxq_stats->napi.poll);
5629 	u64_stats_update_end(&rxq_stats->napi_syncp);
5630 
5631 	txq_stats = &priv->xstats.txq_stats[chan];
5632 	u64_stats_update_begin(&txq_stats->napi_syncp);
5633 	u64_stats_inc(&txq_stats->napi.poll);
5634 	u64_stats_update_end(&txq_stats->napi_syncp);
5635 
5636 	tx_done = stmmac_tx_clean(priv, budget, chan);
5637 	tx_done = min(tx_done, budget);
5638 
5639 	rx_done = stmmac_rx_zc(priv, budget, chan);
5640 
5641 	rxtx_done = max(tx_done, rx_done);
5642 
5643 	/* If either TX or RX work is not complete, return budget
5644 	 * and keep polling
5645 	 */
5646 	if (rxtx_done >= budget)
5647 		return budget;
5648 
5649 	/* all work done, exit the polling mode */
5650 	if (napi_complete_done(napi, rxtx_done)) {
5651 		unsigned long flags;
5652 
5653 		spin_lock_irqsave(&ch->lock, flags);
5654 		/* Both RX and TX work are complete,
5655 		 * so enable both RX & TX IRQs.
5656 		 */
5657 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5658 		spin_unlock_irqrestore(&ch->lock, flags);
5659 	}
5660 
5661 	return min(rxtx_done, budget - 1);
5662 }
5663 
5664 /**
5665  *  stmmac_tx_timeout
5666  *  @dev : Pointer to net device structure
5667  *  @txqueue: the index of the hanging transmit queue
5668  *  Description: this function is called when a packet transmission fails to
5669  *   complete within a reasonable time. The driver will mark the error in the
5670  *   netdev structure and arrange for the device to be reset to a sane state
5671  *   in order to transmit a new packet.
5672  */
5673 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5674 {
5675 	struct stmmac_priv *priv = netdev_priv(dev);
5676 
5677 	stmmac_global_err(priv);
5678 }
5679 
5680 /**
5681  *  stmmac_set_rx_mode - entry point for multicast addressing
5682  *  @dev : pointer to the device structure
5683  *  Description:
5684  *  This function is a driver entry point which gets called by the kernel
5685  *  whenever multicast addresses must be enabled/disabled.
5686  *  Return value:
5687  *  void.
5688  */
5689 static void stmmac_set_rx_mode(struct net_device *dev)
5690 {
5691 	struct stmmac_priv *priv = netdev_priv(dev);
5692 
5693 	stmmac_set_filter(priv, priv->hw, dev);
5694 }
5695 
5696 /**
5697  *  stmmac_change_mtu - entry point to change MTU size for the device.
5698  *  @dev : device pointer.
5699  *  @new_mtu : the new MTU size for the device.
5700  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5701  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5702  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5703  *  Return value:
5704  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5705  *  file on failure.
5706  */
5707 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5708 {
5709 	struct stmmac_priv *priv = netdev_priv(dev);
5710 	int txfifosz = priv->plat->tx_fifo_size;
5711 	struct stmmac_dma_conf *dma_conf;
5712 	const int mtu = new_mtu;
5713 	int ret;
5714 
5715 	if (txfifosz == 0)
5716 		txfifosz = priv->dma_cap.tx_fifo_size;
5717 
5718 	txfifosz /= priv->plat->tx_queues_to_use;
5719 
5720 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5721 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5722 		return -EINVAL;
5723 	}
5724 
5725 	new_mtu = STMMAC_ALIGN(new_mtu);
5726 
5727 	/* Reject the change if the FIFO is too small or the MTU too large */
5728 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5729 		return -EINVAL;
5730 
5731 	if (netif_running(dev)) {
5732 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5733 		/* Try to allocate the new DMA conf with the new mtu */
5734 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5735 		if (IS_ERR(dma_conf)) {
5736 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5737 				   mtu);
5738 			return PTR_ERR(dma_conf);
5739 		}
5740 
5741 		stmmac_release(dev);
5742 
5743 		ret = __stmmac_open(dev, dma_conf);
5744 		if (ret) {
5745 			free_dma_desc_resources(priv, dma_conf);
5746 			kfree(dma_conf);
5747 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5748 			return ret;
5749 		}
5750 
5751 		kfree(dma_conf);
5752 
5753 		stmmac_set_rx_mode(dev);
5754 	}
5755 
5756 	dev->mtu = mtu;
5757 	netdev_update_features(dev);
5758 
5759 	return 0;
5760 }
5761 
5762 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5763 					     netdev_features_t features)
5764 {
5765 	struct stmmac_priv *priv = netdev_priv(dev);
5766 
5767 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5768 		features &= ~NETIF_F_RXCSUM;
5769 
5770 	if (!priv->plat->tx_coe)
5771 		features &= ~NETIF_F_CSUM_MASK;
5772 
5773 	/* Some GMAC devices have buggy Jumbo frame support that
5774 	 * needs to have the Tx COE disabled for oversized frames
5775 	 * (due to limited buffer sizes). In this case we disable
5776 	 * the TX csum insertion in the TDES and do not use SF.
5777 	 */
5778 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5779 		features &= ~NETIF_F_CSUM_MASK;
5780 
5781 	/* Disable tso if asked by ethtool */
5782 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5783 		if (features & NETIF_F_TSO)
5784 			priv->tso = true;
5785 		else
5786 			priv->tso = false;
5787 	}
5788 
5789 	return features;
5790 }
5791 
5792 static int stmmac_set_features(struct net_device *netdev,
5793 			       netdev_features_t features)
5794 {
5795 	struct stmmac_priv *priv = netdev_priv(netdev);
5796 
5797 	/* Keep the COE Type in case csum is supported */
5798 	if (features & NETIF_F_RXCSUM)
5799 		priv->hw->rx_csum = priv->plat->rx_coe;
5800 	else
5801 		priv->hw->rx_csum = 0;
5802 	/* No check needed because rx_coe has been set before and it will be
5803 	 * fixed if an issue occurs.
5804 	 */
5805 	stmmac_rx_ipc(priv, priv->hw);
5806 
5807 	if (priv->sph_cap) {
5808 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5809 		u32 chan;
5810 
5811 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5812 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5813 	}
5814 
5815 	return 0;
5816 }
5817 
5818 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5819 {
5820 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5821 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5822 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5823 	bool *hs_enable = &fpe_cfg->hs_enable;
5824 
5825 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5826 		return;
5827 
5828 	/* If LP has sent verify mPacket, LP is FPE capable */
5829 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5830 		if (*lp_state < FPE_STATE_CAPABLE)
5831 			*lp_state = FPE_STATE_CAPABLE;
5832 
5833 		/* If the user has requested FPE enable, respond quickly */
5834 		if (*hs_enable)
5835 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5836 						fpe_cfg,
5837 						MPACKET_RESPONSE);
5838 	}
5839 
5840 	/* If Local has sent verify mPacket, Local is FPE capable */
5841 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5842 		if (*lo_state < FPE_STATE_CAPABLE)
5843 			*lo_state = FPE_STATE_CAPABLE;
5844 	}
5845 
5846 	/* If LP has sent response mPacket, LP is entering FPE ON */
5847 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5848 		*lp_state = FPE_STATE_ENTERING_ON;
5849 
5850 	/* If Local has sent response mPacket, Local is entering FPE ON */
5851 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5852 		*lo_state = FPE_STATE_ENTERING_ON;
5853 
5854 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5855 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5856 	    priv->fpe_wq) {
5857 		queue_work(priv->fpe_wq, &priv->fpe_task);
5858 	}
5859 }
5860 
5861 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5862 {
5863 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5864 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5865 	u32 queues_count;
5866 	u32 queue;
5867 	bool xmac;
5868 
5869 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5870 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5871 
5872 	if (priv->irq_wake)
5873 		pm_wakeup_event(priv->device, 0);
5874 
5875 	if (priv->dma_cap.estsel)
5876 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5877 				      &priv->xstats, tx_cnt);
5878 
5879 	if (priv->dma_cap.fpesel) {
5880 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5881 						   priv->dev);
5882 
5883 		stmmac_fpe_event_status(priv, status);
5884 	}
5885 
5886 	/* To handle the GMAC's own interrupts */
5887 	if ((priv->plat->has_gmac) || xmac) {
5888 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5889 
5890 		if (unlikely(status)) {
5891 			/* For LPI we need to save the tx status */
5892 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5893 				priv->tx_path_in_lpi_mode = true;
5894 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5895 				priv->tx_path_in_lpi_mode = false;
5896 		}
5897 
5898 		for (queue = 0; queue < queues_count; queue++) {
5899 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5900 							    queue);
5901 		}
5902 
5903 		/* PCS link status */
5904 		if (priv->hw->pcs &&
5905 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5906 			if (priv->xstats.pcs_link)
5907 				netif_carrier_on(priv->dev);
5908 			else
5909 				netif_carrier_off(priv->dev);
5910 		}
5911 
5912 		stmmac_timestamp_interrupt(priv, priv);
5913 	}
5914 }
5915 
5916 /**
5917  *  stmmac_interrupt - main ISR
5918  *  @irq: interrupt number.
5919  *  @dev_id: to pass the net device pointer.
5920  *  Description: this is the main driver interrupt service routine.
5921  *  It can call:
5922  *  o DMA service routine (to manage incoming frame reception and transmission
5923  *    status)
5924  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5925  *    interrupts.
5926  */
5927 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5928 {
5929 	struct net_device *dev = (struct net_device *)dev_id;
5930 	struct stmmac_priv *priv = netdev_priv(dev);
5931 
5932 	/* Check if adapter is up */
5933 	if (test_bit(STMMAC_DOWN, &priv->state))
5934 		return IRQ_HANDLED;
5935 
5936 	/* Check if a fatal error happened */
5937 	if (stmmac_safety_feat_interrupt(priv))
5938 		return IRQ_HANDLED;
5939 
5940 	/* To handle Common interrupts */
5941 	stmmac_common_interrupt(priv);
5942 
5943 	/* To handle DMA interrupts */
5944 	stmmac_dma_interrupt(priv);
5945 
5946 	return IRQ_HANDLED;
5947 }
5948 
5949 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5950 {
5951 	struct net_device *dev = (struct net_device *)dev_id;
5952 	struct stmmac_priv *priv = netdev_priv(dev);
5953 
5954 	if (unlikely(!dev)) {
5955 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5956 		return IRQ_NONE;
5957 	}
5958 
5959 	/* Check if adapter is up */
5960 	if (test_bit(STMMAC_DOWN, &priv->state))
5961 		return IRQ_HANDLED;
5962 
5963 	/* To handle Common interrupts */
5964 	stmmac_common_interrupt(priv);
5965 
5966 	return IRQ_HANDLED;
5967 }
5968 
5969 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5970 {
5971 	struct net_device *dev = (struct net_device *)dev_id;
5972 	struct stmmac_priv *priv = netdev_priv(dev);
5973 
5974 	if (unlikely(!dev)) {
5975 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5976 		return IRQ_NONE;
5977 	}
5978 
5979 	/* Check if adapter is up */
5980 	if (test_bit(STMMAC_DOWN, &priv->state))
5981 		return IRQ_HANDLED;
5982 
5983 	/* Check if a fatal error happened */
5984 	stmmac_safety_feat_interrupt(priv);
5985 
5986 	return IRQ_HANDLED;
5987 }
5988 
5989 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5990 {
5991 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5992 	struct stmmac_dma_conf *dma_conf;
5993 	int chan = tx_q->queue_index;
5994 	struct stmmac_priv *priv;
5995 	int status;
5996 
5997 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5998 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5999 
6000 	if (unlikely(!data)) {
6001 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6002 		return IRQ_NONE;
6003 	}
6004 
6005 	/* Check if adapter is up */
6006 	if (test_bit(STMMAC_DOWN, &priv->state))
6007 		return IRQ_HANDLED;
6008 
6009 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6010 
6011 	if (unlikely(status & tx_hard_error_bump_tc)) {
6012 		/* Try to bump up the dma threshold on this failure */
6013 		stmmac_bump_dma_threshold(priv, chan);
6014 	} else if (unlikely(status == tx_hard_error)) {
6015 		stmmac_tx_err(priv, chan);
6016 	}
6017 
6018 	return IRQ_HANDLED;
6019 }
6020 
6021 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6022 {
6023 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6024 	struct stmmac_dma_conf *dma_conf;
6025 	int chan = rx_q->queue_index;
6026 	struct stmmac_priv *priv;
6027 
6028 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6029 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6030 
6031 	if (unlikely(!data)) {
6032 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6033 		return IRQ_NONE;
6034 	}
6035 
6036 	/* Check if adapter is up */
6037 	if (test_bit(STMMAC_DOWN, &priv->state))
6038 		return IRQ_HANDLED;
6039 
6040 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6041 
6042 	return IRQ_HANDLED;
6043 }
6044 
6045 /**
6046  *  stmmac_ioctl - Entry point for the Ioctl
6047  *  @dev: Device pointer.
6048  *  @rq: An IOCTL-specific structure that can contain a pointer to
6049  *  a proprietary structure used to pass information to the driver.
6050  *  @cmd: IOCTL command
6051  *  Description:
6052  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6053  */
6054 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6055 {
6056 	struct stmmac_priv *priv = netdev_priv(dev);
6057 	int ret = -EOPNOTSUPP;
6058 
6059 	if (!netif_running(dev))
6060 		return -EINVAL;
6061 
6062 	switch (cmd) {
6063 	case SIOCGMIIPHY:
6064 	case SIOCGMIIREG:
6065 	case SIOCSMIIREG:
6066 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6067 		break;
6068 	case SIOCSHWTSTAMP:
6069 		ret = stmmac_hwtstamp_set(dev, rq);
6070 		break;
6071 	case SIOCGHWTSTAMP:
6072 		ret = stmmac_hwtstamp_get(dev, rq);
6073 		break;
6074 	default:
6075 		break;
6076 	}
6077 
6078 	return ret;
6079 }
6080 
6081 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6082 				    void *cb_priv)
6083 {
6084 	struct stmmac_priv *priv = cb_priv;
6085 	int ret = -EOPNOTSUPP;
6086 
6087 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6088 		return ret;
6089 
6090 	__stmmac_disable_all_queues(priv);
6091 
6092 	switch (type) {
6093 	case TC_SETUP_CLSU32:
6094 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6095 		break;
6096 	case TC_SETUP_CLSFLOWER:
6097 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6098 		break;
6099 	default:
6100 		break;
6101 	}
6102 
6103 	stmmac_enable_all_queues(priv);
6104 	return ret;
6105 }
6106 
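/* List of flow block callbacks registered through TC_SETUP_BLOCK */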
6107 static LIST_HEAD(stmmac_block_cb_list);
6108 
6109 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6110 			   void *type_data)
6111 {
6112 	struct stmmac_priv *priv = netdev_priv(ndev);
6113 
6114 	switch (type) {
6115 	case TC_QUERY_CAPS:
6116 		return stmmac_tc_query_caps(priv, priv, type_data);
6117 	case TC_SETUP_BLOCK:
6118 		return flow_block_cb_setup_simple(type_data,
6119 						  &stmmac_block_cb_list,
6120 						  stmmac_setup_tc_block_cb,
6121 						  priv, priv, true);
6122 	case TC_SETUP_QDISC_CBS:
6123 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6124 	case TC_SETUP_QDISC_TAPRIO:
6125 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6126 	case TC_SETUP_QDISC_ETF:
6127 		return stmmac_tc_setup_etf(priv, priv, type_data);
6128 	default:
6129 		return -EOPNOTSUPP;
6130 	}
6131 }
6132 
6133 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6134 			       struct net_device *sb_dev)
6135 {
6136 	int gso = skb_shinfo(skb)->gso_type;
6137 
6138 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6139 		/*
6140 		 * There is no way to determine the number of TSO/USO
6141 		 * capable Queues. Let's always use Queue 0
6142 		 * because if TSO/USO is supported then at least this
6143 		 * one will be capable.
6144 		 */
6145 		return 0;
6146 	}
6147 
6148 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6149 }
6150 
6151 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6152 {
6153 	struct stmmac_priv *priv = netdev_priv(ndev);
6154 	int ret = 0;
6155 
6156 	ret = pm_runtime_resume_and_get(priv->device);
6157 	if (ret < 0)
6158 		return ret;
6159 
6160 	ret = eth_mac_addr(ndev, addr);
6161 	if (ret)
6162 		goto set_mac_error;
6163 
6164 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6165 
6166 set_mac_error:
6167 	pm_runtime_put(priv->device);
6168 
6169 	return ret;
6170 }
6171 
6172 #ifdef CONFIG_DEBUG_FS
6173 static struct dentry *stmmac_fs_dir;
6174 
6175 static void sysfs_display_ring(void *head, int size, int extend_desc,
6176 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6177 {
6178 	int i;
6179 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6180 	struct dma_desc *p = (struct dma_desc *)head;
6181 	dma_addr_t dma_addr;
6182 
6183 	for (i = 0; i < size; i++) {
6184 		if (extend_desc) {
6185 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6186 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6187 				   i, &dma_addr,
6188 				   le32_to_cpu(ep->basic.des0),
6189 				   le32_to_cpu(ep->basic.des1),
6190 				   le32_to_cpu(ep->basic.des2),
6191 				   le32_to_cpu(ep->basic.des3));
6192 			ep++;
6193 		} else {
6194 			dma_addr = dma_phy_addr + i * sizeof(*p);
6195 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6196 				   i, &dma_addr,
6197 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6198 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6199 			p++;
6200 		}
6201 		seq_printf(seq, "\n");
6202 	}
6203 }
6204 
6205 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6206 {
6207 	struct net_device *dev = seq->private;
6208 	struct stmmac_priv *priv = netdev_priv(dev);
6209 	u32 rx_count = priv->plat->rx_queues_to_use;
6210 	u32 tx_count = priv->plat->tx_queues_to_use;
6211 	u32 queue;
6212 
6213 	if ((dev->flags & IFF_UP) == 0)
6214 		return 0;
6215 
6216 	for (queue = 0; queue < rx_count; queue++) {
6217 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6218 
6219 		seq_printf(seq, "RX Queue %d:\n", queue);
6220 
6221 		if (priv->extend_desc) {
6222 			seq_printf(seq, "Extended descriptor ring:\n");
6223 			sysfs_display_ring((void *)rx_q->dma_erx,
6224 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6225 		} else {
6226 			seq_printf(seq, "Descriptor ring:\n");
6227 			sysfs_display_ring((void *)rx_q->dma_rx,
6228 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6229 		}
6230 	}
6231 
6232 	for (queue = 0; queue < tx_count; queue++) {
6233 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6234 
6235 		seq_printf(seq, "TX Queue %d:\n", queue);
6236 
6237 		if (priv->extend_desc) {
6238 			seq_printf(seq, "Extended descriptor ring:\n");
6239 			sysfs_display_ring((void *)tx_q->dma_etx,
6240 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6241 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6242 			seq_printf(seq, "Descriptor ring:\n");
6243 			sysfs_display_ring((void *)tx_q->dma_tx,
6244 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6245 		}
6246 	}
6247 
6248 	return 0;
6249 }
6250 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6251 
6252 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6253 {
6254 	static const char * const dwxgmac_timestamp_source[] = {
6255 		"None",
6256 		"Internal",
6257 		"External",
6258 		"Both",
6259 	};
6260 	static const char * const dwxgmac_safety_feature_desc[] = {
6261 		"No",
6262 		"All Safety Features with ECC and Parity",
6263 		"All Safety Features without ECC or Parity",
6264 		"All Safety Features with Parity Only",
6265 		"ECC Only",
6266 		"UNDEFINED",
6267 		"UNDEFINED",
6268 		"UNDEFINED",
6269 	};
6270 	struct net_device *dev = seq->private;
6271 	struct stmmac_priv *priv = netdev_priv(dev);
6272 
6273 	if (!priv->hw_cap_support) {
6274 		seq_printf(seq, "DMA HW features not supported\n");
6275 		return 0;
6276 	}
6277 
6278 	seq_printf(seq, "==============================\n");
6279 	seq_printf(seq, "\tDMA HW features\n");
6280 	seq_printf(seq, "==============================\n");
6281 
6282 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6283 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6284 	seq_printf(seq, "\t1000 Mbps: %s\n",
6285 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6286 	seq_printf(seq, "\tHalf duplex: %s\n",
6287 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6288 	if (priv->plat->has_xgmac) {
6289 		seq_printf(seq,
6290 			   "\tNumber of Additional MAC address registers: %d\n",
6291 			   priv->dma_cap.multi_addr);
6292 	} else {
6293 		seq_printf(seq, "\tHash Filter: %s\n",
6294 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6295 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6296 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6297 	}
6298 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6299 		   (priv->dma_cap.pcs) ? "Y" : "N");
6300 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6301 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6302 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6303 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6304 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6305 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6306 	seq_printf(seq, "\tRMON module: %s\n",
6307 		   (priv->dma_cap.rmon) ? "Y" : "N");
6308 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6309 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6310 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6311 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6312 	if (priv->plat->has_xgmac)
6313 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6314 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6315 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6316 		   (priv->dma_cap.eee) ? "Y" : "N");
6317 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6318 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6319 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6320 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6321 	    priv->plat->has_xgmac) {
6322 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6323 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6324 	} else {
6325 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6326 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6327 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6328 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6329 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6330 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6331 	}
6332 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6333 		   priv->dma_cap.number_rx_channel);
6334 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6335 		   priv->dma_cap.number_tx_channel);
6336 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6337 		   priv->dma_cap.number_rx_queues);
6338 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6339 		   priv->dma_cap.number_tx_queues);
6340 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6341 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6342 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6343 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6344 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6345 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6346 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6347 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6348 		   priv->dma_cap.pps_out_num);
6349 	seq_printf(seq, "\tSafety Features: %s\n",
6350 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6351 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6352 		   priv->dma_cap.frpsel ? "Y" : "N");
6353 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6354 		   priv->dma_cap.host_dma_width);
6355 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6356 		   priv->dma_cap.rssen ? "Y" : "N");
6357 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6358 		   priv->dma_cap.vlhash ? "Y" : "N");
6359 	seq_printf(seq, "\tSplit Header: %s\n",
6360 		   priv->dma_cap.sphen ? "Y" : "N");
6361 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6362 		   priv->dma_cap.vlins ? "Y" : "N");
6363 	seq_printf(seq, "\tDouble VLAN: %s\n",
6364 		   priv->dma_cap.dvlan ? "Y" : "N");
6365 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6366 		   priv->dma_cap.l3l4fnum);
6367 	seq_printf(seq, "\tARP Offloading: %s\n",
6368 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6369 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6370 		   priv->dma_cap.estsel ? "Y" : "N");
6371 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6372 		   priv->dma_cap.fpesel ? "Y" : "N");
6373 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6374 		   priv->dma_cap.tbssel ? "Y" : "N");
6375 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6376 		   priv->dma_cap.tbs_ch_num);
6377 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6378 		   priv->dma_cap.sgfsel ? "Y" : "N");
6379 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6380 		   BIT(priv->dma_cap.ttsfd) >> 1);
6381 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6382 		   priv->dma_cap.numtc);
6383 	seq_printf(seq, "\tDCB Feature: %s\n",
6384 		   priv->dma_cap.dcben ? "Y" : "N");
6385 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6386 		   priv->dma_cap.advthword ? "Y" : "N");
6387 	seq_printf(seq, "\tPTP Offload: %s\n",
6388 		   priv->dma_cap.ptoen ? "Y" : "N");
6389 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6390 		   priv->dma_cap.osten ? "Y" : "N");
6391 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6392 		   priv->dma_cap.pfcen ? "Y" : "N");
6393 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6394 		   BIT(priv->dma_cap.frpes) << 6);
6395 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6396 		   BIT(priv->dma_cap.frpbs) << 6);
6397 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6398 		   priv->dma_cap.frppipe_num);
6399 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6400 		   priv->dma_cap.nrvf_num ?
6401 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6402 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6403 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6404 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6405 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6406 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6407 		   priv->dma_cap.cbtisel ? "Y" : "N");
6408 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6409 		   priv->dma_cap.aux_snapshot_n);
6410 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6411 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6412 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6413 		   priv->dma_cap.edma ? "Y" : "N");
6414 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6415 		   priv->dma_cap.ediffc ? "Y" : "N");
6416 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6417 		   priv->dma_cap.vxn ? "Y" : "N");
6418 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6419 		   priv->dma_cap.dbgmem ? "Y" : "N");
6420 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6421 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6422 	return 0;
6423 }
6424 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6425 
6426 /* Use network device events to rename debugfs file entries.
6427  */
6428 static int stmmac_device_event(struct notifier_block *unused,
6429 			       unsigned long event, void *ptr)
6430 {
6431 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6432 	struct stmmac_priv *priv = netdev_priv(dev);
6433 
6434 	if (dev->netdev_ops != &stmmac_netdev_ops)
6435 		goto done;
6436 
6437 	switch (event) {
6438 	case NETDEV_CHANGENAME:
6439 		if (priv->dbgfs_dir)
6440 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6441 							 priv->dbgfs_dir,
6442 							 stmmac_fs_dir,
6443 							 dev->name);
6444 		break;
6445 	}
6446 done:
6447 	return NOTIFY_DONE;
6448 }
6449 
6450 static struct notifier_block stmmac_notifier = {
6451 	.notifier_call = stmmac_device_event,
6452 };
6453 
6454 static void stmmac_init_fs(struct net_device *dev)
6455 {
6456 	struct stmmac_priv *priv = netdev_priv(dev);
6457 
6458 	rtnl_lock();
6459 
6460 	/* Create per netdev entries */
6461 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6462 
6463 	/* Entry to report DMA RX/TX rings */
6464 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6465 			    &stmmac_rings_status_fops);
6466 
6467 	/* Entry to report the DMA HW features */
6468 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6469 			    &stmmac_dma_cap_fops);
6470 
6471 	rtnl_unlock();
6472 }
6473 
6474 static void stmmac_exit_fs(struct net_device *dev)
6475 {
6476 	struct stmmac_priv *priv = netdev_priv(dev);
6477 
6478 	debugfs_remove_recursive(priv->dbgfs_dir);
6479 }
6480 #endif /* CONFIG_DEBUG_FS */
6481 
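/* Little-endian CRC-32 (polynomial 0xEDB88320) over the VID bits;
 * stmmac_vlan_update() derives the VLAN hash filter bit from it.
 */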
6482 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6483 {
6484 	unsigned char *data = (unsigned char *)&vid_le;
6485 	unsigned char data_byte = 0;
6486 	u32 crc = ~0x0;
6487 	u32 temp = 0;
6488 	int i, bits;
6489 
6490 	bits = get_bitmask_order(VLAN_VID_MASK);
6491 	for (i = 0; i < bits; i++) {
6492 		if ((i % 8) == 0)
6493 			data_byte = data[i / 8];
6494 
6495 		temp = ((crc & 1) ^ data_byte) & 1;
6496 		crc >>= 1;
6497 		data_byte >>= 1;
6498 
6499 		if (temp)
6500 			crc ^= 0xedb88320;
6501 	}
6502 
6503 	return crc;
6504 }
6505 
6506 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6507 {
6508 	u32 crc, hash = 0;
6509 	__le16 pmatch = 0;
6510 	int count = 0;
6511 	u16 vid = 0;
6512 
6513 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6514 		__le16 vid_le = cpu_to_le16(vid);
6515 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6516 		hash |= (1 << crc);
6517 		count++;
6518 	}
6519 
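	/* Without VLAN hash filtering the MAC can only do a single perfect
	 * match (VID 0 always passes), so fall back to that or bail out.
	 */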
6520 	if (!priv->dma_cap.vlhash) {
6521 		if (count > 2) /* VID = 0 always passes filter */
6522 			return -EOPNOTSUPP;
6523 
6524 		pmatch = cpu_to_le16(vid);
6525 		hash = 0;
6526 	}
6527 
6528 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6529 }
6530 
6531 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6532 {
6533 	struct stmmac_priv *priv = netdev_priv(ndev);
6534 	bool is_double = false;
6535 	int ret;
6536 
6537 	ret = pm_runtime_resume_and_get(priv->device);
6538 	if (ret < 0)
6539 		return ret;
6540 
6541 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6542 		is_double = true;
6543 
6544 	set_bit(vid, priv->active_vlans);
6545 	ret = stmmac_vlan_update(priv, is_double);
6546 	if (ret) {
6547 		clear_bit(vid, priv->active_vlans);
6548 		goto err_pm_put;
6549 	}
6550 
6551 	if (priv->hw->num_vlan) {
6552 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6553 		if (ret)
6554 			goto err_pm_put;
6555 	}
6556 err_pm_put:
6557 	pm_runtime_put(priv->device);
6558 
6559 	return ret;
6560 }
6561 
6562 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6563 {
6564 	struct stmmac_priv *priv = netdev_priv(ndev);
6565 	bool is_double = false;
6566 	int ret;
6567 
6568 	ret = pm_runtime_resume_and_get(priv->device);
6569 	if (ret < 0)
6570 		return ret;
6571 
6572 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6573 		is_double = true;
6574 
6575 	clear_bit(vid, priv->active_vlans);
6576 
6577 	if (priv->hw->num_vlan) {
6578 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6579 		if (ret)
6580 			goto del_vlan_error;
6581 	}
6582 
6583 	ret = stmmac_vlan_update(priv, is_double);
6584 
6585 del_vlan_error:
6586 	pm_runtime_put(priv->device);
6587 
6588 	return ret;
6589 }
6590 
6591 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6592 {
6593 	struct stmmac_priv *priv = netdev_priv(dev);
6594 
6595 	switch (bpf->command) {
6596 	case XDP_SETUP_PROG:
6597 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6598 	case XDP_SETUP_XSK_POOL:
6599 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6600 					     bpf->xsk.queue_id);
6601 	default:
6602 		return -EOPNOTSUPP;
6603 	}
6604 }
6605 
6606 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6607 			   struct xdp_frame **frames, u32 flags)
6608 {
6609 	struct stmmac_priv *priv = netdev_priv(dev);
6610 	int cpu = smp_processor_id();
6611 	struct netdev_queue *nq;
6612 	int i, nxmit = 0;
6613 	int queue;
6614 
6615 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6616 		return -ENETDOWN;
6617 
6618 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6619 		return -EINVAL;
6620 
6621 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6622 	nq = netdev_get_tx_queue(priv->dev, queue);
6623 
6624 	__netif_tx_lock(nq, cpu);
6625 	/* Avoids TX time-out as we are sharing with slow path */
6626 	txq_trans_cond_update(nq);
6627 
6628 	for (i = 0; i < num_frames; i++) {
6629 		int res;
6630 
6631 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6632 		if (res == STMMAC_XDP_CONSUMED)
6633 			break;
6634 
6635 		nxmit++;
6636 	}
6637 
6638 	if (flags & XDP_XMIT_FLUSH) {
6639 		stmmac_flush_tx_descriptors(priv, queue);
6640 		stmmac_tx_timer_arm(priv, queue);
6641 	}
6642 
6643 	__netif_tx_unlock(nq);
6644 
6645 	return nxmit;
6646 }
6647 
6648 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6649 {
6650 	struct stmmac_channel *ch = &priv->channel[queue];
6651 	unsigned long flags;
6652 
6653 	spin_lock_irqsave(&ch->lock, flags);
6654 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6655 	spin_unlock_irqrestore(&ch->lock, flags);
6656 
6657 	stmmac_stop_rx_dma(priv, queue);
6658 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6659 }
6660 
6661 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6662 {
6663 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6664 	struct stmmac_channel *ch = &priv->channel[queue];
6665 	unsigned long flags;
6666 	u32 buf_size;
6667 	int ret;
6668 
6669 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6670 	if (ret) {
6671 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6672 		return;
6673 	}
6674 
6675 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6676 	if (ret) {
6677 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6678 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6679 		return;
6680 	}
6681 
6682 	stmmac_reset_rx_queue(priv, queue);
6683 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6684 
6685 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6686 			    rx_q->dma_rx_phy, rx_q->queue_index);
6687 
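	/* Program the tail pointer past the buffers already allocated so the
	 * DMA only uses descriptors that have a buffer attached.
	 */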
6688 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6689 			     sizeof(struct dma_desc));
6690 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6691 			       rx_q->rx_tail_addr, rx_q->queue_index);
6692 
6693 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6694 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6695 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6696 				      buf_size,
6697 				      rx_q->queue_index);
6698 	} else {
6699 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6700 				      priv->dma_conf.dma_buf_sz,
6701 				      rx_q->queue_index);
6702 	}
6703 
6704 	stmmac_start_rx_dma(priv, queue);
6705 
6706 	spin_lock_irqsave(&ch->lock, flags);
6707 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6708 	spin_unlock_irqrestore(&ch->lock, flags);
6709 }
6710 
6711 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6712 {
6713 	struct stmmac_channel *ch = &priv->channel[queue];
6714 	unsigned long flags;
6715 
6716 	spin_lock_irqsave(&ch->lock, flags);
6717 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6718 	spin_unlock_irqrestore(&ch->lock, flags);
6719 
6720 	stmmac_stop_tx_dma(priv, queue);
6721 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6722 }
6723 
6724 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6725 {
6726 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6727 	struct stmmac_channel *ch = &priv->channel[queue];
6728 	unsigned long flags;
6729 	int ret;
6730 
6731 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6732 	if (ret) {
6733 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6734 		return;
6735 	}
6736 
6737 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6738 	if (ret) {
6739 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6740 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6741 		return;
6742 	}
6743 
6744 	stmmac_reset_tx_queue(priv, queue);
6745 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6746 
6747 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6748 			    tx_q->dma_tx_phy, tx_q->queue_index);
6749 
6750 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6751 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6752 
6753 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6754 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6755 			       tx_q->tx_tail_addr, tx_q->queue_index);
6756 
6757 	stmmac_start_tx_dma(priv, queue);
6758 
6759 	spin_lock_irqsave(&ch->lock, flags);
6760 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6761 	spin_unlock_irqrestore(&ch->lock, flags);
6762 }
6763 
6764 void stmmac_xdp_release(struct net_device *dev)
6765 {
6766 	struct stmmac_priv *priv = netdev_priv(dev);
6767 	u32 chan;
6768 
6769 	/* Ensure tx function is not running */
6770 	netif_tx_disable(dev);
6771 
6772 	/* Disable NAPI process */
6773 	stmmac_disable_all_queues(priv);
6774 
6775 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6776 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6777 
6778 	/* Free the IRQ lines */
6779 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6780 
6781 	/* Stop TX/RX DMA channels */
6782 	stmmac_stop_all_dma(priv);
6783 
6784 	/* Release and free the Rx/Tx resources */
6785 	free_dma_desc_resources(priv, &priv->dma_conf);
6786 
6787 	/* Disable the MAC Rx/Tx */
6788 	stmmac_mac_set(priv, priv->ioaddr, false);
6789 
6790 	/* set trans_start so we don't get spurious
6791 	 * watchdogs during reset
6792 	 */
6793 	netif_trans_update(dev);
6794 	netif_carrier_off(dev);
6795 }
6796 
6797 int stmmac_xdp_open(struct net_device *dev)
6798 {
6799 	struct stmmac_priv *priv = netdev_priv(dev);
6800 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6801 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6802 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6803 	struct stmmac_rx_queue *rx_q;
6804 	struct stmmac_tx_queue *tx_q;
6805 	u32 buf_size;
6806 	bool sph_en;
6807 	u32 chan;
6808 	int ret;
6809 
6810 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6811 	if (ret < 0) {
6812 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6813 			   __func__);
6814 		goto dma_desc_error;
6815 	}
6816 
6817 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6818 	if (ret < 0) {
6819 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6820 			   __func__);
6821 		goto init_error;
6822 	}
6823 
6824 	stmmac_reset_queues_param(priv);
6825 
6826 	/* DMA CSR Channel configuration */
6827 	for (chan = 0; chan < dma_csr_ch; chan++) {
6828 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6829 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6830 	}
6831 
6832 	/* Adjust Split header */
6833 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6834 
6835 	/* DMA RX Channel Configuration */
6836 	for (chan = 0; chan < rx_cnt; chan++) {
6837 		rx_q = &priv->dma_conf.rx_queue[chan];
6838 
6839 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6840 				    rx_q->dma_rx_phy, chan);
6841 
6842 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6843 				     (rx_q->buf_alloc_num *
6844 				      sizeof(struct dma_desc));
6845 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6846 				       rx_q->rx_tail_addr, chan);
6847 
6848 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6849 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6850 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6851 					      buf_size,
6852 					      rx_q->queue_index);
6853 		} else {
6854 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6855 					      priv->dma_conf.dma_buf_sz,
6856 					      rx_q->queue_index);
6857 		}
6858 
6859 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6860 	}
6861 
6862 	/* DMA TX Channel Configuration */
6863 	for (chan = 0; chan < tx_cnt; chan++) {
6864 		tx_q = &priv->dma_conf.tx_queue[chan];
6865 
6866 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6867 				    tx_q->dma_tx_phy, chan);
6868 
6869 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6870 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6871 				       tx_q->tx_tail_addr, chan);
6872 
6873 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6874 		tx_q->txtimer.function = stmmac_tx_timer;
6875 	}
6876 
6877 	/* Enable the MAC Rx/Tx */
6878 	stmmac_mac_set(priv, priv->ioaddr, true);
6879 
6880 	/* Start Rx & Tx DMA Channels */
6881 	stmmac_start_all_dma(priv);
6882 
6883 	ret = stmmac_request_irq(dev);
6884 	if (ret)
6885 		goto irq_error;
6886 
6887 	/* Enable NAPI process */
6888 	stmmac_enable_all_queues(priv);
6889 	netif_carrier_on(dev);
6890 	netif_tx_start_all_queues(dev);
6891 	stmmac_enable_all_dma_irq(priv);
6892 
6893 	return 0;
6894 
6895 irq_error:
6896 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6897 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6898 
6899 	stmmac_hw_teardown(dev);
6900 init_error:
6901 	free_dma_desc_resources(priv, &priv->dma_conf);
6902 dma_desc_error:
6903 	return ret;
6904 }
6905 
6906 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6907 {
6908 	struct stmmac_priv *priv = netdev_priv(dev);
6909 	struct stmmac_rx_queue *rx_q;
6910 	struct stmmac_tx_queue *tx_q;
6911 	struct stmmac_channel *ch;
6912 
6913 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6914 	    !netif_carrier_ok(priv->dev))
6915 		return -ENETDOWN;
6916 
6917 	if (!stmmac_xdp_is_enabled(priv))
6918 		return -EINVAL;
6919 
6920 	if (queue >= priv->plat->rx_queues_to_use ||
6921 	    queue >= priv->plat->tx_queues_to_use)
6922 		return -EINVAL;
6923 
6924 	rx_q = &priv->dma_conf.rx_queue[queue];
6925 	tx_q = &priv->dma_conf.tx_queue[queue];
6926 	ch = &priv->channel[queue];
6927 
6928 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6929 		return -EINVAL;
6930 
6931 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6932 		/* EQoS does not have per-DMA channel SW interrupt,
6933 		 * so we schedule the RX NAPI straight away.
6934 		 */
6935 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6936 			__napi_schedule(&ch->rxtx_napi);
6937 	}
6938 
6939 	return 0;
6940 }
6941 
6942 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6943 {
6944 	struct stmmac_priv *priv = netdev_priv(dev);
6945 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6946 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6947 	unsigned int start;
6948 	int q;
6949 
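	/* Snapshot the per-queue counters under their u64_stats sequences so
	 * the 64-bit reads are consistent even on 32-bit machines.
	 */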
6950 	for (q = 0; q < tx_cnt; q++) {
6951 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6952 		u64 tx_packets;
6953 		u64 tx_bytes;
6954 
6955 		do {
6956 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
6957 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
6958 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
6959 		do {
6960 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
6961 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
6962 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
6963 
6964 		stats->tx_packets += tx_packets;
6965 		stats->tx_bytes += tx_bytes;
6966 	}
6967 
6968 	for (q = 0; q < rx_cnt; q++) {
6969 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6970 		u64 rx_packets;
6971 		u64 rx_bytes;
6972 
6973 		do {
6974 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
6975 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
6976 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
6977 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
6978 
6979 		stats->rx_packets += rx_packets;
6980 		stats->rx_bytes += rx_bytes;
6981 	}
6982 
6983 	stats->rx_dropped = priv->xstats.rx_dropped;
6984 	stats->rx_errors = priv->xstats.rx_errors;
6985 	stats->tx_dropped = priv->xstats.tx_dropped;
6986 	stats->tx_errors = priv->xstats.tx_errors;
6987 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6988 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6989 	stats->rx_length_errors = priv->xstats.rx_length;
6990 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6991 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6992 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6993 }
6994 
6995 static const struct net_device_ops stmmac_netdev_ops = {
6996 	.ndo_open = stmmac_open,
6997 	.ndo_start_xmit = stmmac_xmit,
6998 	.ndo_stop = stmmac_release,
6999 	.ndo_change_mtu = stmmac_change_mtu,
7000 	.ndo_fix_features = stmmac_fix_features,
7001 	.ndo_set_features = stmmac_set_features,
7002 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7003 	.ndo_tx_timeout = stmmac_tx_timeout,
7004 	.ndo_eth_ioctl = stmmac_ioctl,
7005 	.ndo_get_stats64 = stmmac_get_stats64,
7006 	.ndo_setup_tc = stmmac_setup_tc,
7007 	.ndo_select_queue = stmmac_select_queue,
7008 	.ndo_set_mac_address = stmmac_set_mac_address,
7009 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7010 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7011 	.ndo_bpf = stmmac_bpf,
7012 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7013 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7014 };
7015 
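/* Restart the interface when a reset has been requested (STMMAC_RESET_REQUESTED)
 * and the interface is not already down: under the rtnl lock, the device is
 * closed and re-opened to bring the HW back to a sane state.
 */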
7016 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7017 {
7018 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7019 		return;
7020 	if (test_bit(STMMAC_DOWN, &priv->state))
7021 		return;
7022 
7023 	netdev_err(priv->dev, "Reset adapter.\n");
7024 
7025 	rtnl_lock();
7026 	netif_trans_update(priv->dev);
7027 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7028 		usleep_range(1000, 2000);
7029 
7030 	set_bit(STMMAC_DOWN, &priv->state);
7031 	dev_close(priv->dev);
7032 	dev_open(priv->dev, NULL);
7033 	clear_bit(STMMAC_DOWN, &priv->state);
7034 	clear_bit(STMMAC_RESETING, &priv->state);
7035 	rtnl_unlock();
7036 }
7037 
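/* Deferred service work (see INIT_WORK() in stmmac_dvr_probe()); currently it
 * only runs the reset subtask and then clears STMMAC_SERVICE_SCHED.
 */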
7038 static void stmmac_service_task(struct work_struct *work)
7039 {
7040 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7041 			service_task);
7042 
7043 	stmmac_reset_subtask(priv);
7044 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7045 }
7046 
7047 /**
7048  *  stmmac_hw_init - Init the MAC device
7049  *  @priv: driver private structure
7050  *  Description: this function configures the MAC device according to the
7051  *  platform parameters and the HW capability register. It prepares the
7052  *  driver to use either ring or chain mode and to set up either enhanced or
7053  *  normal descriptors.
7054  */
7055 static int stmmac_hw_init(struct stmmac_priv *priv)
7056 {
7057 	int ret;
7058 
7059 	/* dwmac-sun8i only works in chain mode */
7060 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7061 		chain_mode = 1;
7062 	priv->chain_mode = chain_mode;
7063 
7064 	/* Initialize HW Interface */
7065 	ret = stmmac_hwif_init(priv);
7066 	if (ret)
7067 		return ret;
7068 
7069 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7070 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7071 	if (priv->hw_cap_support) {
7072 		dev_info(priv->device, "DMA HW capability register supported\n");
7073 
7074 		/* Some gmac/dma configuration fields that are passed through
7075 		 * the platform (e.g. enh_desc, tx_coe) can be overridden with
7076 		 * the values from the HW capability register (if supported).
7077 		 */
7079 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7080 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7081 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7082 		priv->hw->pmt = priv->plat->pmt;
7083 		if (priv->dma_cap.hash_tb_sz) {
7084 			priv->hw->multicast_filter_bins =
7085 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7086 			priv->hw->mcast_bits_log2 =
7087 					ilog2(priv->hw->multicast_filter_bins);
7088 		}
7089 
7090 		/* TXCOE doesn't work in thresh DMA mode */
7091 		if (priv->plat->force_thresh_dma_mode)
7092 			priv->plat->tx_coe = 0;
7093 		else
7094 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7095 
7096 		/* For GMAC4, rx_coe comes from the HW capability register. */
7097 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7098 
7099 		if (priv->dma_cap.rx_coe_type2)
7100 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7101 		else if (priv->dma_cap.rx_coe_type1)
7102 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7103 
7104 	} else {
7105 		dev_info(priv->device, "No HW DMA feature register supported\n");
7106 	}
7107 
7108 	if (priv->plat->rx_coe) {
7109 		priv->hw->rx_csum = priv->plat->rx_coe;
7110 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7111 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7112 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7113 	}
7114 	if (priv->plat->tx_coe)
7115 		dev_info(priv->device, "TX Checksum insertion supported\n");
7116 
7117 	if (priv->plat->pmt) {
7118 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7119 		device_set_wakeup_capable(priv->device, 1);
7120 	}
7121 
7122 	if (priv->dma_cap.tsoen)
7123 		dev_info(priv->device, "TSO supported\n");
7124 
7125 	priv->hw->vlan_fail_q_en =
7126 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7127 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7128 
7129 	/* Run HW quirks, if any */
7130 	if (priv->hwif_quirks) {
7131 		ret = priv->hwif_quirks(priv);
7132 		if (ret)
7133 			return ret;
7134 	}
7135 
7136 	/* Rx Watchdog is available in cores newer than 3.40. In some cases,
7137 	 * for example on buggy HW, this feature has to be disabled; this can
7138 	 * be done by passing the riwt_off field from the platform.
7139 	 */
7141 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7142 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7143 		priv->use_riwt = 1;
7144 		dev_info(priv->device,
7145 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7146 	}
7147 
7148 	return 0;
7149 }
7150 
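/* Register the per-channel NAPI instances: an RX NAPI for each RX queue,
 * a TX NAPI for each TX queue and, where a channel has both an RX and a TX
 * queue, an additional rxtx NAPI used by the XSK path (see
 * stmmac_xsk_wakeup()).
 */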
7151 static void stmmac_napi_add(struct net_device *dev)
7152 {
7153 	struct stmmac_priv *priv = netdev_priv(dev);
7154 	u32 queue, maxq;
7155 
7156 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7157 
7158 	for (queue = 0; queue < maxq; queue++) {
7159 		struct stmmac_channel *ch = &priv->channel[queue];
7160 
7161 		ch->priv_data = priv;
7162 		ch->index = queue;
7163 		spin_lock_init(&ch->lock);
7164 
7165 		if (queue < priv->plat->rx_queues_to_use) {
7166 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7167 		}
7168 		if (queue < priv->plat->tx_queues_to_use) {
7169 			netif_napi_add_tx(dev, &ch->tx_napi,
7170 					  stmmac_napi_poll_tx);
7171 		}
7172 		if (queue < priv->plat->rx_queues_to_use &&
7173 		    queue < priv->plat->tx_queues_to_use) {
7174 			netif_napi_add(dev, &ch->rxtx_napi,
7175 				       stmmac_napi_poll_rxtx);
7176 		}
7177 	}
7178 }
7179 
7180 static void stmmac_napi_del(struct net_device *dev)
7181 {
7182 	struct stmmac_priv *priv = netdev_priv(dev);
7183 	u32 queue, maxq;
7184 
7185 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7186 
7187 	for (queue = 0; queue < maxq; queue++) {
7188 		struct stmmac_channel *ch = &priv->channel[queue];
7189 
7190 		if (queue < priv->plat->rx_queues_to_use)
7191 			netif_napi_del(&ch->rx_napi);
7192 		if (queue < priv->plat->tx_queues_to_use)
7193 			netif_napi_del(&ch->tx_napi);
7194 		if (queue < priv->plat->rx_queues_to_use &&
7195 		    queue < priv->plat->tx_queues_to_use) {
7196 			netif_napi_del(&ch->rxtx_napi);
7197 		}
7198 	}
7199 }
7200 
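/* Reconfigure the number of RX/TX queues in use (e.g. via ethtool channels):
 * stop the interface if it is running, rebuild the NAPI instances for the new
 * queue counts, re-initialize the RSS indirection table unless the user has
 * configured one, and re-open the interface.
 */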
7201 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7202 {
7203 	struct stmmac_priv *priv = netdev_priv(dev);
7204 	int ret = 0, i;
7205 
7206 	if (netif_running(dev))
7207 		stmmac_release(dev);
7208 
7209 	stmmac_napi_del(dev);
7210 
7211 	priv->plat->rx_queues_to_use = rx_cnt;
7212 	priv->plat->tx_queues_to_use = tx_cnt;
7213 	if (!netif_is_rxfh_configured(dev))
7214 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7215 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7216 									rx_cnt);
7217 
7218 	stmmac_set_half_duplex(priv);
7219 	stmmac_napi_add(dev);
7220 
7221 	if (netif_running(dev))
7222 		ret = stmmac_open(dev);
7223 
7224 	return ret;
7225 }
7226 
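/* Reconfigure the RX/TX descriptor ring sizes (e.g. via ethtool ring
 * parameters): stop the interface if it is running, update the sizes in
 * dma_conf and re-open so the rings are re-allocated with the new sizes.
 */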
7227 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7228 {
7229 	struct stmmac_priv *priv = netdev_priv(dev);
7230 	int ret = 0;
7231 
7232 	if (netif_running(dev))
7233 		stmmac_release(dev);
7234 
7235 	priv->dma_conf.dma_rx_size = rx_size;
7236 	priv->dma_conf.dma_tx_size = tx_size;
7237 
7238 	if (netif_running(dev))
7239 		ret = stmmac_open(dev);
7240 
7241 	return ret;
7242 }
7243 
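/* Frame Preemption (FPE) link-partner handshake task: poll for up to
 * 20 * 500ms; while the handshake is enabled, keep sending verify mPackets
 * until both the local and link-partner state machines report ENTERING_ON,
 * then program the FPE configuration and mark both sides ON.
 */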
7244 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7245 static void stmmac_fpe_lp_task(struct work_struct *work)
7246 {
7247 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7248 						fpe_task);
7249 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7250 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7251 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7252 	bool *hs_enable = &fpe_cfg->hs_enable;
7253 	bool *enable = &fpe_cfg->enable;
7254 	int retries = 20;
7255 
7256 	while (retries-- > 0) {
7257 		/* Bail out immediately if FPE handshake is OFF */
7258 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7259 			break;
7260 
7261 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7262 		    *lp_state == FPE_STATE_ENTERING_ON) {
7263 			stmmac_fpe_configure(priv, priv->ioaddr,
7264 					     fpe_cfg,
7265 					     priv->plat->tx_queues_to_use,
7266 					     priv->plat->rx_queues_to_use,
7267 					     *enable);
7268 
7269 			netdev_info(priv->dev, "configured FPE\n");
7270 
7271 			*lo_state = FPE_STATE_ON;
7272 			*lp_state = FPE_STATE_ON;
7273 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7274 			break;
7275 		}
7276 
7277 		if ((*lo_state == FPE_STATE_CAPABLE ||
7278 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7279 		     *lp_state != FPE_STATE_ON) {
7280 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7281 				    *lo_state, *lp_state);
7282 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7283 						fpe_cfg,
7284 						MPACKET_VERIFY);
7285 		}
7286 		/* Sleep then retry */
7287 		msleep(500);
7288 	}
7289 
7290 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7291 }
7292 
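/* Enable or disable the FPE handshake: when enabling, kick it off by sending
 * a verify mPacket to the link partner; when disabling, force both FPE state
 * machines back to OFF.
 */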
7293 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7294 {
7295 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7296 		if (enable) {
7297 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7298 						priv->plat->fpe_cfg,
7299 						MPACKET_VERIFY);
7300 		} else {
7301 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7302 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7303 		}
7304 
7305 		priv->plat->fpe_cfg->hs_enable = enable;
7306 	}
7307 }
7308 
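/* XDP RX metadata hook (xmo_rx_timestamp): report the HW RX timestamp of the
 * current frame to an XDP program, if RX timestamping is enabled and a valid
 * timestamp is present in the descriptor (the context descriptor on GMAC4 and
 * XGMAC).
 */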
7309 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7310 {
7311 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7312 	struct dma_desc *desc_contains_ts = ctx->desc;
7313 	struct stmmac_priv *priv = ctx->priv;
7314 	struct dma_desc *ndesc = ctx->ndesc;
7315 	struct dma_desc *desc = ctx->desc;
7316 	u64 ns = 0;
7317 
7318 	if (!priv->hwts_rx_en)
7319 		return -ENODATA;
7320 
7321 	/* For GMAC4, the valid timestamp comes from the CTX next descriptor. */
7322 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7323 		desc_contains_ts = ndesc;
7324 
7325 	/* Check if timestamp is available */
7326 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7327 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7328 		ns -= priv->plat->cdc_error_adj;
7329 		*timestamp = ns_to_ktime(ns);
7330 		return 0;
7331 	}
7332 
7333 	return -ENODATA;
7334 }
7335 
7336 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7337 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7338 };
7339 
7340 /**
7341  * stmmac_dvr_probe
7342  * @device: device pointer
7343  * @plat_dat: platform data pointer
7344  * @res: stmmac resource pointer
7345  * Description: this is the main probe function. It allocates the net_device
7346  * (devm_alloc_etherdev_mqs) and sets up the driver private structure.
7347  * Return:
7348  * 0 on success, otherwise a negative errno.
7349  */
7350 int stmmac_dvr_probe(struct device *device,
7351 		     struct plat_stmmacenet_data *plat_dat,
7352 		     struct stmmac_resources *res)
7353 {
7354 	struct net_device *ndev = NULL;
7355 	struct stmmac_priv *priv;
7356 	u32 rxq;
7357 	int i, ret = 0;
7358 
7359 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7360 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7361 	if (!ndev)
7362 		return -ENOMEM;
7363 
7364 	SET_NETDEV_DEV(ndev, device);
7365 
7366 	priv = netdev_priv(ndev);
7367 	priv->device = device;
7368 	priv->dev = ndev;
7369 
7370 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7371 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7372 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7373 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7374 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7375 	}
7376 
7377 	priv->xstats.pcpu_stats =
7378 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7379 	if (!priv->xstats.pcpu_stats)
7380 		return -ENOMEM;
7381 
7382 	stmmac_set_ethtool_ops(ndev);
7383 	priv->pause = pause;
7384 	priv->plat = plat_dat;
7385 	priv->ioaddr = res->addr;
7386 	priv->dev->base_addr = (unsigned long)res->addr;
7387 	priv->plat->dma_cfg->multi_msi_en =
7388 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7389 
7390 	priv->dev->irq = res->irq;
7391 	priv->wol_irq = res->wol_irq;
7392 	priv->lpi_irq = res->lpi_irq;
7393 	priv->sfty_ce_irq = res->sfty_ce_irq;
7394 	priv->sfty_ue_irq = res->sfty_ue_irq;
7395 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7396 		priv->rx_irq[i] = res->rx_irq[i];
7397 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7398 		priv->tx_irq[i] = res->tx_irq[i];
7399 
7400 	if (!is_zero_ether_addr(res->mac))
7401 		eth_hw_addr_set(priv->dev, res->mac);
7402 
7403 	dev_set_drvdata(device, priv->dev);
7404 
7405 	/* Verify driver arguments */
7406 	stmmac_verify_args();
7407 
7408 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7409 	if (!priv->af_xdp_zc_qps)
7410 		return -ENOMEM;
7411 
7412 	/* Allocate workqueue */
7413 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7414 	if (!priv->wq) {
7415 		dev_err(priv->device, "failed to create workqueue\n");
7416 		ret = -ENOMEM;
7417 		goto error_wq_init;
7418 	}
7419 
7420 	INIT_WORK(&priv->service_task, stmmac_service_task);
7421 
7422 	/* Initialize Link Partner FPE workqueue */
7423 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7424 
7425 	/* Override with kernel parameters if supplied. XXX CRS XXX:
7426 	 * this needs to support multiple instances.
7427 	 */
7428 	if ((phyaddr >= 0) && (phyaddr <= 31))
7429 		priv->plat->phy_addr = phyaddr;
7430 
7431 	if (priv->plat->stmmac_rst) {
7432 		ret = reset_control_assert(priv->plat->stmmac_rst);
7433 		reset_control_deassert(priv->plat->stmmac_rst);
7434 		/* Some reset controllers provide only a reset callback instead
7435 		 * of an assert + deassert callback pair.
7436 		 */
7437 		if (ret == -ENOTSUPP)
7438 			reset_control_reset(priv->plat->stmmac_rst);
7439 	}
7440 
7441 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7442 	if (ret == -ENOTSUPP)
7443 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7444 			ERR_PTR(ret));
7445 
7446 	/* Wait a bit for the reset to take effect */
7447 	udelay(10);
7448 
7449 	/* Init MAC and get the capabilities */
7450 	ret = stmmac_hw_init(priv);
7451 	if (ret)
7452 		goto error_hw_init;
7453 
7454 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch */
7456 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7457 		priv->plat->dma_cfg->dche = false;
7458 
7459 	stmmac_check_ether_addr(priv);
7460 
7461 	ndev->netdev_ops = &stmmac_netdev_ops;
7462 
7463 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7464 
7465 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7466 			    NETIF_F_RXCSUM;
7467 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7468 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7469 
7470 	ret = stmmac_tc_init(priv, priv);
7471 	if (!ret)
7472 		ndev->hw_features |= NETIF_F_HW_TC;
7474 
7475 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7476 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7477 		if (priv->plat->has_gmac4)
7478 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7479 		priv->tso = true;
7480 		dev_info(priv->device, "TSO feature enabled\n");
7481 	}
7482 
7483 	if (priv->dma_cap.sphen &&
7484 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7485 		ndev->hw_features |= NETIF_F_GRO;
7486 		priv->sph_cap = true;
7487 		priv->sph = priv->sph_cap;
7488 		dev_info(priv->device, "SPH feature enabled\n");
7489 	}
7490 
7491 	/* Ideally our host DMA address width is the same as for the
7492 	 * device. However, it may differ and then we have to use our
7493 	 * host DMA width for allocation and the device DMA width for
7494 	 * register handling.
7495 	 */
7496 	if (priv->plat->host_dma_width)
7497 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7498 	else
7499 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7500 
7501 	if (priv->dma_cap.host_dma_width) {
7502 		ret = dma_set_mask_and_coherent(device,
7503 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7504 		if (!ret) {
7505 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7506 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7507 
7508 			/*
7509 			 * If more than 32 bits can be addressed, make sure to
7510 			 * enable enhanced addressing mode.
7511 			 */
7512 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7513 				priv->plat->dma_cfg->eame = true;
7514 		} else {
7515 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7516 			if (ret) {
7517 				dev_err(priv->device, "Failed to set DMA Mask\n");
7518 				goto error_hw_init;
7519 			}
7520 
7521 			priv->dma_cap.host_dma_width = 32;
7522 		}
7523 	}
7524 
7525 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7526 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7527 #ifdef STMMAC_VLAN_TAG_USED
7528 	/* Both mac100 and gmac support receive VLAN tag detection */
7529 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7530 	if (priv->dma_cap.vlhash) {
7531 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7532 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7533 	}
7534 	if (priv->dma_cap.vlins) {
7535 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7536 		if (priv->dma_cap.dvlan)
7537 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7538 	}
7539 #endif
7540 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7541 
7542 	priv->xstats.threshold = tc;
7543 
7544 	/* Initialize RSS */
7545 	rxq = priv->plat->rx_queues_to_use;
7546 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7547 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7548 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7549 
7550 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7551 		ndev->features |= NETIF_F_RXHASH;
7552 
7553 	ndev->vlan_features |= ndev->features;
7554 	/* TSO doesn't work on VLANs yet */
7555 	ndev->vlan_features &= ~NETIF_F_TSO;
7556 
7557 	/* MTU range: 46 - hw-specific max */
7558 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7559 	if (priv->plat->has_xgmac)
7560 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7561 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7562 		ndev->max_mtu = JUMBO_LEN;
7563 	else
7564 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7565 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu, or if
7566 	 * plat->maxmtu < ndev->min_mtu, which is an invalid range.
7567 	 */
7568 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7569 	    (priv->plat->maxmtu >= ndev->min_mtu))
7570 		ndev->max_mtu = priv->plat->maxmtu;
7571 	else if (priv->plat->maxmtu < ndev->min_mtu)
7572 		dev_warn(priv->device,
7573 			 "%s: warning: maxmtu has an invalid value (%d)\n",
7574 			 __func__, priv->plat->maxmtu);
7575 
7576 	if (flow_ctrl)
7577 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7578 
7579 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7580 
7581 	/* Setup channels NAPI */
7582 	stmmac_napi_add(ndev);
7583 
7584 	mutex_init(&priv->lock);
7585 
7586 	/* If a specific clk_csr value is passed from the platform, the CSR
7587 	 * Clock Range selection cannot be changed at run time and is fixed.
7588 	 * Otherwise the driver will try to set the MDC clock dynamically
7589 	 * according to the actual csr clock input.
7590 	 */
7592 	if (priv->plat->clk_csr >= 0)
7593 		priv->clk_csr = priv->plat->clk_csr;
7594 	else
7595 		stmmac_clk_csr_set(priv);
7596 
7597 	stmmac_check_pcs_mode(priv);
7598 
7599 	pm_runtime_get_noresume(device);
7600 	pm_runtime_set_active(device);
7601 	if (!pm_runtime_enabled(device))
7602 		pm_runtime_enable(device);
7603 
7604 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7605 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7606 		/* MDIO bus Registration */
7607 		ret = stmmac_mdio_register(ndev);
7608 		if (ret < 0) {
7609 			dev_err_probe(priv->device, ret,
7610 				      "%s: MDIO bus (id: %d) registration failed\n",
7611 				      __func__, priv->plat->bus_id);
7612 			goto error_mdio_register;
7613 		}
7614 	}
7615 
7616 	if (priv->plat->speed_mode_2500)
7617 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7618 
7619 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7620 		ret = stmmac_xpcs_setup(priv->mii);
7621 		if (ret)
7622 			goto error_xpcs_setup;
7623 	}
7624 
7625 	ret = stmmac_phy_setup(priv);
7626 	if (ret) {
7627 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7628 		goto error_phy_setup;
7629 	}
7630 
7631 	ret = register_netdev(ndev);
7632 	if (ret) {
7633 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7634 			__func__, ret);
7635 		goto error_netdev_register;
7636 	}
7637 
7638 #ifdef CONFIG_DEBUG_FS
7639 	stmmac_init_fs(ndev);
7640 #endif
7641 
7642 	if (priv->plat->dump_debug_regs)
7643 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7644 
7645 	/* Let pm_runtime_put() disable the clocks.
7646 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7647 	 */
7648 	pm_runtime_put(device);
7649 
7650 	return ret;
7651 
7652 error_netdev_register:
7653 	phylink_destroy(priv->phylink);
7654 error_xpcs_setup:
7655 error_phy_setup:
7656 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7657 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7658 		stmmac_mdio_unregister(ndev);
7659 error_mdio_register:
7660 	stmmac_napi_del(ndev);
7661 error_hw_init:
7662 	destroy_workqueue(priv->wq);
7663 error_wq_init:
7664 	bitmap_free(priv->af_xdp_zc_qps);
7665 
7666 	return ret;
7667 }
7668 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7669 
7670 /**
7671  * stmmac_dvr_remove
7672  * @dev: device pointer
7673  * Description: this function resets the TX/RX processes, disables the MAC
7674  * RX/TX, changes the link status and releases the DMA descriptor rings.
7675  */
7676 void stmmac_dvr_remove(struct device *dev)
7677 {
7678 	struct net_device *ndev = dev_get_drvdata(dev);
7679 	struct stmmac_priv *priv = netdev_priv(ndev);
7680 
7681 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7682 
7683 	pm_runtime_get_sync(dev);
7684 
7685 	stmmac_stop_all_dma(priv);
7686 	stmmac_mac_set(priv, priv->ioaddr, false);
7687 	netif_carrier_off(ndev);
7688 	unregister_netdev(ndev);
7689 
7690 #ifdef CONFIG_DEBUG_FS
7691 	stmmac_exit_fs(ndev);
7692 #endif
7693 	phylink_destroy(priv->phylink);
7694 	if (priv->plat->stmmac_rst)
7695 		reset_control_assert(priv->plat->stmmac_rst);
7696 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7697 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7698 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7699 		stmmac_mdio_unregister(ndev);
7700 	destroy_workqueue(priv->wq);
7701 	mutex_destroy(&priv->lock);
7702 	bitmap_free(priv->af_xdp_zc_qps);
7703 
7704 	pm_runtime_disable(dev);
7705 	pm_runtime_put_noidle(dev);
7706 }
7707 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7708 
7709 /**
7710  * stmmac_suspend - suspend callback
7711  * @dev: device pointer
7712  * Description: this function suspends the device. It is called by the
7713  * platform driver to stop the network queue, release the resources,
7714  * program the PMT register (for WoL) and clean up driver resources.
7715  */
7716 int stmmac_suspend(struct device *dev)
7717 {
7718 	struct net_device *ndev = dev_get_drvdata(dev);
7719 	struct stmmac_priv *priv = netdev_priv(ndev);
7720 	u32 chan;
7721 
7722 	if (!ndev || !netif_running(ndev))
7723 		return 0;
7724 
7725 	mutex_lock(&priv->lock);
7726 
7727 	netif_device_detach(ndev);
7728 
7729 	stmmac_disable_all_queues(priv);
7730 
7731 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7732 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7733 
7734 	if (priv->eee_enabled) {
7735 		priv->tx_path_in_lpi_mode = false;
7736 		del_timer_sync(&priv->eee_ctrl_timer);
7737 	}
7738 
7739 	/* Stop TX/RX DMA */
7740 	stmmac_stop_all_dma(priv);
7741 
7742 	if (priv->plat->serdes_powerdown)
7743 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7744 
7745 	/* Enable Power down mode by programming the PMT regs */
7746 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7747 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7748 		priv->irq_wake = 1;
7749 	} else {
7750 		stmmac_mac_set(priv, priv->ioaddr, false);
7751 		pinctrl_pm_select_sleep_state(priv->device);
7752 	}
7753 
7754 	mutex_unlock(&priv->lock);
7755 
7756 	rtnl_lock();
7757 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7758 		phylink_suspend(priv->phylink, true);
7759 	} else {
7760 		if (device_may_wakeup(priv->device))
7761 			phylink_speed_down(priv->phylink, false);
7762 		phylink_suspend(priv->phylink, false);
7763 	}
7764 	rtnl_unlock();
7765 
7766 	if (priv->dma_cap.fpesel) {
7767 		/* Disable FPE */
7768 		stmmac_fpe_configure(priv, priv->ioaddr,
7769 				     priv->plat->fpe_cfg,
7770 				     priv->plat->tx_queues_to_use,
7771 				     priv->plat->rx_queues_to_use, false);
7772 
7773 		stmmac_fpe_handshake(priv, false);
7774 		stmmac_fpe_stop_wq(priv);
7775 	}
7776 
7777 	priv->speed = SPEED_UNKNOWN;
7778 	return 0;
7779 }
7780 EXPORT_SYMBOL_GPL(stmmac_suspend);
7781 
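/* Helpers to rewind the SW ring state (used e.g. on resume): reset the RX/TX
 * ring indices (and the cached TSO MSS) and clear the BQL state of each TX
 * queue via netdev_tx_reset_queue().
 */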
7782 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7783 {
7784 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7785 
7786 	rx_q->cur_rx = 0;
7787 	rx_q->dirty_rx = 0;
7788 }
7789 
7790 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7791 {
7792 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7793 
7794 	tx_q->cur_tx = 0;
7795 	tx_q->dirty_tx = 0;
7796 	tx_q->mss = 0;
7797 
7798 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7799 }
7800 
7801 /**
7802  * stmmac_reset_queues_param - reset queue parameters
7803  * @priv: device pointer
7804  */
7805 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7806 {
7807 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7808 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7809 	u32 queue;
7810 
7811 	for (queue = 0; queue < rx_cnt; queue++)
7812 		stmmac_reset_rx_queue(priv, queue);
7813 
7814 	for (queue = 0; queue < tx_cnt; queue++)
7815 		stmmac_reset_tx_queue(priv, queue);
7816 }
7817 
7818 /**
7819  * stmmac_resume - resume callback
7820  * @dev: device pointer
7821  * Description: on resume, this function is invoked to bring the DMA and CORE
7822  * back into a usable state.
7823  */
7824 int stmmac_resume(struct device *dev)
7825 {
7826 	struct net_device *ndev = dev_get_drvdata(dev);
7827 	struct stmmac_priv *priv = netdev_priv(ndev);
7828 	int ret;
7829 
7830 	if (!netif_running(ndev))
7831 		return 0;
7832 
7833 	/* The Power Down bit in the PMT register is cleared automatically as
7834 	 * soon as a magic packet or a Wake-up frame is received. Nevertheless,
7835 	 * it's better to clear this bit manually because it can cause problems
7836 	 * when resuming from other devices (e.g. a serial console).
7837 	 */
7839 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7840 		mutex_lock(&priv->lock);
7841 		stmmac_pmt(priv, priv->hw, 0);
7842 		mutex_unlock(&priv->lock);
7843 		priv->irq_wake = 0;
7844 	} else {
7845 		pinctrl_pm_select_default_state(priv->device);
7846 		/* reset the phy so that it's ready */
7847 		if (priv->mii)
7848 			stmmac_mdio_reset(priv->mii);
7849 	}
7850 
7851 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7852 	    priv->plat->serdes_powerup) {
7853 		ret = priv->plat->serdes_powerup(ndev,
7854 						 priv->plat->bsp_priv);
7855 
7856 		if (ret < 0)
7857 			return ret;
7858 	}
7859 
7860 	rtnl_lock();
7861 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7862 		phylink_resume(priv->phylink);
7863 	} else {
7864 		phylink_resume(priv->phylink);
7865 		if (device_may_wakeup(priv->device))
7866 			phylink_speed_up(priv->phylink);
7867 	}
7868 	rtnl_unlock();
7869 
7870 	rtnl_lock();
7871 	mutex_lock(&priv->lock);
7872 
7873 	stmmac_reset_queues_param(priv);
7874 
7875 	stmmac_free_tx_skbufs(priv);
7876 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7877 
7878 	stmmac_hw_setup(ndev, false);
7879 	stmmac_init_coalesce(priv);
7880 	stmmac_set_rx_mode(ndev);
7881 
7882 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7883 
7884 	stmmac_enable_all_queues(priv);
7885 	stmmac_enable_all_dma_irq(priv);
7886 
7887 	mutex_unlock(&priv->lock);
7888 	rtnl_unlock();
7889 
7890 	netif_device_attach(ndev);
7891 
7892 	return 0;
7893 }
7894 EXPORT_SYMBOL_GPL(stmmac_resume);
7895 
7896 #ifndef MODULE
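/* Parse the "stmmaceth=" kernel command line option when the driver is
 * built-in. The argument is a comma separated list of "name:value" pairs,
 * e.g. stmmaceth=debug:16,watchdog:10000,phyaddr:1.
 */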
7897 static int __init stmmac_cmdline_opt(char *str)
7898 {
7899 	char *opt;
7900 
7901 	if (!str || !*str)
7902 		return 1;
7903 	while ((opt = strsep(&str, ",")) != NULL) {
7904 		if (!strncmp(opt, "debug:", 6)) {
7905 			if (kstrtoint(opt + 6, 0, &debug))
7906 				goto err;
7907 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7908 			if (kstrtoint(opt + 8, 0, &phyaddr))
7909 				goto err;
7910 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7911 			if (kstrtoint(opt + 7, 0, &buf_sz))
7912 				goto err;
7913 		} else if (!strncmp(opt, "tc:", 3)) {
7914 			if (kstrtoint(opt + 3, 0, &tc))
7915 				goto err;
7916 		} else if (!strncmp(opt, "watchdog:", 9)) {
7917 			if (kstrtoint(opt + 9, 0, &watchdog))
7918 				goto err;
7919 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7920 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7921 				goto err;
7922 		} else if (!strncmp(opt, "pause:", 6)) {
7923 			if (kstrtoint(opt + 6, 0, &pause))
7924 				goto err;
7925 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7926 			if (kstrtoint(opt + 10, 0, &eee_timer))
7927 				goto err;
7928 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7929 			if (kstrtoint(opt + 11, 0, &chain_mode))
7930 				goto err;
7931 		}
7932 	}
7933 	return 1;
7934 
7935 err:
7936 	pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
7937 	return 1;
7938 }
7939 
7940 __setup("stmmaceth=", stmmac_cmdline_opt);
7941 #endif /* MODULE */
7942 
7943 static int __init stmmac_init(void)
7944 {
7945 #ifdef CONFIG_DEBUG_FS
7946 	/* Create debugfs main directory if it doesn't exist yet */
7947 	if (!stmmac_fs_dir)
7948 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7949 	register_netdevice_notifier(&stmmac_notifier);
7950 #endif
7951 
7952 	return 0;
7953 }
7954 
7955 static void __exit stmmac_exit(void)
7956 {
7957 #ifdef CONFIG_DEBUG_FS
7958 	unregister_netdevice_notifier(&stmmac_notifier);
7959 	debugfs_remove_recursive(stmmac_fs_dir);
7960 #endif
7961 }
7962 
7963 module_init(stmmac_init)
7964 module_exit(stmmac_exit)
7965 
7966 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7967 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7968 MODULE_LICENSE("GPL");
7969