1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
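/*
 * Illustration of STMMAC_ALIGN() (SMP_CACHE_BYTES is platform dependent,
 * 64 is only an example value): STMMAC_ALIGN(100) first rounds up to the
 * cache line, ALIGN(100, 64) = 128, and the outer ALIGN(128, 16) leaves it
 * at 128. The outer 16-byte alignment only matters when SMP_CACHE_BYTES is
 * smaller than 16.
 */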
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
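/*
 * Example (assuming HZ = 1000, purely illustrative): STMMAC_LPI_T(1000)
 * expands to jiffies + usecs_to_jiffies(1000), i.e. a deadline roughly one
 * tick (1 ms) in the future; the argument is interpreted in microseconds.
 */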
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but the user can force use of the chain mode instead.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we cannot estimate the proper divider, as the frequency of
311 	 * clk_csr_i is not known. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
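/*
 * Example of the selection above (hypothetical clock rate): on a plain GMAC
 * (no sun8i or xgmac override), with a 75 MHz csr clock and no
 * platform-fixed clk_csr value, the 60-100 MHz range matches and
 * priv->clk_csr becomes STMMAC_CSR_60_100M.
 */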
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
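/*
 * Illustrative example of the ring arithmetic in stmmac_tx_avail() above
 * (hypothetical values): with dma_tx_size = 512, dirty_tx = 5 and
 * cur_tx = 100, the else branch gives avail = 512 - 100 + 5 - 1 = 416 free
 * descriptors. One slot is always kept unused so that a completely full
 * ring can be distinguished from an empty one.
 */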
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter LPI mode
405  * @priv: driver private structure
406  * Description: this function checks that all TX queues are idle and, if so,
407  * enters LPI mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the LPI state
433  * is active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then the MAC Transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts
468  *  the related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using PCS we cannot deal with the PHY registers at this stage,
475 	 * so we do not support extra features like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function reads the timestamp from the descriptor, performs some
534  * sanity checks and then passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function reads the received packet's timestamp from the descriptor,
577  * performs some sanity checks and then passes it to the stack.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL-specific structure that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate negative error code on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* do not timestamp any incoming packets */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL-specific structure that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * It will be rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate the default addend value:
858 	 * the formula is:
859 	 * addend = (2^32)/freq_div_ratio;
860 	 * where freq_div_ratio = 1e9 ns / sec_inc
861 	 */
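	/*
	 * Hypothetical numeric example of the formula above (illustrative
	 * values only, not taken from any particular platform): with
	 * sec_inc = 40 ns and clk_ptp_rate = 50 MHz,
	 * freq_div_ratio = 1e9 / 40 = 25,000,000 and
	 * addend = 2^32 * 25,000,000 / 50,000,000 = 2^31 (0x80000000),
	 * i.e. the accumulator overflows (advancing the sub-second counter
	 * by sec_inc) every second 20 ns input clock cycle, which again
	 * corresponds to 40 ns per increment.
	 */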
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 					MPACKET_VERIFY);
969 	} else {
970 		*lo_state = FPE_STATE_OFF;
971 		*lp_state = FPE_STATE_OFF;
972 	}
973 }
974 
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 				 unsigned int mode, phy_interface_t interface)
977 {
978 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979 
980 	stmmac_mac_set(priv, priv->ioaddr, false);
981 	priv->eee_active = false;
982 	priv->tx_lpi_enabled = false;
983 	priv->eee_enabled = stmmac_eee_init(priv);
984 	stmmac_set_eee_pls(priv, priv->hw, false);
985 
986 	if (priv->dma_cap.fpesel)
987 		stmmac_fpe_link_state_handle(priv, false);
988 }
989 
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 			       struct phy_device *phy,
992 			       unsigned int mode, phy_interface_t interface,
993 			       int speed, int duplex,
994 			       bool tx_pause, bool rx_pause)
995 {
996 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 	u32 old_ctrl, ctrl;
998 
999 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 	    priv->plat->serdes_powerup)
1001 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002 
1003 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005 
1006 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 		switch (speed) {
1008 		case SPEED_10000:
1009 			ctrl |= priv->hw->link.xgmii.speed10000;
1010 			break;
1011 		case SPEED_5000:
1012 			ctrl |= priv->hw->link.xgmii.speed5000;
1013 			break;
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.xgmii.speed2500;
1016 			break;
1017 		default:
1018 			return;
1019 		}
1020 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 		switch (speed) {
1022 		case SPEED_100000:
1023 			ctrl |= priv->hw->link.xlgmii.speed100000;
1024 			break;
1025 		case SPEED_50000:
1026 			ctrl |= priv->hw->link.xlgmii.speed50000;
1027 			break;
1028 		case SPEED_40000:
1029 			ctrl |= priv->hw->link.xlgmii.speed40000;
1030 			break;
1031 		case SPEED_25000:
1032 			ctrl |= priv->hw->link.xlgmii.speed25000;
1033 			break;
1034 		case SPEED_10000:
1035 			ctrl |= priv->hw->link.xgmii.speed10000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.speed2500;
1039 			break;
1040 		case SPEED_1000:
1041 			ctrl |= priv->hw->link.speed1000;
1042 			break;
1043 		default:
1044 			return;
1045 		}
1046 	} else {
1047 		switch (speed) {
1048 		case SPEED_2500:
1049 			ctrl |= priv->hw->link.speed2500;
1050 			break;
1051 		case SPEED_1000:
1052 			ctrl |= priv->hw->link.speed1000;
1053 			break;
1054 		case SPEED_100:
1055 			ctrl |= priv->hw->link.speed100;
1056 			break;
1057 		case SPEED_10:
1058 			ctrl |= priv->hw->link.speed10;
1059 			break;
1060 		default:
1061 			return;
1062 		}
1063 	}
1064 
1065 	priv->speed = speed;
1066 
1067 	if (priv->plat->fix_mac_speed)
1068 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069 
1070 	if (!duplex)
1071 		ctrl &= ~priv->hw->link.duplex;
1072 	else
1073 		ctrl |= priv->hw->link.duplex;
1074 
1075 	/* Flow Control operation */
1076 	if (rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_AUTO;
1078 	else if (rx_pause && !tx_pause)
1079 		priv->flow_ctrl = FLOW_RX;
1080 	else if (!rx_pause && tx_pause)
1081 		priv->flow_ctrl = FLOW_TX;
1082 	else
1083 		priv->flow_ctrl = FLOW_OFF;
1084 
1085 	stmmac_mac_flow_ctrl(priv, duplex);
1086 
1087 	if (ctrl != old_ctrl)
1088 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 
1090 	stmmac_mac_set(priv, priv->ioaddr, true);
1091 	if (phy && priv->dma_cap.eee) {
1092 		priv->eee_active =
1093 			phy_init_eee(phy, !(priv->plat->flags &
1094 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 		priv->eee_enabled = stmmac_eee_init(priv);
1096 		priv->tx_lpi_enabled = priv->eee_enabled;
1097 		stmmac_set_eee_pls(priv, priv->hw, true);
1098 	}
1099 
1100 	if (priv->dma_cap.fpesel)
1101 		stmmac_fpe_link_state_handle(priv, true);
1102 
1103 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 		stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106 
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this is to verify if the HW supports the PCS.
1118  * The Physical Coding Sublayer (PCS) is an interface that can be used when
1119  * the MAC is configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
1166 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1167 	 * manually parse it
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static int stmmac_phy_setup(struct stmmac_priv *priv)
1202 {
1203 	struct stmmac_mdio_bus_data *mdio_bus_data;
1204 	int mode = priv->plat->phy_interface;
1205 	struct fwnode_handle *fwnode;
1206 	struct phylink *phylink;
1207 	int max_speed;
1208 
1209 	priv->phylink_config.dev = &priv->dev->dev;
1210 	priv->phylink_config.type = PHYLINK_NETDEV;
1211 	priv->phylink_config.mac_managed_pm = true;
1212 
1213 	mdio_bus_data = priv->plat->mdio_bus_data;
1214 	if (mdio_bus_data)
1215 		priv->phylink_config.ovr_an_inband =
1216 			mdio_bus_data->xpcs_an_inband;
1217 
1218 	/* Set the platform/firmware specified interface mode. Note, phylink
1219 	 * deals with the PHY interface mode, not the MAC interface mode.
1220 	 */
1221 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1222 
1223 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1224 	if (priv->hw->xpcs)
1225 		xpcs_get_interfaces(priv->hw->xpcs,
1226 				    priv->phylink_config.supported_interfaces);
1227 
1228 	/* Get the MAC specific capabilities */
1229 	stmmac_mac_phylink_get_caps(priv);
1230 
1231 	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
1232 
1233 	max_speed = priv->plat->max_speed;
1234 	if (max_speed)
1235 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1236 
1237 	fwnode = priv->plat->port_node;
1238 	if (!fwnode)
1239 		fwnode = dev_fwnode(priv->device);
1240 
1241 	phylink = phylink_create(&priv->phylink_config, fwnode,
1242 				 mode, &stmmac_phylink_mac_ops);
1243 	if (IS_ERR(phylink))
1244 		return PTR_ERR(phylink);
1245 
1246 	priv->phylink = phylink;
1247 	return 0;
1248 }
1249 
1250 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1251 				    struct stmmac_dma_conf *dma_conf)
1252 {
1253 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1254 	unsigned int desc_size;
1255 	void *head_rx;
1256 	u32 queue;
1257 
1258 	/* Display RX rings */
1259 	for (queue = 0; queue < rx_cnt; queue++) {
1260 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1261 
1262 		pr_info("\tRX Queue %u rings\n", queue);
1263 
1264 		if (priv->extend_desc) {
1265 			head_rx = (void *)rx_q->dma_erx;
1266 			desc_size = sizeof(struct dma_extended_desc);
1267 		} else {
1268 			head_rx = (void *)rx_q->dma_rx;
1269 			desc_size = sizeof(struct dma_desc);
1270 		}
1271 
1272 		/* Display RX ring */
1273 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1274 				    rx_q->dma_rx_phy, desc_size);
1275 	}
1276 }
1277 
1278 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1279 				    struct stmmac_dma_conf *dma_conf)
1280 {
1281 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1282 	unsigned int desc_size;
1283 	void *head_tx;
1284 	u32 queue;
1285 
1286 	/* Display TX rings */
1287 	for (queue = 0; queue < tx_cnt; queue++) {
1288 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1289 
1290 		pr_info("\tTX Queue %d rings\n", queue);
1291 
1292 		if (priv->extend_desc) {
1293 			head_tx = (void *)tx_q->dma_etx;
1294 			desc_size = sizeof(struct dma_extended_desc);
1295 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1296 			head_tx = (void *)tx_q->dma_entx;
1297 			desc_size = sizeof(struct dma_edesc);
1298 		} else {
1299 			head_tx = (void *)tx_q->dma_tx;
1300 			desc_size = sizeof(struct dma_desc);
1301 		}
1302 
1303 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1304 				    tx_q->dma_tx_phy, desc_size);
1305 	}
1306 }
1307 
1308 static void stmmac_display_rings(struct stmmac_priv *priv,
1309 				 struct stmmac_dma_conf *dma_conf)
1310 {
1311 	/* Display RX ring */
1312 	stmmac_display_rx_rings(priv, dma_conf);
1313 
1314 	/* Display TX ring */
1315 	stmmac_display_tx_rings(priv, dma_conf);
1316 }
1317 
1318 static int stmmac_set_bfsize(int mtu, int bufsize)
1319 {
1320 	int ret = bufsize;
1321 
1322 	if (mtu >= BUF_SIZE_8KiB)
1323 		ret = BUF_SIZE_16KiB;
1324 	else if (mtu >= BUF_SIZE_4KiB)
1325 		ret = BUF_SIZE_8KiB;
1326 	else if (mtu >= BUF_SIZE_2KiB)
1327 		ret = BUF_SIZE_4KiB;
1328 	else if (mtu > DEFAULT_BUFSIZE)
1329 		ret = BUF_SIZE_2KiB;
1330 	else
1331 		ret = DEFAULT_BUFSIZE;
1332 
1333 	return ret;
1334 }
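/*
 * Usage example for stmmac_set_bfsize() (hypothetical MTU values): an MTU
 * of 1500 keeps DEFAULT_BUFSIZE (1536 bytes), an MTU of 3000 selects
 * BUF_SIZE_4KiB, and any MTU of BUF_SIZE_8KiB or more selects
 * BUF_SIZE_16KiB.
 */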
1335 
1336 /**
1337  * stmmac_clear_rx_descriptors - clear RX descriptors
1338  * @priv: driver private structure
1339  * @dma_conf: structure to take the dma data
1340  * @queue: RX queue index
1341  * Description: this function is called to clear the RX descriptors
1342  * whether basic or extended descriptors are used.
1343  */
1344 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1345 					struct stmmac_dma_conf *dma_conf,
1346 					u32 queue)
1347 {
1348 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1349 	int i;
1350 
1351 	/* Clear the RX descriptors */
1352 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1353 		if (priv->extend_desc)
1354 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1355 					priv->use_riwt, priv->mode,
1356 					(i == dma_conf->dma_rx_size - 1),
1357 					dma_conf->dma_buf_sz);
1358 		else
1359 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1360 					priv->use_riwt, priv->mode,
1361 					(i == dma_conf->dma_rx_size - 1),
1362 					dma_conf->dma_buf_sz);
1363 }
1364 
1365 /**
1366  * stmmac_clear_tx_descriptors - clear tx descriptors
1367  * @priv: driver private structure
1368  * @dma_conf: structure to take the dma data
1369  * @queue: TX queue index.
1370  * Description: this function is called to clear the TX descriptors
1371  * whether basic or extended descriptors are used.
1372  */
1373 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1374 					struct stmmac_dma_conf *dma_conf,
1375 					u32 queue)
1376 {
1377 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1378 	int i;
1379 
1380 	/* Clear the TX descriptors */
1381 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1382 		int last = (i == (dma_conf->dma_tx_size - 1));
1383 		struct dma_desc *p;
1384 
1385 		if (priv->extend_desc)
1386 			p = &tx_q->dma_etx[i].basic;
1387 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1388 			p = &tx_q->dma_entx[i].basic;
1389 		else
1390 			p = &tx_q->dma_tx[i];
1391 
1392 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1393 	}
1394 }
1395 
1396 /**
1397  * stmmac_clear_descriptors - clear descriptors
1398  * @priv: driver private structure
1399  * @dma_conf: structure to take the dma data
1400  * Description: this function is called to clear the TX and RX descriptors
1401  * whether basic or extended descriptors are used.
1402  */
1403 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1404 				     struct stmmac_dma_conf *dma_conf)
1405 {
1406 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1407 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1408 	u32 queue;
1409 
1410 	/* Clear the RX descriptors */
1411 	for (queue = 0; queue < rx_queue_cnt; queue++)
1412 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1413 
1414 	/* Clear the TX descriptors */
1415 	for (queue = 0; queue < tx_queue_cnt; queue++)
1416 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1417 }
1418 
1419 /**
1420  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1421  * @priv: driver private structure
1422  * @dma_conf: structure to take the dma data
1423  * @p: descriptor pointer
1424  * @i: descriptor index
1425  * @flags: gfp flag
1426  * @queue: RX queue index
1427  * Description: this function is called to allocate a receive buffer, perform
1428  * the DMA mapping and init the descriptor.
1429  */
1430 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1431 				  struct stmmac_dma_conf *dma_conf,
1432 				  struct dma_desc *p,
1433 				  int i, gfp_t flags, u32 queue)
1434 {
1435 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1436 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1437 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1438 
1439 	if (priv->dma_cap.host_dma_width <= 32)
1440 		gfp |= GFP_DMA32;
1441 
1442 	if (!buf->page) {
1443 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1444 		if (!buf->page)
1445 			return -ENOMEM;
1446 		buf->page_offset = stmmac_rx_offset(priv);
1447 	}
1448 
1449 	if (priv->sph && !buf->sec_page) {
1450 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1451 		if (!buf->sec_page)
1452 			return -ENOMEM;
1453 
1454 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1455 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1456 	} else {
1457 		buf->sec_page = NULL;
1458 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1459 	}
1460 
1461 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1462 
1463 	stmmac_set_desc_addr(priv, p, buf->addr);
1464 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1465 		stmmac_init_desc3(priv, p);
1466 
1467 	return 0;
1468 }
1469 
1470 /**
1471  * stmmac_free_rx_buffer - free RX dma buffers
1472  * @priv: private structure
1473  * @rx_q: RX queue
1474  * @i: buffer index.
1475  */
1476 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1477 				  struct stmmac_rx_queue *rx_q,
1478 				  int i)
1479 {
1480 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1481 
1482 	if (buf->page)
1483 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1484 	buf->page = NULL;
1485 
1486 	if (buf->sec_page)
1487 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1488 	buf->sec_page = NULL;
1489 }
1490 
1491 /**
1492  * stmmac_free_tx_buffer - free TX dma buffers
1493  * @priv: private structure
1494  * @dma_conf: structure to take the dma data
1495  * @queue: TX queue index
1496  * @i: buffer index.
1497  */
1498 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1499 				  struct stmmac_dma_conf *dma_conf,
1500 				  u32 queue, int i)
1501 {
1502 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1503 
1504 	if (tx_q->tx_skbuff_dma[i].buf &&
1505 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1506 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1507 			dma_unmap_page(priv->device,
1508 				       tx_q->tx_skbuff_dma[i].buf,
1509 				       tx_q->tx_skbuff_dma[i].len,
1510 				       DMA_TO_DEVICE);
1511 		else
1512 			dma_unmap_single(priv->device,
1513 					 tx_q->tx_skbuff_dma[i].buf,
1514 					 tx_q->tx_skbuff_dma[i].len,
1515 					 DMA_TO_DEVICE);
1516 	}
1517 
1518 	if (tx_q->xdpf[i] &&
1519 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1520 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1521 		xdp_return_frame(tx_q->xdpf[i]);
1522 		tx_q->xdpf[i] = NULL;
1523 	}
1524 
1525 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1526 		tx_q->xsk_frames_done++;
1527 
1528 	if (tx_q->tx_skbuff[i] &&
1529 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1530 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1531 		tx_q->tx_skbuff[i] = NULL;
1532 	}
1533 
1534 	tx_q->tx_skbuff_dma[i].buf = 0;
1535 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1536 }
1537 
1538 /**
1539  * dma_free_rx_skbufs - free RX dma buffers
1540  * @priv: private structure
1541  * @dma_conf: structure to take the dma data
1542  * @queue: RX queue index
1543  */
1544 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1545 			       struct stmmac_dma_conf *dma_conf,
1546 			       u32 queue)
1547 {
1548 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1549 	int i;
1550 
1551 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1552 		stmmac_free_rx_buffer(priv, rx_q, i);
1553 }
1554 
1555 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1556 				   struct stmmac_dma_conf *dma_conf,
1557 				   u32 queue, gfp_t flags)
1558 {
1559 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1560 	int i;
1561 
1562 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1563 		struct dma_desc *p;
1564 		int ret;
1565 
1566 		if (priv->extend_desc)
1567 			p = &((rx_q->dma_erx + i)->basic);
1568 		else
1569 			p = rx_q->dma_rx + i;
1570 
1571 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1572 					     queue);
1573 		if (ret)
1574 			return ret;
1575 
1576 		rx_q->buf_alloc_num++;
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 /**
1583  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1584  * @priv: private structure
1585  * @dma_conf: structure to take the dma data
1586  * @queue: RX queue index
1587  */
1588 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1589 				struct stmmac_dma_conf *dma_conf,
1590 				u32 queue)
1591 {
1592 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1593 	int i;
1594 
1595 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1596 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1597 
1598 		if (!buf->xdp)
1599 			continue;
1600 
1601 		xsk_buff_free(buf->xdp);
1602 		buf->xdp = NULL;
1603 	}
1604 }
1605 
1606 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1607 				      struct stmmac_dma_conf *dma_conf,
1608 				      u32 queue)
1609 {
1610 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611 	int i;
1612 
1613 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1614 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1615 	 * use this macro to make sure there are no size violations.
1616 	 */
1617 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1618 
1619 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1620 		struct stmmac_rx_buffer *buf;
1621 		dma_addr_t dma_addr;
1622 		struct dma_desc *p;
1623 
1624 		if (priv->extend_desc)
1625 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1626 		else
1627 			p = rx_q->dma_rx + i;
1628 
1629 		buf = &rx_q->buf_pool[i];
1630 
1631 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1632 		if (!buf->xdp)
1633 			return -ENOMEM;
1634 
1635 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1636 		stmmac_set_desc_addr(priv, p, dma_addr);
1637 		rx_q->buf_alloc_num++;
1638 	}
1639 
1640 	return 0;
1641 }
1642 
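/**
 * stmmac_get_xsk_pool - get the XSK buffer pool bound to a queue
 * @priv: driver private structure
 * @queue: queue index
 * Description: returns the XSK pool registered for this queue, or NULL if
 * XDP is not enabled or no zero-copy pool is bound to the queue.
 */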
1643 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1644 {
1645 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1646 		return NULL;
1647 
1648 	return xsk_get_pool_from_qid(priv->dev, queue);
1649 }
1650 
1651 /**
1652  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1653  * @priv: driver private structure
1654  * @dma_conf: structure to take the dma data
1655  * @queue: RX queue index
1656  * @flags: gfp flag.
1657  * Description: this function initializes the DMA RX descriptors
1658  * and allocates the socket buffers. It supports the chained and ring
1659  * modes.
1660  */
1661 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1662 				    struct stmmac_dma_conf *dma_conf,
1663 				    u32 queue, gfp_t flags)
1664 {
1665 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1666 	int ret;
1667 
1668 	netif_dbg(priv, probe, priv->dev,
1669 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1670 		  (u32)rx_q->dma_rx_phy);
1671 
1672 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1673 
1674 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1675 
1676 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1677 
1678 	if (rx_q->xsk_pool) {
1679 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680 						   MEM_TYPE_XSK_BUFF_POOL,
1681 						   NULL));
1682 		netdev_info(priv->dev,
1683 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1684 			    rx_q->queue_index);
1685 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1686 	} else {
1687 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1688 						   MEM_TYPE_PAGE_POOL,
1689 						   rx_q->page_pool));
1690 		netdev_info(priv->dev,
1691 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1692 			    rx_q->queue_index);
1693 	}
1694 
1695 	if (rx_q->xsk_pool) {
1696 		/* RX XDP ZC buffer pool may not be populated, e.g.
1697 		 * xdpsock TX-only.
1698 		 */
1699 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1700 	} else {
1701 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1702 		if (ret < 0)
1703 			return -ENOMEM;
1704 	}
1705 
1706 	/* Setup the chained descriptor addresses */
1707 	if (priv->mode == STMMAC_CHAIN_MODE) {
1708 		if (priv->extend_desc)
1709 			stmmac_mode_init(priv, rx_q->dma_erx,
1710 					 rx_q->dma_rx_phy,
1711 					 dma_conf->dma_rx_size, 1);
1712 		else
1713 			stmmac_mode_init(priv, rx_q->dma_rx,
1714 					 rx_q->dma_rx_phy,
1715 					 dma_conf->dma_rx_size, 0);
1716 	}
1717 
1718 	return 0;
1719 }
1720 
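/**
 * init_dma_rx_desc_rings - init the RX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this initializes the RX ring of every queue in use and
 * releases any already allocated buffers if one queue fails.
 */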
1721 static int init_dma_rx_desc_rings(struct net_device *dev,
1722 				  struct stmmac_dma_conf *dma_conf,
1723 				  gfp_t flags)
1724 {
1725 	struct stmmac_priv *priv = netdev_priv(dev);
1726 	u32 rx_count = priv->plat->rx_queues_to_use;
1727 	int queue;
1728 	int ret;
1729 
1730 	/* RX INITIALIZATION */
1731 	netif_dbg(priv, probe, priv->dev,
1732 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1733 
1734 	for (queue = 0; queue < rx_count; queue++) {
1735 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1736 		if (ret)
1737 			goto err_init_rx_buffers;
1738 	}
1739 
1740 	return 0;
1741 
1742 err_init_rx_buffers:
1743 	while (queue >= 0) {
1744 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1745 
1746 		if (rx_q->xsk_pool)
1747 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1748 		else
1749 			dma_free_rx_skbufs(priv, dma_conf, queue);
1750 
1751 		rx_q->buf_alloc_num = 0;
1752 		rx_q->xsk_pool = NULL;
1753 
1754 		queue--;
1755 	}
1756 
1757 	return ret;
1758 }
1759 
1760 /**
1761  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1762  * @priv: driver private structure
1763  * @dma_conf: structure to take the dma data
1764  * @queue: TX queue index
1765  * Description: this function initializes the DMA TX descriptors
1766  * and clears the per-descriptor buffer bookkeeping. It supports the
1767  * chained and ring modes.
1768  */
1769 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1770 				    struct stmmac_dma_conf *dma_conf,
1771 				    u32 queue)
1772 {
1773 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1774 	int i;
1775 
1776 	netif_dbg(priv, probe, priv->dev,
1777 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1778 		  (u32)tx_q->dma_tx_phy);
1779 
1780 	/* Setup the chained descriptor addresses */
1781 	if (priv->mode == STMMAC_CHAIN_MODE) {
1782 		if (priv->extend_desc)
1783 			stmmac_mode_init(priv, tx_q->dma_etx,
1784 					 tx_q->dma_tx_phy,
1785 					 dma_conf->dma_tx_size, 1);
1786 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1787 			stmmac_mode_init(priv, tx_q->dma_tx,
1788 					 tx_q->dma_tx_phy,
1789 					 dma_conf->dma_tx_size, 0);
1790 	}
1791 
1792 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1793 
1794 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1795 		struct dma_desc *p;
1796 
1797 		if (priv->extend_desc)
1798 			p = &((tx_q->dma_etx + i)->basic);
1799 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1800 			p = &((tx_q->dma_entx + i)->basic);
1801 		else
1802 			p = tx_q->dma_tx + i;
1803 
1804 		stmmac_clear_desc(priv, p);
1805 
1806 		tx_q->tx_skbuff_dma[i].buf = 0;
1807 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1808 		tx_q->tx_skbuff_dma[i].len = 0;
1809 		tx_q->tx_skbuff_dma[i].last_segment = false;
1810 		tx_q->tx_skbuff[i] = NULL;
1811 	}
1812 
1813 	return 0;
1814 }
1815 
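/**
 * init_dma_tx_desc_rings - init the TX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: this initializes the TX ring of every queue in use.
 */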
1816 static int init_dma_tx_desc_rings(struct net_device *dev,
1817 				  struct stmmac_dma_conf *dma_conf)
1818 {
1819 	struct stmmac_priv *priv = netdev_priv(dev);
1820 	u32 tx_queue_cnt;
1821 	u32 queue;
1822 
1823 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1824 
1825 	for (queue = 0; queue < tx_queue_cnt; queue++)
1826 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1827 
1828 	return 0;
1829 }
1830 
1831 /**
1832  * init_dma_desc_rings - init the RX/TX descriptor rings
1833  * @dev: net device structure
1834  * @dma_conf: structure to take the dma data
1835  * @flags: gfp flag.
1836  * Description: this function initializes the DMA RX/TX descriptors
1837  * and allocates the socket buffers. It supports the chained and ring
1838  * modes.
1839  */
1840 static int init_dma_desc_rings(struct net_device *dev,
1841 			       struct stmmac_dma_conf *dma_conf,
1842 			       gfp_t flags)
1843 {
1844 	struct stmmac_priv *priv = netdev_priv(dev);
1845 	int ret;
1846 
1847 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1848 	if (ret)
1849 		return ret;
1850 
1851 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1852 
1853 	stmmac_clear_descriptors(priv, dma_conf);
1854 
1855 	if (netif_msg_hw(priv))
1856 		stmmac_display_rings(priv, dma_conf);
1857 
1858 	return ret;
1859 }
1860 
1861 /**
1862  * dma_free_tx_skbufs - free TX dma buffers
1863  * @priv: private structure
1864  * @dma_conf: structure to take the dma data
1865  * @queue: TX queue index
1866  */
1867 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1868 			       struct stmmac_dma_conf *dma_conf,
1869 			       u32 queue)
1870 {
1871 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1872 	int i;
1873 
1874 	tx_q->xsk_frames_done = 0;
1875 
1876 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1877 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1878 
1879 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1880 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1881 		tx_q->xsk_frames_done = 0;
1882 		tx_q->xsk_pool = NULL;
1883 	}
1884 }
1885 
1886 /**
1887  * stmmac_free_tx_skbufs - free TX skb buffers
1888  * @priv: private structure
1889  */
1890 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1891 {
1892 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1893 	u32 queue;
1894 
1895 	for (queue = 0; queue < tx_queue_cnt; queue++)
1896 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1897 }
1898 
1899 /**
1900  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1901  * @priv: private structure
1902  * @dma_conf: structure to take the dma data
1903  * @queue: RX queue index
1904  */
1905 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1906 					 struct stmmac_dma_conf *dma_conf,
1907 					 u32 queue)
1908 {
1909 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1910 
1911 	/* Release the DMA RX socket buffers */
1912 	if (rx_q->xsk_pool)
1913 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1914 	else
1915 		dma_free_rx_skbufs(priv, dma_conf, queue);
1916 
1917 	rx_q->buf_alloc_num = 0;
1918 	rx_q->xsk_pool = NULL;
1919 
1920 	/* Free DMA regions of consistent memory previously allocated */
1921 	if (!priv->extend_desc)
1922 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1923 				  sizeof(struct dma_desc),
1924 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1925 	else
1926 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1927 				  sizeof(struct dma_extended_desc),
1928 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1929 
1930 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1931 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1932 
1933 	kfree(rx_q->buf_pool);
1934 	if (rx_q->page_pool)
1935 		page_pool_destroy(rx_q->page_pool);
1936 }
1937 
1938 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1939 				       struct stmmac_dma_conf *dma_conf)
1940 {
1941 	u32 rx_count = priv->plat->rx_queues_to_use;
1942 	u32 queue;
1943 
1944 	/* Free RX queue resources */
1945 	for (queue = 0; queue < rx_count; queue++)
1946 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1947 }
1948 
1949 /**
1950  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1951  * @priv: private structure
1952  * @dma_conf: structure to take the dma data
1953  * @queue: TX queue index
1954  */
1955 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1956 					 struct stmmac_dma_conf *dma_conf,
1957 					 u32 queue)
1958 {
1959 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1960 	size_t size;
1961 	void *addr;
1962 
1963 	/* Release the DMA TX socket buffers */
1964 	dma_free_tx_skbufs(priv, dma_conf, queue);
1965 
1966 	if (priv->extend_desc) {
1967 		size = sizeof(struct dma_extended_desc);
1968 		addr = tx_q->dma_etx;
1969 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1970 		size = sizeof(struct dma_edesc);
1971 		addr = tx_q->dma_entx;
1972 	} else {
1973 		size = sizeof(struct dma_desc);
1974 		addr = tx_q->dma_tx;
1975 	}
1976 
1977 	size *= dma_conf->dma_tx_size;
1978 
1979 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1980 
1981 	kfree(tx_q->tx_skbuff_dma);
1982 	kfree(tx_q->tx_skbuff);
1983 }
1984 
1985 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1986 				       struct stmmac_dma_conf *dma_conf)
1987 {
1988 	u32 tx_count = priv->plat->tx_queues_to_use;
1989 	u32 queue;
1990 
1991 	/* Free TX queue resources */
1992 	for (queue = 0; queue < tx_count; queue++)
1993 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1994 }
1995 
1996 /**
1997  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1998  * @priv: private structure
1999  * @dma_conf: structure to take the dma data
2000  * @queue: RX queue index
2001  * Description: according to which descriptor can be used (extended or basic),
2002  * this function allocates the RX resources for the given queue: the page
2003  * pool, the driver buffer pool, the coherent descriptor ring and the XDP
2004  * RX queue info, so that the zero-copy mechanism can be used.
2005  */
2006 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2007 					 struct stmmac_dma_conf *dma_conf,
2008 					 u32 queue)
2009 {
2010 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2011 	struct stmmac_channel *ch = &priv->channel[queue];
2012 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2013 	struct page_pool_params pp_params = { 0 };
2014 	unsigned int num_pages;
2015 	unsigned int napi_id;
2016 	int ret;
2017 
2018 	rx_q->queue_index = queue;
2019 	rx_q->priv_data = priv;
2020 
2021 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2022 	pp_params.pool_size = dma_conf->dma_rx_size;
2023 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2024 	pp_params.order = ilog2(num_pages);
2025 	pp_params.nid = dev_to_node(priv->device);
2026 	pp_params.dev = priv->device;
2027 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2028 	pp_params.offset = stmmac_rx_offset(priv);
2029 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2030 
2031 	rx_q->page_pool = page_pool_create(&pp_params);
2032 	if (IS_ERR(rx_q->page_pool)) {
2033 		ret = PTR_ERR(rx_q->page_pool);
2034 		rx_q->page_pool = NULL;
2035 		return ret;
2036 	}
2037 
2038 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2039 				 sizeof(*rx_q->buf_pool),
2040 				 GFP_KERNEL);
2041 	if (!rx_q->buf_pool)
2042 		return -ENOMEM;
2043 
2044 	if (priv->extend_desc) {
2045 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2046 						   dma_conf->dma_rx_size *
2047 						   sizeof(struct dma_extended_desc),
2048 						   &rx_q->dma_rx_phy,
2049 						   GFP_KERNEL);
2050 		if (!rx_q->dma_erx)
2051 			return -ENOMEM;
2052 
2053 	} else {
2054 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2055 						  dma_conf->dma_rx_size *
2056 						  sizeof(struct dma_desc),
2057 						  &rx_q->dma_rx_phy,
2058 						  GFP_KERNEL);
2059 		if (!rx_q->dma_rx)
2060 			return -ENOMEM;
2061 	}
2062 
2063 	if (stmmac_xdp_is_enabled(priv) &&
2064 	    test_bit(queue, priv->af_xdp_zc_qps))
2065 		napi_id = ch->rxtx_napi.napi_id;
2066 	else
2067 		napi_id = ch->rx_napi.napi_id;
2068 
2069 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2070 			       rx_q->queue_index,
2071 			       napi_id);
2072 	if (ret) {
2073 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2074 		return -EINVAL;
2075 	}
2076 
2077 	return 0;
2078 }
2079 
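/**
 * alloc_dma_rx_desc_resources - alloc RX resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocates the per-queue RX resources and releases everything
 * already allocated if one queue fails.
 */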
2080 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2081 				       struct stmmac_dma_conf *dma_conf)
2082 {
2083 	u32 rx_count = priv->plat->rx_queues_to_use;
2084 	u32 queue;
2085 	int ret;
2086 
2087 	/* RX queues buffers and DMA */
2088 	for (queue = 0; queue < rx_count; queue++) {
2089 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2090 		if (ret)
2091 			goto err_dma;
2092 	}
2093 
2094 	return 0;
2095 
2096 err_dma:
2097 	free_dma_rx_desc_resources(priv, dma_conf);
2098 
2099 	return ret;
2100 }
2101 
2102 /**
2103  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2104  * @priv: private structure
2105  * @dma_conf: structure to take the dma data
2106  * @queue: TX queue index
2107  * Description: according to which descriptor can be used (extended or basic),
2108  * this function allocates the TX resources for the given queue: the
2109  * tx_skbuff and tx_skbuff_dma bookkeeping arrays and the coherent
2110  * descriptor ring.
2111  */
2112 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2113 					 struct stmmac_dma_conf *dma_conf,
2114 					 u32 queue)
2115 {
2116 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2117 	size_t size;
2118 	void *addr;
2119 
2120 	tx_q->queue_index = queue;
2121 	tx_q->priv_data = priv;
2122 
2123 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2124 				      sizeof(*tx_q->tx_skbuff_dma),
2125 				      GFP_KERNEL);
2126 	if (!tx_q->tx_skbuff_dma)
2127 		return -ENOMEM;
2128 
2129 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2130 				  sizeof(struct sk_buff *),
2131 				  GFP_KERNEL);
2132 	if (!tx_q->tx_skbuff)
2133 		return -ENOMEM;
2134 
2135 	if (priv->extend_desc)
2136 		size = sizeof(struct dma_extended_desc);
2137 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2138 		size = sizeof(struct dma_edesc);
2139 	else
2140 		size = sizeof(struct dma_desc);
2141 
2142 	size *= dma_conf->dma_tx_size;
2143 
2144 	addr = dma_alloc_coherent(priv->device, size,
2145 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2146 	if (!addr)
2147 		return -ENOMEM;
2148 
2149 	if (priv->extend_desc)
2150 		tx_q->dma_etx = addr;
2151 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152 		tx_q->dma_entx = addr;
2153 	else
2154 		tx_q->dma_tx = addr;
2155 
2156 	return 0;
2157 }
2158 
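/**
 * alloc_dma_tx_desc_resources - alloc TX resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocates the per-queue TX resources and releases everything
 * already allocated if one queue fails.
 */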
2159 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2160 				       struct stmmac_dma_conf *dma_conf)
2161 {
2162 	u32 tx_count = priv->plat->tx_queues_to_use;
2163 	u32 queue;
2164 	int ret;
2165 
2166 	/* TX queues buffers and DMA */
2167 	for (queue = 0; queue < tx_count; queue++) {
2168 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2169 		if (ret)
2170 			goto err_dma;
2171 	}
2172 
2173 	return 0;
2174 
2175 err_dma:
2176 	free_dma_tx_desc_resources(priv, dma_conf);
2177 	return ret;
2178 }
2179 
2180 /**
2181  * alloc_dma_desc_resources - alloc TX/RX resources.
2182  * @priv: private structure
2183  * @dma_conf: structure to take the dma data
2184  * Description: according to which descriptor can be used (extended or basic),
2185  * this function allocates the resources for the TX and RX paths. In case of
2186  * reception, for example, it pre-allocates the RX socket buffers in order to
2187  * allow the zero-copy mechanism.
2188  */
2189 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2190 				    struct stmmac_dma_conf *dma_conf)
2191 {
2192 	/* RX Allocation */
2193 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2194 
2195 	if (ret)
2196 		return ret;
2197 
2198 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2199 
2200 	return ret;
2201 }
2202 
2203 /**
2204  * free_dma_desc_resources - free dma desc resources
2205  * @priv: private structure
2206  * @dma_conf: structure to take the dma data
2207  */
2208 static void free_dma_desc_resources(struct stmmac_priv *priv,
2209 				    struct stmmac_dma_conf *dma_conf)
2210 {
2211 	/* Release the DMA TX socket buffers */
2212 	free_dma_tx_desc_resources(priv, dma_conf);
2213 
2214 	/* Release the DMA RX socket buffers later
2215 	 * to ensure all pending XDP_TX buffers are returned.
2216 	 */
2217 	free_dma_rx_desc_resources(priv, dma_conf);
2218 }
2219 
2220 /**
2221  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2222  *  @priv: driver private structure
2223  *  Description: It is used for enabling the rx queues in the MAC
2224  */
2225 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2226 {
2227 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2228 	int queue;
2229 	u8 mode;
2230 
2231 	for (queue = 0; queue < rx_queues_count; queue++) {
2232 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2233 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2234 	}
2235 }
2236 
2237 /**
2238  * stmmac_start_rx_dma - start RX DMA channel
2239  * @priv: driver private structure
2240  * @chan: RX channel index
2241  * Description:
2242  * This starts a RX DMA channel
2243  */
2244 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2245 {
2246 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2247 	stmmac_start_rx(priv, priv->ioaddr, chan);
2248 }
2249 
2250 /**
2251  * stmmac_start_tx_dma - start TX DMA channel
2252  * @priv: driver private structure
2253  * @chan: TX channel index
2254  * Description:
2255  * This starts a TX DMA channel
2256  */
2257 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2258 {
2259 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2260 	stmmac_start_tx(priv, priv->ioaddr, chan);
2261 }
2262 
2263 /**
2264  * stmmac_stop_rx_dma - stop RX DMA channel
2265  * @priv: driver private structure
2266  * @chan: RX channel index
2267  * Description:
2268  * This stops a RX DMA channel
2269  */
2270 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2271 {
2272 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2273 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2274 }
2275 
2276 /**
2277  * stmmac_stop_tx_dma - stop TX DMA channel
2278  * @priv: driver private structure
2279  * @chan: TX channel index
2280  * Description:
2281  * This stops a TX DMA channel
2282  */
2283 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2284 {
2285 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2286 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2287 }
2288 
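/**
 * stmmac_enable_all_dma_irq - enable RX and TX DMA interrupts on all channels
 * @priv: driver private structure
 */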
2289 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2290 {
2291 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2292 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2293 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2294 	u32 chan;
2295 
2296 	for (chan = 0; chan < dma_csr_ch; chan++) {
2297 		struct stmmac_channel *ch = &priv->channel[chan];
2298 		unsigned long flags;
2299 
2300 		spin_lock_irqsave(&ch->lock, flags);
2301 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2302 		spin_unlock_irqrestore(&ch->lock, flags);
2303 	}
2304 }
2305 
2306 /**
2307  * stmmac_start_all_dma - start all RX and TX DMA channels
2308  * @priv: driver private structure
2309  * Description:
2310  * This starts all the RX and TX DMA channels
2311  */
2312 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2313 {
2314 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2315 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2316 	u32 chan = 0;
2317 
2318 	for (chan = 0; chan < rx_channels_count; chan++)
2319 		stmmac_start_rx_dma(priv, chan);
2320 
2321 	for (chan = 0; chan < tx_channels_count; chan++)
2322 		stmmac_start_tx_dma(priv, chan);
2323 }
2324 
2325 /**
2326  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2327  * @priv: driver private structure
2328  * Description:
2329  * This stops the RX and TX DMA channels
2330  */
2331 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2332 {
2333 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2334 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2335 	u32 chan = 0;
2336 
2337 	for (chan = 0; chan < rx_channels_count; chan++)
2338 		stmmac_stop_rx_dma(priv, chan);
2339 
2340 	for (chan = 0; chan < tx_channels_count; chan++)
2341 		stmmac_stop_tx_dma(priv, chan);
2342 }
2343 
2344 /**
2345  *  stmmac_dma_operation_mode - HW DMA operation mode
2346  *  @priv: driver private structure
2347  *  Description: it is used for configuring the DMA operation mode register in
2348  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2349  */
2350 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2351 {
2352 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2353 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2354 	int rxfifosz = priv->plat->rx_fifo_size;
2355 	int txfifosz = priv->plat->tx_fifo_size;
2356 	u32 txmode = 0;
2357 	u32 rxmode = 0;
2358 	u32 chan = 0;
2359 	u8 qmode = 0;
2360 
2361 	if (rxfifosz == 0)
2362 		rxfifosz = priv->dma_cap.rx_fifo_size;
2363 	if (txfifosz == 0)
2364 		txfifosz = priv->dma_cap.tx_fifo_size;
2365 
2366 	/* Adjust for real per queue fifo size */
2367 	rxfifosz /= rx_channels_count;
2368 	txfifosz /= tx_channels_count;
2369 
2370 	if (priv->plat->force_thresh_dma_mode) {
2371 		txmode = tc;
2372 		rxmode = tc;
2373 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2374 		/*
2375 		 * In case of GMAC, SF mode can be enabled
2376 		 * to perform the TX COE in HW. This depends on:
2377 		 * 1) TX COE being actually supported
2378 		 * 2) There being no buggy Jumbo frame support
2379 		 *    that requires not inserting the csum in the TDES.
2380 		 */
2381 		txmode = SF_DMA_MODE;
2382 		rxmode = SF_DMA_MODE;
2383 		priv->xstats.threshold = SF_DMA_MODE;
2384 	} else {
2385 		txmode = tc;
2386 		rxmode = SF_DMA_MODE;
2387 	}
2388 
2389 	/* configure all channels */
2390 	for (chan = 0; chan < rx_channels_count; chan++) {
2391 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2392 		u32 buf_size;
2393 
2394 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2395 
2396 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2397 				rxfifosz, qmode);
2398 
2399 		if (rx_q->xsk_pool) {
2400 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2401 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2402 					      buf_size,
2403 					      chan);
2404 		} else {
2405 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2406 					      priv->dma_conf.dma_buf_sz,
2407 					      chan);
2408 		}
2409 	}
2410 
2411 	for (chan = 0; chan < tx_channels_count; chan++) {
2412 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2413 
2414 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2415 				txfifosz, qmode);
2416 	}
2417 }
2418 
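/**
 * stmmac_xdp_xmit_zc - transmit pending XSK (zero-copy) descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: pulls descriptors from the XSK pool and places them on the
 * TX ring, sharing the ring with the slow path. Returns true when the pool
 * was drained with budget still available.
 */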
2419 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2420 {
2421 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2422 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2423 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2424 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2425 	unsigned int entry = tx_q->cur_tx;
2426 	struct dma_desc *tx_desc = NULL;
2427 	struct xdp_desc xdp_desc;
2428 	bool work_done = true;
2429 	u32 tx_set_ic_bit = 0;
2430 
2431 	/* Avoids TX time-out as we are sharing with slow path */
2432 	txq_trans_cond_update(nq);
2433 
2434 	budget = min(budget, stmmac_tx_avail(priv, queue));
2435 
2436 	while (budget-- > 0) {
2437 		dma_addr_t dma_addr;
2438 		bool set_ic;
2439 
2440 		/* We are sharing with the slow path and stop XSK TX desc submission
2441 		 * when the available TX ring space is less than the threshold.
2442 		 */
2443 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2444 		    !netif_carrier_ok(priv->dev)) {
2445 			work_done = false;
2446 			break;
2447 		}
2448 
2449 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2450 			break;
2451 
2452 		if (likely(priv->extend_desc))
2453 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2454 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2455 			tx_desc = &tx_q->dma_entx[entry].basic;
2456 		else
2457 			tx_desc = tx_q->dma_tx + entry;
2458 
2459 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2460 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2461 
2462 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2463 
2464 		/* To return the XDP buffer to the XSK pool, we simply call
2465 		 * xsk_tx_completed(), so we don't need to fill up
2466 		 * 'buf' and 'xdpf'.
2467 		 */
2468 		tx_q->tx_skbuff_dma[entry].buf = 0;
2469 		tx_q->xdpf[entry] = NULL;
2470 
2471 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2472 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2473 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2474 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2475 
2476 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2477 
2478 		tx_q->tx_count_frames++;
2479 
2480 		if (!priv->tx_coal_frames[queue])
2481 			set_ic = false;
2482 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2483 			set_ic = true;
2484 		else
2485 			set_ic = false;
2486 
2487 		if (set_ic) {
2488 			tx_q->tx_count_frames = 0;
2489 			stmmac_set_tx_ic(priv, tx_desc);
2490 			tx_set_ic_bit++;
2491 		}
2492 
2493 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2494 				       true, priv->mode, true, true,
2495 				       xdp_desc.len);
2496 
2497 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2498 
2499 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2500 		entry = tx_q->cur_tx;
2501 	}
2502 	u64_stats_update_begin(&txq_stats->napi_syncp);
2503 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2504 	u64_stats_update_end(&txq_stats->napi_syncp);
2505 
2506 	if (tx_desc) {
2507 		stmmac_flush_tx_descriptors(priv, queue);
2508 		xsk_tx_release(pool);
2509 	}
2510 
2511 	/* Return true if both of the following conditions are met:
2512 	 *  a) TX Budget is still available
2513 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2514 	 *     pending XSK TX for transmission)
2515 	 */
2516 	return !!budget && work_done;
2517 }
2518 
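/**
 * stmmac_bump_dma_threshold - raise the DMA threshold after a TX error
 * @priv: driver private structure
 * @chan: channel index
 * Description: bumps the threshold-mode value (tc) by 64, up to 256, and
 * reprograms the DMA operation mode for the channel.
 */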
2519 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2520 {
2521 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2522 		tc += 64;
2523 
2524 		if (priv->plat->force_thresh_dma_mode)
2525 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2526 		else
2527 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2528 						      chan);
2529 
2530 		priv->xstats.threshold = tc;
2531 	}
2532 }
2533 
2534 /**
2535  * stmmac_tx_clean - to manage the transmission completion
2536  * @priv: driver private structure
2537  * @budget: napi budget limiting this function's packet handling
2538  * @queue: TX queue index
2539  * Description: it reclaims the transmit resources after transmission completes.
2540  */
2541 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2542 {
2543 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2544 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2545 	unsigned int bytes_compl = 0, pkts_compl = 0;
2546 	unsigned int entry, xmits = 0, count = 0;
2547 	u32 tx_packets = 0, tx_errors = 0;
2548 
2549 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2550 
2551 	tx_q->xsk_frames_done = 0;
2552 
2553 	entry = tx_q->dirty_tx;
2554 
2555 	/* Try to clean all TX complete frames in one shot */
2556 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2557 		struct xdp_frame *xdpf;
2558 		struct sk_buff *skb;
2559 		struct dma_desc *p;
2560 		int status;
2561 
2562 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2563 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2564 			xdpf = tx_q->xdpf[entry];
2565 			skb = NULL;
2566 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2567 			xdpf = NULL;
2568 			skb = tx_q->tx_skbuff[entry];
2569 		} else {
2570 			xdpf = NULL;
2571 			skb = NULL;
2572 		}
2573 
2574 		if (priv->extend_desc)
2575 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2576 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2577 			p = &tx_q->dma_entx[entry].basic;
2578 		else
2579 			p = tx_q->dma_tx + entry;
2580 
2581 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2582 		/* Check if the descriptor is owned by the DMA */
2583 		if (unlikely(status & tx_dma_own))
2584 			break;
2585 
2586 		count++;
2587 
2588 		/* Make sure descriptor fields are read after reading
2589 		 * the own bit.
2590 		 */
2591 		dma_rmb();
2592 
2593 		/* Just consider the last segment and ...*/
2594 		if (likely(!(status & tx_not_ls))) {
2595 			/* ... verify the status error condition */
2596 			if (unlikely(status & tx_err)) {
2597 				tx_errors++;
2598 				if (unlikely(status & tx_err_bump_tc))
2599 					stmmac_bump_dma_threshold(priv, queue);
2600 			} else {
2601 				tx_packets++;
2602 			}
2603 			if (skb)
2604 				stmmac_get_tx_hwtstamp(priv, p, skb);
2605 		}
2606 
2607 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2608 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2609 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2610 				dma_unmap_page(priv->device,
2611 					       tx_q->tx_skbuff_dma[entry].buf,
2612 					       tx_q->tx_skbuff_dma[entry].len,
2613 					       DMA_TO_DEVICE);
2614 			else
2615 				dma_unmap_single(priv->device,
2616 						 tx_q->tx_skbuff_dma[entry].buf,
2617 						 tx_q->tx_skbuff_dma[entry].len,
2618 						 DMA_TO_DEVICE);
2619 			tx_q->tx_skbuff_dma[entry].buf = 0;
2620 			tx_q->tx_skbuff_dma[entry].len = 0;
2621 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2622 		}
2623 
2624 		stmmac_clean_desc3(priv, tx_q, p);
2625 
2626 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2627 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2628 
2629 		if (xdpf &&
2630 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2631 			xdp_return_frame_rx_napi(xdpf);
2632 			tx_q->xdpf[entry] = NULL;
2633 		}
2634 
2635 		if (xdpf &&
2636 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2637 			xdp_return_frame(xdpf);
2638 			tx_q->xdpf[entry] = NULL;
2639 		}
2640 
2641 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2642 			tx_q->xsk_frames_done++;
2643 
2644 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2645 			if (likely(skb)) {
2646 				pkts_compl++;
2647 				bytes_compl += skb->len;
2648 				dev_consume_skb_any(skb);
2649 				tx_q->tx_skbuff[entry] = NULL;
2650 			}
2651 		}
2652 
2653 		stmmac_release_tx_desc(priv, p, priv->mode);
2654 
2655 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2656 	}
2657 	tx_q->dirty_tx = entry;
2658 
2659 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2660 				  pkts_compl, bytes_compl);
2661 
2662 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2663 								queue))) &&
2664 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2665 
2666 		netif_dbg(priv, tx_done, priv->dev,
2667 			  "%s: restart transmit\n", __func__);
2668 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2669 	}
2670 
2671 	if (tx_q->xsk_pool) {
2672 		bool work_done;
2673 
2674 		if (tx_q->xsk_frames_done)
2675 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2676 
2677 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2678 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2679 
2680 		/* For XSK TX, we try to send as many as possible.
2681 		 * If XSK work done (XSK TX desc empty and budget still
2682 		 * available), return "budget - 1" to reenable TX IRQ.
2683 		 * Else, return "budget" to make NAPI continue polling.
2684 		 */
2685 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2686 					       STMMAC_XSK_TX_BUDGET_MAX);
2687 		if (work_done)
2688 			xmits = budget - 1;
2689 		else
2690 			xmits = budget;
2691 	}
2692 
2693 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2694 	    priv->eee_sw_timer_en) {
2695 		if (stmmac_enable_eee_mode(priv))
2696 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2697 	}
2698 
2699 	/* We still have pending packets, let's call for a new scheduling */
2700 	if (tx_q->dirty_tx != tx_q->cur_tx)
2701 		stmmac_tx_timer_arm(priv, queue);
2702 
2703 	u64_stats_update_begin(&txq_stats->napi_syncp);
2704 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2705 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2706 	u64_stats_inc(&txq_stats->napi.tx_clean);
2707 	u64_stats_update_end(&txq_stats->napi_syncp);
2708 
2709 	priv->xstats.tx_errors += tx_errors;
2710 
2711 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2712 
2713 	/* Combine decisions from TX clean and XSK TX */
2714 	return max(count, xmits);
2715 }
2716 
2717 /**
2718  * stmmac_tx_err - to manage the tx error
2719  * @priv: driver private structure
2720  * @chan: channel index
2721  * Description: it cleans the descriptors and restarts the transmission
2722  * in case of transmission errors.
2723  */
2724 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2725 {
2726 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2727 
2728 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2729 
2730 	stmmac_stop_tx_dma(priv, chan);
2731 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2732 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2733 	stmmac_reset_tx_queue(priv, chan);
2734 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2735 			    tx_q->dma_tx_phy, chan);
2736 	stmmac_start_tx_dma(priv, chan);
2737 
2738 	priv->xstats.tx_errors++;
2739 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2740 }
2741 
2742 /**
2743  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2744  *  @priv: driver private structure
2745  *  @txmode: TX operating mode
2746  *  @rxmode: RX operating mode
2747  *  @chan: channel index
2748  *  Description: it is used for configuring of the DMA operation mode in
2749  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2750  *  mode.
2751  */
2752 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2753 					  u32 rxmode, u32 chan)
2754 {
2755 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2756 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2757 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2758 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2759 	int rxfifosz = priv->plat->rx_fifo_size;
2760 	int txfifosz = priv->plat->tx_fifo_size;
2761 
2762 	if (rxfifosz == 0)
2763 		rxfifosz = priv->dma_cap.rx_fifo_size;
2764 	if (txfifosz == 0)
2765 		txfifosz = priv->dma_cap.tx_fifo_size;
2766 
2767 	/* Adjust for real per queue fifo size */
2768 	rxfifosz /= rx_channels_count;
2769 	txfifosz /= tx_channels_count;
2770 
2771 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2772 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2773 }
2774 
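/**
 * stmmac_safety_feat_interrupt - handle a safety feature interrupt
 * @priv: driver private structure
 * Description: reads the safety feature IRQ status and triggers the global
 * error recovery when an error is reported.
 */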
2775 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2776 {
2777 	int ret;
2778 
2779 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2780 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2781 	if (ret && (ret != -EINVAL)) {
2782 		stmmac_global_err(priv);
2783 		return true;
2784 	}
2785 
2786 	return false;
2787 }
2788 
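/**
 * stmmac_napi_check - check DMA status and schedule NAPI as needed
 * @priv: driver private structure
 * @chan: channel index
 * @dir: interrupt direction (RX, TX or both)
 * Description: reads the DMA interrupt status for the channel, disables the
 * corresponding DMA interrupts and schedules the RX, TX or combined NAPI.
 */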
2789 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2790 {
2791 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2792 						 &priv->xstats, chan, dir);
2793 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2794 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2795 	struct stmmac_channel *ch = &priv->channel[chan];
2796 	struct napi_struct *rx_napi;
2797 	struct napi_struct *tx_napi;
2798 	unsigned long flags;
2799 
2800 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2801 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2802 
2803 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2804 		if (napi_schedule_prep(rx_napi)) {
2805 			spin_lock_irqsave(&ch->lock, flags);
2806 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2807 			spin_unlock_irqrestore(&ch->lock, flags);
2808 			__napi_schedule(rx_napi);
2809 		}
2810 	}
2811 
2812 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2813 		if (napi_schedule_prep(tx_napi)) {
2814 			spin_lock_irqsave(&ch->lock, flags);
2815 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2816 			spin_unlock_irqrestore(&ch->lock, flags);
2817 			__napi_schedule(tx_napi);
2818 		}
2819 	}
2820 
2821 	return status;
2822 }
2823 
2824 /**
2825  * stmmac_dma_interrupt - DMA ISR
2826  * @priv: driver private structure
2827  * Description: this is the DMA ISR. It is called by the main ISR.
2828  * It calls the dwmac dma routine and schedules the poll method in case
2829  * some work can be done.
2830  */
2831 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2832 {
2833 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2834 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2835 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2836 				tx_channel_count : rx_channel_count;
2837 	u32 chan;
2838 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2839 
2840 	/* Make sure we never check beyond our status buffer. */
2841 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2842 		channels_to_check = ARRAY_SIZE(status);
2843 
2844 	for (chan = 0; chan < channels_to_check; chan++)
2845 		status[chan] = stmmac_napi_check(priv, chan,
2846 						 DMA_DIR_RXTX);
2847 
2848 	for (chan = 0; chan < tx_channel_count; chan++) {
2849 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2850 			/* Try to bump up the dma threshold on this failure */
2851 			stmmac_bump_dma_threshold(priv, chan);
2852 		} else if (unlikely(status[chan] == tx_hard_error)) {
2853 			stmmac_tx_err(priv, chan);
2854 		}
2855 	}
2856 }
2857 
2858 /**
2859  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2860  * @priv: driver private structure
2861  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2862  */
2863 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2864 {
2865 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2866 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2867 
2868 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2869 
2870 	if (priv->dma_cap.rmon) {
2871 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2872 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2873 	} else
2874 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2875 }
2876 
2877 /**
2878  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2879  * @priv: driver private structure
2880  * Description:
2881  *  new GMAC chip generations have a new register to indicate the
2882  *  presence of the optional feature/functions.
2883  *  This can also be used to override the value passed through the
2884  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2885  */
2886 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2887 {
2888 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2889 }
2890 
2891 /**
2892  * stmmac_check_ether_addr - check if the MAC addr is valid
2893  * @priv: driver private structure
2894  * Description:
2895  * it verifies whether the MAC address is valid; in case of failure it
2896  * generates a random MAC address
2897  */
2898 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2899 {
2900 	u8 addr[ETH_ALEN];
2901 
2902 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2903 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2904 		if (is_valid_ether_addr(addr))
2905 			eth_hw_addr_set(priv->dev, addr);
2906 		else
2907 			eth_hw_addr_random(priv->dev);
2908 		dev_info(priv->device, "device MAC address %pM\n",
2909 			 priv->dev->dev_addr);
2910 	}
2911 }
2912 
2913 /**
2914  * stmmac_init_dma_engine - DMA init.
2915  * @priv: driver private structure
2916  * Description:
2917  * It inits the DMA invoking the specific MAC/GMAC callback.
2918  * Some DMA parameters can be passed from the platform;
2919  * if these are not passed, a default is kept for the MAC or GMAC.
2920  */
2921 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2922 {
2923 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2924 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2925 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2926 	struct stmmac_rx_queue *rx_q;
2927 	struct stmmac_tx_queue *tx_q;
2928 	u32 chan = 0;
2929 	int atds = 0;
2930 	int ret = 0;
2931 
2932 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2933 		dev_err(priv->device, "Invalid DMA configuration\n");
2934 		return -EINVAL;
2935 	}
2936 
2937 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2938 		atds = 1;
2939 
2940 	ret = stmmac_reset(priv, priv->ioaddr);
2941 	if (ret) {
2942 		dev_err(priv->device, "Failed to reset the dma\n");
2943 		return ret;
2944 	}
2945 
2946 	/* DMA Configuration */
2947 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2948 
2949 	if (priv->plat->axi)
2950 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2951 
2952 	/* DMA CSR Channel configuration */
2953 	for (chan = 0; chan < dma_csr_ch; chan++) {
2954 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2955 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2956 	}
2957 
2958 	/* DMA RX Channel Configuration */
2959 	for (chan = 0; chan < rx_channels_count; chan++) {
2960 		rx_q = &priv->dma_conf.rx_queue[chan];
2961 
2962 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2963 				    rx_q->dma_rx_phy, chan);
2964 
2965 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2966 				     (rx_q->buf_alloc_num *
2967 				      sizeof(struct dma_desc));
2968 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2969 				       rx_q->rx_tail_addr, chan);
2970 	}
2971 
2972 	/* DMA TX Channel Configuration */
2973 	for (chan = 0; chan < tx_channels_count; chan++) {
2974 		tx_q = &priv->dma_conf.tx_queue[chan];
2975 
2976 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2977 				    tx_q->dma_tx_phy, chan);
2978 
2979 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2980 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2981 				       tx_q->tx_tail_addr, chan);
2982 	}
2983 
2984 	return ret;
2985 }
2986 
2987 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2988 {
2989 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2990 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
2991 
2992 	if (!tx_coal_timer)
2993 		return;
2994 
2995 	hrtimer_start(&tx_q->txtimer,
2996 		      STMMAC_COAL_TIMER(tx_coal_timer),
2997 		      HRTIMER_MODE_REL);
2998 }
2999 
3000 /**
3001  * stmmac_tx_timer - mitigation sw timer for tx.
3002  * @t: data pointer
3003  * Description:
3004  * This is the timer handler to directly invoke the stmmac_tx_clean.
3005  */
3006 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3007 {
3008 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3009 	struct stmmac_priv *priv = tx_q->priv_data;
3010 	struct stmmac_channel *ch;
3011 	struct napi_struct *napi;
3012 
3013 	ch = &priv->channel[tx_q->queue_index];
3014 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3015 
3016 	if (likely(napi_schedule_prep(napi))) {
3017 		unsigned long flags;
3018 
3019 		spin_lock_irqsave(&ch->lock, flags);
3020 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3021 		spin_unlock_irqrestore(&ch->lock, flags);
3022 		__napi_schedule(napi);
3023 	}
3024 
3025 	return HRTIMER_NORESTART;
3026 }
3027 
3028 /**
3029  * stmmac_init_coalesce - init mitigation options.
3030  * @priv: driver private structure
3031  * Description:
3032  * This inits the coalesce parameters: i.e. timer rate,
3033  * timer handler and default threshold used for enabling the
3034  * interrupt on completion bit.
3035  */
3036 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3037 {
3038 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3039 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3040 	u32 chan;
3041 
3042 	for (chan = 0; chan < tx_channel_count; chan++) {
3043 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3044 
3045 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3046 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3047 
3048 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3049 		tx_q->txtimer.function = stmmac_tx_timer;
3050 	}
3051 
3052 	for (chan = 0; chan < rx_channel_count; chan++)
3053 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3054 }
3055 
3056 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3057 {
3058 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3059 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3060 	u32 chan;
3061 
3062 	/* set TX ring length */
3063 	for (chan = 0; chan < tx_channels_count; chan++)
3064 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3065 				       (priv->dma_conf.dma_tx_size - 1), chan);
3066 
3067 	/* set RX ring length */
3068 	for (chan = 0; chan < rx_channels_count; chan++)
3069 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3070 				       (priv->dma_conf.dma_rx_size - 1), chan);
3071 }
3072 
3073 /**
3074  *  stmmac_set_tx_queue_weight - Set TX queue weight
3075  *  @priv: driver private structure
3076  *  Description: It is used for setting the TX queue weights
3077  */
3078 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3079 {
3080 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3081 	u32 weight;
3082 	u32 queue;
3083 
3084 	for (queue = 0; queue < tx_queues_count; queue++) {
3085 		weight = priv->plat->tx_queues_cfg[queue].weight;
3086 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3087 	}
3088 }
3089 
3090 /**
3091  *  stmmac_configure_cbs - Configure CBS in TX queue
3092  *  @priv: driver private structure
3093  *  Description: It is used for configuring CBS in AVB TX queues
3094  */
3095 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3096 {
3097 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3098 	u32 mode_to_use;
3099 	u32 queue;
3100 
3101 	/* queue 0 is reserved for legacy traffic */
3102 	for (queue = 1; queue < tx_queues_count; queue++) {
3103 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3104 		if (mode_to_use == MTL_QUEUE_DCB)
3105 			continue;
3106 
3107 		stmmac_config_cbs(priv, priv->hw,
3108 				priv->plat->tx_queues_cfg[queue].send_slope,
3109 				priv->plat->tx_queues_cfg[queue].idle_slope,
3110 				priv->plat->tx_queues_cfg[queue].high_credit,
3111 				priv->plat->tx_queues_cfg[queue].low_credit,
3112 				queue);
3113 	}
3114 }
3115 
3116 /**
3117  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3118  *  @priv: driver private structure
3119  *  Description: It is used for mapping RX queues to RX dma channels
3120  */
3121 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3122 {
3123 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3124 	u32 queue;
3125 	u32 chan;
3126 
3127 	for (queue = 0; queue < rx_queues_count; queue++) {
3128 		chan = priv->plat->rx_queues_cfg[queue].chan;
3129 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3130 	}
3131 }
3132 
3133 /**
3134  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3135  *  @priv: driver private structure
3136  *  Description: It is used for configuring the RX Queue Priority
3137  */
3138 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3139 {
3140 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3141 	u32 queue;
3142 	u32 prio;
3143 
3144 	for (queue = 0; queue < rx_queues_count; queue++) {
3145 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3146 			continue;
3147 
3148 		prio = priv->plat->rx_queues_cfg[queue].prio;
3149 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3150 	}
3151 }
3152 
3153 /**
3154  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3155  *  @priv: driver private structure
3156  *  Description: It is used for configuring the TX Queue Priority
3157  */
3158 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3159 {
3160 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3161 	u32 queue;
3162 	u32 prio;
3163 
3164 	for (queue = 0; queue < tx_queues_count; queue++) {
3165 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3166 			continue;
3167 
3168 		prio = priv->plat->tx_queues_cfg[queue].prio;
3169 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3170 	}
3171 }
3172 
3173 /**
3174  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3175  *  @priv: driver private structure
3176  *  Description: It is used for configuring the RX queue routing
3177  */
3178 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3179 {
3180 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3181 	u32 queue;
3182 	u8 packet;
3183 
3184 	for (queue = 0; queue < rx_queues_count; queue++) {
3185 		/* no specific packet type routing specified for the queue */
3186 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3187 			continue;
3188 
3189 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3190 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3191 	}
3192 }
3193 
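/**
 * stmmac_mac_config_rss - enable/disable Receive Side Scaling
 * @priv: driver private structure
 * Description: RSS is enabled only when supported by the HW, allowed by the
 * platform and NETIF_F_RXHASH is set on the netdev.
 */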
3194 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3195 {
3196 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3197 		priv->rss.enable = false;
3198 		return;
3199 	}
3200 
3201 	if (priv->dev->features & NETIF_F_RXHASH)
3202 		priv->rss.enable = true;
3203 	else
3204 		priv->rss.enable = false;
3205 
3206 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3207 			     priv->plat->rx_queues_to_use);
3208 }
3209 
3210 /**
3211  *  stmmac_mtl_configuration - Configure MTL
3212  *  @priv: driver private structure
3213  *  Description: It is used for configuring MTL
3214  */
3215 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3216 {
3217 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3218 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3219 
3220 	if (tx_queues_count > 1)
3221 		stmmac_set_tx_queue_weight(priv);
3222 
3223 	/* Configure MTL RX algorithms */
3224 	if (rx_queues_count > 1)
3225 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3226 				priv->plat->rx_sched_algorithm);
3227 
3228 	/* Configure MTL TX algorithms */
3229 	if (tx_queues_count > 1)
3230 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3231 				priv->plat->tx_sched_algorithm);
3232 
3233 	/* Configure CBS in AVB TX queues */
3234 	if (tx_queues_count > 1)
3235 		stmmac_configure_cbs(priv);
3236 
3237 	/* Map RX MTL to DMA channels */
3238 	stmmac_rx_queue_dma_chan_map(priv);
3239 
3240 	/* Enable MAC RX Queues */
3241 	stmmac_mac_enable_rx_queues(priv);
3242 
3243 	/* Set RX priorities */
3244 	if (rx_queues_count > 1)
3245 		stmmac_mac_config_rx_queues_prio(priv);
3246 
3247 	/* Set TX priorities */
3248 	if (tx_queues_count > 1)
3249 		stmmac_mac_config_tx_queues_prio(priv);
3250 
3251 	/* Set RX routing */
3252 	if (rx_queues_count > 1)
3253 		stmmac_mac_config_rx_queues_routing(priv);
3254 
3255 	/* Receive Side Scaling */
3256 	if (rx_queues_count > 1)
3257 		stmmac_mac_config_rss(priv);
3258 }
3259 
3260 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3261 {
3262 	if (priv->dma_cap.asp) {
3263 		netdev_info(priv->dev, "Enabling Safety Features\n");
3264 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3265 					  priv->plat->safety_feat_cfg);
3266 	} else {
3267 		netdev_info(priv->dev, "No Safety Features support found\n");
3268 	}
3269 }
3270 
3271 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3272 {
3273 	char *name;
3274 
3275 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3276 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3277 
3278 	name = priv->wq_name;
3279 	sprintf(name, "%s-fpe", priv->dev->name);
3280 
3281 	priv->fpe_wq = create_singlethread_workqueue(name);
3282 	if (!priv->fpe_wq) {
3283 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3284 
3285 		return -ENOMEM;
3286 	}
3287 	netdev_info(priv->dev, "FPE workqueue start");
3288 
3289 	return 0;
3290 }
3291 
3292 /**
3293  * stmmac_hw_setup - set up the MAC in a usable state.
3294  *  @dev : pointer to the device structure.
3295  *  @ptp_register: register PTP if set
3296  *  Description:
3297  *  this is the main function to set up the HW in a usable state: the
3298  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3299  *  checksum features, timers) and the DMA is ready to start receiving
3300  *  and transmitting.
3301  *  Return value:
3302  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3303  *  file on failure.
3304  */
3305 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3306 {
3307 	struct stmmac_priv *priv = netdev_priv(dev);
3308 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3309 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3310 	bool sph_en;
3311 	u32 chan;
3312 	int ret;
3313 
3314 	/* DMA initialization and SW reset */
3315 	ret = stmmac_init_dma_engine(priv);
3316 	if (ret < 0) {
3317 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3318 			   __func__);
3319 		return ret;
3320 	}
3321 
3322 	/* Copy the MAC addr into the HW  */
3323 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3324 
3325 	/* PS and related bits will be programmed according to the speed */
3326 	if (priv->hw->pcs) {
3327 		int speed = priv->plat->mac_port_sel_speed;
3328 
3329 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3330 		    (speed == SPEED_1000)) {
3331 			priv->hw->ps = speed;
3332 		} else {
3333 			dev_warn(priv->device, "invalid port speed\n");
3334 			priv->hw->ps = 0;
3335 		}
3336 	}
3337 
3338 	/* Initialize the MAC Core */
3339 	stmmac_core_init(priv, priv->hw, dev);
3340 
3341 	/* Initialize MTL */
3342 	stmmac_mtl_configuration(priv);
3343 
3344 	/* Initialize Safety Features */
3345 	stmmac_safety_feat_configuration(priv);
3346 
3347 	ret = stmmac_rx_ipc(priv, priv->hw);
3348 	if (!ret) {
3349 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3350 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3351 		priv->hw->rx_csum = 0;
3352 	}
3353 
3354 	/* Enable the MAC Rx/Tx */
3355 	stmmac_mac_set(priv, priv->ioaddr, true);
3356 
3357 	/* Set the HW DMA mode and the COE */
3358 	stmmac_dma_operation_mode(priv);
3359 
3360 	stmmac_mmc_setup(priv);
3361 
3362 	if (ptp_register) {
3363 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3364 		if (ret < 0)
3365 			netdev_warn(priv->dev,
3366 				    "failed to enable PTP reference clock: %pe\n",
3367 				    ERR_PTR(ret));
3368 	}
3369 
3370 	ret = stmmac_init_ptp(priv);
3371 	if (ret == -EOPNOTSUPP)
3372 		netdev_info(priv->dev, "PTP not supported by HW\n");
3373 	else if (ret)
3374 		netdev_warn(priv->dev, "PTP init failed\n");
3375 	else if (ptp_register)
3376 		stmmac_ptp_register(priv);
3377 
3378 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3379 
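	/* Note: the eee_timer module parameter is given in milliseconds while
	 * tx_lpi_timer is kept in microseconds, hence the conversion below; it
	 * is only applied when no LPI timer has been set explicitly (e.g. via
	 * ethtool --set-eee).
	 */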
3380 	/* Convert the timer from msec to usec */
3381 	if (!priv->tx_lpi_timer)
3382 		priv->tx_lpi_timer = eee_timer * 1000;
3383 
3384 	if (priv->use_riwt) {
3385 		u32 queue;
3386 
3387 		for (queue = 0; queue < rx_cnt; queue++) {
3388 			if (!priv->rx_riwt[queue])
3389 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3390 
3391 			stmmac_rx_watchdog(priv, priv->ioaddr,
3392 					   priv->rx_riwt[queue], queue);
3393 		}
3394 	}
3395 
3396 	if (priv->hw->pcs)
3397 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3398 
3399 	/* set TX and RX rings length */
3400 	stmmac_set_rings_length(priv);
3401 
3402 	/* Enable TSO */
3403 	if (priv->tso) {
3404 		for (chan = 0; chan < tx_cnt; chan++) {
3405 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3406 
3407 			/* TSO and TBS cannot co-exist */
3408 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3409 				continue;
3410 
3411 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3412 		}
3413 	}
3414 
3415 	/* Enable Split Header */
3416 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3417 	for (chan = 0; chan < rx_cnt; chan++)
3418 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3419 
3420 
3421 	/* VLAN Tag Insertion */
3422 	if (priv->dma_cap.vlins)
3423 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3424 
3425 	/* TBS */
3426 	for (chan = 0; chan < tx_cnt; chan++) {
3427 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3428 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3429 
3430 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3431 	}
3432 
3433 	/* Configure real RX and TX queues */
3434 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3435 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3436 
3437 	/* Start the ball rolling... */
3438 	stmmac_start_all_dma(priv);
3439 
3440 	if (priv->dma_cap.fpesel) {
3441 		stmmac_fpe_start_wq(priv);
3442 
3443 		if (priv->plat->fpe_cfg->enable)
3444 			stmmac_fpe_handshake(priv, true);
3445 	}
3446 
3447 	return 0;
3448 }
3449 
3450 static void stmmac_hw_teardown(struct net_device *dev)
3451 {
3452 	struct stmmac_priv *priv = netdev_priv(dev);
3453 
3454 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3455 }
3456 
3457 static void stmmac_free_irq(struct net_device *dev,
3458 			    enum request_irq_err irq_err, int irq_idx)
3459 {
3460 	struct stmmac_priv *priv = netdev_priv(dev);
3461 	int j;
3462 
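	/* The cases below intentionally fall through: starting from the point
	 * where the IRQ request sequence failed (or from REQ_IRQ_ERR_ALL on a
	 * normal teardown), every IRQ that was successfully requested before
	 * that point is released in reverse order.
	 */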
3463 	switch (irq_err) {
3464 	case REQ_IRQ_ERR_ALL:
3465 		irq_idx = priv->plat->tx_queues_to_use;
3466 		fallthrough;
3467 	case REQ_IRQ_ERR_TX:
3468 		for (j = irq_idx - 1; j >= 0; j--) {
3469 			if (priv->tx_irq[j] > 0) {
3470 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3471 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3472 			}
3473 		}
3474 		irq_idx = priv->plat->rx_queues_to_use;
3475 		fallthrough;
3476 	case REQ_IRQ_ERR_RX:
3477 		for (j = irq_idx - 1; j >= 0; j--) {
3478 			if (priv->rx_irq[j] > 0) {
3479 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3480 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3481 			}
3482 		}
3483 
3484 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3485 			free_irq(priv->sfty_ue_irq, dev);
3486 		fallthrough;
3487 	case REQ_IRQ_ERR_SFTY_UE:
3488 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3489 			free_irq(priv->sfty_ce_irq, dev);
3490 		fallthrough;
3491 	case REQ_IRQ_ERR_SFTY_CE:
3492 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3493 			free_irq(priv->lpi_irq, dev);
3494 		fallthrough;
3495 	case REQ_IRQ_ERR_LPI:
3496 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3497 			free_irq(priv->wol_irq, dev);
3498 		fallthrough;
3499 	case REQ_IRQ_ERR_WOL:
3500 		free_irq(dev->irq, dev);
3501 		fallthrough;
3502 	case REQ_IRQ_ERR_MAC:
3503 	case REQ_IRQ_ERR_NO:
3504 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3505 		break;
3506 	}
3507 }
3508 
3509 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3510 {
3511 	struct stmmac_priv *priv = netdev_priv(dev);
3512 	enum request_irq_err irq_err;
3513 	cpumask_t cpu_mask;
3514 	int irq_idx = 0;
3515 	char *int_name;
3516 	int ret;
3517 	int i;
3518 
3519 	/* For common interrupt */
3520 	int_name = priv->int_name_mac;
3521 	sprintf(int_name, "%s:%s", dev->name, "mac");
3522 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3523 			  0, int_name, dev);
3524 	if (unlikely(ret < 0)) {
3525 		netdev_err(priv->dev,
3526 			   "%s: alloc mac MSI %d (error: %d)\n",
3527 			   __func__, dev->irq, ret);
3528 		irq_err = REQ_IRQ_ERR_MAC;
3529 		goto irq_error;
3530 	}
3531 
3532 	/* Request the Wake IRQ in case another line
3533 	 * is used for WoL
3534 	 */
3535 	priv->wol_irq_disabled = true;
3536 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3537 		int_name = priv->int_name_wol;
3538 		sprintf(int_name, "%s:%s", dev->name, "wol");
3539 		ret = request_irq(priv->wol_irq,
3540 				  stmmac_mac_interrupt,
3541 				  0, int_name, dev);
3542 		if (unlikely(ret < 0)) {
3543 			netdev_err(priv->dev,
3544 				   "%s: alloc wol MSI %d (error: %d)\n",
3545 				   __func__, priv->wol_irq, ret);
3546 			irq_err = REQ_IRQ_ERR_WOL;
3547 			goto irq_error;
3548 		}
3549 	}
3550 
3551 	/* Request the LPI IRQ in case another line
3552 	 * is used for LPI
3553 	 */
3554 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3555 		int_name = priv->int_name_lpi;
3556 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3557 		ret = request_irq(priv->lpi_irq,
3558 				  stmmac_mac_interrupt,
3559 				  0, int_name, dev);
3560 		if (unlikely(ret < 0)) {
3561 			netdev_err(priv->dev,
3562 				   "%s: alloc lpi MSI %d (error: %d)\n",
3563 				   __func__, priv->lpi_irq, ret);
3564 			irq_err = REQ_IRQ_ERR_LPI;
3565 			goto irq_error;
3566 		}
3567 	}
3568 
3569 	/* Request the Safety Feature Correctable Error line in
3570 	 * case another line is used
3571 	 */
3572 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3573 		int_name = priv->int_name_sfty_ce;
3574 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3575 		ret = request_irq(priv->sfty_ce_irq,
3576 				  stmmac_safety_interrupt,
3577 				  0, int_name, dev);
3578 		if (unlikely(ret < 0)) {
3579 			netdev_err(priv->dev,
3580 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3581 				   __func__, priv->sfty_ce_irq, ret);
3582 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3583 			goto irq_error;
3584 		}
3585 	}
3586 
3587 	/* Request the Safety Feature Uncorrectable Error line in
3588 	 * case another line is used
3589 	 */
3590 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3591 		int_name = priv->int_name_sfty_ue;
3592 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3593 		ret = request_irq(priv->sfty_ue_irq,
3594 				  stmmac_safety_interrupt,
3595 				  0, int_name, dev);
3596 		if (unlikely(ret < 0)) {
3597 			netdev_err(priv->dev,
3598 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3599 				   __func__, priv->sfty_ue_irq, ret);
3600 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3601 			goto irq_error;
3602 		}
3603 	}
3604 
3605 	/* Request Rx MSI irq */
3606 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3607 		if (i >= MTL_MAX_RX_QUEUES)
3608 			break;
3609 		if (priv->rx_irq[i] == 0)
3610 			continue;
3611 
3612 		int_name = priv->int_name_rx_irq[i];
3613 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3614 		ret = request_irq(priv->rx_irq[i],
3615 				  stmmac_msi_intr_rx,
3616 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3617 		if (unlikely(ret < 0)) {
3618 			netdev_err(priv->dev,
3619 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3620 				   __func__, i, priv->rx_irq[i], ret);
3621 			irq_err = REQ_IRQ_ERR_RX;
3622 			irq_idx = i;
3623 			goto irq_error;
3624 		}
3625 		cpumask_clear(&cpu_mask);
3626 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3627 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3628 	}
3629 
3630 	/* Request Tx MSI irq */
3631 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3632 		if (i >= MTL_MAX_TX_QUEUES)
3633 			break;
3634 		if (priv->tx_irq[i] == 0)
3635 			continue;
3636 
3637 		int_name = priv->int_name_tx_irq[i];
3638 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3639 		ret = request_irq(priv->tx_irq[i],
3640 				  stmmac_msi_intr_tx,
3641 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3642 		if (unlikely(ret < 0)) {
3643 			netdev_err(priv->dev,
3644 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3645 				   __func__, i, priv->tx_irq[i], ret);
3646 			irq_err = REQ_IRQ_ERR_TX;
3647 			irq_idx = i;
3648 			goto irq_error;
3649 		}
3650 		cpumask_clear(&cpu_mask);
3651 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3652 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3653 	}
3654 
3655 	return 0;
3656 
3657 irq_error:
3658 	stmmac_free_irq(dev, irq_err, irq_idx);
3659 	return ret;
3660 }
3661 
3662 static int stmmac_request_irq_single(struct net_device *dev)
3663 {
3664 	struct stmmac_priv *priv = netdev_priv(dev);
3665 	enum request_irq_err irq_err;
3666 	int ret;
3667 
3668 	ret = request_irq(dev->irq, stmmac_interrupt,
3669 			  IRQF_SHARED, dev->name, dev);
3670 	if (unlikely(ret < 0)) {
3671 		netdev_err(priv->dev,
3672 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3673 			   __func__, dev->irq, ret);
3674 		irq_err = REQ_IRQ_ERR_MAC;
3675 		goto irq_error;
3676 	}
3677 
3678 	/* Request the Wake IRQ in case another line
3679 	 * is used for WoL
3680 	 */
3681 	priv->wol_irq_disabled = true;
3682 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3683 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3684 				  IRQF_SHARED, dev->name, dev);
3685 		if (unlikely(ret < 0)) {
3686 			netdev_err(priv->dev,
3687 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3688 				   __func__, priv->wol_irq, ret);
3689 			irq_err = REQ_IRQ_ERR_WOL;
3690 			goto irq_error;
3691 		}
3692 	}
3693 
3694 	/* Request the LPI IRQ in case another line is used for LPI */
3695 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3696 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3697 				  IRQF_SHARED, dev->name, dev);
3698 		if (unlikely(ret < 0)) {
3699 			netdev_err(priv->dev,
3700 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3701 				   __func__, priv->lpi_irq, ret);
3702 			irq_err = REQ_IRQ_ERR_LPI;
3703 			goto irq_error;
3704 		}
3705 	}
3706 
3707 	return 0;
3708 
3709 irq_error:
3710 	stmmac_free_irq(dev, irq_err, 0);
3711 	return ret;
3712 }
3713 
3714 static int stmmac_request_irq(struct net_device *dev)
3715 {
3716 	struct stmmac_priv *priv = netdev_priv(dev);
3717 	int ret;
3718 
3719 	/* Request the IRQ lines */
3720 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3721 		ret = stmmac_request_irq_multi_msi(dev);
3722 	else
3723 		ret = stmmac_request_irq_single(dev);
3724 
3725 	return ret;
3726 }
3727 
3728 /**
3729  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3730  *  @priv: driver private structure
3731  *  @mtu: MTU to setup the dma queue and buf with
3732  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3733  *  Allocate the Tx/Rx DMA queue and init them.
3734  *  Return value:
3735  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3736  */
3737 static struct stmmac_dma_conf *
3738 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3739 {
3740 	struct stmmac_dma_conf *dma_conf;
3741 	int chan, bfsize, ret;
3742 
3743 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3744 	if (!dma_conf) {
3745 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3746 			   __func__);
3747 		return ERR_PTR(-ENOMEM);
3748 	}
3749 
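	/* Pick the RX buffer size for this MTU: the ring-mode helper may ask
	 * for 16 KiB buffers on cores that support them (a negative or zero
	 * return means "not applicable"), otherwise fall back to a size
	 * derived from the MTU.
	 */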
3750 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3751 	if (bfsize < 0)
3752 		bfsize = 0;
3753 
3754 	if (bfsize < BUF_SIZE_16KiB)
3755 		bfsize = stmmac_set_bfsize(mtu, 0);
3756 
3757 	dma_conf->dma_buf_sz = bfsize;
3758 	/* Choose the tx/rx size from the already defined one in the
3759 	 * priv struct (if defined).
3760 	 */
3761 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3762 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3763 
3764 	if (!dma_conf->dma_tx_size)
3765 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3766 	if (!dma_conf->dma_rx_size)
3767 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3768 
3769 	/* Earlier check for TBS */
3770 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3771 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3772 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3773 
3774 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3775 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3776 	}
3777 
3778 	ret = alloc_dma_desc_resources(priv, dma_conf);
3779 	if (ret < 0) {
3780 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3781 			   __func__);
3782 		goto alloc_error;
3783 	}
3784 
3785 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3786 	if (ret < 0) {
3787 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3788 			   __func__);
3789 		goto init_error;
3790 	}
3791 
3792 	return dma_conf;
3793 
3794 init_error:
3795 	free_dma_desc_resources(priv, dma_conf);
3796 alloc_error:
3797 	kfree(dma_conf);
3798 	return ERR_PTR(ret);
3799 }
3800 
3801 /**
3802  *  __stmmac_open - open entry point of the driver
3803  *  @dev : pointer to the device structure.
3804  *  @dma_conf :  structure to take the dma data
3805  *  Description:
3806  *  This function is the open entry point of the driver.
3807  *  Return value:
3808  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3809  *  file on failure.
3810  */
3811 static int __stmmac_open(struct net_device *dev,
3812 			 struct stmmac_dma_conf *dma_conf)
3813 {
3814 	struct stmmac_priv *priv = netdev_priv(dev);
3815 	int mode = priv->plat->phy_interface;
3816 	u32 chan;
3817 	int ret;
3818 
3819 	ret = pm_runtime_resume_and_get(priv->device);
3820 	if (ret < 0)
3821 		return ret;
3822 
3823 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3824 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3825 	    (!priv->hw->xpcs ||
3826 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3827 	    !priv->hw->lynx_pcs) {
3828 		ret = stmmac_init_phy(dev);
3829 		if (ret) {
3830 			netdev_err(priv->dev,
3831 				   "%s: Cannot attach to PHY (error: %d)\n",
3832 				   __func__, ret);
3833 			goto init_phy_error;
3834 		}
3835 	}
3836 
3837 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3838 
3839 	buf_sz = dma_conf->dma_buf_sz;
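	/* Carry the TBS enablement state over from the current configuration
	 * so that a queue which had TBS enabled (e.g. by the etf qdisc) keeps
	 * it across the reopen, before the new dma_conf replaces the old one.
	 */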
3840 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3841 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3842 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3843 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3844 
3845 	stmmac_reset_queues_param(priv);
3846 
3847 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3848 	    priv->plat->serdes_powerup) {
3849 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3850 		if (ret < 0) {
3851 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3852 				   __func__);
3853 			goto init_error;
3854 		}
3855 	}
3856 
3857 	ret = stmmac_hw_setup(dev, true);
3858 	if (ret < 0) {
3859 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3860 		goto init_error;
3861 	}
3862 
3863 	stmmac_init_coalesce(priv);
3864 
3865 	phylink_start(priv->phylink);
3866 	/* We may have called phylink_speed_down before */
3867 	phylink_speed_up(priv->phylink);
3868 
3869 	ret = stmmac_request_irq(dev);
3870 	if (ret)
3871 		goto irq_error;
3872 
3873 	stmmac_enable_all_queues(priv);
3874 	netif_tx_start_all_queues(priv->dev);
3875 	stmmac_enable_all_dma_irq(priv);
3876 
3877 	return 0;
3878 
3879 irq_error:
3880 	phylink_stop(priv->phylink);
3881 
3882 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3883 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3884 
3885 	stmmac_hw_teardown(dev);
3886 init_error:
3887 	phylink_disconnect_phy(priv->phylink);
3888 init_phy_error:
3889 	pm_runtime_put(priv->device);
3890 	return ret;
3891 }
3892 
3893 static int stmmac_open(struct net_device *dev)
3894 {
3895 	struct stmmac_priv *priv = netdev_priv(dev);
3896 	struct stmmac_dma_conf *dma_conf;
3897 	int ret;
3898 
3899 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3900 	if (IS_ERR(dma_conf))
3901 		return PTR_ERR(dma_conf);
3902 
3903 	ret = __stmmac_open(dev, dma_conf);
3904 	if (ret)
3905 		free_dma_desc_resources(priv, dma_conf);
3906 
3907 	kfree(dma_conf);
3908 	return ret;
3909 }
3910 
3911 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3912 {
3913 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3914 
3915 	if (priv->fpe_wq) {
3916 		destroy_workqueue(priv->fpe_wq);
3917 		priv->fpe_wq = NULL;
3918 	}
3919 
3920 	netdev_info(priv->dev, "FPE workqueue stop");
3921 }
3922 
3923 /**
3924  *  stmmac_release - close entry point of the driver
3925  *  @dev : device pointer.
3926  *  Description:
3927  *  This is the stop entry point of the driver.
3928  */
3929 static int stmmac_release(struct net_device *dev)
3930 {
3931 	struct stmmac_priv *priv = netdev_priv(dev);
3932 	u32 chan;
3933 
3934 	if (device_may_wakeup(priv->device))
3935 		phylink_speed_down(priv->phylink, false);
3936 	/* Stop and disconnect the PHY */
3937 	phylink_stop(priv->phylink);
3938 	phylink_disconnect_phy(priv->phylink);
3939 
3940 	stmmac_disable_all_queues(priv);
3941 
3942 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3943 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3944 
3945 	netif_tx_disable(dev);
3946 
3947 	/* Free the IRQ lines */
3948 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3949 
3950 	if (priv->eee_enabled) {
3951 		priv->tx_path_in_lpi_mode = false;
3952 		del_timer_sync(&priv->eee_ctrl_timer);
3953 	}
3954 
3955 	/* Stop TX/RX DMA and clear the descriptors */
3956 	stmmac_stop_all_dma(priv);
3957 
3958 	/* Release and free the Rx/Tx resources */
3959 	free_dma_desc_resources(priv, &priv->dma_conf);
3960 
3961 	/* Disable the MAC Rx/Tx */
3962 	stmmac_mac_set(priv, priv->ioaddr, false);
3963 
3964 	/* Power down the SerDes if present */
3965 	if (priv->plat->serdes_powerdown)
3966 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3967 
3968 	netif_carrier_off(dev);
3969 
3970 	stmmac_release_ptp(priv);
3971 
3972 	pm_runtime_put(priv->device);
3973 
3974 	if (priv->dma_cap.fpesel)
3975 		stmmac_fpe_stop_wq(priv);
3976 
3977 	return 0;
3978 }
3979 
3980 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3981 			       struct stmmac_tx_queue *tx_q)
3982 {
3983 	u16 tag = 0x0, inner_tag = 0x0;
3984 	u32 inner_type = 0x0;
3985 	struct dma_desc *p;
3986 
3987 	if (!priv->dma_cap.vlins)
3988 		return false;
3989 	if (!skb_vlan_tag_present(skb))
3990 		return false;
3991 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3992 		inner_tag = skb_vlan_tag_get(skb);
3993 		inner_type = STMMAC_VLAN_INSERT;
3994 	}
3995 
3996 	tag = skb_vlan_tag_get(skb);
3997 
3998 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3999 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4000 	else
4001 		p = &tx_q->dma_tx[tx_q->cur_tx];
4002 
4003 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4004 		return false;
4005 
4006 	stmmac_set_tx_owner(priv, p);
4007 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4008 	return true;
4009 }
4010 
4011 /**
4012  *  stmmac_tso_allocator - fill TX descriptors with the payload of a TSO frame
4013  *  @priv: driver private structure
4014  *  @des: buffer start address
4015  *  @total_len: total length to fill in descriptors
4016  *  @last_segment: condition for the last descriptor
4017  *  @queue: TX queue index
4018  *  Description:
4019  *  This function fills descriptors and requests new descriptors according
4020  *  to the buffer length to fill.
4021  */
4022 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4023 				 int total_len, bool last_segment, u32 queue)
4024 {
4025 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4026 	struct dma_desc *desc;
4027 	u32 buff_size;
4028 	int tmp_len;
4029 
4030 	tmp_len = total_len;
4031 
4032 	while (tmp_len > 0) {
4033 		dma_addr_t curr_addr;
4034 
4035 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4036 						priv->dma_conf.dma_tx_size);
4037 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4038 
4039 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4040 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4041 		else
4042 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4043 
4044 		curr_addr = des + (total_len - tmp_len);
4045 		if (priv->dma_cap.addr64 <= 32)
4046 			desc->des0 = cpu_to_le32(curr_addr);
4047 		else
4048 			stmmac_set_desc_addr(priv, desc, curr_addr);
4049 
4050 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4051 			    TSO_MAX_BUFF_SIZE : tmp_len;
4052 
4053 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4054 				0, 1,
4055 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4056 				0, 0);
4057 
4058 		tmp_len -= TSO_MAX_BUFF_SIZE;
4059 	}
4060 }
4061 
4062 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4063 {
4064 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4065 	int desc_size;
4066 
4067 	if (likely(priv->extend_desc))
4068 		desc_size = sizeof(struct dma_extended_desc);
4069 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4070 		desc_size = sizeof(struct dma_edesc);
4071 	else
4072 		desc_size = sizeof(struct dma_desc);
4073 
4074 	/* The own bit must be the latest setting done when preparing the
4075 	 * descriptor and then a barrier is needed to make sure that
4076 	 * all is coherent before granting ownership to the DMA engine.
4077 	 */
4078 	wmb();
4079 
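	/* Advancing the tail pointer to just past the last prepared descriptor
	 * tells the DMA engine how far it may fetch; it is computed from the
	 * ring base address plus the current index times the descriptor size
	 * in use (normal, extended or enhanced).
	 */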
4080 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4081 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4082 }
4083 
4084 /**
4085  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4086  *  @skb : the socket buffer
4087  *  @dev : device pointer
4088  *  Description: this is the transmit function that is called on TSO frames
4089  *  (support available on GMAC4 and newer chips).
4090  *  The diagram below shows the ring programming in case of TSO frames:
4091  *
4092  *  First Descriptor
4093  *   --------
4094  *   | DES0 |---> buffer1 = L2/L3/L4 header
4095  *   | DES1 |---> TCP Payload (can continue on next descr...)
4096  *   | DES2 |---> buffer 1 and 2 len
4097  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4098  *   --------
4099  *	|
4100  *     ...
4101  *	|
4102  *   --------
4103  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4104  *   | DES1 | --|
4105  *   | DES2 | --> buffer 1 and 2 len
4106  *   | DES3 |
4107  *   --------
4108  *
4109  * MSS is fixed when TSO is enabled, so the TDES3 ctx field only needs programming when it changes.
4110  */
4111 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4112 {
4113 	struct dma_desc *desc, *first, *mss_desc = NULL;
4114 	struct stmmac_priv *priv = netdev_priv(dev);
4115 	int nfrags = skb_shinfo(skb)->nr_frags;
4116 	u32 queue = skb_get_queue_mapping(skb);
4117 	unsigned int first_entry, tx_packets;
4118 	struct stmmac_txq_stats *txq_stats;
4119 	int tmp_pay_len = 0, first_tx;
4120 	struct stmmac_tx_queue *tx_q;
4121 	bool has_vlan, set_ic;
4122 	u8 proto_hdr_len, hdr;
4123 	u32 pay_len, mss;
4124 	dma_addr_t des;
4125 	int i;
4126 
4127 	tx_q = &priv->dma_conf.tx_queue[queue];
4128 	txq_stats = &priv->xstats.txq_stats[queue];
4129 	first_tx = tx_q->cur_tx;
4130 
4131 	/* Compute header lengths */
4132 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4133 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4134 		hdr = sizeof(struct udphdr);
4135 	} else {
4136 		proto_hdr_len = skb_tcp_all_headers(skb);
4137 		hdr = tcp_hdrlen(skb);
4138 	}
4139 
4140 	/* Desc availability based on threshold should be safe enough */
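	/* The check below reserves roughly one descriptor per TSO_MAX_BUFF_SIZE
	 * (16 KiB - 1) chunk of payload: for example, a 64 KiB GSO payload
	 * yields 65536 / 16383 + 1 = 5 descriptors.
	 */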
4141 	if (unlikely(stmmac_tx_avail(priv, queue) <
4142 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4143 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4144 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4145 								queue));
4146 			/* This is a hard error, log it. */
4147 			netdev_err(priv->dev,
4148 				   "%s: Tx Ring full when queue awake\n",
4149 				   __func__);
4150 		}
4151 		return NETDEV_TX_BUSY;
4152 	}
4153 
4154 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4155 
4156 	mss = skb_shinfo(skb)->gso_size;
4157 
4158 	/* set new MSS value if needed */
4159 	if (mss != tx_q->mss) {
4160 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4161 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4162 		else
4163 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4164 
4165 		stmmac_set_mss(priv, mss_desc, mss);
4166 		tx_q->mss = mss;
4167 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4168 						priv->dma_conf.dma_tx_size);
4169 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4170 	}
4171 
4172 	if (netif_msg_tx_queued(priv)) {
4173 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4174 			__func__, hdr, proto_hdr_len, pay_len, mss);
4175 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4176 			skb->data_len);
4177 	}
4178 
4179 	/* Check if VLAN can be inserted by HW */
4180 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4181 
4182 	first_entry = tx_q->cur_tx;
4183 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4184 
4185 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4186 		desc = &tx_q->dma_entx[first_entry].basic;
4187 	else
4188 		desc = &tx_q->dma_tx[first_entry];
4189 	first = desc;
4190 
4191 	if (has_vlan)
4192 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4193 
4194 	/* first descriptor: fill Headers on Buf1 */
4195 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4196 			     DMA_TO_DEVICE);
4197 	if (dma_mapping_error(priv->device, des))
4198 		goto dma_map_err;
4199 
4200 	if (priv->dma_cap.addr64 <= 32) {
4201 		first->des0 = cpu_to_le32(des);
4202 
4203 		/* Fill start of payload in buff2 of first descriptor */
4204 		if (pay_len)
4205 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4206 
4207 		/* If needed take extra descriptors to fill the remaining payload */
4208 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4209 	} else {
4210 		stmmac_set_desc_addr(priv, first, des);
4211 		tmp_pay_len = pay_len;
4212 		des += proto_hdr_len;
4213 		pay_len = 0;
4214 	}
4215 
4216 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4217 
4218 	/* In case two or more DMA transmit descriptors are allocated for this
4219 	 * non-paged SKB data, the DMA buffer address should be saved to
4220 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4221 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4222 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4223 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4224 	 * sooner or later.
4225 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4226 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4227 	 * this DMA buffer right after the DMA engine completely finishes the
4228 	 * full buffer transmission.
4229 	 */
4230 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4231 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4232 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4233 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4234 
4235 	/* Prepare fragments */
4236 	for (i = 0; i < nfrags; i++) {
4237 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4238 
4239 		des = skb_frag_dma_map(priv->device, frag, 0,
4240 				       skb_frag_size(frag),
4241 				       DMA_TO_DEVICE);
4242 		if (dma_mapping_error(priv->device, des))
4243 			goto dma_map_err;
4244 
4245 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4246 				     (i == nfrags - 1), queue);
4247 
4248 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4249 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4250 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4251 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4252 	}
4253 
4254 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4255 
4256 	/* Only the last descriptor gets to point to the skb. */
4257 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4258 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4259 
4260 	/* Manage tx mitigation */
4261 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4262 	tx_q->tx_count_frames += tx_packets;
4263 
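	/* Decide whether this frame should raise a TX completion interrupt:
	 * always for packets being hardware timestamped, never when the
	 * tx_coal_frames threshold is zero, and otherwise whenever this burst
	 * makes the running frame count reach or cross a multiple of
	 * tx_coal_frames (the modulo test detects the wrap past the threshold).
	 */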
4264 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4265 		set_ic = true;
4266 	else if (!priv->tx_coal_frames[queue])
4267 		set_ic = false;
4268 	else if (tx_packets > priv->tx_coal_frames[queue])
4269 		set_ic = true;
4270 	else if ((tx_q->tx_count_frames %
4271 		  priv->tx_coal_frames[queue]) < tx_packets)
4272 		set_ic = true;
4273 	else
4274 		set_ic = false;
4275 
4276 	if (set_ic) {
4277 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4278 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4279 		else
4280 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4281 
4282 		tx_q->tx_count_frames = 0;
4283 		stmmac_set_tx_ic(priv, desc);
4284 	}
4285 
4286 	/* We've used all descriptors we need for this skb, however,
4287 	 * advance cur_tx so that it references a fresh descriptor.
4288 	 * ndo_start_xmit will fill this descriptor the next time it's
4289 	 * called and stmmac_tx_clean may clean up to this descriptor.
4290 	 */
4291 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4292 
4293 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4294 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4295 			  __func__);
4296 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4297 	}
4298 
4299 	u64_stats_update_begin(&txq_stats->q_syncp);
4300 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4301 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4302 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4303 	if (set_ic)
4304 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4305 	u64_stats_update_end(&txq_stats->q_syncp);
4306 
4307 	if (priv->sarc_type)
4308 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4309 
4310 	skb_tx_timestamp(skb);
4311 
4312 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4313 		     priv->hwts_tx_en)) {
4314 		/* declare that device is doing timestamping */
4315 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4316 		stmmac_enable_tx_timestamp(priv, first);
4317 	}
4318 
4319 	/* Complete the first descriptor before granting the DMA */
4320 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4321 			proto_hdr_len,
4322 			pay_len,
4323 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4324 			hdr / 4, (skb->len - proto_hdr_len));
4325 
4326 	/* If context desc is used to change MSS */
4327 	if (mss_desc) {
4328 		/* Make sure that first descriptor has been completely
4329 		 * written, including its own bit. This is because MSS is
4330 		 * actually before first descriptor, so we need to make
4331 		 * sure that MSS's own bit is the last thing written.
4332 		 */
4333 		dma_wmb();
4334 		stmmac_set_tx_owner(priv, mss_desc);
4335 	}
4336 
4337 	if (netif_msg_pktdata(priv)) {
4338 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4339 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4340 			tx_q->cur_tx, first, nfrags);
4341 		pr_info(">>> frame to be transmitted: ");
4342 		print_pkt(skb->data, skb_headlen(skb));
4343 	}
4344 
4345 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4346 
4347 	stmmac_flush_tx_descriptors(priv, queue);
4348 	stmmac_tx_timer_arm(priv, queue);
4349 
4350 	return NETDEV_TX_OK;
4351 
4352 dma_map_err:
4353 	dev_err(priv->device, "Tx dma map failed\n");
4354 	dev_kfree_skb(skb);
4355 	priv->xstats.tx_dropped++;
4356 	return NETDEV_TX_OK;
4357 }
4358 
4359 /**
4360  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4361  * @skb: socket buffer to check
4362  *
4363  * Check if a packet has an ethertype that will trigger the IP header checks
4364  * and IP/TCP checksum engine of the stmmac core.
4365  *
4366  * Return: true if the ethertype can trigger the checksum engine, false
4367  * otherwise
4368  */
4369 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4370 {
4371 	int depth = 0;
4372 	__be16 proto;
4373 
4374 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4375 				    &depth);
4376 
4377 	return (depth <= ETH_HLEN) &&
4378 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4379 }
4380 
4381 /**
4382  *  stmmac_xmit - Tx entry point of the driver
4383  *  @skb : the socket buffer
4384  *  @dev : device pointer
4385  *  Description : this is the tx entry point of the driver.
4386  *  It programs the chain or the ring and supports oversized frames
4387  *  and SG feature.
4388  */
4389 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4390 {
4391 	unsigned int first_entry, tx_packets, enh_desc;
4392 	struct stmmac_priv *priv = netdev_priv(dev);
4393 	unsigned int nopaged_len = skb_headlen(skb);
4394 	int i, csum_insertion = 0, is_jumbo = 0;
4395 	u32 queue = skb_get_queue_mapping(skb);
4396 	int nfrags = skb_shinfo(skb)->nr_frags;
4397 	int gso = skb_shinfo(skb)->gso_type;
4398 	struct stmmac_txq_stats *txq_stats;
4399 	struct dma_edesc *tbs_desc = NULL;
4400 	struct dma_desc *desc, *first;
4401 	struct stmmac_tx_queue *tx_q;
4402 	bool has_vlan, set_ic;
4403 	int entry, first_tx;
4404 	dma_addr_t des;
4405 
4406 	tx_q = &priv->dma_conf.tx_queue[queue];
4407 	txq_stats = &priv->xstats.txq_stats[queue];
4408 	first_tx = tx_q->cur_tx;
4409 
4410 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4411 		stmmac_disable_eee_mode(priv);
4412 
4413 	/* Manage oversized TCP frames for GMAC4 device */
4414 	if (skb_is_gso(skb) && priv->tso) {
4415 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4416 			return stmmac_tso_xmit(skb, dev);
4417 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4418 			return stmmac_tso_xmit(skb, dev);
4419 	}
4420 
4421 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4422 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4423 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4424 								queue));
4425 			/* This is a hard error, log it. */
4426 			netdev_err(priv->dev,
4427 				   "%s: Tx Ring full when queue awake\n",
4428 				   __func__);
4429 		}
4430 		return NETDEV_TX_BUSY;
4431 	}
4432 
4433 	/* Check if VLAN can be inserted by HW */
4434 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4435 
4436 	entry = tx_q->cur_tx;
4437 	first_entry = entry;
4438 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4439 
4440 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4441 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4442 	 * queues. In that case, checksum offloading for those queues that don't
4443  * support tx coe needs to fall back to software checksum calculation.
4444 	 *
4445 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4446 	 * also have to be checksummed in software.
4447 	 */
4448 	if (csum_insertion &&
4449 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4450 	     !stmmac_has_ip_ethertype(skb))) {
4451 		if (unlikely(skb_checksum_help(skb)))
4452 			goto dma_map_err;
4453 		csum_insertion = !csum_insertion;
4454 	}
4455 
4456 	if (likely(priv->extend_desc))
4457 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4458 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4459 		desc = &tx_q->dma_entx[entry].basic;
4460 	else
4461 		desc = tx_q->dma_tx + entry;
4462 
4463 	first = desc;
4464 
4465 	if (has_vlan)
4466 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4467 
4468 	enh_desc = priv->plat->enh_desc;
4469 	/* To program the descriptors according to the size of the frame */
4470 	if (enh_desc)
4471 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4472 
4473 	if (unlikely(is_jumbo)) {
4474 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4475 		if (unlikely(entry < 0) && (entry != -EINVAL))
4476 			goto dma_map_err;
4477 	}
4478 
4479 	for (i = 0; i < nfrags; i++) {
4480 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4481 		int len = skb_frag_size(frag);
4482 		bool last_segment = (i == (nfrags - 1));
4483 
4484 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4485 		WARN_ON(tx_q->tx_skbuff[entry]);
4486 
4487 		if (likely(priv->extend_desc))
4488 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4489 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4490 			desc = &tx_q->dma_entx[entry].basic;
4491 		else
4492 			desc = tx_q->dma_tx + entry;
4493 
4494 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4495 				       DMA_TO_DEVICE);
4496 		if (dma_mapping_error(priv->device, des))
4497 			goto dma_map_err; /* should reuse desc w/o issues */
4498 
4499 		tx_q->tx_skbuff_dma[entry].buf = des;
4500 
4501 		stmmac_set_desc_addr(priv, desc, des);
4502 
4503 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4504 		tx_q->tx_skbuff_dma[entry].len = len;
4505 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4506 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4507 
4508 		/* Prepare the descriptor and set the own bit too */
4509 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4510 				priv->mode, 1, last_segment, skb->len);
4511 	}
4512 
4513 	/* Only the last descriptor gets to point to the skb. */
4514 	tx_q->tx_skbuff[entry] = skb;
4515 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4516 
4517 	/* According to the coalesce parameter, the IC bit for the latest
4518 	 * segment is reset and the timer is re-started to clean the tx status.
4519 	 * This approach takes care of the fragments: desc is the first
4520 	 * element in case of no SG.
4521 	 */
4522 	tx_packets = (entry + 1) - first_tx;
4523 	tx_q->tx_count_frames += tx_packets;
4524 
4525 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4526 		set_ic = true;
4527 	else if (!priv->tx_coal_frames[queue])
4528 		set_ic = false;
4529 	else if (tx_packets > priv->tx_coal_frames[queue])
4530 		set_ic = true;
4531 	else if ((tx_q->tx_count_frames %
4532 		  priv->tx_coal_frames[queue]) < tx_packets)
4533 		set_ic = true;
4534 	else
4535 		set_ic = false;
4536 
4537 	if (set_ic) {
4538 		if (likely(priv->extend_desc))
4539 			desc = &tx_q->dma_etx[entry].basic;
4540 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4541 			desc = &tx_q->dma_entx[entry].basic;
4542 		else
4543 			desc = &tx_q->dma_tx[entry];
4544 
4545 		tx_q->tx_count_frames = 0;
4546 		stmmac_set_tx_ic(priv, desc);
4547 	}
4548 
4549 	/* We've used all descriptors we need for this skb, however,
4550 	 * advance cur_tx so that it references a fresh descriptor.
4551 	 * ndo_start_xmit will fill this descriptor the next time it's
4552 	 * called and stmmac_tx_clean may clean up to this descriptor.
4553 	 */
4554 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4555 	tx_q->cur_tx = entry;
4556 
4557 	if (netif_msg_pktdata(priv)) {
4558 		netdev_dbg(priv->dev,
4559 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4560 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4561 			   entry, first, nfrags);
4562 
4563 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4564 		print_pkt(skb->data, skb->len);
4565 	}
4566 
4567 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4568 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4569 			  __func__);
4570 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4571 	}
4572 
4573 	u64_stats_update_begin(&txq_stats->q_syncp);
4574 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4575 	if (set_ic)
4576 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4577 	u64_stats_update_end(&txq_stats->q_syncp);
4578 
4579 	if (priv->sarc_type)
4580 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4581 
4582 	skb_tx_timestamp(skb);
4583 
4584 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4585 	 * problems because all the descriptors are actually ready to be
4586 	 * passed to the DMA engine.
4587 	 */
4588 	if (likely(!is_jumbo)) {
4589 		bool last_segment = (nfrags == 0);
4590 
4591 		des = dma_map_single(priv->device, skb->data,
4592 				     nopaged_len, DMA_TO_DEVICE);
4593 		if (dma_mapping_error(priv->device, des))
4594 			goto dma_map_err;
4595 
4596 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4597 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4598 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4599 
4600 		stmmac_set_desc_addr(priv, first, des);
4601 
4602 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4603 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4604 
4605 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4606 			     priv->hwts_tx_en)) {
4607 			/* declare that device is doing timestamping */
4608 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4609 			stmmac_enable_tx_timestamp(priv, first);
4610 		}
4611 
4612 		/* Prepare the first descriptor setting the OWN bit too */
4613 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4614 				csum_insertion, priv->mode, 0, last_segment,
4615 				skb->len);
4616 	}
4617 
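	/* If time-based scheduling is enabled on this queue (e.g. via the etf
	 * qdisc offload), program the per-packet launch time from skb->tstamp
	 * into the enhanced descriptor.
	 */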
4618 	if (tx_q->tbs & STMMAC_TBS_EN) {
4619 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4620 
4621 		tbs_desc = &tx_q->dma_entx[first_entry];
4622 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4623 	}
4624 
4625 	stmmac_set_tx_owner(priv, first);
4626 
4627 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4628 
4629 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4630 
4631 	stmmac_flush_tx_descriptors(priv, queue);
4632 	stmmac_tx_timer_arm(priv, queue);
4633 
4634 	return NETDEV_TX_OK;
4635 
4636 dma_map_err:
4637 	netdev_err(priv->dev, "Tx DMA map failed\n");
4638 	dev_kfree_skb(skb);
4639 	priv->xstats.tx_dropped++;
4640 	return NETDEV_TX_OK;
4641 }
4642 
4643 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4644 {
4645 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4646 	__be16 vlan_proto = veth->h_vlan_proto;
4647 	u16 vlanid;
4648 
4649 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4650 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4651 	    (vlan_proto == htons(ETH_P_8021AD) &&
4652 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4653 		/* pop the vlan tag */
4654 		vlanid = ntohs(veth->h_vlan_TCI);
4655 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4656 		skb_pull(skb, VLAN_HLEN);
4657 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4658 	}
4659 }
4660 
4661 /**
4662  * stmmac_rx_refill - refill used preallocated RX buffers
4663  * @priv: driver private structure
4664  * @queue: RX queue index
4665  * Description : this is to reallocate the RX buffers for the reception process
4666  * that is based on zero-copy.
4667  */
4668 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4669 {
4670 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4671 	int dirty = stmmac_rx_dirty(priv, queue);
4672 	unsigned int entry = rx_q->dirty_rx;
4673 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4674 
4675 	if (priv->dma_cap.host_dma_width <= 32)
4676 		gfp |= GFP_DMA32;
4677 
4678 	while (dirty-- > 0) {
4679 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4680 		struct dma_desc *p;
4681 		bool use_rx_wd;
4682 
4683 		if (priv->extend_desc)
4684 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4685 		else
4686 			p = rx_q->dma_rx + entry;
4687 
4688 		if (!buf->page) {
4689 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4690 			if (!buf->page)
4691 				break;
4692 		}
4693 
4694 		if (priv->sph && !buf->sec_page) {
4695 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4696 			if (!buf->sec_page)
4697 				break;
4698 
4699 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4700 		}
4701 
4702 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4703 
4704 		stmmac_set_desc_addr(priv, p, buf->addr);
4705 		if (priv->sph)
4706 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4707 		else
4708 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4709 		stmmac_refill_desc3(priv, rx_q, p);
4710 
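		/* Interrupt coalescing bookkeeping: when RIWT (the RX interrupt
		 * watchdog) is in use, most refilled descriptors keep the
		 * per-descriptor interrupt off and rely on the watchdog timer
		 * instead; with RIWT disabled every descriptor interrupts on
		 * completion.
		 */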
4711 		rx_q->rx_count_frames++;
4712 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4713 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4714 			rx_q->rx_count_frames = 0;
4715 
4716 		use_rx_wd = !priv->rx_coal_frames[queue];
4717 		use_rx_wd |= rx_q->rx_count_frames > 0;
4718 		if (!priv->use_riwt)
4719 			use_rx_wd = false;
4720 
4721 		dma_wmb();
4722 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4723 
4724 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4725 	}
4726 	rx_q->dirty_rx = entry;
4727 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4728 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4729 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4730 }
4731 
4732 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4733 				       struct dma_desc *p,
4734 				       int status, unsigned int len)
4735 {
4736 	unsigned int plen = 0, hlen = 0;
4737 	int coe = priv->hw->rx_csum;
4738 
4739 	/* Not first descriptor, buffer is always zero */
4740 	if (priv->sph && len)
4741 		return 0;
4742 
4743 	/* First descriptor, get split header length */
4744 	stmmac_get_rx_header_len(priv, p, &hlen);
4745 	if (priv->sph && hlen) {
4746 		priv->xstats.rx_split_hdr_pkt_n++;
4747 		return hlen;
4748 	}
4749 
4750 	/* First descriptor, not last descriptor and not split header */
4751 	if (status & rx_not_ls)
4752 		return priv->dma_conf.dma_buf_sz;
4753 
4754 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4755 
4756 	/* First descriptor and last descriptor and not split header */
4757 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4758 }
4759 
4760 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4761 				       struct dma_desc *p,
4762 				       int status, unsigned int len)
4763 {
4764 	int coe = priv->hw->rx_csum;
4765 	unsigned int plen = 0;
4766 
4767 	/* Not split header, buffer is not available */
4768 	if (!priv->sph)
4769 		return 0;
4770 
4771 	/* Not last descriptor */
4772 	if (status & rx_not_ls)
4773 		return priv->dma_conf.dma_buf_sz;
4774 
4775 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4776 
4777 	/* Last descriptor */
4778 	return plen - len;
4779 }
4780 
4781 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4782 				struct xdp_frame *xdpf, bool dma_map)
4783 {
4784 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4785 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4786 	unsigned int entry = tx_q->cur_tx;
4787 	struct dma_desc *tx_desc;
4788 	dma_addr_t dma_addr;
4789 	bool set_ic;
4790 
4791 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4792 		return STMMAC_XDP_CONSUMED;
4793 
4794 	if (likely(priv->extend_desc))
4795 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4796 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4797 		tx_desc = &tx_q->dma_entx[entry].basic;
4798 	else
4799 		tx_desc = tx_q->dma_tx + entry;
4800 
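	/* dma_map is true for frames coming from ndo_xdp_xmit (redirected from
	 * another device, so the buffer must be DMA-mapped here), and false
	 * for XDP_TX where the page-pool page is already mapped and only a
	 * cache sync is needed.
	 */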
4801 	if (dma_map) {
4802 		dma_addr = dma_map_single(priv->device, xdpf->data,
4803 					  xdpf->len, DMA_TO_DEVICE);
4804 		if (dma_mapping_error(priv->device, dma_addr))
4805 			return STMMAC_XDP_CONSUMED;
4806 
4807 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4808 	} else {
4809 		struct page *page = virt_to_page(xdpf->data);
4810 
4811 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4812 			   xdpf->headroom;
4813 		dma_sync_single_for_device(priv->device, dma_addr,
4814 					   xdpf->len, DMA_BIDIRECTIONAL);
4815 
4816 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4817 	}
4818 
4819 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4820 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4821 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4822 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4823 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4824 
4825 	tx_q->xdpf[entry] = xdpf;
4826 
4827 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4828 
4829 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4830 			       true, priv->mode, true, true,
4831 			       xdpf->len);
4832 
4833 	tx_q->tx_count_frames++;
4834 
4835 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4836 		set_ic = true;
4837 	else
4838 		set_ic = false;
4839 
4840 	if (set_ic) {
4841 		tx_q->tx_count_frames = 0;
4842 		stmmac_set_tx_ic(priv, tx_desc);
4843 		u64_stats_update_begin(&txq_stats->q_syncp);
4844 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4845 		u64_stats_update_end(&txq_stats->q_syncp);
4846 	}
4847 
4848 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4849 
4850 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4851 	tx_q->cur_tx = entry;
4852 
4853 	return STMMAC_XDP_TX;
4854 }
4855 
4856 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4857 				   int cpu)
4858 {
4859 	int index = cpu;
4860 
4861 	if (unlikely(index < 0))
4862 		index = 0;
4863 
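	/* Spread XDP TX across the available queues by wrapping the CPU id,
	 * e.g. with 4 TX queues CPU 6 uses queue 2.
	 */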
4864 	while (index >= priv->plat->tx_queues_to_use)
4865 		index -= priv->plat->tx_queues_to_use;
4866 
4867 	return index;
4868 }
4869 
4870 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4871 				struct xdp_buff *xdp)
4872 {
4873 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4874 	int cpu = smp_processor_id();
4875 	struct netdev_queue *nq;
4876 	int queue;
4877 	int res;
4878 
4879 	if (unlikely(!xdpf))
4880 		return STMMAC_XDP_CONSUMED;
4881 
4882 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4883 	nq = netdev_get_tx_queue(priv->dev, queue);
4884 
4885 	__netif_tx_lock(nq, cpu);
4886 	/* Avoids TX time-out as we are sharing with slow path */
4887 	txq_trans_cond_update(nq);
4888 
4889 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4890 	if (res == STMMAC_XDP_TX)
4891 		stmmac_flush_tx_descriptors(priv, queue);
4892 
4893 	__netif_tx_unlock(nq);
4894 
4895 	return res;
4896 }
4897 
4898 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4899 				 struct bpf_prog *prog,
4900 				 struct xdp_buff *xdp)
4901 {
4902 	u32 act;
4903 	int res;
4904 
4905 	act = bpf_prog_run_xdp(prog, xdp);
4906 	switch (act) {
4907 	case XDP_PASS:
4908 		res = STMMAC_XDP_PASS;
4909 		break;
4910 	case XDP_TX:
4911 		res = stmmac_xdp_xmit_back(priv, xdp);
4912 		break;
4913 	case XDP_REDIRECT:
4914 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4915 			res = STMMAC_XDP_CONSUMED;
4916 		else
4917 			res = STMMAC_XDP_REDIRECT;
4918 		break;
4919 	default:
4920 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4921 		fallthrough;
4922 	case XDP_ABORTED:
4923 		trace_xdp_exception(priv->dev, prog, act);
4924 		fallthrough;
4925 	case XDP_DROP:
4926 		res = STMMAC_XDP_CONSUMED;
4927 		break;
4928 	}
4929 
4930 	return res;
4931 }
4932 
4933 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4934 					   struct xdp_buff *xdp)
4935 {
4936 	struct bpf_prog *prog;
4937 	int res;
4938 
4939 	prog = READ_ONCE(priv->xdp_prog);
4940 	if (!prog) {
4941 		res = STMMAC_XDP_PASS;
4942 		goto out;
4943 	}
4944 
4945 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4946 out:
4947 	return ERR_PTR(-res);
4948 }
4949 
4950 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4951 				   int xdp_status)
4952 {
4953 	int cpu = smp_processor_id();
4954 	int queue;
4955 
4956 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4957 
4958 	if (xdp_status & STMMAC_XDP_TX)
4959 		stmmac_tx_timer_arm(priv, queue);
4960 
4961 	if (xdp_status & STMMAC_XDP_REDIRECT)
4962 		xdp_do_flush();
4963 }
4964 
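/* Build an skb from an XSK (zero-copy) buffer by copying metadata and
 * payload out of the umem, so that the XSK buffer can be returned to the
 * pool right after dispatch (see xsk_buff_free() in the caller).
 */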
4965 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4966 					       struct xdp_buff *xdp)
4967 {
4968 	unsigned int metasize = xdp->data - xdp->data_meta;
4969 	unsigned int datasize = xdp->data_end - xdp->data;
4970 	struct sk_buff *skb;
4971 
4972 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4973 			       xdp->data_end - xdp->data_hard_start,
4974 			       GFP_ATOMIC | __GFP_NOWARN);
4975 	if (unlikely(!skb))
4976 		return NULL;
4977 
4978 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4979 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4980 	if (metasize)
4981 		skb_metadata_set(skb, metasize);
4982 
4983 	return skb;
4984 }
4985 
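/* Deliver one completed zero-copy frame to the stack: copy it into an
 * skb, apply the RX timestamp, VLAN, checksum and hash offload results,
 * then pass it to GRO on the rxtx NAPI context and update the counters.
 */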
4986 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4987 				   struct dma_desc *p, struct dma_desc *np,
4988 				   struct xdp_buff *xdp)
4989 {
4990 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4991 	struct stmmac_channel *ch = &priv->channel[queue];
4992 	unsigned int len = xdp->data_end - xdp->data;
4993 	enum pkt_hash_types hash_type;
4994 	int coe = priv->hw->rx_csum;
4995 	struct sk_buff *skb;
4996 	u32 hash;
4997 
4998 	skb = stmmac_construct_skb_zc(ch, xdp);
4999 	if (!skb) {
5000 		priv->xstats.rx_dropped++;
5001 		return;
5002 	}
5003 
5004 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5005 	stmmac_rx_vlan(priv->dev, skb);
5006 	skb->protocol = eth_type_trans(skb, priv->dev);
5007 
5008 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5009 		skb_checksum_none_assert(skb);
5010 	else
5011 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5012 
5013 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5014 		skb_set_hash(skb, hash, hash_type);
5015 
5016 	skb_record_rx_queue(skb, queue);
5017 	napi_gro_receive(&ch->rxtx_napi, skb);
5018 
5019 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5020 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5021 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5022 	u64_stats_update_end(&rxq_stats->napi_syncp);
5023 }
5024 
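/* Refill up to 'budget' RX descriptors with fresh buffers from the XSK
 * pool, update the RX watchdog/coalescing state and advance the RX tail
 * pointer. Returns false if the pool ran out of buffers.
 */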
5025 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5026 {
5027 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5028 	unsigned int entry = rx_q->dirty_rx;
5029 	struct dma_desc *rx_desc = NULL;
5030 	bool ret = true;
5031 
5032 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5033 
5034 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5035 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5036 		dma_addr_t dma_addr;
5037 		bool use_rx_wd;
5038 
5039 		if (!buf->xdp) {
5040 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5041 			if (!buf->xdp) {
5042 				ret = false;
5043 				break;
5044 			}
5045 		}
5046 
5047 		if (priv->extend_desc)
5048 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5049 		else
5050 			rx_desc = rx_q->dma_rx + entry;
5051 
5052 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5053 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5054 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5055 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5056 
5057 		rx_q->rx_count_frames++;
5058 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5059 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5060 			rx_q->rx_count_frames = 0;
5061 
5062 		use_rx_wd = !priv->rx_coal_frames[queue];
5063 		use_rx_wd |= rx_q->rx_count_frames > 0;
5064 		if (!priv->use_riwt)
5065 			use_rx_wd = false;
5066 
5067 		dma_wmb();
5068 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5069 
5070 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5071 	}
5072 
5073 	if (rx_desc) {
5074 		rx_q->dirty_rx = entry;
5075 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5076 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5077 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5078 	}
5079 
5080 	return ret;
5081 }
5082 
5083 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5084 {
5085 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5086 	 * represents the incoming packet, whereas the cb field in the same
5087 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5088 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5089 	 */
5090 	return (struct stmmac_xdp_buff *)xdp;
5091 }
5092 
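/* Zero-copy (XSK) receive path: like stmmac_rx(), but every frame is
 * mapped 1:1 to an XSK pool buffer. Each frame is run through the XDP
 * program and either transmitted/redirected, dropped, or copied into an
 * skb for the stack; the pool's need_wakeup flag is managed for
 * user-space pollers.
 */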
5093 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5094 {
5095 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5096 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5097 	unsigned int count = 0, error = 0, len = 0;
5098 	int dirty = stmmac_rx_dirty(priv, queue);
5099 	unsigned int next_entry = rx_q->cur_rx;
5100 	u32 rx_errors = 0, rx_dropped = 0;
5101 	unsigned int desc_size;
5102 	struct bpf_prog *prog;
5103 	bool failure = false;
5104 	int xdp_status = 0;
5105 	int status = 0;
5106 
5107 	if (netif_msg_rx_status(priv)) {
5108 		void *rx_head;
5109 
5110 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5111 		if (priv->extend_desc) {
5112 			rx_head = (void *)rx_q->dma_erx;
5113 			desc_size = sizeof(struct dma_extended_desc);
5114 		} else {
5115 			rx_head = (void *)rx_q->dma_rx;
5116 			desc_size = sizeof(struct dma_desc);
5117 		}
5118 
5119 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5120 				    rx_q->dma_rx_phy, desc_size);
5121 	}
5122 	while (count < limit) {
5123 		struct stmmac_rx_buffer *buf;
5124 		struct stmmac_xdp_buff *ctx;
5125 		unsigned int buf1_len = 0;
5126 		struct dma_desc *np, *p;
5127 		int entry;
5128 		int res;
5129 
5130 		if (!count && rx_q->state_saved) {
5131 			error = rx_q->state.error;
5132 			len = rx_q->state.len;
5133 		} else {
5134 			rx_q->state_saved = false;
5135 			error = 0;
5136 			len = 0;
5137 		}
5138 
5139 		if (count >= limit)
5140 			break;
5141 
5142 read_again:
5143 		buf1_len = 0;
5144 		entry = next_entry;
5145 		buf = &rx_q->buf_pool[entry];
5146 
5147 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5148 			failure = failure ||
5149 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5150 			dirty = 0;
5151 		}
5152 
5153 		if (priv->extend_desc)
5154 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5155 		else
5156 			p = rx_q->dma_rx + entry;
5157 
5158 		/* read the status of the incoming frame */
5159 		status = stmmac_rx_status(priv, &priv->xstats, p);
5160 		/* check if managed by the DMA otherwise go ahead */
5161 		if (unlikely(status & dma_own))
5162 			break;
5163 
5164 		/* Prefetch the next RX descriptor */
5165 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5166 						priv->dma_conf.dma_rx_size);
5167 		next_entry = rx_q->cur_rx;
5168 
5169 		if (priv->extend_desc)
5170 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5171 		else
5172 			np = rx_q->dma_rx + next_entry;
5173 
5174 		prefetch(np);
5175 
5176 		/* Ensure a valid XSK buffer before proceeding */
5177 		if (!buf->xdp)
5178 			break;
5179 
5180 		if (priv->extend_desc)
5181 			stmmac_rx_extended_status(priv, &priv->xstats,
5182 						  rx_q->dma_erx + entry);
5183 		if (unlikely(status == discard_frame)) {
5184 			xsk_buff_free(buf->xdp);
5185 			buf->xdp = NULL;
5186 			dirty++;
5187 			error = 1;
5188 			if (!priv->hwts_rx_en)
5189 				rx_errors++;
5190 		}
5191 
5192 		if (unlikely(error && (status & rx_not_ls)))
5193 			goto read_again;
5194 		if (unlikely(error)) {
5195 			count++;
5196 			continue;
5197 		}
5198 
5199 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5200 		if (likely(status & rx_not_ls)) {
5201 			xsk_buff_free(buf->xdp);
5202 			buf->xdp = NULL;
5203 			dirty++;
5204 			count++;
5205 			goto read_again;
5206 		}
5207 
5208 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5209 		ctx->priv = priv;
5210 		ctx->desc = p;
5211 		ctx->ndesc = np;
5212 
5213 		/* XDP ZC frames only support primary buffers for now */
5214 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5215 		len += buf1_len;
5216 
5217 		/* ACS is disabled; strip manually. */
5218 		if (likely(!(status & rx_not_ls))) {
5219 			buf1_len -= ETH_FCS_LEN;
5220 			len -= ETH_FCS_LEN;
5221 		}
5222 
5223 		/* RX buffer is good and fits into an XSK pool buffer */
5224 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5225 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5226 
5227 		prog = READ_ONCE(priv->xdp_prog);
5228 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5229 
5230 		switch (res) {
5231 		case STMMAC_XDP_PASS:
5232 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5233 			xsk_buff_free(buf->xdp);
5234 			break;
5235 		case STMMAC_XDP_CONSUMED:
5236 			xsk_buff_free(buf->xdp);
5237 			rx_dropped++;
5238 			break;
5239 		case STMMAC_XDP_TX:
5240 		case STMMAC_XDP_REDIRECT:
5241 			xdp_status |= res;
5242 			break;
5243 		}
5244 
5245 		buf->xdp = NULL;
5246 		dirty++;
5247 		count++;
5248 	}
5249 
5250 	if (status & rx_not_ls) {
5251 		rx_q->state_saved = true;
5252 		rx_q->state.error = error;
5253 		rx_q->state.len = len;
5254 	}
5255 
5256 	stmmac_finalize_xdp_rx(priv, xdp_status);
5257 
5258 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5259 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5260 	u64_stats_update_end(&rxq_stats->napi_syncp);
5261 
5262 	priv->xstats.rx_dropped += rx_dropped;
5263 	priv->xstats.rx_errors += rx_errors;
5264 
5265 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5266 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5267 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5268 		else
5269 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5270 
5271 		return (int)count;
5272 	}
5273 
5274 	return failure ? limit : (int)count;
5275 }
5276 
5277 /**
5278  * stmmac_rx - manage the receive process
5279  * @priv: driver private structure
5280  * @limit: napi budget
5281  * @queue: RX queue index.
5282  * Description : this is the function called by the napi poll method.
5283  * It gets all the frames inside the ring.
5284  */
5285 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5286 {
5287 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5288 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5289 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5290 	struct stmmac_channel *ch = &priv->channel[queue];
5291 	unsigned int count = 0, error = 0, len = 0;
5292 	int status = 0, coe = priv->hw->rx_csum;
5293 	unsigned int next_entry = rx_q->cur_rx;
5294 	enum dma_data_direction dma_dir;
5295 	unsigned int desc_size;
5296 	struct sk_buff *skb = NULL;
5297 	struct stmmac_xdp_buff ctx;
5298 	int xdp_status = 0;
5299 	int buf_sz;
5300 
5301 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5302 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5303 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5304 
5305 	if (netif_msg_rx_status(priv)) {
5306 		void *rx_head;
5307 
5308 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5309 		if (priv->extend_desc) {
5310 			rx_head = (void *)rx_q->dma_erx;
5311 			desc_size = sizeof(struct dma_extended_desc);
5312 		} else {
5313 			rx_head = (void *)rx_q->dma_rx;
5314 			desc_size = sizeof(struct dma_desc);
5315 		}
5316 
5317 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5318 				    rx_q->dma_rx_phy, desc_size);
5319 	}
5320 	while (count < limit) {
5321 		unsigned int buf1_len = 0, buf2_len = 0;
5322 		enum pkt_hash_types hash_type;
5323 		struct stmmac_rx_buffer *buf;
5324 		struct dma_desc *np, *p;
5325 		int entry;
5326 		u32 hash;
5327 
5328 		if (!count && rx_q->state_saved) {
5329 			skb = rx_q->state.skb;
5330 			error = rx_q->state.error;
5331 			len = rx_q->state.len;
5332 		} else {
5333 			rx_q->state_saved = false;
5334 			skb = NULL;
5335 			error = 0;
5336 			len = 0;
5337 		}
5338 
5339 read_again:
5340 		if (count >= limit)
5341 			break;
5342 
5343 		buf1_len = 0;
5344 		buf2_len = 0;
5345 		entry = next_entry;
5346 		buf = &rx_q->buf_pool[entry];
5347 
5348 		if (priv->extend_desc)
5349 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5350 		else
5351 			p = rx_q->dma_rx + entry;
5352 
5353 		/* read the status of the incoming frame */
5354 		status = stmmac_rx_status(priv, &priv->xstats, p);
5355 		/* check if managed by the DMA otherwise go ahead */
5356 		if (unlikely(status & dma_own))
5357 			break;
5358 
5359 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5360 						priv->dma_conf.dma_rx_size);
5361 		next_entry = rx_q->cur_rx;
5362 
5363 		if (priv->extend_desc)
5364 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5365 		else
5366 			np = rx_q->dma_rx + next_entry;
5367 
5368 		prefetch(np);
5369 
5370 		if (priv->extend_desc)
5371 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5372 		if (unlikely(status == discard_frame)) {
5373 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5374 			buf->page = NULL;
5375 			error = 1;
5376 			if (!priv->hwts_rx_en)
5377 				rx_errors++;
5378 		}
5379 
5380 		if (unlikely(error && (status & rx_not_ls)))
5381 			goto read_again;
5382 		if (unlikely(error)) {
5383 			dev_kfree_skb(skb);
5384 			skb = NULL;
5385 			count++;
5386 			continue;
5387 		}
5388 
5389 		/* Buffer is good. Go on. */
5390 
5391 		prefetch(page_address(buf->page) + buf->page_offset);
5392 		if (buf->sec_page)
5393 			prefetch(page_address(buf->sec_page));
5394 
5395 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5396 		len += buf1_len;
5397 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5398 		len += buf2_len;
5399 
5400 		/* ACS is disabled; strip manually. */
5401 		if (likely(!(status & rx_not_ls))) {
5402 			if (buf2_len) {
5403 				buf2_len -= ETH_FCS_LEN;
5404 				len -= ETH_FCS_LEN;
5405 			} else if (buf1_len) {
5406 				buf1_len -= ETH_FCS_LEN;
5407 				len -= ETH_FCS_LEN;
5408 			}
5409 		}
5410 
5411 		if (!skb) {
5412 			unsigned int pre_len, sync_len;
5413 
5414 			dma_sync_single_for_cpu(priv->device, buf->addr,
5415 						buf1_len, dma_dir);
5416 
5417 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5418 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5419 					 buf->page_offset, buf1_len, true);
5420 
5421 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5422 				  buf->page_offset;
5423 
5424 			ctx.priv = priv;
5425 			ctx.desc = p;
5426 			ctx.ndesc = np;
5427 
5428 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5429 			/* Due to xdp_adjust_tail: the DMA sync for_device
5430 			 * must cover the max length the CPU touched
5431 			 */
5432 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5433 				   buf->page_offset;
5434 			sync_len = max(sync_len, pre_len);
5435 
5436 			/* For non-XDP_PASS verdicts */
5437 			if (IS_ERR(skb)) {
5438 				unsigned int xdp_res = -PTR_ERR(skb);
5439 
5440 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5441 					page_pool_put_page(rx_q->page_pool,
5442 							   virt_to_head_page(ctx.xdp.data),
5443 							   sync_len, true);
5444 					buf->page = NULL;
5445 					rx_dropped++;
5446 
5447 					/* Clear skb as it was set as
5448 					 * status by XDP program.
5449 					 */
5450 					skb = NULL;
5451 
5452 					if (unlikely((status & rx_not_ls)))
5453 						goto read_again;
5454 
5455 					count++;
5456 					continue;
5457 				} else if (xdp_res & (STMMAC_XDP_TX |
5458 						      STMMAC_XDP_REDIRECT)) {
5459 					xdp_status |= xdp_res;
5460 					buf->page = NULL;
5461 					skb = NULL;
5462 					count++;
5463 					continue;
5464 				}
5465 			}
5466 		}
5467 
5468 		if (!skb) {
5469 			/* XDP program may expand or reduce tail */
5470 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5471 
5472 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5473 			if (!skb) {
5474 				rx_dropped++;
5475 				count++;
5476 				goto drain_data;
5477 			}
5478 
5479 			/* XDP program may adjust header */
5480 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5481 			skb_put(skb, buf1_len);
5482 
5483 			/* Data payload copied into SKB, page ready for recycle */
5484 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5485 			buf->page = NULL;
5486 		} else if (buf1_len) {
5487 			dma_sync_single_for_cpu(priv->device, buf->addr,
5488 						buf1_len, dma_dir);
5489 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5490 					buf->page, buf->page_offset, buf1_len,
5491 					priv->dma_conf.dma_buf_sz);
5492 
5493 			/* Data payload appended into SKB */
5494 			skb_mark_for_recycle(skb);
5495 			buf->page = NULL;
5496 		}
5497 
5498 		if (buf2_len) {
5499 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5500 						buf2_len, dma_dir);
5501 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5502 					buf->sec_page, 0, buf2_len,
5503 					priv->dma_conf.dma_buf_sz);
5504 
5505 			/* Data payload appended into SKB */
5506 			skb_mark_for_recycle(skb);
5507 			buf->sec_page = NULL;
5508 		}
5509 
5510 drain_data:
5511 		if (likely(status & rx_not_ls))
5512 			goto read_again;
5513 		if (!skb)
5514 			continue;
5515 
5516 		/* Got entire packet into SKB. Finish it. */
5517 
5518 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5519 		stmmac_rx_vlan(priv->dev, skb);
5520 		skb->protocol = eth_type_trans(skb, priv->dev);
5521 
5522 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5523 			skb_checksum_none_assert(skb);
5524 		else
5525 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5526 
5527 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5528 			skb_set_hash(skb, hash, hash_type);
5529 
5530 		skb_record_rx_queue(skb, queue);
5531 		napi_gro_receive(&ch->rx_napi, skb);
5532 		skb = NULL;
5533 
5534 		rx_packets++;
5535 		rx_bytes += len;
5536 		count++;
5537 	}
5538 
5539 	if (status & rx_not_ls || skb) {
5540 		rx_q->state_saved = true;
5541 		rx_q->state.skb = skb;
5542 		rx_q->state.error = error;
5543 		rx_q->state.len = len;
5544 	}
5545 
5546 	stmmac_finalize_xdp_rx(priv, xdp_status);
5547 
5548 	stmmac_rx_refill(priv, queue);
5549 
5550 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5551 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5552 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5553 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5554 	u64_stats_update_end(&rxq_stats->napi_syncp);
5555 
5556 	priv->xstats.rx_dropped += rx_dropped;
5557 	priv->xstats.rx_errors += rx_errors;
5558 
5559 	return count;
5560 }
5561 
5562 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5563 {
5564 	struct stmmac_channel *ch =
5565 		container_of(napi, struct stmmac_channel, rx_napi);
5566 	struct stmmac_priv *priv = ch->priv_data;
5567 	struct stmmac_rxq_stats *rxq_stats;
5568 	u32 chan = ch->index;
5569 	int work_done;
5570 
5571 	rxq_stats = &priv->xstats.rxq_stats[chan];
5572 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5573 	u64_stats_inc(&rxq_stats->napi.poll);
5574 	u64_stats_update_end(&rxq_stats->napi_syncp);
5575 
5576 	work_done = stmmac_rx(priv, budget, chan);
5577 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5578 		unsigned long flags;
5579 
5580 		spin_lock_irqsave(&ch->lock, flags);
5581 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5582 		spin_unlock_irqrestore(&ch->lock, flags);
5583 	}
5584 
5585 	return work_done;
5586 }
5587 
5588 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5589 {
5590 	struct stmmac_channel *ch =
5591 		container_of(napi, struct stmmac_channel, tx_napi);
5592 	struct stmmac_priv *priv = ch->priv_data;
5593 	struct stmmac_txq_stats *txq_stats;
5594 	u32 chan = ch->index;
5595 	int work_done;
5596 
5597 	txq_stats = &priv->xstats.txq_stats[chan];
5598 	u64_stats_update_begin(&txq_stats->napi_syncp);
5599 	u64_stats_inc(&txq_stats->napi.poll);
5600 	u64_stats_update_end(&txq_stats->napi_syncp);
5601 
5602 	work_done = stmmac_tx_clean(priv, budget, chan);
5603 	work_done = min(work_done, budget);
5604 
5605 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5606 		unsigned long flags;
5607 
5608 		spin_lock_irqsave(&ch->lock, flags);
5609 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5610 		spin_unlock_irqrestore(&ch->lock, flags);
5611 	}
5612 
5613 	return work_done;
5614 }
5615 
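/* Combined NAPI poll used by channels running in XSK zero-copy mode: TX
 * completion and zero-copy RX are serviced together, and the return
 * value is clamped to budget - 1 so the full budget is never reported
 * once napi_complete_done() has been called.
 */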
5616 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5617 {
5618 	struct stmmac_channel *ch =
5619 		container_of(napi, struct stmmac_channel, rxtx_napi);
5620 	struct stmmac_priv *priv = ch->priv_data;
5621 	int rx_done, tx_done, rxtx_done;
5622 	struct stmmac_rxq_stats *rxq_stats;
5623 	struct stmmac_txq_stats *txq_stats;
5624 	u32 chan = ch->index;
5625 
5626 	rxq_stats = &priv->xstats.rxq_stats[chan];
5627 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5628 	u64_stats_inc(&rxq_stats->napi.poll);
5629 	u64_stats_update_end(&rxq_stats->napi_syncp);
5630 
5631 	txq_stats = &priv->xstats.txq_stats[chan];
5632 	u64_stats_update_begin(&txq_stats->napi_syncp);
5633 	u64_stats_inc(&txq_stats->napi.poll);
5634 	u64_stats_update_end(&txq_stats->napi_syncp);
5635 
5636 	tx_done = stmmac_tx_clean(priv, budget, chan);
5637 	tx_done = min(tx_done, budget);
5638 
5639 	rx_done = stmmac_rx_zc(priv, budget, chan);
5640 
5641 	rxtx_done = max(tx_done, rx_done);
5642 
5643 	/* If either TX or RX work is not complete, return budget
5644 	 * and keep polling
5645 	 */
5646 	if (rxtx_done >= budget)
5647 		return budget;
5648 
5649 	/* all work done, exit the polling mode */
5650 	if (napi_complete_done(napi, rxtx_done)) {
5651 		unsigned long flags;
5652 
5653 		spin_lock_irqsave(&ch->lock, flags);
5654 		/* Both RX and TX work are complete,
5655 		 * so enable both RX & TX IRQs.
5656 		 */
5657 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5658 		spin_unlock_irqrestore(&ch->lock, flags);
5659 	}
5660 
5661 	return min(rxtx_done, budget - 1);
5662 }
5663 
5664 /**
5665  *  stmmac_tx_timeout
5666  *  @dev : Pointer to net device structure
5667  *  @txqueue: the index of the hanging transmit queue
5668  *  Description: this function is called when a packet transmission fails to
5669  *   complete within a reasonable time. The driver will mark the error in the
5670  *   netdev structure and arrange for the device to be reset to a sane state
5671  *   in order to transmit a new packet.
5672  */
5673 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5674 {
5675 	struct stmmac_priv *priv = netdev_priv(dev);
5676 
5677 	stmmac_global_err(priv);
5678 }
5679 
5680 /**
5681  *  stmmac_set_rx_mode - entry point for multicast addressing
5682  *  @dev : pointer to the device structure
5683  *  Description:
5684  *  This function is a driver entry point which gets called by the kernel
5685  *  whenever multicast addresses must be enabled/disabled.
5686  *  Return value:
5687  *  void.
5688  */
5689 static void stmmac_set_rx_mode(struct net_device *dev)
5690 {
5691 	struct stmmac_priv *priv = netdev_priv(dev);
5692 
5693 	stmmac_set_filter(priv, priv->hw, dev);
5694 }
5695 
5696 /**
5697  *  stmmac_change_mtu - entry point to change MTU size for the device.
5698  *  @dev : device pointer.
5699  *  @new_mtu : the new MTU size for the device.
5700  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5701  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5702  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5703  *  Return value:
5704  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5705  *  file on failure.
5706  */
5707 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5708 {
5709 	struct stmmac_priv *priv = netdev_priv(dev);
5710 	int txfifosz = priv->plat->tx_fifo_size;
5711 	struct stmmac_dma_conf *dma_conf;
5712 	const int mtu = new_mtu;
5713 	int ret;
5714 
5715 	if (txfifosz == 0)
5716 		txfifosz = priv->dma_cap.tx_fifo_size;
5717 
5718 	txfifosz /= priv->plat->tx_queues_to_use;
5719 
5720 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5721 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5722 		return -EINVAL;
5723 	}
5724 
5725 	new_mtu = STMMAC_ALIGN(new_mtu);
5726 
5727 	/* If condition true, FIFO is too small or MTU too large */
5728 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5729 		return -EINVAL;
5730 
5731 	if (netif_running(dev)) {
5732 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5733 		/* Try to allocate the new DMA conf with the new mtu */
5734 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5735 		if (IS_ERR(dma_conf)) {
5736 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5737 				   mtu);
5738 			return PTR_ERR(dma_conf);
5739 		}
5740 
5741 		stmmac_release(dev);
5742 
5743 		ret = __stmmac_open(dev, dma_conf);
5744 		if (ret) {
5745 			free_dma_desc_resources(priv, dma_conf);
5746 			kfree(dma_conf);
5747 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5748 			return ret;
5749 		}
5750 
5751 		kfree(dma_conf);
5752 
5753 		stmmac_set_rx_mode(dev);
5754 	}
5755 
5756 	dev->mtu = mtu;
5757 	netdev_update_features(dev);
5758 
5759 	return 0;
5760 }
5761 
5762 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5763 					     netdev_features_t features)
5764 {
5765 	struct stmmac_priv *priv = netdev_priv(dev);
5766 
5767 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5768 		features &= ~NETIF_F_RXCSUM;
5769 
5770 	if (!priv->plat->tx_coe)
5771 		features &= ~NETIF_F_CSUM_MASK;
5772 
5773 	/* Some GMAC devices have a bugged Jumbo frame support that
5774 	 * needs to have the Tx COE disabled for oversized frames
5775 	 * (due to limited buffer sizes). In this case we disable
5776 	 * the TX csum insertion in the TDES and not use SF.
5777 	 */
5778 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5779 		features &= ~NETIF_F_CSUM_MASK;
5780 
5781 	/* Disable tso if asked by ethtool */
5782 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5783 		if (features & NETIF_F_TSO)
5784 			priv->tso = true;
5785 		else
5786 			priv->tso = false;
5787 	}
5788 
5789 	return features;
5790 }
5791 
5792 static int stmmac_set_features(struct net_device *netdev,
5793 			       netdev_features_t features)
5794 {
5795 	struct stmmac_priv *priv = netdev_priv(netdev);
5796 
5797 	/* Keep the COE type if checksum offload is supported */
5798 	if (features & NETIF_F_RXCSUM)
5799 		priv->hw->rx_csum = priv->plat->rx_coe;
5800 	else
5801 		priv->hw->rx_csum = 0;
5802 	/* No check needed because rx_coe has been set before and it will be
5803 	 * fixed in case of issue.
5804 	 */
5805 	stmmac_rx_ipc(priv, priv->hw);
5806 
5807 	if (priv->sph_cap) {
5808 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5809 		u32 chan;
5810 
5811 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5812 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5813 	}
5814 
5815 	return 0;
5816 }
5817 
5818 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5819 {
5820 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5821 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5822 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5823 	bool *hs_enable = &fpe_cfg->hs_enable;
5824 
5825 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5826 		return;
5827 
5828 	/* If LP has sent verify mPacket, LP is FPE capable */
5829 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5830 		if (*lp_state < FPE_STATE_CAPABLE)
5831 			*lp_state = FPE_STATE_CAPABLE;
5832 
5833 		/* If the user has requested FPE enable, respond quickly */
5834 		if (*hs_enable)
5835 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5836 						fpe_cfg,
5837 						MPACKET_RESPONSE);
5838 	}
5839 
5840 	/* If Local has sent verify mPacket, Local is FPE capable */
5841 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5842 		if (*lo_state < FPE_STATE_CAPABLE)
5843 			*lo_state = FPE_STATE_CAPABLE;
5844 	}
5845 
5846 	/* If LP has sent response mPacket, LP is entering FPE ON */
5847 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5848 		*lp_state = FPE_STATE_ENTERING_ON;
5849 
5850 	/* If Local has sent response mPacket, Local is entering FPE ON */
5851 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5852 		*lo_state = FPE_STATE_ENTERING_ON;
5853 
5854 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5855 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5856 	    priv->fpe_wq) {
5857 		queue_work(priv->fpe_wq, &priv->fpe_task);
5858 	}
5859 }
5860 
5861 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5862 {
5863 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5864 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5865 	u32 queues_count;
5866 	u32 queue;
5867 	bool xmac;
5868 
5869 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5870 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5871 
5872 	if (priv->irq_wake)
5873 		pm_wakeup_event(priv->device, 0);
5874 
5875 	if (priv->dma_cap.estsel)
5876 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5877 				      &priv->xstats, tx_cnt);
5878 
5879 	if (priv->dma_cap.fpesel) {
5880 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5881 						   priv->dev);
5882 
5883 		stmmac_fpe_event_status(priv, status);
5884 	}
5885 
5886 	/* To handle the GMAC's own interrupts */
5887 	if ((priv->plat->has_gmac) || xmac) {
5888 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5889 
5890 		if (unlikely(status)) {
5891 			/* For LPI we need to save the tx status */
5892 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5893 				priv->tx_path_in_lpi_mode = true;
5894 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5895 				priv->tx_path_in_lpi_mode = false;
5896 		}
5897 
5898 		for (queue = 0; queue < queues_count; queue++) {
5899 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5900 							    queue);
5901 		}
5902 
5903 		/* PCS link status */
5904 		if (priv->hw->pcs &&
5905 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5906 			if (priv->xstats.pcs_link)
5907 				netif_carrier_on(priv->dev);
5908 			else
5909 				netif_carrier_off(priv->dev);
5910 		}
5911 
5912 		stmmac_timestamp_interrupt(priv, priv);
5913 	}
5914 }
5915 
5916 /**
5917  *  stmmac_interrupt - main ISR
5918  *  @irq: interrupt number.
5919  *  @dev_id: to pass the net device pointer.
5920  *  Description: this is the main driver interrupt service routine.
5921  *  It can call:
5922  *  o DMA service routine (to manage incoming frame reception and transmission
5923  *    status)
5924  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5925  *    interrupts.
5926  */
5927 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5928 {
5929 	struct net_device *dev = (struct net_device *)dev_id;
5930 	struct stmmac_priv *priv = netdev_priv(dev);
5931 
5932 	/* Check if adapter is up */
5933 	if (test_bit(STMMAC_DOWN, &priv->state))
5934 		return IRQ_HANDLED;
5935 
5936 	/* Check if a fatal error happened */
5937 	if (stmmac_safety_feat_interrupt(priv))
5938 		return IRQ_HANDLED;
5939 
5940 	/* To handle Common interrupts */
5941 	stmmac_common_interrupt(priv);
5942 
5943 	/* To handle DMA interrupts */
5944 	stmmac_dma_interrupt(priv);
5945 
5946 	return IRQ_HANDLED;
5947 }
5948 
5949 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5950 {
5951 	struct net_device *dev = (struct net_device *)dev_id;
5952 	struct stmmac_priv *priv = netdev_priv(dev);
5953 
5954 	/* Check if adapter is up */
5955 	if (test_bit(STMMAC_DOWN, &priv->state))
5956 		return IRQ_HANDLED;
5957 
5958 	/* To handle Common interrupts */
5959 	stmmac_common_interrupt(priv);
5960 
5961 	return IRQ_HANDLED;
5962 }
5963 
5964 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5965 {
5966 	struct net_device *dev = (struct net_device *)dev_id;
5967 	struct stmmac_priv *priv = netdev_priv(dev);
5968 
5969 	/* Check if adapter is up */
5970 	if (test_bit(STMMAC_DOWN, &priv->state))
5971 		return IRQ_HANDLED;
5972 
5973 	/* Check if a fatal error happened */
5974 	stmmac_safety_feat_interrupt(priv);
5975 
5976 	return IRQ_HANDLED;
5977 }
5978 
5979 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5980 {
5981 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5982 	struct stmmac_dma_conf *dma_conf;
5983 	int chan = tx_q->queue_index;
5984 	struct stmmac_priv *priv;
5985 	int status;
5986 
5987 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5988 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5989 
5990 	/* Check if adapter is up */
5991 	if (test_bit(STMMAC_DOWN, &priv->state))
5992 		return IRQ_HANDLED;
5993 
5994 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5995 
5996 	if (unlikely(status & tx_hard_error_bump_tc)) {
5997 		/* Try to bump up the dma threshold on this failure */
5998 		stmmac_bump_dma_threshold(priv, chan);
5999 	} else if (unlikely(status == tx_hard_error)) {
6000 		stmmac_tx_err(priv, chan);
6001 	}
6002 
6003 	return IRQ_HANDLED;
6004 }
6005 
6006 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6007 {
6008 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6009 	struct stmmac_dma_conf *dma_conf;
6010 	int chan = rx_q->queue_index;
6011 	struct stmmac_priv *priv;
6012 
6013 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6014 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6015 
6016 	/* Check if adapter is up */
6017 	if (test_bit(STMMAC_DOWN, &priv->state))
6018 		return IRQ_HANDLED;
6019 
6020 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6021 
6022 	return IRQ_HANDLED;
6023 }
6024 
6025 /**
6026  *  stmmac_ioctl - Entry point for the Ioctl
6027  *  @dev: Device pointer.
6028  *  @rq: An IOCTL-specific structure that can contain a pointer to
6029  *  a proprietary structure used to pass information to the driver.
6030  *  @cmd: IOCTL command
6031  *  Description:
6032  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6033  */
6034 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6035 {
6036 	struct stmmac_priv *priv = netdev_priv(dev);
6037 	int ret = -EOPNOTSUPP;
6038 
6039 	if (!netif_running(dev))
6040 		return -EINVAL;
6041 
6042 	switch (cmd) {
6043 	case SIOCGMIIPHY:
6044 	case SIOCGMIIREG:
6045 	case SIOCSMIIREG:
6046 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6047 		break;
6048 	case SIOCSHWTSTAMP:
6049 		ret = stmmac_hwtstamp_set(dev, rq);
6050 		break;
6051 	case SIOCGHWTSTAMP:
6052 		ret = stmmac_hwtstamp_get(dev, rq);
6053 		break;
6054 	default:
6055 		break;
6056 	}
6057 
6058 	return ret;
6059 }
6060 
6061 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6062 				    void *cb_priv)
6063 {
6064 	struct stmmac_priv *priv = cb_priv;
6065 	int ret = -EOPNOTSUPP;
6066 
6067 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6068 		return ret;
6069 
6070 	__stmmac_disable_all_queues(priv);
6071 
6072 	switch (type) {
6073 	case TC_SETUP_CLSU32:
6074 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6075 		break;
6076 	case TC_SETUP_CLSFLOWER:
6077 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6078 		break;
6079 	default:
6080 		break;
6081 	}
6082 
6083 	stmmac_enable_all_queues(priv);
6084 	return ret;
6085 }
6086 
6087 static LIST_HEAD(stmmac_block_cb_list);
6088 
6089 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6090 			   void *type_data)
6091 {
6092 	struct stmmac_priv *priv = netdev_priv(ndev);
6093 
6094 	switch (type) {
6095 	case TC_QUERY_CAPS:
6096 		return stmmac_tc_query_caps(priv, priv, type_data);
6097 	case TC_SETUP_BLOCK:
6098 		return flow_block_cb_setup_simple(type_data,
6099 						  &stmmac_block_cb_list,
6100 						  stmmac_setup_tc_block_cb,
6101 						  priv, priv, true);
6102 	case TC_SETUP_QDISC_CBS:
6103 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6104 	case TC_SETUP_QDISC_TAPRIO:
6105 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6106 	case TC_SETUP_QDISC_ETF:
6107 		return stmmac_tc_setup_etf(priv, priv, type_data);
6108 	default:
6109 		return -EOPNOTSUPP;
6110 	}
6111 }
6112 
6113 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6114 			       struct net_device *sb_dev)
6115 {
6116 	int gso = skb_shinfo(skb)->gso_type;
6117 
6118 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6119 		/*
6120 		 * There is no way to determine the number of TSO/USO
6121 		 * capable Queues. Let's always use Queue 0,
6122 		 * because if TSO/USO is supported then at least this
6123 		 * one will be capable.
6124 		 */
6125 		return 0;
6126 	}
6127 
6128 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6129 }
6130 
6131 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6132 {
6133 	struct stmmac_priv *priv = netdev_priv(ndev);
6134 	int ret = 0;
6135 
6136 	ret = pm_runtime_resume_and_get(priv->device);
6137 	if (ret < 0)
6138 		return ret;
6139 
6140 	ret = eth_mac_addr(ndev, addr);
6141 	if (ret)
6142 		goto set_mac_error;
6143 
6144 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6145 
6146 set_mac_error:
6147 	pm_runtime_put(priv->device);
6148 
6149 	return ret;
6150 }
6151 
6152 #ifdef CONFIG_DEBUG_FS
6153 static struct dentry *stmmac_fs_dir;
6154 
6155 static void sysfs_display_ring(void *head, int size, int extend_desc,
6156 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6157 {
6158 	int i;
6159 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6160 	struct dma_desc *p = (struct dma_desc *)head;
6161 	dma_addr_t dma_addr;
6162 
6163 	for (i = 0; i < size; i++) {
6164 		if (extend_desc) {
6165 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6166 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6167 				   i, &dma_addr,
6168 				   le32_to_cpu(ep->basic.des0),
6169 				   le32_to_cpu(ep->basic.des1),
6170 				   le32_to_cpu(ep->basic.des2),
6171 				   le32_to_cpu(ep->basic.des3));
6172 			ep++;
6173 		} else {
6174 			dma_addr = dma_phy_addr + i * sizeof(*p);
6175 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6176 				   i, &dma_addr,
6177 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6178 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6179 			p++;
6180 		}
6181 		seq_printf(seq, "\n");
6182 	}
6183 }
6184 
6185 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6186 {
6187 	struct net_device *dev = seq->private;
6188 	struct stmmac_priv *priv = netdev_priv(dev);
6189 	u32 rx_count = priv->plat->rx_queues_to_use;
6190 	u32 tx_count = priv->plat->tx_queues_to_use;
6191 	u32 queue;
6192 
6193 	if ((dev->flags & IFF_UP) == 0)
6194 		return 0;
6195 
6196 	for (queue = 0; queue < rx_count; queue++) {
6197 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6198 
6199 		seq_printf(seq, "RX Queue %d:\n", queue);
6200 
6201 		if (priv->extend_desc) {
6202 			seq_printf(seq, "Extended descriptor ring:\n");
6203 			sysfs_display_ring((void *)rx_q->dma_erx,
6204 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6205 		} else {
6206 			seq_printf(seq, "Descriptor ring:\n");
6207 			sysfs_display_ring((void *)rx_q->dma_rx,
6208 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6209 		}
6210 	}
6211 
6212 	for (queue = 0; queue < tx_count; queue++) {
6213 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6214 
6215 		seq_printf(seq, "TX Queue %d:\n", queue);
6216 
6217 		if (priv->extend_desc) {
6218 			seq_printf(seq, "Extended descriptor ring:\n");
6219 			sysfs_display_ring((void *)tx_q->dma_etx,
6220 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6221 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6222 			seq_printf(seq, "Descriptor ring:\n");
6223 			sysfs_display_ring((void *)tx_q->dma_tx,
6224 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6225 		}
6226 	}
6227 
6228 	return 0;
6229 }
6230 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6231 
6232 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6233 {
6234 	static const char * const dwxgmac_timestamp_source[] = {
6235 		"None",
6236 		"Internal",
6237 		"External",
6238 		"Both",
6239 	};
6240 	static const char * const dwxgmac_safety_feature_desc[] = {
6241 		"No",
6242 		"All Safety Features with ECC and Parity",
6243 		"All Safety Features without ECC or Parity",
6244 		"All Safety Features with Parity Only",
6245 		"ECC Only",
6246 		"UNDEFINED",
6247 		"UNDEFINED",
6248 		"UNDEFINED",
6249 	};
6250 	struct net_device *dev = seq->private;
6251 	struct stmmac_priv *priv = netdev_priv(dev);
6252 
6253 	if (!priv->hw_cap_support) {
6254 		seq_printf(seq, "DMA HW features not supported\n");
6255 		return 0;
6256 	}
6257 
6258 	seq_printf(seq, "==============================\n");
6259 	seq_printf(seq, "\tDMA HW features\n");
6260 	seq_printf(seq, "==============================\n");
6261 
6262 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6263 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6264 	seq_printf(seq, "\t1000 Mbps: %s\n",
6265 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6266 	seq_printf(seq, "\tHalf duplex: %s\n",
6267 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6268 	if (priv->plat->has_xgmac) {
6269 		seq_printf(seq,
6270 			   "\tNumber of Additional MAC address registers: %d\n",
6271 			   priv->dma_cap.multi_addr);
6272 	} else {
6273 		seq_printf(seq, "\tHash Filter: %s\n",
6274 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6275 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6276 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6277 	}
6278 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6279 		   (priv->dma_cap.pcs) ? "Y" : "N");
6280 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6281 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6282 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6283 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6284 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6285 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6286 	seq_printf(seq, "\tRMON module: %s\n",
6287 		   (priv->dma_cap.rmon) ? "Y" : "N");
6288 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6289 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6290 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6291 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6292 	if (priv->plat->has_xgmac)
6293 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6294 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6295 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6296 		   (priv->dma_cap.eee) ? "Y" : "N");
6297 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6298 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6299 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6300 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6301 	    priv->plat->has_xgmac) {
6302 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6303 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6304 	} else {
6305 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6306 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6307 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6308 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6309 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6310 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6311 	}
6312 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6313 		   priv->dma_cap.number_rx_channel);
6314 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6315 		   priv->dma_cap.number_tx_channel);
6316 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6317 		   priv->dma_cap.number_rx_queues);
6318 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6319 		   priv->dma_cap.number_tx_queues);
6320 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6321 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6322 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6323 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6324 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6325 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6326 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6327 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6328 		   priv->dma_cap.pps_out_num);
6329 	seq_printf(seq, "\tSafety Features: %s\n",
6330 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6331 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6332 		   priv->dma_cap.frpsel ? "Y" : "N");
6333 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6334 		   priv->dma_cap.host_dma_width);
6335 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6336 		   priv->dma_cap.rssen ? "Y" : "N");
6337 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6338 		   priv->dma_cap.vlhash ? "Y" : "N");
6339 	seq_printf(seq, "\tSplit Header: %s\n",
6340 		   priv->dma_cap.sphen ? "Y" : "N");
6341 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6342 		   priv->dma_cap.vlins ? "Y" : "N");
6343 	seq_printf(seq, "\tDouble VLAN: %s\n",
6344 		   priv->dma_cap.dvlan ? "Y" : "N");
6345 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6346 		   priv->dma_cap.l3l4fnum);
6347 	seq_printf(seq, "\tARP Offloading: %s\n",
6348 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6349 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6350 		   priv->dma_cap.estsel ? "Y" : "N");
6351 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6352 		   priv->dma_cap.fpesel ? "Y" : "N");
6353 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6354 		   priv->dma_cap.tbssel ? "Y" : "N");
6355 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6356 		   priv->dma_cap.tbs_ch_num);
6357 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6358 		   priv->dma_cap.sgfsel ? "Y" : "N");
6359 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6360 		   BIT(priv->dma_cap.ttsfd) >> 1);
6361 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6362 		   priv->dma_cap.numtc);
6363 	seq_printf(seq, "\tDCB Feature: %s\n",
6364 		   priv->dma_cap.dcben ? "Y" : "N");
6365 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6366 		   priv->dma_cap.advthword ? "Y" : "N");
6367 	seq_printf(seq, "\tPTP Offload: %s\n",
6368 		   priv->dma_cap.ptoen ? "Y" : "N");
6369 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6370 		   priv->dma_cap.osten ? "Y" : "N");
6371 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6372 		   priv->dma_cap.pfcen ? "Y" : "N");
6373 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6374 		   BIT(priv->dma_cap.frpes) << 6);
6375 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6376 		   BIT(priv->dma_cap.frpbs) << 6);
6377 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6378 		   priv->dma_cap.frppipe_num);
6379 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6380 		   priv->dma_cap.nrvf_num ?
6381 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6382 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6383 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6384 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6385 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6386 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6387 		   priv->dma_cap.cbtisel ? "Y" : "N");
6388 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6389 		   priv->dma_cap.aux_snapshot_n);
6390 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6391 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6392 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6393 		   priv->dma_cap.edma ? "Y" : "N");
6394 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6395 		   priv->dma_cap.ediffc ? "Y" : "N");
6396 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6397 		   priv->dma_cap.vxn ? "Y" : "N");
6398 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6399 		   priv->dma_cap.dbgmem ? "Y" : "N");
6400 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6401 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6402 	return 0;
6403 }
6404 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6405 
6406 /* Use network device events to rename debugfs file entries.
6407  */
6408 static int stmmac_device_event(struct notifier_block *unused,
6409 			       unsigned long event, void *ptr)
6410 {
6411 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6412 	struct stmmac_priv *priv = netdev_priv(dev);
6413 
6414 	if (dev->netdev_ops != &stmmac_netdev_ops)
6415 		goto done;
6416 
6417 	switch (event) {
6418 	case NETDEV_CHANGENAME:
6419 		if (priv->dbgfs_dir)
6420 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6421 							 priv->dbgfs_dir,
6422 							 stmmac_fs_dir,
6423 							 dev->name);
6424 		break;
6425 	}
6426 done:
6427 	return NOTIFY_DONE;
6428 }
6429 
6430 static struct notifier_block stmmac_notifier = {
6431 	.notifier_call = stmmac_device_event,
6432 };
6433 
6434 static void stmmac_init_fs(struct net_device *dev)
6435 {
6436 	struct stmmac_priv *priv = netdev_priv(dev);
6437 
6438 	rtnl_lock();
6439 
6440 	/* Create per netdev entries */
6441 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6442 
6443 	/* Entry to report DMA RX/TX rings */
6444 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6445 			    &stmmac_rings_status_fops);
6446 
6447 	/* Entry to report the DMA HW features */
6448 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6449 			    &stmmac_dma_cap_fops);
6450 
6451 	rtnl_unlock();
6452 }
6453 
6454 static void stmmac_exit_fs(struct net_device *dev)
6455 {
6456 	struct stmmac_priv *priv = netdev_priv(dev);
6457 
6458 	debugfs_remove_recursive(priv->dbgfs_dir);
6459 }
6460 #endif /* CONFIG_DEBUG_FS */
6461 
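/* Bit-wise CRC-32 (reflected polynomial 0xedb88320) over the 12 VID bits
 * of a little-endian VLAN tag; the result indexes the VLAN hash filter
 * built in stmmac_vlan_update() below.
 */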
6462 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6463 {
6464 	unsigned char *data = (unsigned char *)&vid_le;
6465 	unsigned char data_byte = 0;
6466 	u32 crc = ~0x0;
6467 	u32 temp = 0;
6468 	int i, bits;
6469 
6470 	bits = get_bitmask_order(VLAN_VID_MASK);
6471 	for (i = 0; i < bits; i++) {
6472 		if ((i % 8) == 0)
6473 			data_byte = data[i / 8];
6474 
6475 		temp = ((crc & 1) ^ data_byte) & 1;
6476 		crc >>= 1;
6477 		data_byte >>= 1;
6478 
6479 		if (temp)
6480 			crc ^= 0xedb88320;
6481 	}
6482 
6483 	return crc;
6484 }
6485 
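/* Rebuild the VLAN filter from the active_vlans bitmap: each VID sets one
 * bit of a 16-bin hash, selected by the top 4 bits of its bit-reversed
 * CRC. Without VLAN hash support the code falls back to a single
 * perfect-match entry, which cannot cover more than one real VID.
 */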
6486 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6487 {
6488 	u32 crc, hash = 0;
6489 	u16 pmatch = 0;
6490 	int count = 0;
6491 	u16 vid = 0;
6492 
6493 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6494 		__le16 vid_le = cpu_to_le16(vid);
6495 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6496 		hash |= (1 << crc);
6497 		count++;
6498 	}
6499 
6500 	if (!priv->dma_cap.vlhash) {
6501 		if (count > 2) /* VID = 0 always passes filter */
6502 			return -EOPNOTSUPP;
6503 
6504 		pmatch = vid;
6505 		hash = 0;
6506 	}
6507 
6508 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6509 }
6510 
6511 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6512 {
6513 	struct stmmac_priv *priv = netdev_priv(ndev);
6514 	bool is_double = false;
6515 	int ret;
6516 
6517 	ret = pm_runtime_resume_and_get(priv->device);
6518 	if (ret < 0)
6519 		return ret;
6520 
6521 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6522 		is_double = true;
6523 
6524 	set_bit(vid, priv->active_vlans);
6525 	ret = stmmac_vlan_update(priv, is_double);
6526 	if (ret) {
6527 		clear_bit(vid, priv->active_vlans);
6528 		goto err_pm_put;
6529 	}
6530 
6531 	if (priv->hw->num_vlan) {
6532 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6533 		if (ret)
6534 			goto err_pm_put;
6535 	}
6536 err_pm_put:
6537 	pm_runtime_put(priv->device);
6538 
6539 	return ret;
6540 }
6541 
6542 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6543 {
6544 	struct stmmac_priv *priv = netdev_priv(ndev);
6545 	bool is_double = false;
6546 	int ret;
6547 
6548 	ret = pm_runtime_resume_and_get(priv->device);
6549 	if (ret < 0)
6550 		return ret;
6551 
6552 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6553 		is_double = true;
6554 
6555 	clear_bit(vid, priv->active_vlans);
6556 
6557 	if (priv->hw->num_vlan) {
6558 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6559 		if (ret)
6560 			goto del_vlan_error;
6561 	}
6562 
6563 	ret = stmmac_vlan_update(priv, is_double);
6564 
6565 del_vlan_error:
6566 	pm_runtime_put(priv->device);
6567 
6568 	return ret;
6569 }
6570 
6571 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6572 {
6573 	struct stmmac_priv *priv = netdev_priv(dev);
6574 
6575 	switch (bpf->command) {
6576 	case XDP_SETUP_PROG:
6577 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6578 	case XDP_SETUP_XSK_POOL:
6579 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6580 					     bpf->xsk.queue_id);
6581 	default:
6582 		return -EOPNOTSUPP;
6583 	}
6584 }
6585 
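/* .ndo_xdp_xmit handler: transmit a batch of XDP frames on the TX queue
 * mapped to the current CPU, under the netdev TX queue lock shared with
 * the slow path. Returns the number of frames actually queued; with
 * XDP_XMIT_FLUSH the DMA is kicked and the TX coalescing timer re-armed.
 */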
6586 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6587 			   struct xdp_frame **frames, u32 flags)
6588 {
6589 	struct stmmac_priv *priv = netdev_priv(dev);
6590 	int cpu = smp_processor_id();
6591 	struct netdev_queue *nq;
6592 	int i, nxmit = 0;
6593 	int queue;
6594 
6595 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6596 		return -ENETDOWN;
6597 
6598 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6599 		return -EINVAL;
6600 
6601 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6602 	nq = netdev_get_tx_queue(priv->dev, queue);
6603 
6604 	__netif_tx_lock(nq, cpu);
6605 	/* Avoids TX time-out as we are sharing with slow path */
6606 	txq_trans_cond_update(nq);
6607 
6608 	for (i = 0; i < num_frames; i++) {
6609 		int res;
6610 
6611 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6612 		if (res == STMMAC_XDP_CONSUMED)
6613 			break;
6614 
6615 		nxmit++;
6616 	}
6617 
6618 	if (flags & XDP_XMIT_FLUSH) {
6619 		stmmac_flush_tx_descriptors(priv, queue);
6620 		stmmac_tx_timer_arm(priv, queue);
6621 	}
6622 
6623 	__netif_tx_unlock(nq);
6624 
6625 	return nxmit;
6626 }
6627 
6628 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6629 {
6630 	struct stmmac_channel *ch = &priv->channel[queue];
6631 	unsigned long flags;
6632 
6633 	spin_lock_irqsave(&ch->lock, flags);
6634 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6635 	spin_unlock_irqrestore(&ch->lock, flags);
6636 
6637 	stmmac_stop_rx_dma(priv, queue);
6638 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6639 }
6640 
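/* Re-create a single RX queue from scratch: allocate and init its descriptor
 * ring, program the DMA channel, set the buffer size (the XSK frame size when
 * a pool is attached), then restart the DMA and its interrupt. Used, e.g., by
 * the XDP/XSK setup paths while the interface stays up.
 */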
6641 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6642 {
6643 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6644 	struct stmmac_channel *ch = &priv->channel[queue];
6645 	unsigned long flags;
6646 	u32 buf_size;
6647 	int ret;
6648 
6649 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6650 	if (ret) {
6651 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6652 		return;
6653 	}
6654 
6655 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6656 	if (ret) {
6657 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6658 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6659 		return;
6660 	}
6661 
6662 	stmmac_reset_rx_queue(priv, queue);
6663 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6664 
6665 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6666 			    rx_q->dma_rx_phy, rx_q->queue_index);
6667 
6668 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6669 			     sizeof(struct dma_desc));
6670 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6671 			       rx_q->rx_tail_addr, rx_q->queue_index);
6672 
6673 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6674 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6675 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6676 				      buf_size,
6677 				      rx_q->queue_index);
6678 	} else {
6679 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6680 				      priv->dma_conf.dma_buf_sz,
6681 				      rx_q->queue_index);
6682 	}
6683 
6684 	stmmac_start_rx_dma(priv, queue);
6685 
6686 	spin_lock_irqsave(&ch->lock, flags);
6687 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6688 	spin_unlock_irqrestore(&ch->lock, flags);
6689 }
6690 
6691 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6692 {
6693 	struct stmmac_channel *ch = &priv->channel[queue];
6694 	unsigned long flags;
6695 
6696 	spin_lock_irqsave(&ch->lock, flags);
6697 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6698 	spin_unlock_irqrestore(&ch->lock, flags);
6699 
6700 	stmmac_stop_tx_dma(priv, queue);
6701 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6702 }
6703 
6704 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6705 {
6706 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6707 	struct stmmac_channel *ch = &priv->channel[queue];
6708 	unsigned long flags;
6709 	int ret;
6710 
6711 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6712 	if (ret) {
6713 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6714 		return;
6715 	}
6716 
6717 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6718 	if (ret) {
6719 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6720 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6721 		return;
6722 	}
6723 
6724 	stmmac_reset_tx_queue(priv, queue);
6725 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6726 
6727 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6728 			    tx_q->dma_tx_phy, tx_q->queue_index);
6729 
6730 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6731 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6732 
6733 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6734 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6735 			       tx_q->tx_tail_addr, tx_q->queue_index);
6736 
6737 	stmmac_start_tx_dma(priv, queue);
6738 
6739 	spin_lock_irqsave(&ch->lock, flags);
6740 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6741 	spin_unlock_irqrestore(&ch->lock, flags);
6742 }
6743 
6744 void stmmac_xdp_release(struct net_device *dev)
6745 {
6746 	struct stmmac_priv *priv = netdev_priv(dev);
6747 	u32 chan;
6748 
6749 	/* Ensure tx function is not running */
6750 	netif_tx_disable(dev);
6751 
6752 	/* Disable NAPI process */
6753 	stmmac_disable_all_queues(priv);
6754 
6755 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6756 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6757 
6758 	/* Free the IRQ lines */
6759 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6760 
6761 	/* Stop TX/RX DMA channels */
6762 	stmmac_stop_all_dma(priv);
6763 
6764 	/* Release and free the Rx/Tx resources */
6765 	free_dma_desc_resources(priv, &priv->dma_conf);
6766 
6767 	/* Disable the MAC Rx/Tx */
6768 	stmmac_mac_set(priv, priv->ioaddr, false);
6769 
6770 	/* set trans_start so we don't get spurious
6771 	 * watchdogs during reset
6772 	 */
6773 	netif_trans_update(dev);
6774 	netif_carrier_off(dev);
6775 }
6776 
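/* Partial re-open used by the XDP/XSK reconfiguration paths: re-allocate and
 * program the DMA rings, channels and IRQs without going through a full
 * stmmac_open().
 */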
6777 int stmmac_xdp_open(struct net_device *dev)
6778 {
6779 	struct stmmac_priv *priv = netdev_priv(dev);
6780 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6781 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6782 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6783 	struct stmmac_rx_queue *rx_q;
6784 	struct stmmac_tx_queue *tx_q;
6785 	u32 buf_size;
6786 	bool sph_en;
6787 	u32 chan;
6788 	int ret;
6789 
6790 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6791 	if (ret < 0) {
6792 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6793 			   __func__);
6794 		goto dma_desc_error;
6795 	}
6796 
6797 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6798 	if (ret < 0) {
6799 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6800 			   __func__);
6801 		goto init_error;
6802 	}
6803 
6804 	stmmac_reset_queues_param(priv);
6805 
6806 	/* DMA CSR Channel configuration */
6807 	for (chan = 0; chan < dma_csr_ch; chan++) {
6808 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6809 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6810 	}
6811 
6812 	/* Adjust Split header */
6813 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6814 
6815 	/* DMA RX Channel Configuration */
6816 	for (chan = 0; chan < rx_cnt; chan++) {
6817 		rx_q = &priv->dma_conf.rx_queue[chan];
6818 
6819 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6820 				    rx_q->dma_rx_phy, chan);
6821 
6822 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6823 				     (rx_q->buf_alloc_num *
6824 				      sizeof(struct dma_desc));
6825 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6826 				       rx_q->rx_tail_addr, chan);
6827 
6828 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6829 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6830 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6831 					      buf_size,
6832 					      rx_q->queue_index);
6833 		} else {
6834 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6835 					      priv->dma_conf.dma_buf_sz,
6836 					      rx_q->queue_index);
6837 		}
6838 
6839 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6840 	}
6841 
6842 	/* DMA TX Channel Configuration */
6843 	for (chan = 0; chan < tx_cnt; chan++) {
6844 		tx_q = &priv->dma_conf.tx_queue[chan];
6845 
6846 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6847 				    tx_q->dma_tx_phy, chan);
6848 
6849 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6850 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6851 				       tx_q->tx_tail_addr, chan);
6852 
6853 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6854 		tx_q->txtimer.function = stmmac_tx_timer;
6855 	}
6856 
6857 	/* Enable the MAC Rx/Tx */
6858 	stmmac_mac_set(priv, priv->ioaddr, true);
6859 
6860 	/* Start Rx & Tx DMA Channels */
6861 	stmmac_start_all_dma(priv);
6862 
6863 	ret = stmmac_request_irq(dev);
6864 	if (ret)
6865 		goto irq_error;
6866 
6867 	/* Enable NAPI process */
6868 	stmmac_enable_all_queues(priv);
6869 	netif_carrier_on(dev);
6870 	netif_tx_start_all_queues(dev);
6871 	stmmac_enable_all_dma_irq(priv);
6872 
6873 	return 0;
6874 
6875 irq_error:
6876 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6877 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6878 
6879 	stmmac_hw_teardown(dev);
6880 init_error:
6881 	free_dma_desc_resources(priv, &priv->dma_conf);
6882 dma_desc_error:
6883 	return ret;
6884 }
6885 
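/* ndo_xsk_wakeup callback: kick the combined rx/tx NAPI of the channel
 * backing an AF_XDP socket so that pending descriptors get processed.
 */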
6886 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6887 {
6888 	struct stmmac_priv *priv = netdev_priv(dev);
6889 	struct stmmac_rx_queue *rx_q;
6890 	struct stmmac_tx_queue *tx_q;
6891 	struct stmmac_channel *ch;
6892 
6893 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6894 	    !netif_carrier_ok(priv->dev))
6895 		return -ENETDOWN;
6896 
6897 	if (!stmmac_xdp_is_enabled(priv))
6898 		return -EINVAL;
6899 
6900 	if (queue >= priv->plat->rx_queues_to_use ||
6901 	    queue >= priv->plat->tx_queues_to_use)
6902 		return -EINVAL;
6903 
6904 	rx_q = &priv->dma_conf.rx_queue[queue];
6905 	tx_q = &priv->dma_conf.tx_queue[queue];
6906 	ch = &priv->channel[queue];
6907 
6908 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6909 		return -EINVAL;
6910 
6911 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6912 		/* EQoS does not have per-DMA channel SW interrupt,
6913 		 * so we schedule RX Napi straight-away.
6914 		 */
6915 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6916 			__napi_schedule(&ch->rxtx_napi);
6917 	}
6918 
6919 	return 0;
6920 }
6921 
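/* ndo_get_stats64 callback: fold the per-queue counters into the generic
 * netdev stats, using the u64_stats seqcount retry loops so 64-bit counters
 * are read consistently on 32-bit systems.
 */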
6922 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6923 {
6924 	struct stmmac_priv *priv = netdev_priv(dev);
6925 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6926 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6927 	unsigned int start;
6928 	int q;
6929 
6930 	for (q = 0; q < tx_cnt; q++) {
6931 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6932 		u64 tx_packets;
6933 		u64 tx_bytes;
6934 
6935 		do {
6936 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
6937 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
6938 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
6939 		do {
6940 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
6941 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
6942 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
6943 
6944 		stats->tx_packets += tx_packets;
6945 		stats->tx_bytes += tx_bytes;
6946 	}
6947 
6948 	for (q = 0; q < rx_cnt; q++) {
6949 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6950 		u64 rx_packets;
6951 		u64 rx_bytes;
6952 
6953 		do {
6954 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
6955 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
6956 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
6957 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
6958 
6959 		stats->rx_packets += rx_packets;
6960 		stats->rx_bytes += rx_bytes;
6961 	}
6962 
6963 	stats->rx_dropped = priv->xstats.rx_dropped;
6964 	stats->rx_errors = priv->xstats.rx_errors;
6965 	stats->tx_dropped = priv->xstats.tx_dropped;
6966 	stats->tx_errors = priv->xstats.tx_errors;
6967 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6968 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6969 	stats->rx_length_errors = priv->xstats.rx_length;
6970 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6971 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6972 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6973 }
6974 
6975 static const struct net_device_ops stmmac_netdev_ops = {
6976 	.ndo_open = stmmac_open,
6977 	.ndo_start_xmit = stmmac_xmit,
6978 	.ndo_stop = stmmac_release,
6979 	.ndo_change_mtu = stmmac_change_mtu,
6980 	.ndo_fix_features = stmmac_fix_features,
6981 	.ndo_set_features = stmmac_set_features,
6982 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6983 	.ndo_tx_timeout = stmmac_tx_timeout,
6984 	.ndo_eth_ioctl = stmmac_ioctl,
6985 	.ndo_get_stats64 = stmmac_get_stats64,
6986 	.ndo_setup_tc = stmmac_setup_tc,
6987 	.ndo_select_queue = stmmac_select_queue,
6988 	.ndo_set_mac_address = stmmac_set_mac_address,
6989 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6990 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6991 	.ndo_bpf = stmmac_bpf,
6992 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6993 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6994 };
6995 
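/* Recover from a requested adapter reset (e.g. after a fatal error) by
 * closing and re-opening the device under the RTNL lock.
 */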
6996 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6997 {
6998 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6999 		return;
7000 	if (test_bit(STMMAC_DOWN, &priv->state))
7001 		return;
7002 
7003 	netdev_err(priv->dev, "Reset adapter.\n");
7004 
7005 	rtnl_lock();
7006 	netif_trans_update(priv->dev);
7007 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7008 		usleep_range(1000, 2000);
7009 
7010 	set_bit(STMMAC_DOWN, &priv->state);
7011 	dev_close(priv->dev);
7012 	dev_open(priv->dev, NULL);
7013 	clear_bit(STMMAC_DOWN, &priv->state);
7014 	clear_bit(STMMAC_RESETING, &priv->state);
7015 	rtnl_unlock();
7016 }
7017 
7018 static void stmmac_service_task(struct work_struct *work)
7019 {
7020 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7021 			service_task);
7022 
7023 	stmmac_reset_subtask(priv);
7024 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7025 }
7026 
7027 /**
7028  *  stmmac_hw_init - Init the MAC device
7029  *  @priv: driver private structure
7030  *  Description: this function configures the MAC device according to
7031  *  some platform parameters or the HW capability register. It prepares the
7032  *  driver to use either ring or chain mode and to set up either enhanced or
7033  *  normal descriptors.
7034  */
7035 static int stmmac_hw_init(struct stmmac_priv *priv)
7036 {
7037 	int ret;
7038 
7039 	/* dwmac-sun8i only works in chain mode */
7040 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7041 		chain_mode = 1;
7042 	priv->chain_mode = chain_mode;
7043 
7044 	/* Initialize HW Interface */
7045 	ret = stmmac_hwif_init(priv);
7046 	if (ret)
7047 		return ret;
7048 
7049 	/* Get the HW capabilities (available on GMAC cores newer than 3.50a) */
7050 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7051 	if (priv->hw_cap_support) {
7052 		dev_info(priv->device, "DMA HW capability register supported\n");
7053 
7054 		/* Some gmac/dma configuration fields that are passed
7055 		 * through the platform (e.g. enh_desc, tx_coe) can be
7056 		 * overridden with the values from the HW capability
7057 		 * register (if supported).
7058 		 */
7059 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7060 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7061 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7062 		priv->hw->pmt = priv->plat->pmt;
7063 		if (priv->dma_cap.hash_tb_sz) {
7064 			priv->hw->multicast_filter_bins =
7065 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7066 			priv->hw->mcast_bits_log2 =
7067 					ilog2(priv->hw->multicast_filter_bins);
7068 		}
7069 
7070 		/* TXCOE doesn't work in thresh DMA mode */
7071 		if (priv->plat->force_thresh_dma_mode)
7072 			priv->plat->tx_coe = 0;
7073 		else
7074 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7075 
7076 		/* In case of GMAC4 rx_coe is from HW cap register. */
7077 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7078 
7079 		if (priv->dma_cap.rx_coe_type2)
7080 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7081 		else if (priv->dma_cap.rx_coe_type1)
7082 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7083 
7084 	} else {
7085 		dev_info(priv->device, "No HW DMA feature register supported\n");
7086 	}
7087 
7088 	if (priv->plat->rx_coe) {
7089 		priv->hw->rx_csum = priv->plat->rx_coe;
7090 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7091 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7092 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7093 	}
7094 	if (priv->plat->tx_coe)
7095 		dev_info(priv->device, "TX Checksum insertion supported\n");
7096 
7097 	if (priv->plat->pmt) {
7098 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7099 		device_set_wakeup_capable(priv->device, 1);
7100 	}
7101 
7102 	if (priv->dma_cap.tsoen)
7103 		dev_info(priv->device, "TSO supported\n");
7104 
7105 	priv->hw->vlan_fail_q_en =
7106 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7107 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7108 
7109 	/* Run HW quirks, if any */
7110 	if (priv->hwif_quirks) {
7111 		ret = priv->hwif_quirks(priv);
7112 		if (ret)
7113 			return ret;
7114 	}
7115 
7116 	/* Rx Watchdog is available in cores newer than 3.40.
7117 	 * In some cases, for example on buggy HW, this feature
7118 	 * has to be disabled; this can be done by passing the
7119 	 * riwt_off field from the platform.
7120 	 */
7121 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7122 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7123 		priv->use_riwt = 1;
7124 		dev_info(priv->device,
7125 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7126 	}
7127 
7128 	return 0;
7129 }
7130 
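/* Register the per-channel NAPI contexts: an RX and a TX instance per
 * channel, plus a combined rx/tx instance (used by the XDP/XSK zero-copy
 * path) where a channel has both an RX and a TX queue.
 */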
7131 static void stmmac_napi_add(struct net_device *dev)
7132 {
7133 	struct stmmac_priv *priv = netdev_priv(dev);
7134 	u32 queue, maxq;
7135 
7136 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7137 
7138 	for (queue = 0; queue < maxq; queue++) {
7139 		struct stmmac_channel *ch = &priv->channel[queue];
7140 
7141 		ch->priv_data = priv;
7142 		ch->index = queue;
7143 		spin_lock_init(&ch->lock);
7144 
7145 		if (queue < priv->plat->rx_queues_to_use) {
7146 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7147 		}
7148 		if (queue < priv->plat->tx_queues_to_use) {
7149 			netif_napi_add_tx(dev, &ch->tx_napi,
7150 					  stmmac_napi_poll_tx);
7151 		}
7152 		if (queue < priv->plat->rx_queues_to_use &&
7153 		    queue < priv->plat->tx_queues_to_use) {
7154 			netif_napi_add(dev, &ch->rxtx_napi,
7155 				       stmmac_napi_poll_rxtx);
7156 		}
7157 	}
7158 }
7159 
7160 static void stmmac_napi_del(struct net_device *dev)
7161 {
7162 	struct stmmac_priv *priv = netdev_priv(dev);
7163 	u32 queue, maxq;
7164 
7165 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7166 
7167 	for (queue = 0; queue < maxq; queue++) {
7168 		struct stmmac_channel *ch = &priv->channel[queue];
7169 
7170 		if (queue < priv->plat->rx_queues_to_use)
7171 			netif_napi_del(&ch->rx_napi);
7172 		if (queue < priv->plat->tx_queues_to_use)
7173 			netif_napi_del(&ch->tx_napi);
7174 		if (queue < priv->plat->rx_queues_to_use &&
7175 		    queue < priv->plat->tx_queues_to_use) {
7176 			netif_napi_del(&ch->rxtx_napi);
7177 		}
7178 	}
7179 }
7180 
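/* Reconfigure the number of RX/TX queues in use (e.g. from the ethtool
 * channels path): tear the interface down if it is running, update the
 * queue counts, RSS table and NAPI contexts, then bring it back up.
 */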
7181 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7182 {
7183 	struct stmmac_priv *priv = netdev_priv(dev);
7184 	int ret = 0, i;
7185 	int max_speed;
7186 
7187 	if (netif_running(dev))
7188 		stmmac_release(dev);
7189 
7190 	stmmac_napi_del(dev);
7191 
7192 	priv->plat->rx_queues_to_use = rx_cnt;
7193 	priv->plat->tx_queues_to_use = tx_cnt;
7194 	if (!netif_is_rxfh_configured(dev))
7195 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7196 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7197 									rx_cnt);
7198 
7199 	stmmac_mac_phylink_get_caps(priv);
7200 
7201 	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
7202 
7203 	max_speed = priv->plat->max_speed;
7204 	if (max_speed)
7205 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
7206 
7207 	stmmac_napi_add(dev);
7208 
7209 	if (netif_running(dev))
7210 		ret = stmmac_open(dev);
7211 
7212 	return ret;
7213 }
7214 
7215 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7216 {
7217 	struct stmmac_priv *priv = netdev_priv(dev);
7218 	int ret = 0;
7219 
7220 	if (netif_running(dev))
7221 		stmmac_release(dev);
7222 
7223 	priv->dma_conf.dma_rx_size = rx_size;
7224 	priv->dma_conf.dma_tx_size = tx_size;
7225 
7226 	if (netif_running(dev))
7227 		ret = stmmac_open(dev);
7228 
7229 	return ret;
7230 }
7231 
7232 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7233 static void stmmac_fpe_lp_task(struct work_struct *work)
7234 {
7235 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7236 						fpe_task);
7237 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7238 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7239 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7240 	bool *hs_enable = &fpe_cfg->hs_enable;
7241 	bool *enable = &fpe_cfg->enable;
7242 	int retries = 20;
7243 
7244 	while (retries-- > 0) {
7245 		/* Bail out immediately if FPE handshake is OFF */
7246 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7247 			break;
7248 
7249 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7250 		    *lp_state == FPE_STATE_ENTERING_ON) {
7251 			stmmac_fpe_configure(priv, priv->ioaddr,
7252 					     fpe_cfg,
7253 					     priv->plat->tx_queues_to_use,
7254 					     priv->plat->rx_queues_to_use,
7255 					     *enable);
7256 
7257 			netdev_info(priv->dev, "configured FPE\n");
7258 
7259 			*lo_state = FPE_STATE_ON;
7260 			*lp_state = FPE_STATE_ON;
7261 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7262 			break;
7263 		}
7264 
7265 		if ((*lo_state == FPE_STATE_CAPABLE ||
7266 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7267 		     *lp_state != FPE_STATE_ON) {
7268 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7269 				    *lo_state, *lp_state);
7270 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7271 						fpe_cfg,
7272 						MPACKET_VERIFY);
7273 		}
7274 		/* Sleep then retry */
7275 		msleep(500);
7276 	}
7277 
7278 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7279 }
7280 
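/* Start or stop the Frame Preemption (FPE) verification handshake: when
 * enabling, send a verify mPacket to the link partner; when disabling,
 * reset both the local and link-partner FPE states.
 */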
7281 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7282 {
7283 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7284 		if (enable) {
7285 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7286 						priv->plat->fpe_cfg,
7287 						MPACKET_VERIFY);
7288 		} else {
7289 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7290 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7291 		}
7292 
7293 		priv->plat->fpe_cfg->hs_enable = enable;
7294 	}
7295 }
7296 
7297 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7298 {
7299 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7300 	struct dma_desc *desc_contains_ts = ctx->desc;
7301 	struct stmmac_priv *priv = ctx->priv;
7302 	struct dma_desc *ndesc = ctx->ndesc;
7303 	struct dma_desc *desc = ctx->desc;
7304 	u64 ns = 0;
7305 
7306 	if (!priv->hwts_rx_en)
7307 		return -ENODATA;
7308 
7309 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7310 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7311 		desc_contains_ts = ndesc;
7312 
7313 	/* Check if timestamp is available */
7314 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7315 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7316 		ns -= priv->plat->cdc_error_adj;
7317 		*timestamp = ns_to_ktime(ns);
7318 		return 0;
7319 	}
7320 
7321 	return -ENODATA;
7322 }
7323 
7324 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7325 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7326 };
7327 
7328 /**
7329  * stmmac_dvr_probe
7330  * @device: device pointer
7331  * @plat_dat: platform data pointer
7332  * @res: stmmac resource pointer
7333  * Description: this is the main probe function; it calls
7334  * alloc_etherdev and sets up the private structure.
7335  * Return:
7336  * 0 on success, a negative errno otherwise.
7337  */
7338 int stmmac_dvr_probe(struct device *device,
7339 		     struct plat_stmmacenet_data *plat_dat,
7340 		     struct stmmac_resources *res)
7341 {
7342 	struct net_device *ndev = NULL;
7343 	struct stmmac_priv *priv;
7344 	u32 rxq;
7345 	int i, ret = 0;
7346 
7347 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7348 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7349 	if (!ndev)
7350 		return -ENOMEM;
7351 
7352 	SET_NETDEV_DEV(ndev, device);
7353 
7354 	priv = netdev_priv(ndev);
7355 	priv->device = device;
7356 	priv->dev = ndev;
7357 
7358 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7359 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7360 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7361 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7362 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7363 	}
7364 
7365 	priv->xstats.pcpu_stats =
7366 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7367 	if (!priv->xstats.pcpu_stats)
7368 		return -ENOMEM;
7369 
7370 	stmmac_set_ethtool_ops(ndev);
7371 	priv->pause = pause;
7372 	priv->plat = plat_dat;
7373 	priv->ioaddr = res->addr;
7374 	priv->dev->base_addr = (unsigned long)res->addr;
7375 	priv->plat->dma_cfg->multi_msi_en =
7376 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7377 
7378 	priv->dev->irq = res->irq;
7379 	priv->wol_irq = res->wol_irq;
7380 	priv->lpi_irq = res->lpi_irq;
7381 	priv->sfty_ce_irq = res->sfty_ce_irq;
7382 	priv->sfty_ue_irq = res->sfty_ue_irq;
7383 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7384 		priv->rx_irq[i] = res->rx_irq[i];
7385 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7386 		priv->tx_irq[i] = res->tx_irq[i];
7387 
7388 	if (!is_zero_ether_addr(res->mac))
7389 		eth_hw_addr_set(priv->dev, res->mac);
7390 
7391 	dev_set_drvdata(device, priv->dev);
7392 
7393 	/* Verify driver arguments */
7394 	stmmac_verify_args();
7395 
7396 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7397 	if (!priv->af_xdp_zc_qps)
7398 		return -ENOMEM;
7399 
7400 	/* Allocate workqueue */
7401 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7402 	if (!priv->wq) {
7403 		dev_err(priv->device, "failed to create workqueue\n");
7404 		ret = -ENOMEM;
7405 		goto error_wq_init;
7406 	}
7407 
7408 	INIT_WORK(&priv->service_task, stmmac_service_task);
7409 
7410 	/* Initialize Link Partner FPE workqueue */
7411 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7412 
7413 	/* Override with kernel parameters if supplied XXX CRS XXX
7414 	 * this needs to have multiple instances
7415 	 */
7416 	if ((phyaddr >= 0) && (phyaddr <= 31))
7417 		priv->plat->phy_addr = phyaddr;
7418 
7419 	if (priv->plat->stmmac_rst) {
7420 		ret = reset_control_assert(priv->plat->stmmac_rst);
7421 		reset_control_deassert(priv->plat->stmmac_rst);
7422 		/* Some reset controllers have only a reset callback instead of
7423 		 * an assert + deassert callback pair.
7424 		 */
7425 		if (ret == -ENOTSUPP)
7426 			reset_control_reset(priv->plat->stmmac_rst);
7427 	}
7428 
7429 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7430 	if (ret == -ENOTSUPP)
7431 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7432 			ERR_PTR(ret));
7433 
7434 	/* Wait a bit for the reset to take effect */
7435 	udelay(10);
7436 
7437 	/* Init MAC and get the capabilities */
7438 	ret = stmmac_hw_init(priv);
7439 	if (ret)
7440 		goto error_hw_init;
7441 
7442 	/* Only DWMAC cores from version 5.20 onwards support HW descriptor prefetch.
7443 	 */
7444 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7445 		priv->plat->dma_cfg->dche = false;
7446 
7447 	stmmac_check_ether_addr(priv);
7448 
7449 	ndev->netdev_ops = &stmmac_netdev_ops;
7450 
7451 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7452 
7453 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7454 			    NETIF_F_RXCSUM;
7455 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7456 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7457 
7458 	ret = stmmac_tc_init(priv, priv);
7459 	if (!ret) {
7460 		ndev->hw_features |= NETIF_F_HW_TC;
7461 	}
7462 
7463 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7464 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7465 		if (priv->plat->has_gmac4)
7466 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7467 		priv->tso = true;
7468 		dev_info(priv->device, "TSO feature enabled\n");
7469 	}
7470 
7471 	if (priv->dma_cap.sphen &&
7472 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7473 		ndev->hw_features |= NETIF_F_GRO;
7474 		priv->sph_cap = true;
7475 		priv->sph = priv->sph_cap;
7476 		dev_info(priv->device, "SPH feature enabled\n");
7477 	}
7478 
7479 	/* Ideally our host DMA address width is the same as for the
7480 	 * device. However, it may differ and then we have to use our
7481 	 * host DMA width for allocation and the device DMA width for
7482 	 * register handling.
7483 	 */
7484 	if (priv->plat->host_dma_width)
7485 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7486 	else
7487 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7488 
7489 	if (priv->dma_cap.host_dma_width) {
7490 		ret = dma_set_mask_and_coherent(device,
7491 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7492 		if (!ret) {
7493 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7494 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7495 
7496 			/*
7497 			 * If more than 32 bits can be addressed, make sure to
7498 			 * enable enhanced addressing mode.
7499 			 */
7500 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7501 				priv->plat->dma_cfg->eame = true;
7502 		} else {
7503 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7504 			if (ret) {
7505 				dev_err(priv->device, "Failed to set DMA Mask\n");
7506 				goto error_hw_init;
7507 			}
7508 
7509 			priv->dma_cap.host_dma_width = 32;
7510 		}
7511 	}
7512 
7513 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7514 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7515 #ifdef STMMAC_VLAN_TAG_USED
7516 	/* Both mac100 and gmac support receive VLAN tag detection */
7517 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7518 	if (priv->dma_cap.vlhash) {
7519 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7520 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7521 	}
7522 	if (priv->dma_cap.vlins) {
7523 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7524 		if (priv->dma_cap.dvlan)
7525 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7526 	}
7527 #endif
7528 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7529 
7530 	priv->xstats.threshold = tc;
7531 
7532 	/* Initialize RSS */
7533 	rxq = priv->plat->rx_queues_to_use;
7534 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7535 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7536 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7537 
7538 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7539 		ndev->features |= NETIF_F_RXHASH;
7540 
7541 	ndev->vlan_features |= ndev->features;
7542 	/* TSO doesn't work on VLANs yet */
7543 	ndev->vlan_features &= ~NETIF_F_TSO;
7544 
7545 	/* MTU range: 46 - hw-specific max */
7546 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7547 	if (priv->plat->has_xgmac)
7548 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7549 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7550 		ndev->max_mtu = JUMBO_LEN;
7551 	else
7552 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7553 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7554 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7555 	 */
7556 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7557 	    (priv->plat->maxmtu >= ndev->min_mtu))
7558 		ndev->max_mtu = priv->plat->maxmtu;
7559 	else if (priv->plat->maxmtu < ndev->min_mtu)
7560 		dev_warn(priv->device,
7561 			 "%s: warning: maxmtu having invalid value (%d)\n",
7562 			 __func__, priv->plat->maxmtu);
7563 
7564 	if (flow_ctrl)
7565 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7566 
7567 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7568 
7569 	/* Setup channels NAPI */
7570 	stmmac_napi_add(ndev);
7571 
7572 	mutex_init(&priv->lock);
7573 
7574 	/* If a specific clk_csr value is passed from the platform
7575 	 * this means that the CSR Clock Range selection cannot be
7576 	 * changed at run-time and it is fixed. Otherwise the driver
7577 	 * will try to set the MDC clock dynamically according to the
7578 	 * actual csr clock input.
7579 	 */
7580 	if (priv->plat->clk_csr >= 0)
7581 		priv->clk_csr = priv->plat->clk_csr;
7582 	else
7583 		stmmac_clk_csr_set(priv);
7584 
7585 	stmmac_check_pcs_mode(priv);
7586 
7587 	pm_runtime_get_noresume(device);
7588 	pm_runtime_set_active(device);
7589 	if (!pm_runtime_enabled(device))
7590 		pm_runtime_enable(device);
7591 
7592 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7593 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7594 		/* MDIO bus Registration */
7595 		ret = stmmac_mdio_register(ndev);
7596 		if (ret < 0) {
7597 			dev_err_probe(priv->device, ret,
7598 				      "%s: MDIO bus (id: %d) registration failed\n",
7599 				      __func__, priv->plat->bus_id);
7600 			goto error_mdio_register;
7601 		}
7602 	}
7603 
7604 	if (priv->plat->speed_mode_2500)
7605 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7606 
7607 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7608 		ret = stmmac_xpcs_setup(priv->mii);
7609 		if (ret)
7610 			goto error_xpcs_setup;
7611 	}
7612 
7613 	ret = stmmac_phy_setup(priv);
7614 	if (ret) {
7615 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7616 		goto error_phy_setup;
7617 	}
7618 
7619 	ret = register_netdev(ndev);
7620 	if (ret) {
7621 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7622 			__func__, ret);
7623 		goto error_netdev_register;
7624 	}
7625 
7626 #ifdef CONFIG_DEBUG_FS
7627 	stmmac_init_fs(ndev);
7628 #endif
7629 
7630 	if (priv->plat->dump_debug_regs)
7631 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7632 
7633 	/* Let pm_runtime_put() disable the clocks.
7634 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7635 	 */
7636 	pm_runtime_put(device);
7637 
7638 	return ret;
7639 
7640 error_netdev_register:
7641 	phylink_destroy(priv->phylink);
7642 error_xpcs_setup:
7643 error_phy_setup:
7644 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7645 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7646 		stmmac_mdio_unregister(ndev);
7647 error_mdio_register:
7648 	stmmac_napi_del(ndev);
7649 error_hw_init:
7650 	destroy_workqueue(priv->wq);
7651 error_wq_init:
7652 	bitmap_free(priv->af_xdp_zc_qps);
7653 
7654 	return ret;
7655 }
7656 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7657 
7658 /**
7659  * stmmac_dvr_remove
7660  * @dev: device pointer
7661  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7662  * changes the link status and releases the DMA descriptor rings.
7663  */
7664 void stmmac_dvr_remove(struct device *dev)
7665 {
7666 	struct net_device *ndev = dev_get_drvdata(dev);
7667 	struct stmmac_priv *priv = netdev_priv(ndev);
7668 
7669 	netdev_info(priv->dev, "%s: removing driver", __func__);
7670 
7671 	pm_runtime_get_sync(dev);
7672 
7673 	stmmac_stop_all_dma(priv);
7674 	stmmac_mac_set(priv, priv->ioaddr, false);
7675 	netif_carrier_off(ndev);
7676 	unregister_netdev(ndev);
7677 
7678 #ifdef CONFIG_DEBUG_FS
7679 	stmmac_exit_fs(ndev);
7680 #endif
7681 	phylink_destroy(priv->phylink);
7682 	if (priv->plat->stmmac_rst)
7683 		reset_control_assert(priv->plat->stmmac_rst);
7684 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7685 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7686 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7687 		stmmac_mdio_unregister(ndev);
7688 	destroy_workqueue(priv->wq);
7689 	mutex_destroy(&priv->lock);
7690 	bitmap_free(priv->af_xdp_zc_qps);
7691 
7692 	pm_runtime_disable(dev);
7693 	pm_runtime_put_noidle(dev);
7694 }
7695 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7696 
7697 /**
7698  * stmmac_suspend - suspend callback
7699  * @dev: device pointer
7700  * Description: this is the function to suspend the device; it is called
7701  * by the platform driver to stop the network queue, program the PMT
7702  * register (for WoL) and release the driver resources.
7703  */
7704 int stmmac_suspend(struct device *dev)
7705 {
7706 	struct net_device *ndev = dev_get_drvdata(dev);
7707 	struct stmmac_priv *priv = netdev_priv(ndev);
7708 	u32 chan;
7709 
7710 	if (!ndev || !netif_running(ndev))
7711 		return 0;
7712 
7713 	mutex_lock(&priv->lock);
7714 
7715 	netif_device_detach(ndev);
7716 
7717 	stmmac_disable_all_queues(priv);
7718 
7719 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7720 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7721 
7722 	if (priv->eee_enabled) {
7723 		priv->tx_path_in_lpi_mode = false;
7724 		del_timer_sync(&priv->eee_ctrl_timer);
7725 	}
7726 
7727 	/* Stop TX/RX DMA */
7728 	stmmac_stop_all_dma(priv);
7729 
7730 	if (priv->plat->serdes_powerdown)
7731 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7732 
7733 	/* Enable Power down mode by programming the PMT regs */
7734 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7735 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7736 		priv->irq_wake = 1;
7737 	} else {
7738 		stmmac_mac_set(priv, priv->ioaddr, false);
7739 		pinctrl_pm_select_sleep_state(priv->device);
7740 	}
7741 
7742 	mutex_unlock(&priv->lock);
7743 
7744 	rtnl_lock();
7745 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7746 		phylink_suspend(priv->phylink, true);
7747 	} else {
7748 		if (device_may_wakeup(priv->device))
7749 			phylink_speed_down(priv->phylink, false);
7750 		phylink_suspend(priv->phylink, false);
7751 	}
7752 	rtnl_unlock();
7753 
7754 	if (priv->dma_cap.fpesel) {
7755 		/* Disable FPE */
7756 		stmmac_fpe_configure(priv, priv->ioaddr,
7757 				     priv->plat->fpe_cfg,
7758 				     priv->plat->tx_queues_to_use,
7759 				     priv->plat->rx_queues_to_use, false);
7760 
7761 		stmmac_fpe_handshake(priv, false);
7762 		stmmac_fpe_stop_wq(priv);
7763 	}
7764 
7765 	priv->speed = SPEED_UNKNOWN;
7766 	return 0;
7767 }
7768 EXPORT_SYMBOL_GPL(stmmac_suspend);
7769 
7770 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7771 {
7772 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7773 
7774 	rx_q->cur_rx = 0;
7775 	rx_q->dirty_rx = 0;
7776 }
7777 
7778 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7779 {
7780 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7781 
7782 	tx_q->cur_tx = 0;
7783 	tx_q->dirty_tx = 0;
7784 	tx_q->mss = 0;
7785 
7786 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7787 }
7788 
7789 /**
7790  * stmmac_reset_queues_param - reset queue parameters
7791  * @priv: device pointer
7792  */
7793 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7794 {
7795 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7796 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7797 	u32 queue;
7798 
7799 	for (queue = 0; queue < rx_cnt; queue++)
7800 		stmmac_reset_rx_queue(priv, queue);
7801 
7802 	for (queue = 0; queue < tx_cnt; queue++)
7803 		stmmac_reset_tx_queue(priv, queue);
7804 }
7805 
7806 /**
7807  * stmmac_resume - resume callback
7808  * @dev: device pointer
7809  * Description: on resume, this function is invoked to bring the DMA and CORE
7810  * back to a usable state.
7811  */
7812 int stmmac_resume(struct device *dev)
7813 {
7814 	struct net_device *ndev = dev_get_drvdata(dev);
7815 	struct stmmac_priv *priv = netdev_priv(ndev);
7816 	int ret;
7817 
7818 	if (!netif_running(ndev))
7819 		return 0;
7820 
7821 	/* The Power Down bit in the PM register is cleared
7822 	 * automatically as soon as a magic packet or a Wake-up frame
7823 	 * is received. Even so, it's better to manually clear
7824 	 * this bit because it can generate problems while resuming
7825 	 * from other devices (e.g. serial console).
7826 	 */
7827 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7828 		mutex_lock(&priv->lock);
7829 		stmmac_pmt(priv, priv->hw, 0);
7830 		mutex_unlock(&priv->lock);
7831 		priv->irq_wake = 0;
7832 	} else {
7833 		pinctrl_pm_select_default_state(priv->device);
7834 		/* reset the phy so that it's ready */
7835 		if (priv->mii)
7836 			stmmac_mdio_reset(priv->mii);
7837 	}
7838 
7839 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7840 	    priv->plat->serdes_powerup) {
7841 		ret = priv->plat->serdes_powerup(ndev,
7842 						 priv->plat->bsp_priv);
7843 
7844 		if (ret < 0)
7845 			return ret;
7846 	}
7847 
7848 	rtnl_lock();
7849 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7850 		phylink_resume(priv->phylink);
7851 	} else {
7852 		phylink_resume(priv->phylink);
7853 		if (device_may_wakeup(priv->device))
7854 			phylink_speed_up(priv->phylink);
7855 	}
7856 	rtnl_unlock();
7857 
7858 	rtnl_lock();
7859 	mutex_lock(&priv->lock);
7860 
7861 	stmmac_reset_queues_param(priv);
7862 
7863 	stmmac_free_tx_skbufs(priv);
7864 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7865 
7866 	stmmac_hw_setup(ndev, false);
7867 	stmmac_init_coalesce(priv);
7868 	stmmac_set_rx_mode(ndev);
7869 
7870 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7871 
7872 	stmmac_enable_all_queues(priv);
7873 	stmmac_enable_all_dma_irq(priv);
7874 
7875 	mutex_unlock(&priv->lock);
7876 	rtnl_unlock();
7877 
7878 	netif_device_attach(ndev);
7879 
7880 	return 0;
7881 }
7882 EXPORT_SYMBOL_GPL(stmmac_resume);
7883 
7884 #ifndef MODULE
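/* Parse the "stmmaceth=" boot command line, a comma-separated list of
 * option:value pairs, e.g. "stmmaceth=debug:16,phyaddr:1,watchdog:4000".
 */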
7885 static int __init stmmac_cmdline_opt(char *str)
7886 {
7887 	char *opt;
7888 
7889 	if (!str || !*str)
7890 		return 1;
7891 	while ((opt = strsep(&str, ",")) != NULL) {
7892 		if (!strncmp(opt, "debug:", 6)) {
7893 			if (kstrtoint(opt + 6, 0, &debug))
7894 				goto err;
7895 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7896 			if (kstrtoint(opt + 8, 0, &phyaddr))
7897 				goto err;
7898 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7899 			if (kstrtoint(opt + 7, 0, &buf_sz))
7900 				goto err;
7901 		} else if (!strncmp(opt, "tc:", 3)) {
7902 			if (kstrtoint(opt + 3, 0, &tc))
7903 				goto err;
7904 		} else if (!strncmp(opt, "watchdog:", 9)) {
7905 			if (kstrtoint(opt + 9, 0, &watchdog))
7906 				goto err;
7907 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7908 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7909 				goto err;
7910 		} else if (!strncmp(opt, "pause:", 6)) {
7911 			if (kstrtoint(opt + 6, 0, &pause))
7912 				goto err;
7913 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7914 			if (kstrtoint(opt + 10, 0, &eee_timer))
7915 				goto err;
7916 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7917 			if (kstrtoint(opt + 11, 0, &chain_mode))
7918 				goto err;
7919 		}
7920 	}
7921 	return 1;
7922 
7923 err:
7924 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7925 	return 1;
7926 }
7927 
7928 __setup("stmmaceth=", stmmac_cmdline_opt);
7929 #endif /* MODULE */
7930 
7931 static int __init stmmac_init(void)
7932 {
7933 #ifdef CONFIG_DEBUG_FS
7934 	/* Create debugfs main directory if it doesn't exist yet */
7935 	if (!stmmac_fs_dir)
7936 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7937 	register_netdevice_notifier(&stmmac_notifier);
7938 #endif
7939 
7940 	return 0;
7941 }
7942 
7943 static void __exit stmmac_exit(void)
7944 {
7945 #ifdef CONFIG_DEBUG_FS
7946 	unregister_netdevice_notifier(&stmmac_notifier);
7947 	debugfs_remove_recursive(stmmac_fs_dir);
7948 #endif
7949 }
7950 
7951 module_init(stmmac_init)
7952 module_exit(stmmac_exit)
7953 
7954 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7955 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7956 MODULE_LICENSE("GPL");
7957