xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 060f35a317ef09101b128f399dce7ed13d019461)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*******************************************************************************
3    This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4    ST Ethernet IPs are built around a Synopsys IP Core.
5  
6  	Copyright(C) 2007-2011 STMicroelectronics Ltd
7  
8  
9    Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10  
11    Documentation available at:
12  	http://www.stlinux.com
13    Support available at:
14  	https://bugzilla.stlinux.com/
15  *******************************************************************************/
16  
17  #include <linux/clk.h>
18  #include <linux/kernel.h>
19  #include <linux/interrupt.h>
20  #include <linux/ip.h>
21  #include <linux/tcp.h>
22  #include <linux/skbuff.h>
23  #include <linux/ethtool.h>
24  #include <linux/if_ether.h>
25  #include <linux/crc32.h>
26  #include <linux/mii.h>
27  #include <linux/if.h>
28  #include <linux/if_vlan.h>
29  #include <linux/dma-mapping.h>
30  #include <linux/slab.h>
31  #include <linux/pm_runtime.h>
32  #include <linux/prefetch.h>
33  #include <linux/pinctrl/consumer.h>
34  #ifdef CONFIG_DEBUG_FS
35  #include <linux/debugfs.h>
36  #include <linux/seq_file.h>
37  #endif /* CONFIG_DEBUG_FS */
38  #include <linux/net_tstamp.h>
39  #include <linux/phylink.h>
40  #include <linux/udp.h>
41  #include <linux/bpf_trace.h>
42  #include <net/page_pool/helpers.h>
43  #include <net/pkt_cls.h>
44  #include <net/xdp_sock_drv.h>
45  #include "stmmac_ptp.h"
46  #include "stmmac.h"
47  #include "stmmac_xdp.h"
48  #include <linux/reset.h>
49  #include <linux/of_mdio.h>
50  #include "dwmac1000.h"
51  #include "dwxgmac2.h"
52  #include "hwif.h"
53  
54  /* As long as the interface is active, we keep the timestamping counter enabled
55   * with fine resolution and binary rollover. This avoids non-monotonic behavior
56   * (clock jumps) when changing timestamping settings at runtime.
57   */
58  #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59  				 PTP_TCR_TSCTRLSSR)
60  
61  #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62  #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
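
/* Worked example of STMMAC_ALIGN(), assuming SMP_CACHE_BYTES == 64 (the
 * actual value is architecture dependent):
 *
 *	STMMAC_ALIGN(1500) -> ALIGN(1500, 64) = 1536, ALIGN(1536, 16) = 1536
 *	STMMAC_ALIGN(60)   -> ALIGN(60, 64)   = 64,   ALIGN(64, 16)   = 64
 */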
63  
64  /* Module parameters */
65  #define TX_TIMEO	5000
66  static int watchdog = TX_TIMEO;
67  module_param(watchdog, int, 0644);
68  MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69  
70  static int debug = -1;
71  module_param(debug, int, 0644);
72  MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73  
74  static int phyaddr = -1;
75  module_param(phyaddr, int, 0444);
76  MODULE_PARM_DESC(phyaddr, "Physical device address");
77  
78  #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79  #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80  
81  /* Limit to make sure XDP TX and slow path can coexist */
82  #define STMMAC_XSK_TX_BUDGET_MAX	256
83  #define STMMAC_TX_XSK_AVAIL		16
84  #define STMMAC_RX_FILL_BATCH		16
85  
86  #define STMMAC_XDP_PASS		0
87  #define STMMAC_XDP_CONSUMED	BIT(0)
88  #define STMMAC_XDP_TX		BIT(1)
89  #define STMMAC_XDP_REDIRECT	BIT(2)
90  
91  static int flow_ctrl = FLOW_AUTO;
92  module_param(flow_ctrl, int, 0644);
93  MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94  
95  static int pause = PAUSE_TIME;
96  module_param(pause, int, 0644);
97  MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98  
99  #define TC_DEFAULT 64
100  static int tc = TC_DEFAULT;
101  module_param(tc, int, 0644);
102  MODULE_PARM_DESC(tc, "DMA threshold control value");
103  
104  #define	DEFAULT_BUFSIZE	1536
105  static int buf_sz = DEFAULT_BUFSIZE;
106  module_param(buf_sz, int, 0644);
107  MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108  
109  #define	STMMAC_RX_COPYBREAK	256
110  
111  static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112  				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113  				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114  
115  #define STMMAC_DEFAULT_LPI_TIMER	1000
116  static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117  module_param(eee_timer, int, 0644);
118  MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119  #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120  
121  /* By default the driver will use the ring mode to manage tx and rx descriptors,
122   * but allow the user to force the use of chain mode instead of ring mode.
123   */
124  static unsigned int chain_mode;
125  module_param(chain_mode, int, 0444);
126  MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
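
/* Usage sketch (assuming the core is built as a module named stmmac.ko):
 * the parameters above can be set at load time, e.g.
 *
 *	modprobe stmmac eee_timer=2000 buf_sz=4096 chain_mode=1
 *
 * and the 0644 ones can also be changed later via
 * /sys/module/stmmac/parameters/.
 */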
127  
128  static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129  /* For MSI interrupts handling */
130  static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131  static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132  static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133  static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134  static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135  static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136  static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137  static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138  static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139  static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140  					  u32 rxmode, u32 chan);
141  
142  #ifdef CONFIG_DEBUG_FS
143  static const struct net_device_ops stmmac_netdev_ops;
144  static void stmmac_init_fs(struct net_device *dev);
145  static void stmmac_exit_fs(struct net_device *dev);
146  #endif
147  
148  #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149  
150  int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151  {
152  	int ret = 0;
153  
154  	if (enabled) {
155  		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156  		if (ret)
157  			return ret;
158  		ret = clk_prepare_enable(priv->plat->pclk);
159  		if (ret) {
160  			clk_disable_unprepare(priv->plat->stmmac_clk);
161  			return ret;
162  		}
163  		if (priv->plat->clks_config) {
164  			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165  			if (ret) {
166  				clk_disable_unprepare(priv->plat->stmmac_clk);
167  				clk_disable_unprepare(priv->plat->pclk);
168  				return ret;
169  			}
170  		}
171  	} else {
172  		clk_disable_unprepare(priv->plat->stmmac_clk);
173  		clk_disable_unprepare(priv->plat->pclk);
174  		if (priv->plat->clks_config)
175  			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176  	}
177  
178  	return ret;
179  }
180  EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
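
/* Usage sketch (hypothetical platform glue, not taken from this driver):
 * a BSP suspend/resume hook could gate the bus clocks through this helper.
 *
 *	struct net_device *ndev = dev_get_drvdata(dev);
 *	struct stmmac_priv *priv = netdev_priv(ndev);
 *	int ret;
 *
 *	stmmac_bus_clks_config(priv, false);		(on suspend)
 *	ret = stmmac_bus_clks_config(priv, true);	(on resume)
 */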
181  
182  /**
183   * stmmac_verify_args - verify the driver parameters.
184   * Description: it checks the driver parameters and sets a default in case of
185   * errors.
186   */
187  static void stmmac_verify_args(void)
188  {
189  	if (unlikely(watchdog < 0))
190  		watchdog = TX_TIMEO;
191  	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192  		buf_sz = DEFAULT_BUFSIZE;
193  	if (unlikely(flow_ctrl > 1))
194  		flow_ctrl = FLOW_AUTO;
195  	else if (likely(flow_ctrl < 0))
196  		flow_ctrl = FLOW_OFF;
197  	if (unlikely((pause < 0) || (pause > 0xffff)))
198  		pause = PAUSE_TIME;
199  	if (eee_timer < 0)
200  		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201  }
202  
203  static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204  {
205  	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206  	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207  	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208  	u32 queue;
209  
210  	for (queue = 0; queue < maxq; queue++) {
211  		struct stmmac_channel *ch = &priv->channel[queue];
212  
213  		if (stmmac_xdp_is_enabled(priv) &&
214  		    test_bit(queue, priv->af_xdp_zc_qps)) {
215  			napi_disable(&ch->rxtx_napi);
216  			continue;
217  		}
218  
219  		if (queue < rx_queues_cnt)
220  			napi_disable(&ch->rx_napi);
221  		if (queue < tx_queues_cnt)
222  			napi_disable(&ch->tx_napi);
223  	}
224  }
225  
226  /**
227   * stmmac_disable_all_queues - Disable all queues
228   * @priv: driver private structure
229   */
230  static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231  {
232  	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233  	struct stmmac_rx_queue *rx_q;
234  	u32 queue;
235  
236  	/* synchronize_rcu() needed for pending XDP buffers to drain */
237  	for (queue = 0; queue < rx_queues_cnt; queue++) {
238  		rx_q = &priv->dma_conf.rx_queue[queue];
239  		if (rx_q->xsk_pool) {
240  			synchronize_rcu();
241  			break;
242  		}
243  	}
244  
245  	__stmmac_disable_all_queues(priv);
246  }
247  
248  /**
249   * stmmac_enable_all_queues - Enable all queues
250   * @priv: driver private structure
251   */
252  static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253  {
254  	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255  	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256  	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257  	u32 queue;
258  
259  	for (queue = 0; queue < maxq; queue++) {
260  		struct stmmac_channel *ch = &priv->channel[queue];
261  
262  		if (stmmac_xdp_is_enabled(priv) &&
263  		    test_bit(queue, priv->af_xdp_zc_qps)) {
264  			napi_enable(&ch->rxtx_napi);
265  			continue;
266  		}
267  
268  		if (queue < rx_queues_cnt)
269  			napi_enable(&ch->rx_napi);
270  		if (queue < tx_queues_cnt)
271  			napi_enable(&ch->tx_napi);
272  	}
273  }
274  
275  static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276  {
277  	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278  	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279  		queue_work(priv->wq, &priv->service_task);
280  }
281  
282  static void stmmac_global_err(struct stmmac_priv *priv)
283  {
284  	netif_carrier_off(priv->dev);
285  	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286  	stmmac_service_event_schedule(priv);
287  }
288  
289  /**
290   * stmmac_clk_csr_set - dynamically set the MDC clock
291   * @priv: driver private structure
292   * Description: this is to dynamically set the MDC clock according to the csr
293   * clock input.
294   * Note:
295   *	If a specific clk_csr value is passed from the platform,
296   *	the CSR Clock Range selection cannot be changed at run-time
297   *	and is fixed (as reported in the driver documentation).
298   *	Otherwise the driver will try to set the MDC clock
299   *	dynamically according to the actual clock input.
300   */
301  static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302  {
303  	u32 clk_rate;
304  
305  	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306  
307  	/* The platform-provided default clk_csr is assumed valid
308  	 * for all cases except the ones handled below.
309  	 * For values higher than the IEEE 802.3 specified frequency
310  	 * we cannot estimate the proper divider, as the frequency of
311  	 * clk_csr_i is not known. So we do not change the default
312  	 * divider.
313  	 */
314  	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315  		if (clk_rate < CSR_F_35M)
316  			priv->clk_csr = STMMAC_CSR_20_35M;
317  		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318  			priv->clk_csr = STMMAC_CSR_35_60M;
319  		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320  			priv->clk_csr = STMMAC_CSR_60_100M;
321  		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322  			priv->clk_csr = STMMAC_CSR_100_150M;
323  		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324  			priv->clk_csr = STMMAC_CSR_150_250M;
325  		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326  			priv->clk_csr = STMMAC_CSR_250_300M;
327  	}
328  
329  	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330  		if (clk_rate > 160000000)
331  			priv->clk_csr = 0x03;
332  		else if (clk_rate > 80000000)
333  			priv->clk_csr = 0x02;
334  		else if (clk_rate > 40000000)
335  			priv->clk_csr = 0x01;
336  		else
337  			priv->clk_csr = 0;
338  	}
339  
340  	if (priv->plat->has_xgmac) {
341  		if (clk_rate > 400000000)
342  			priv->clk_csr = 0x5;
343  		else if (clk_rate > 350000000)
344  			priv->clk_csr = 0x4;
345  		else if (clk_rate > 300000000)
346  			priv->clk_csr = 0x3;
347  		else if (clk_rate > 250000000)
348  			priv->clk_csr = 0x2;
349  		else if (clk_rate > 150000000)
350  			priv->clk_csr = 0x1;
351  		else
352  			priv->clk_csr = 0x0;
353  	}
354  }
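
/* Worked example: with a 75 MHz CSR clock and no fixed clk_csr from the
 * platform, the range checks above select STMMAC_CSR_60_100M, i.e. the MDC
 * divider for the 60-100 MHz band. The sun8i and XGMAC blocks override this
 * with their own raw register values.
 */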
355  
356  static void print_pkt(unsigned char *buf, int len)
357  {
358  	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359  	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360  }
361  
362  static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363  {
364  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365  	u32 avail;
366  
367  	if (tx_q->dirty_tx > tx_q->cur_tx)
368  		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369  	else
370  		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371  
372  	return avail;
373  }
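
/* Worked example of the ring arithmetic above, assuming dma_tx_size == 512:
 *
 *	cur_tx = 10,  dirty_tx = 500  ->  avail = 500 - 10 - 1        = 489
 *	cur_tx = 500, dirty_tx = 10   ->  avail = 512 - 500 + 10 - 1  = 21
 *
 * One slot is always kept unused so that cur_tx == dirty_tx unambiguously
 * means the ring is empty.
 */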
374  
375  /**
376   * stmmac_rx_dirty - Get RX queue dirty
377   * @priv: driver private structure
378   * @queue: RX queue index
379   */
380  static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381  {
382  	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383  	u32 dirty;
384  
385  	if (rx_q->dirty_rx <= rx_q->cur_rx)
386  		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387  	else
388  		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389  
390  	return dirty;
391  }
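
/* Worked example, assuming dma_rx_size == 512:
 *
 *	dirty_rx = 100, cur_rx = 140  ->  dirty = 40 descriptors to refill
 *	dirty_rx = 500, cur_rx = 20   ->  dirty = 512 - 500 + 20 = 32
 */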
392  
393  static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394  {
395  	int tx_lpi_timer;
396  
397  	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398  	priv->eee_sw_timer_en = en ? 0 : 1;
399  	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400  	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401  }
402  
403  /**
404   * stmmac_enable_eee_mode - check and enter LPI mode
405   * @priv: driver private structure
406   * Description: this function checks whether all TX queues are idle and, if
407   * so, enters LPI mode when EEE is enabled.
408   */
409  static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410  {
411  	u32 tx_cnt = priv->plat->tx_queues_to_use;
412  	u32 queue;
413  
414  	/* check if all TX queues have finished their work */
415  	for (queue = 0; queue < tx_cnt; queue++) {
416  		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417  
418  		if (tx_q->dirty_tx != tx_q->cur_tx)
419  			return -EBUSY; /* still unfinished work */
420  	}
421  
422  	/* Check and enter in LPI mode */
423  	if (!priv->tx_path_in_lpi_mode)
424  		stmmac_set_eee_mode(priv, priv->hw,
425  			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426  	return 0;
427  }
428  
429  /**
430   * stmmac_disable_eee_mode - disable and exit from LPI mode
431   * @priv: driver private structure
432   * Description: this function exits and disables EEE when the LPI state
433   * is active. It is called from the xmit path.
434   */
435  void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436  {
437  	if (!priv->eee_sw_timer_en) {
438  		stmmac_lpi_entry_timer_config(priv, 0);
439  		return;
440  	}
441  
442  	stmmac_reset_eee_mode(priv, priv->hw);
443  	del_timer_sync(&priv->eee_ctrl_timer);
444  	priv->tx_path_in_lpi_mode = false;
445  }
446  
447  /**
448   * stmmac_eee_ctrl_timer - EEE TX SW timer.
449   * @t:  timer_list struct containing private info
450   * Description:
451   *  if there is no data transfer and we are not already in the LPI state,
452   *  then the MAC transmitter can be moved to the LPI state.
453   */
454  static void stmmac_eee_ctrl_timer(struct timer_list *t)
455  {
456  	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457  
458  	if (stmmac_enable_eee_mode(priv))
459  		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460  }
461  
462  /**
463   * stmmac_eee_init - init EEE
464   * @priv: driver private structure
465   * Description:
466   *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467   *  can also manage EEE, this function enables the LPI state and starts the
468   *  related timer.
469   */
470  bool stmmac_eee_init(struct stmmac_priv *priv)
471  {
472  	int eee_tw_timer = priv->eee_tw_timer;
473  
474  	/* When using the PCS we cannot deal with the PHY registers at this
475  	 * stage, so we do not support extra features like EEE.
476  	 */
477  	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478  	    priv->hw->pcs == STMMAC_PCS_RTBI)
479  		return false;
480  
481  	/* Check if MAC core supports the EEE feature. */
482  	if (!priv->dma_cap.eee)
483  		return false;
484  
485  	mutex_lock(&priv->lock);
486  
487  	/* Check if it needs to be deactivated */
488  	if (!priv->eee_active) {
489  		if (priv->eee_enabled) {
490  			netdev_dbg(priv->dev, "disable EEE\n");
491  			stmmac_lpi_entry_timer_config(priv, 0);
492  			del_timer_sync(&priv->eee_ctrl_timer);
493  			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494  			if (priv->hw->xpcs)
495  				xpcs_config_eee(priv->hw->xpcs,
496  						priv->plat->mult_fact_100ns,
497  						false);
498  		}
499  		mutex_unlock(&priv->lock);
500  		return false;
501  	}
502  
503  	if (priv->eee_active && !priv->eee_enabled) {
504  		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505  		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506  				     eee_tw_timer);
507  		if (priv->hw->xpcs)
508  			xpcs_config_eee(priv->hw->xpcs,
509  					priv->plat->mult_fact_100ns,
510  					true);
511  	}
512  
513  	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514  		del_timer_sync(&priv->eee_ctrl_timer);
515  		priv->tx_path_in_lpi_mode = false;
516  		stmmac_lpi_entry_timer_config(priv, 1);
517  	} else {
518  		stmmac_lpi_entry_timer_config(priv, 0);
519  		mod_timer(&priv->eee_ctrl_timer,
520  			  STMMAC_LPI_T(priv->tx_lpi_timer));
521  	}
522  
523  	mutex_unlock(&priv->lock);
524  	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525  	return true;
526  }
527  
528  /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529   * @priv: driver private structure
530   * @p : descriptor pointer
531   * @skb : the socket buffer
532   * Description :
533   * This function reads the timestamp from the descriptor, performs some
534   * sanity checks and passes it to the stack.
535   */
536  static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537  				   struct dma_desc *p, struct sk_buff *skb)
538  {
539  	struct skb_shared_hwtstamps shhwtstamp;
540  	bool found = false;
541  	u64 ns = 0;
542  
543  	if (!priv->hwts_tx_en)
544  		return;
545  
546  	/* exit if skb doesn't support hw tstamp */
547  	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548  		return;
549  
550  	/* check tx tstamp status */
551  	if (stmmac_get_tx_timestamp_status(priv, p)) {
552  		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553  		found = true;
554  	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555  		found = true;
556  	}
557  
558  	if (found) {
559  		ns -= priv->plat->cdc_error_adj;
560  
561  		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562  		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563  
564  		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565  		/* pass tstamp to stack */
566  		skb_tstamp_tx(skb, &shhwtstamp);
567  	}
568  }
569  
570  /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571   * @priv: driver private structure
572   * @p : descriptor pointer
573   * @np : next descriptor pointer
574   * @skb : the socket buffer
575   * Description :
576   * This function reads the received packet's timestamp from the descriptor
577   * and passes it to the stack. It also performs some sanity checks.
578   */
579  static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580  				   struct dma_desc *np, struct sk_buff *skb)
581  {
582  	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583  	struct dma_desc *desc = p;
584  	u64 ns = 0;
585  
586  	if (!priv->hwts_rx_en)
587  		return;
588  	/* For GMAC4, the valid timestamp is from CTX next desc. */
589  	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590  		desc = np;
591  
592  	/* Check if timestamp is available */
593  	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594  		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595  
596  		ns -= priv->plat->cdc_error_adj;
597  
598  		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599  		shhwtstamp = skb_hwtstamps(skb);
600  		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601  		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602  	} else  {
603  		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604  	}
605  }
606  
607  /**
608   *  stmmac_hwtstamp_set - control hardware timestamping.
609   *  @dev: device pointer.
610   *  @ifr: An IOCTL-specific structure that can contain a pointer to
611   *  a proprietary structure used to pass information to the driver.
612   *  Description:
613   *  This function configures the MAC to enable/disable both outgoing (TX)
614   *  and incoming (RX) packet timestamping based on user input.
615   *  Return Value:
616   *  0 on success and an appropriate -ve integer on failure.
617   */
618  static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619  {
620  	struct stmmac_priv *priv = netdev_priv(dev);
621  	struct hwtstamp_config config;
622  	u32 ptp_v2 = 0;
623  	u32 tstamp_all = 0;
624  	u32 ptp_over_ipv4_udp = 0;
625  	u32 ptp_over_ipv6_udp = 0;
626  	u32 ptp_over_ethernet = 0;
627  	u32 snap_type_sel = 0;
628  	u32 ts_master_en = 0;
629  	u32 ts_event_en = 0;
630  
631  	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632  		netdev_alert(priv->dev, "No support for HW time stamping\n");
633  		priv->hwts_tx_en = 0;
634  		priv->hwts_rx_en = 0;
635  
636  		return -EOPNOTSUPP;
637  	}
638  
639  	if (copy_from_user(&config, ifr->ifr_data,
640  			   sizeof(config)))
641  		return -EFAULT;
642  
643  	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644  		   __func__, config.flags, config.tx_type, config.rx_filter);
645  
646  	if (config.tx_type != HWTSTAMP_TX_OFF &&
647  	    config.tx_type != HWTSTAMP_TX_ON)
648  		return -ERANGE;
649  
650  	if (priv->adv_ts) {
651  		switch (config.rx_filter) {
652  		case HWTSTAMP_FILTER_NONE:
653  			/* do not timestamp any incoming packets */
654  			config.rx_filter = HWTSTAMP_FILTER_NONE;
655  			break;
656  
657  		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658  			/* PTP v1, UDP, any kind of event packet */
659  			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660  			/* 'xmac' hardware can support Sync, Pdelay_Req and
661  			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
662  			 * This leaves Delay_Req timestamps out.
663  			 * Enable all events *and* general purpose message
664  			 * timestamping
665  			 */
666  			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669  			break;
670  
671  		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672  			/* PTP v1, UDP, Sync packet */
673  			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674  			/* take time stamp for SYNC messages only */
675  			ts_event_en = PTP_TCR_TSEVNTENA;
676  
677  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679  			break;
680  
681  		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682  			/* PTP v1, UDP, Delay_req packet */
683  			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684  			/* take time stamp for Delay_Req messages only */
685  			ts_master_en = PTP_TCR_TSMSTRENA;
686  			ts_event_en = PTP_TCR_TSEVNTENA;
687  
688  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690  			break;
691  
692  		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693  			/* PTP v2, UDP, any kind of event packet */
694  			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695  			ptp_v2 = PTP_TCR_TSVER2ENA;
696  			/* take time stamp for all event messages */
697  			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698  
699  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701  			break;
702  
703  		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704  			/* PTP v2, UDP, Sync packet */
705  			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706  			ptp_v2 = PTP_TCR_TSVER2ENA;
707  			/* take time stamp for SYNC messages only */
708  			ts_event_en = PTP_TCR_TSEVNTENA;
709  
710  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712  			break;
713  
714  		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715  			/* PTP v2, UDP, Delay_req packet */
716  			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717  			ptp_v2 = PTP_TCR_TSVER2ENA;
718  			/* take time stamp for Delay_Req messages only */
719  			ts_master_en = PTP_TCR_TSMSTRENA;
720  			ts_event_en = PTP_TCR_TSEVNTENA;
721  
722  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724  			break;
725  
726  		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727  			/* PTP v2/802.1AS, any layer, any kind of event packet */
728  			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729  			ptp_v2 = PTP_TCR_TSVER2ENA;
730  			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731  			if (priv->synopsys_id < DWMAC_CORE_4_10)
732  				ts_event_en = PTP_TCR_TSEVNTENA;
733  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735  			ptp_over_ethernet = PTP_TCR_TSIPENA;
736  			break;
737  
738  		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739  			/* PTP v2/802.1AS, any layer, Sync packet */
740  			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741  			ptp_v2 = PTP_TCR_TSVER2ENA;
742  			/* take time stamp for SYNC messages only */
743  			ts_event_en = PTP_TCR_TSEVNTENA;
744  
745  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747  			ptp_over_ethernet = PTP_TCR_TSIPENA;
748  			break;
749  
750  		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751  			/* PTP v2/802.1AS, any layer, Delay_req packet */
752  			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753  			ptp_v2 = PTP_TCR_TSVER2ENA;
754  			/* take time stamp for Delay_Req messages only */
755  			ts_master_en = PTP_TCR_TSMSTRENA;
756  			ts_event_en = PTP_TCR_TSEVNTENA;
757  
758  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760  			ptp_over_ethernet = PTP_TCR_TSIPENA;
761  			break;
762  
763  		case HWTSTAMP_FILTER_NTP_ALL:
764  		case HWTSTAMP_FILTER_ALL:
765  			/* time stamp any incoming packet */
766  			config.rx_filter = HWTSTAMP_FILTER_ALL;
767  			tstamp_all = PTP_TCR_TSENALL;
768  			break;
769  
770  		default:
771  			return -ERANGE;
772  		}
773  	} else {
774  		switch (config.rx_filter) {
775  		case HWTSTAMP_FILTER_NONE:
776  			config.rx_filter = HWTSTAMP_FILTER_NONE;
777  			break;
778  		default:
779  			/* PTP v1, UDP, any kind of event packet */
780  			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781  			break;
782  		}
783  	}
784  	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785  	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786  
787  	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788  
789  	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790  		priv->systime_flags |= tstamp_all | ptp_v2 |
791  				       ptp_over_ethernet | ptp_over_ipv6_udp |
792  				       ptp_over_ipv4_udp | ts_event_en |
793  				       ts_master_en | snap_type_sel;
794  	}
795  
796  	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797  
798  	memcpy(&priv->tstamp_config, &config, sizeof(config));
799  
800  	return copy_to_user(ifr->ifr_data, &config,
801  			    sizeof(config)) ? -EFAULT : 0;
802  }
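
/* Usage sketch (userspace side, illustrative only): the request handled above
 * is the standard SIOCSHWTSTAMP ioctl, e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * where fd is any socket and "eth0" stands in for the stmmac interface.
 * SIOCGHWTSTAMP reads the current configuration back via
 * stmmac_hwtstamp_get() below.
 */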
803  
804  /**
805   *  stmmac_hwtstamp_get - read hardware timestamping.
806   *  @dev: device pointer.
807   *  @ifr: An IOCTL-specific structure that can contain a pointer to
808   *  a proprietary structure used to pass information to the driver.
809   *  Description:
810   *  This function obtains the current hardware timestamping settings
811   *  as requested.
812   */
813  static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814  {
815  	struct stmmac_priv *priv = netdev_priv(dev);
816  	struct hwtstamp_config *config = &priv->tstamp_config;
817  
818  	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819  		return -EOPNOTSUPP;
820  
821  	return copy_to_user(ifr->ifr_data, config,
822  			    sizeof(*config)) ? -EFAULT : 0;
823  }
824  
825  /**
826   * stmmac_init_tstamp_counter - init hardware timestamping counter
827   * @priv: driver private structure
828   * @systime_flags: timestamping flags
829   * Description:
830   * Initialize hardware counter for packet timestamping.
831   * This is valid as long as the interface is open and not suspended.
832   * It will be rerun after resuming from suspend, in which case the timestamping
833   * flags updated by stmmac_hwtstamp_set() also need to be restored.
834   */
835  int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836  {
837  	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838  	struct timespec64 now;
839  	u32 sec_inc = 0;
840  	u64 temp = 0;
841  
842  	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843  		return -EOPNOTSUPP;
844  
845  	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846  	priv->systime_flags = systime_flags;
847  
848  	/* program Sub Second Increment reg */
849  	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850  					   priv->plat->clk_ptp_rate,
851  					   xmac, &sec_inc);
852  	temp = div_u64(1000000000ULL, sec_inc);
853  
854  	/* Store sub second increment for later use */
855  	priv->sub_second_inc = sec_inc;
856  
857  	/* Calculate the default addend value:
858  	 * the formula is:
859  	 * addend = (2^32) / freq_div_ratio;
860  	 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc)
861  	 */
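	/* Worked example with assumed values: clk_ptp_rate = 62.5 MHz and
	 * sec_inc = 20 ns give freq_div_ratio = 62.5e6 / (1e9 / 20) = 1.25,
	 * so addend = 2^32 / 1.25 ~= 0xCCCCCCCC.
	 */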
862  	temp = (u64)(temp << 32);
863  	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864  	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865  
866  	/* initialize system time */
867  	ktime_get_real_ts64(&now);
868  
869  	/* lower 32 bits of tv_sec are safe until y2106 */
870  	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871  
872  	return 0;
873  }
874  EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875  
876  /**
877   * stmmac_init_ptp - init PTP
878   * @priv: driver private structure
879   * Description: this is to verify if the HW supports PTPv1 or PTPv2.
880   * This is done by looking at the HW cap. register.
881   * This function also registers the ptp driver.
882   */
883  static int stmmac_init_ptp(struct stmmac_priv *priv)
884  {
885  	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886  	int ret;
887  
888  	if (priv->plat->ptp_clk_freq_config)
889  		priv->plat->ptp_clk_freq_config(priv);
890  
891  	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892  	if (ret)
893  		return ret;
894  
895  	priv->adv_ts = 0;
896  	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897  	if (xmac && priv->dma_cap.atime_stamp)
898  		priv->adv_ts = 1;
899  	/* Dwmac 3.x core with extend_desc can support adv_ts */
900  	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901  		priv->adv_ts = 1;
902  
903  	if (priv->dma_cap.time_stamp)
904  		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905  
906  	if (priv->adv_ts)
907  		netdev_info(priv->dev,
908  			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909  
910  	priv->hwts_tx_en = 0;
911  	priv->hwts_rx_en = 0;
912  
913  	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914  		stmmac_hwtstamp_correct_latency(priv, priv);
915  
916  	return 0;
917  }
918  
919  static void stmmac_release_ptp(struct stmmac_priv *priv)
920  {
921  	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922  	stmmac_ptp_unregister(priv);
923  }
924  
925  /**
926   *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927   *  @priv: driver private structure
928   *  @duplex: duplex passed to the next function
929   *  Description: It is used for configuring the flow control in all queues
930   */
931  static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932  {
933  	u32 tx_cnt = priv->plat->tx_queues_to_use;
934  
935  	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936  			priv->pause, tx_cnt);
937  }
938  
939  static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940  						 phy_interface_t interface)
941  {
942  	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943  
944  	if (priv->hw->xpcs)
945  		return &priv->hw->xpcs->pcs;
946  
947  	if (priv->hw->lynx_pcs)
948  		return priv->hw->lynx_pcs;
949  
950  	return NULL;
951  }
952  
953  static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954  			      const struct phylink_link_state *state)
955  {
956  	/* Nothing to do, xpcs_config() handles everything */
957  }
958  
959  static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960  {
961  	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962  	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963  	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964  	bool *hs_enable = &fpe_cfg->hs_enable;
965  
966  	if (is_up && *hs_enable) {
967  		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968  					MPACKET_VERIFY);
969  	} else {
970  		*lo_state = FPE_STATE_OFF;
971  		*lp_state = FPE_STATE_OFF;
972  	}
973  }
974  
975  static void stmmac_mac_link_down(struct phylink_config *config,
976  				 unsigned int mode, phy_interface_t interface)
977  {
978  	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979  
980  	stmmac_mac_set(priv, priv->ioaddr, false);
981  	priv->eee_active = false;
982  	priv->tx_lpi_enabled = false;
983  	priv->eee_enabled = stmmac_eee_init(priv);
984  	stmmac_set_eee_pls(priv, priv->hw, false);
985  
986  	if (priv->dma_cap.fpesel)
987  		stmmac_fpe_link_state_handle(priv, false);
988  }
989  
990  static void stmmac_mac_link_up(struct phylink_config *config,
991  			       struct phy_device *phy,
992  			       unsigned int mode, phy_interface_t interface,
993  			       int speed, int duplex,
994  			       bool tx_pause, bool rx_pause)
995  {
996  	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997  	u32 old_ctrl, ctrl;
998  
999  	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000  	    priv->plat->serdes_powerup)
1001  		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002  
1003  	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004  	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005  
1006  	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007  		switch (speed) {
1008  		case SPEED_10000:
1009  			ctrl |= priv->hw->link.xgmii.speed10000;
1010  			break;
1011  		case SPEED_5000:
1012  			ctrl |= priv->hw->link.xgmii.speed5000;
1013  			break;
1014  		case SPEED_2500:
1015  			ctrl |= priv->hw->link.xgmii.speed2500;
1016  			break;
1017  		default:
1018  			return;
1019  		}
1020  	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021  		switch (speed) {
1022  		case SPEED_100000:
1023  			ctrl |= priv->hw->link.xlgmii.speed100000;
1024  			break;
1025  		case SPEED_50000:
1026  			ctrl |= priv->hw->link.xlgmii.speed50000;
1027  			break;
1028  		case SPEED_40000:
1029  			ctrl |= priv->hw->link.xlgmii.speed40000;
1030  			break;
1031  		case SPEED_25000:
1032  			ctrl |= priv->hw->link.xlgmii.speed25000;
1033  			break;
1034  		case SPEED_10000:
1035  			ctrl |= priv->hw->link.xgmii.speed10000;
1036  			break;
1037  		case SPEED_2500:
1038  			ctrl |= priv->hw->link.speed2500;
1039  			break;
1040  		case SPEED_1000:
1041  			ctrl |= priv->hw->link.speed1000;
1042  			break;
1043  		default:
1044  			return;
1045  		}
1046  	} else {
1047  		switch (speed) {
1048  		case SPEED_2500:
1049  			ctrl |= priv->hw->link.speed2500;
1050  			break;
1051  		case SPEED_1000:
1052  			ctrl |= priv->hw->link.speed1000;
1053  			break;
1054  		case SPEED_100:
1055  			ctrl |= priv->hw->link.speed100;
1056  			break;
1057  		case SPEED_10:
1058  			ctrl |= priv->hw->link.speed10;
1059  			break;
1060  		default:
1061  			return;
1062  		}
1063  	}
1064  
1065  	priv->speed = speed;
1066  
1067  	if (priv->plat->fix_mac_speed)
1068  		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069  
1070  	if (!duplex)
1071  		ctrl &= ~priv->hw->link.duplex;
1072  	else
1073  		ctrl |= priv->hw->link.duplex;
1074  
1075  	/* Flow Control operation */
1076  	if (rx_pause && tx_pause)
1077  		priv->flow_ctrl = FLOW_AUTO;
1078  	else if (rx_pause && !tx_pause)
1079  		priv->flow_ctrl = FLOW_RX;
1080  	else if (!rx_pause && tx_pause)
1081  		priv->flow_ctrl = FLOW_TX;
1082  	else
1083  		priv->flow_ctrl = FLOW_OFF;
1084  
1085  	stmmac_mac_flow_ctrl(priv, duplex);
1086  
1087  	if (ctrl != old_ctrl)
1088  		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089  
1090  	stmmac_mac_set(priv, priv->ioaddr, true);
1091  	if (phy && priv->dma_cap.eee) {
1092  		priv->eee_active =
1093  			phy_init_eee(phy, !(priv->plat->flags &
1094  				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095  		priv->eee_enabled = stmmac_eee_init(priv);
1096  		priv->tx_lpi_enabled = priv->eee_enabled;
1097  		stmmac_set_eee_pls(priv, priv->hw, true);
1098  	}
1099  
1100  	if (priv->dma_cap.fpesel)
1101  		stmmac_fpe_link_state_handle(priv, true);
1102  
1103  	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104  		stmmac_hwtstamp_correct_latency(priv, priv);
1105  }
1106  
1107  static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108  	.mac_select_pcs = stmmac_mac_select_pcs,
1109  	.mac_config = stmmac_mac_config,
1110  	.mac_link_down = stmmac_mac_link_down,
1111  	.mac_link_up = stmmac_mac_link_up,
1112  };
1113  
1114  /**
1115   * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116   * @priv: driver private structure
1117   * Description: this is to verify if the HW supports the PCS.
1118   * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1119   * configured for the TBI, RTBI, or SGMII PHY interface.
1120   */
1121  static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122  {
1123  	int interface = priv->plat->mac_interface;
1124  
1125  	if (priv->dma_cap.pcs) {
1126  		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127  		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128  		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129  		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130  			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131  			priv->hw->pcs = STMMAC_PCS_RGMII;
1132  		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133  			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134  			priv->hw->pcs = STMMAC_PCS_SGMII;
1135  		}
1136  	}
1137  }
1138  
1139  /**
1140   * stmmac_init_phy - PHY initialization
1141   * @dev: net device structure
1142   * Description: it initializes the driver's PHY state, and attaches the PHY
1143   * to the mac driver.
1144   *  Return value:
1145   *  0 on success
1146   */
1147  static int stmmac_init_phy(struct net_device *dev)
1148  {
1149  	struct stmmac_priv *priv = netdev_priv(dev);
1150  	struct fwnode_handle *phy_fwnode;
1151  	struct fwnode_handle *fwnode;
1152  	int ret;
1153  
1154  	if (!phylink_expects_phy(priv->phylink))
1155  		return 0;
1156  
1157  	fwnode = priv->plat->port_node;
1158  	if (!fwnode)
1159  		fwnode = dev_fwnode(priv->device);
1160  
1161  	if (fwnode)
1162  		phy_fwnode = fwnode_get_phy_node(fwnode);
1163  	else
1164  		phy_fwnode = NULL;
1165  
1166  	/* Some DT bindings do not set up the PHY handle. Let's try to
1167  	 * parse it manually.
1168  	 */
1169  	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170  		int addr = priv->plat->phy_addr;
1171  		struct phy_device *phydev;
1172  
1173  		if (addr < 0) {
1174  			netdev_err(priv->dev, "no phy found\n");
1175  			return -ENODEV;
1176  		}
1177  
1178  		phydev = mdiobus_get_phy(priv->mii, addr);
1179  		if (!phydev) {
1180  			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181  			return -ENODEV;
1182  		}
1183  
1184  		ret = phylink_connect_phy(priv->phylink, phydev);
1185  	} else {
1186  		fwnode_handle_put(phy_fwnode);
1187  		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188  	}
1189  
1190  	if (!priv->plat->pmt) {
1191  		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192  
1193  		phylink_ethtool_get_wol(priv->phylink, &wol);
1194  		device_set_wakeup_capable(priv->device, !!wol.supported);
1195  		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196  	}
1197  
1198  	return ret;
1199  }
1200  
1201  static int stmmac_phy_setup(struct stmmac_priv *priv)
1202  {
1203  	struct stmmac_mdio_bus_data *mdio_bus_data;
1204  	int mode = priv->plat->phy_interface;
1205  	struct fwnode_handle *fwnode;
1206  	struct phylink *phylink;
1207  	int max_speed;
1208  
1209  	priv->phylink_config.dev = &priv->dev->dev;
1210  	priv->phylink_config.type = PHYLINK_NETDEV;
1211  	priv->phylink_config.mac_managed_pm = true;
1212  
1213  	mdio_bus_data = priv->plat->mdio_bus_data;
1214  	if (mdio_bus_data)
1215  		priv->phylink_config.ovr_an_inband =
1216  			mdio_bus_data->xpcs_an_inband;
1217  
1218  	/* Set the platform/firmware specified interface mode. Note, phylink
1219  	 * deals with the PHY interface mode, not the MAC interface mode.
1220  	 */
1221  	__set_bit(mode, priv->phylink_config.supported_interfaces);
1222  
1223  	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1224  	if (priv->hw->xpcs)
1225  		xpcs_get_interfaces(priv->hw->xpcs,
1226  				    priv->phylink_config.supported_interfaces);
1227  
1228  	/* Get the MAC specific capabilities */
1229  	stmmac_mac_phylink_get_caps(priv);
1230  
1231  	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
1232  
1233  	max_speed = priv->plat->max_speed;
1234  	if (max_speed)
1235  		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1236  
1237  	fwnode = priv->plat->port_node;
1238  	if (!fwnode)
1239  		fwnode = dev_fwnode(priv->device);
1240  
1241  	phylink = phylink_create(&priv->phylink_config, fwnode,
1242  				 mode, &stmmac_phylink_mac_ops);
1243  	if (IS_ERR(phylink))
1244  		return PTR_ERR(phylink);
1245  
1246  	priv->phylink = phylink;
1247  	return 0;
1248  }
1249  
1250  static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1251  				    struct stmmac_dma_conf *dma_conf)
1252  {
1253  	u32 rx_cnt = priv->plat->rx_queues_to_use;
1254  	unsigned int desc_size;
1255  	void *head_rx;
1256  	u32 queue;
1257  
1258  	/* Display RX rings */
1259  	for (queue = 0; queue < rx_cnt; queue++) {
1260  		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1261  
1262  		pr_info("\tRX Queue %u rings\n", queue);
1263  
1264  		if (priv->extend_desc) {
1265  			head_rx = (void *)rx_q->dma_erx;
1266  			desc_size = sizeof(struct dma_extended_desc);
1267  		} else {
1268  			head_rx = (void *)rx_q->dma_rx;
1269  			desc_size = sizeof(struct dma_desc);
1270  		}
1271  
1272  		/* Display RX ring */
1273  		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1274  				    rx_q->dma_rx_phy, desc_size);
1275  	}
1276  }
1277  
1278  static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1279  				    struct stmmac_dma_conf *dma_conf)
1280  {
1281  	u32 tx_cnt = priv->plat->tx_queues_to_use;
1282  	unsigned int desc_size;
1283  	void *head_tx;
1284  	u32 queue;
1285  
1286  	/* Display TX rings */
1287  	for (queue = 0; queue < tx_cnt; queue++) {
1288  		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1289  
1290  		pr_info("\tTX Queue %d rings\n", queue);
1291  
1292  		if (priv->extend_desc) {
1293  			head_tx = (void *)tx_q->dma_etx;
1294  			desc_size = sizeof(struct dma_extended_desc);
1295  		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1296  			head_tx = (void *)tx_q->dma_entx;
1297  			desc_size = sizeof(struct dma_edesc);
1298  		} else {
1299  			head_tx = (void *)tx_q->dma_tx;
1300  			desc_size = sizeof(struct dma_desc);
1301  		}
1302  
1303  		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1304  				    tx_q->dma_tx_phy, desc_size);
1305  	}
1306  }
1307  
1308  static void stmmac_display_rings(struct stmmac_priv *priv,
1309  				 struct stmmac_dma_conf *dma_conf)
1310  {
1311  	/* Display RX ring */
1312  	stmmac_display_rx_rings(priv, dma_conf);
1313  
1314  	/* Display TX ring */
1315  	stmmac_display_tx_rings(priv, dma_conf);
1316  }
1317  
1318  static int stmmac_set_bfsize(int mtu, int bufsize)
1319  {
1320  	int ret = bufsize;
1321  
1322  	if (mtu >= BUF_SIZE_8KiB)
1323  		ret = BUF_SIZE_16KiB;
1324  	else if (mtu >= BUF_SIZE_4KiB)
1325  		ret = BUF_SIZE_8KiB;
1326  	else if (mtu >= BUF_SIZE_2KiB)
1327  		ret = BUF_SIZE_4KiB;
1328  	else if (mtu > DEFAULT_BUFSIZE)
1329  		ret = BUF_SIZE_2KiB;
1330  	else
1331  		ret = DEFAULT_BUFSIZE;
1332  
1333  	return ret;
1334  }
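
/* Worked example: an MTU of 1500 keeps DEFAULT_BUFSIZE (1536 bytes), an MTU
 * of 3000 selects BUF_SIZE_4KiB, and a 9000-byte jumbo MTU selects
 * BUF_SIZE_16KiB.
 */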
1335  
1336  /**
1337   * stmmac_clear_rx_descriptors - clear RX descriptors
1338   * @priv: driver private structure
1339   * @dma_conf: structure to take the dma data
1340   * @queue: RX queue index
1341   * Description: this function is called to clear the RX descriptors,
1342   * whether basic or extended descriptors are used.
1343   */
1344  static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1345  					struct stmmac_dma_conf *dma_conf,
1346  					u32 queue)
1347  {
1348  	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1349  	int i;
1350  
1351  	/* Clear the RX descriptors */
1352  	for (i = 0; i < dma_conf->dma_rx_size; i++)
1353  		if (priv->extend_desc)
1354  			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1355  					priv->use_riwt, priv->mode,
1356  					(i == dma_conf->dma_rx_size - 1),
1357  					dma_conf->dma_buf_sz);
1358  		else
1359  			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1360  					priv->use_riwt, priv->mode,
1361  					(i == dma_conf->dma_rx_size - 1),
1362  					dma_conf->dma_buf_sz);
1363  }
1364  
1365  /**
1366   * stmmac_clear_tx_descriptors - clear tx descriptors
1367   * @priv: driver private structure
1368   * @dma_conf: structure to take the dma data
1369   * @queue: TX queue index.
1370   * Description: this function is called to clear the TX descriptors,
1371   * whether basic or extended descriptors are used.
1372   */
1373  static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1374  					struct stmmac_dma_conf *dma_conf,
1375  					u32 queue)
1376  {
1377  	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1378  	int i;
1379  
1380  	/* Clear the TX descriptors */
1381  	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1382  		int last = (i == (dma_conf->dma_tx_size - 1));
1383  		struct dma_desc *p;
1384  
1385  		if (priv->extend_desc)
1386  			p = &tx_q->dma_etx[i].basic;
1387  		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1388  			p = &tx_q->dma_entx[i].basic;
1389  		else
1390  			p = &tx_q->dma_tx[i];
1391  
1392  		stmmac_init_tx_desc(priv, p, priv->mode, last);
1393  	}
1394  }
1395  
1396  /**
1397   * stmmac_clear_descriptors - clear descriptors
1398   * @priv: driver private structure
1399   * @dma_conf: structure to take the dma data
1400   * Description: this function is called to clear the TX and RX descriptors,
1401   * whether basic or extended descriptors are used.
1402   */
1403  static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1404  				     struct stmmac_dma_conf *dma_conf)
1405  {
1406  	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1407  	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1408  	u32 queue;
1409  
1410  	/* Clear the RX descriptors */
1411  	for (queue = 0; queue < rx_queue_cnt; queue++)
1412  		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1413  
1414  	/* Clear the TX descriptors */
1415  	for (queue = 0; queue < tx_queue_cnt; queue++)
1416  		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1417  }
1418  
1419  /**
1420   * stmmac_init_rx_buffers - init the RX descriptor buffer.
1421   * @priv: driver private structure
1422   * @dma_conf: structure to take the dma data
1423   * @p: descriptor pointer
1424   * @i: descriptor index
1425   * @flags: gfp flag
1426   * @queue: RX queue index
1427   * Description: this function is called to allocate a receive buffer, perform
1428   * the DMA mapping and init the descriptor.
1429   */
1430  static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1431  				  struct stmmac_dma_conf *dma_conf,
1432  				  struct dma_desc *p,
1433  				  int i, gfp_t flags, u32 queue)
1434  {
1435  	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1436  	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1437  	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1438  
1439  	if (priv->dma_cap.host_dma_width <= 32)
1440  		gfp |= GFP_DMA32;
1441  
1442  	if (!buf->page) {
1443  		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1444  		if (!buf->page)
1445  			return -ENOMEM;
1446  		buf->page_offset = stmmac_rx_offset(priv);
1447  	}
1448  
1449  	if (priv->sph && !buf->sec_page) {
1450  		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1451  		if (!buf->sec_page)
1452  			return -ENOMEM;
1453  
1454  		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1455  		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1456  	} else {
1457  		buf->sec_page = NULL;
1458  		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1459  	}
1460  
1461  	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1462  
1463  	stmmac_set_desc_addr(priv, p, buf->addr);
1464  	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1465  		stmmac_init_desc3(priv, p);
1466  
1467  	return 0;
1468  }
1469  
1470  /**
1471   * stmmac_free_rx_buffer - free RX dma buffers
1472   * @priv: private structure
1473   * @rx_q: RX queue
1474   * @i: buffer index.
1475   */
1476  static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1477  				  struct stmmac_rx_queue *rx_q,
1478  				  int i)
1479  {
1480  	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1481  
1482  	if (buf->page)
1483  		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1484  	buf->page = NULL;
1485  
1486  	if (buf->sec_page)
1487  		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1488  	buf->sec_page = NULL;
1489  }
1490  
1491  /**
1492   * stmmac_free_tx_buffer - free TX dma buffers
1493   * @priv: private structure
1494   * @dma_conf: structure to take the dma data
1495   * @queue: TX queue index
1496   * @i: buffer index.
1497   */
1498  static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1499  				  struct stmmac_dma_conf *dma_conf,
1500  				  u32 queue, int i)
1501  {
1502  	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1503  
1504  	if (tx_q->tx_skbuff_dma[i].buf &&
1505  	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1506  		if (tx_q->tx_skbuff_dma[i].map_as_page)
1507  			dma_unmap_page(priv->device,
1508  				       tx_q->tx_skbuff_dma[i].buf,
1509  				       tx_q->tx_skbuff_dma[i].len,
1510  				       DMA_TO_DEVICE);
1511  		else
1512  			dma_unmap_single(priv->device,
1513  					 tx_q->tx_skbuff_dma[i].buf,
1514  					 tx_q->tx_skbuff_dma[i].len,
1515  					 DMA_TO_DEVICE);
1516  	}
1517  
1518  	if (tx_q->xdpf[i] &&
1519  	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1520  	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1521  		xdp_return_frame(tx_q->xdpf[i]);
1522  		tx_q->xdpf[i] = NULL;
1523  	}
1524  
1525  	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1526  		tx_q->xsk_frames_done++;
1527  
1528  	if (tx_q->tx_skbuff[i] &&
1529  	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1530  		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1531  		tx_q->tx_skbuff[i] = NULL;
1532  	}
1533  
1534  	tx_q->tx_skbuff_dma[i].buf = 0;
1535  	tx_q->tx_skbuff_dma[i].map_as_page = false;
1536  }
1537  
1538  /**
1539   * dma_free_rx_skbufs - free RX dma buffers
1540   * @priv: private structure
1541   * @dma_conf: structure to take the dma data
1542   * @queue: RX queue index
1543   */
1544  static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1545  			       struct stmmac_dma_conf *dma_conf,
1546  			       u32 queue)
1547  {
1548  	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1549  	int i;
1550  
1551  	for (i = 0; i < dma_conf->dma_rx_size; i++)
1552  		stmmac_free_rx_buffer(priv, rx_q, i);
1553  }
1554  
1555  static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1556  				   struct stmmac_dma_conf *dma_conf,
1557  				   u32 queue, gfp_t flags)
1558  {
1559  	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1560  	int i;
1561  
1562  	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1563  		struct dma_desc *p;
1564  		int ret;
1565  
1566  		if (priv->extend_desc)
1567  			p = &((rx_q->dma_erx + i)->basic);
1568  		else
1569  			p = rx_q->dma_rx + i;
1570  
1571  		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1572  					     queue);
1573  		if (ret)
1574  			return ret;
1575  
1576  		rx_q->buf_alloc_num++;
1577  	}
1578  
1579  	return 0;
1580  }
1581  
1582  /**
1583   * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1584   * @priv: private structure
1585   * @dma_conf: structure to take the dma data
1586   * @queue: RX queue index
1587   */
1588  static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1589  				struct stmmac_dma_conf *dma_conf,
1590  				u32 queue)
1591  {
1592  	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1593  	int i;
1594  
1595  	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1596  		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1597  
1598  		if (!buf->xdp)
1599  			continue;
1600  
1601  		xsk_buff_free(buf->xdp);
1602  		buf->xdp = NULL;
1603  	}
1604  }
1605  
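/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: it allocates an XSK buffer for every descriptor of the RX
 * ring and programs the buffer DMA address into the descriptor.
 */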
1606  static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1607  				      struct stmmac_dma_conf *dma_conf,
1608  				      u32 queue)
1609  {
1610  	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611  	int i;
1612  
1613  	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1614  	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1615  	 * use this macro to make sure there are no size violations.
1616  	 */
1617  	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1618  
1619  	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1620  		struct stmmac_rx_buffer *buf;
1621  		dma_addr_t dma_addr;
1622  		struct dma_desc *p;
1623  
1624  		if (priv->extend_desc)
1625  			p = (struct dma_desc *)(rx_q->dma_erx + i);
1626  		else
1627  			p = rx_q->dma_rx + i;
1628  
1629  		buf = &rx_q->buf_pool[i];
1630  
1631  		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1632  		if (!buf->xdp)
1633  			return -ENOMEM;
1634  
1635  		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1636  		stmmac_set_desc_addr(priv, p, dma_addr);
1637  		rx_q->buf_alloc_num++;
1638  	}
1639  
1640  	return 0;
1641  }
1642  
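/**
 * stmmac_get_xsk_pool - get the XSK buffer pool bound to a queue
 * @priv: driver private structure
 * @queue: queue index
 * Description: it returns the XSK pool registered for this queue, or NULL
 * when XDP is disabled or the queue is not in zero-copy mode.
 */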
1643  static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1644  {
1645  	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1646  		return NULL;
1647  
1648  	return xsk_get_pool_from_qid(priv->dev, queue);
1649  }
1650  
1651  /**
1652   * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1653   * @priv: driver private structure
1654   * @dma_conf: structure to take the dma data
1655   * @queue: RX queue index
1656   * @flags: gfp flag.
1657   * Description: this function initializes the DMA RX descriptors
1658   * and allocates the socket buffers. It supports the chained and ring
1659   * modes.
1660   */
1661  static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1662  				    struct stmmac_dma_conf *dma_conf,
1663  				    u32 queue, gfp_t flags)
1664  {
1665  	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1666  	int ret;
1667  
1668  	netif_dbg(priv, probe, priv->dev,
1669  		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1670  		  (u32)rx_q->dma_rx_phy);
1671  
1672  	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1673  
1674  	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1675  
1676  	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1677  
1678  	if (rx_q->xsk_pool) {
1679  		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680  						   MEM_TYPE_XSK_BUFF_POOL,
1681  						   NULL));
1682  		netdev_info(priv->dev,
1683  			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1684  			    rx_q->queue_index);
1685  		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1686  	} else {
1687  		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1688  						   MEM_TYPE_PAGE_POOL,
1689  						   rx_q->page_pool));
1690  		netdev_info(priv->dev,
1691  			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1692  			    rx_q->queue_index);
1693  	}
1694  
1695  	if (rx_q->xsk_pool) {
1696  		/* RX XDP ZC buffer pool may not be populated, e.g.
1697  		 * xdpsock TX-only.
1698  		 */
1699  		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1700  	} else {
1701  		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1702  		if (ret < 0)
1703  			return -ENOMEM;
1704  	}
1705  
1706  	/* Setup the chained descriptor addresses */
1707  	if (priv->mode == STMMAC_CHAIN_MODE) {
1708  		if (priv->extend_desc)
1709  			stmmac_mode_init(priv, rx_q->dma_erx,
1710  					 rx_q->dma_rx_phy,
1711  					 dma_conf->dma_rx_size, 1);
1712  		else
1713  			stmmac_mode_init(priv, rx_q->dma_rx,
1714  					 rx_q->dma_rx_phy,
1715  					 dma_conf->dma_rx_size, 0);
1716  	}
1717  
1718  	return 0;
1719  }
1720  
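/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: it initializes every RX queue in use by calling
 * __init_dma_rx_desc_rings() and unwinds the already initialized queues
 * on failure.
 */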
1721  static int init_dma_rx_desc_rings(struct net_device *dev,
1722  				  struct stmmac_dma_conf *dma_conf,
1723  				  gfp_t flags)
1724  {
1725  	struct stmmac_priv *priv = netdev_priv(dev);
1726  	u32 rx_count = priv->plat->rx_queues_to_use;
1727  	int queue;
1728  	int ret;
1729  
1730  	/* RX INITIALIZATION */
1731  	netif_dbg(priv, probe, priv->dev,
1732  		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1733  
1734  	for (queue = 0; queue < rx_count; queue++) {
1735  		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1736  		if (ret)
1737  			goto err_init_rx_buffers;
1738  	}
1739  
1740  	return 0;
1741  
1742  err_init_rx_buffers:
1743  	while (queue >= 0) {
1744  		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1745  
1746  		if (rx_q->xsk_pool)
1747  			dma_free_rx_xskbufs(priv, dma_conf, queue);
1748  		else
1749  			dma_free_rx_skbufs(priv, dma_conf, queue);
1750  
1751  		rx_q->buf_alloc_num = 0;
1752  		rx_q->xsk_pool = NULL;
1753  
1754  		queue--;
1755  	}
1756  
1757  	return ret;
1758  }
1759  
1760  /**
1761   * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1762   * @priv: driver private structure
1763   * @dma_conf: structure to take the dma data
1764   * @queue: TX queue index
1765   * Description: this function initializes the DMA TX descriptors
1766   * and allocates the socket buffers. It supports the chained and ring
1767   * and clears the TX buffer bookkeeping. It supports the chained and ring
1768   */
1769  static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1770  				    struct stmmac_dma_conf *dma_conf,
1771  				    u32 queue)
1772  {
1773  	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1774  	int i;
1775  
1776  	netif_dbg(priv, probe, priv->dev,
1777  		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1778  		  (u32)tx_q->dma_tx_phy);
1779  
1780  	/* Setup the chained descriptor addresses */
1781  	if (priv->mode == STMMAC_CHAIN_MODE) {
1782  		if (priv->extend_desc)
1783  			stmmac_mode_init(priv, tx_q->dma_etx,
1784  					 tx_q->dma_tx_phy,
1785  					 dma_conf->dma_tx_size, 1);
1786  		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1787  			stmmac_mode_init(priv, tx_q->dma_tx,
1788  					 tx_q->dma_tx_phy,
1789  					 dma_conf->dma_tx_size, 0);
1790  	}
1791  
1792  	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1793  
1794  	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1795  		struct dma_desc *p;
1796  
1797  		if (priv->extend_desc)
1798  			p = &((tx_q->dma_etx + i)->basic);
1799  		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1800  			p = &((tx_q->dma_entx + i)->basic);
1801  		else
1802  			p = tx_q->dma_tx + i;
1803  
1804  		stmmac_clear_desc(priv, p);
1805  
1806  		tx_q->tx_skbuff_dma[i].buf = 0;
1807  		tx_q->tx_skbuff_dma[i].map_as_page = false;
1808  		tx_q->tx_skbuff_dma[i].len = 0;
1809  		tx_q->tx_skbuff_dma[i].last_segment = false;
1810  		tx_q->tx_skbuff[i] = NULL;
1811  	}
1812  
1813  	return 0;
1814  }
1815  
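/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: it initializes the TX descriptor ring of every TX queue in
 * use by calling __init_dma_tx_desc_rings().
 */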
1816  static int init_dma_tx_desc_rings(struct net_device *dev,
1817  				  struct stmmac_dma_conf *dma_conf)
1818  {
1819  	struct stmmac_priv *priv = netdev_priv(dev);
1820  	u32 tx_queue_cnt;
1821  	u32 queue;
1822  
1823  	tx_queue_cnt = priv->plat->tx_queues_to_use;
1824  
1825  	for (queue = 0; queue < tx_queue_cnt; queue++)
1826  		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1827  
1828  	return 0;
1829  }
1830  
1831  /**
1832   * init_dma_desc_rings - init the RX/TX descriptor rings
1833   * @dev: net device structure
1834   * @dma_conf: structure to take the dma data
1835   * @flags: gfp flag.
1836   * Description: this function initializes the DMA RX/TX descriptors
1837   * and allocates the socket buffers. It supports the chained and ring
1838   * modes.
1839   */
1840  static int init_dma_desc_rings(struct net_device *dev,
1841  			       struct stmmac_dma_conf *dma_conf,
1842  			       gfp_t flags)
1843  {
1844  	struct stmmac_priv *priv = netdev_priv(dev);
1845  	int ret;
1846  
1847  	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1848  	if (ret)
1849  		return ret;
1850  
1851  	ret = init_dma_tx_desc_rings(dev, dma_conf);
1852  
1853  	stmmac_clear_descriptors(priv, dma_conf);
1854  
1855  	if (netif_msg_hw(priv))
1856  		stmmac_display_rings(priv, dma_conf);
1857  
1858  	return ret;
1859  }
1860  
1861  /**
1862   * dma_free_tx_skbufs - free TX dma buffers
1863   * @priv: private structure
1864   * @dma_conf: structure to take the dma data
1865   * @queue: TX queue index
1866   */
1867  static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1868  			       struct stmmac_dma_conf *dma_conf,
1869  			       u32 queue)
1870  {
1871  	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1872  	int i;
1873  
1874  	tx_q->xsk_frames_done = 0;
1875  
1876  	for (i = 0; i < dma_conf->dma_tx_size; i++)
1877  		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1878  
1879  	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1880  		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1881  		tx_q->xsk_frames_done = 0;
1882  		tx_q->xsk_pool = NULL;
1883  	}
1884  }
1885  
1886  /**
1887   * stmmac_free_tx_skbufs - free TX skb buffers
1888   * @priv: private structure
1889   */
1890  static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1891  {
1892  	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1893  	u32 queue;
1894  
1895  	for (queue = 0; queue < tx_queue_cnt; queue++)
1896  		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1897  }
1898  
1899  /**
1900   * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1901   * @priv: private structure
1902   * @dma_conf: structure to take the dma data
1903   * @queue: RX queue index
1904   */
1905  static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1906  					 struct stmmac_dma_conf *dma_conf,
1907  					 u32 queue)
1908  {
1909  	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1910  
1911  	/* Release the DMA RX socket buffers */
1912  	if (rx_q->xsk_pool)
1913  		dma_free_rx_xskbufs(priv, dma_conf, queue);
1914  	else
1915  		dma_free_rx_skbufs(priv, dma_conf, queue);
1916  
1917  	rx_q->buf_alloc_num = 0;
1918  	rx_q->xsk_pool = NULL;
1919  
1920  	/* Free DMA regions of consistent memory previously allocated */
1921  	if (!priv->extend_desc)
1922  		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1923  				  sizeof(struct dma_desc),
1924  				  rx_q->dma_rx, rx_q->dma_rx_phy);
1925  	else
1926  		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1927  				  sizeof(struct dma_extended_desc),
1928  				  rx_q->dma_erx, rx_q->dma_rx_phy);
1929  
1930  	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1931  		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1932  
1933  	kfree(rx_q->buf_pool);
1934  	if (rx_q->page_pool)
1935  		page_pool_destroy(rx_q->page_pool);
1936  }
1937  
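/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it releases the RX descriptor resources of every RX queue
 * in use by calling __free_dma_rx_desc_resources().
 */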
1938  static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1939  				       struct stmmac_dma_conf *dma_conf)
1940  {
1941  	u32 rx_count = priv->plat->rx_queues_to_use;
1942  	u32 queue;
1943  
1944  	/* Free RX queue resources */
1945  	for (queue = 0; queue < rx_count; queue++)
1946  		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1947  }
1948  
1949  /**
1950   * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1951   * @priv: private structure
1952   * @dma_conf: structure to take the dma data
1953   * @queue: TX queue index
1954   */
1955  static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1956  					 struct stmmac_dma_conf *dma_conf,
1957  					 u32 queue)
1958  {
1959  	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1960  	size_t size;
1961  	void *addr;
1962  
1963  	/* Release the DMA TX socket buffers */
1964  	dma_free_tx_skbufs(priv, dma_conf, queue);
1965  
1966  	if (priv->extend_desc) {
1967  		size = sizeof(struct dma_extended_desc);
1968  		addr = tx_q->dma_etx;
1969  	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1970  		size = sizeof(struct dma_edesc);
1971  		addr = tx_q->dma_entx;
1972  	} else {
1973  		size = sizeof(struct dma_desc);
1974  		addr = tx_q->dma_tx;
1975  	}
1976  
1977  	size *= dma_conf->dma_tx_size;
1978  
1979  	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1980  
1981  	kfree(tx_q->tx_skbuff_dma);
1982  	kfree(tx_q->tx_skbuff);
1983  }
1984  
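/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it releases the TX descriptor resources of every TX queue
 * in use by calling __free_dma_tx_desc_resources().
 */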
1985  static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1986  				       struct stmmac_dma_conf *dma_conf)
1987  {
1988  	u32 tx_count = priv->plat->tx_queues_to_use;
1989  	u32 queue;
1990  
1991  	/* Free TX queue resources */
1992  	for (queue = 0; queue < tx_count; queue++)
1993  		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1994  }
1995  
1996  /**
1997   * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1998   * @priv: private structure
1999   * @dma_conf: structure to take the dma data
2000   * @queue: RX queue index
2001   * Description: according to which descriptor can be used (extended or basic)
2002   * this function allocates the resources for the RX path. For example, it
2003   * pre-allocates the RX socket buffers in order to allow the zero-copy
2004   * mechanism.
2005   */
2006  static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2007  					 struct stmmac_dma_conf *dma_conf,
2008  					 u32 queue)
2009  {
2010  	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2011  	struct stmmac_channel *ch = &priv->channel[queue];
2012  	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2013  	struct page_pool_params pp_params = { 0 };
2014  	unsigned int num_pages;
2015  	unsigned int napi_id;
2016  	int ret;
2017  
2018  	rx_q->queue_index = queue;
2019  	rx_q->priv_data = priv;
2020  
2021  	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2022  	pp_params.pool_size = dma_conf->dma_rx_size;
2023  	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2024  	pp_params.order = ilog2(num_pages);
2025  	pp_params.nid = dev_to_node(priv->device);
2026  	pp_params.dev = priv->device;
2027  	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2028  	pp_params.offset = stmmac_rx_offset(priv);
2029  	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2030  
2031  	rx_q->page_pool = page_pool_create(&pp_params);
2032  	if (IS_ERR(rx_q->page_pool)) {
2033  		ret = PTR_ERR(rx_q->page_pool);
2034  		rx_q->page_pool = NULL;
2035  		return ret;
2036  	}
2037  
2038  	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2039  				 sizeof(*rx_q->buf_pool),
2040  				 GFP_KERNEL);
2041  	if (!rx_q->buf_pool)
2042  		return -ENOMEM;
2043  
2044  	if (priv->extend_desc) {
2045  		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2046  						   dma_conf->dma_rx_size *
2047  						   sizeof(struct dma_extended_desc),
2048  						   &rx_q->dma_rx_phy,
2049  						   GFP_KERNEL);
2050  		if (!rx_q->dma_erx)
2051  			return -ENOMEM;
2052  
2053  	} else {
2054  		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2055  						  dma_conf->dma_rx_size *
2056  						  sizeof(struct dma_desc),
2057  						  &rx_q->dma_rx_phy,
2058  						  GFP_KERNEL);
2059  		if (!rx_q->dma_rx)
2060  			return -ENOMEM;
2061  	}
2062  
2063  	if (stmmac_xdp_is_enabled(priv) &&
2064  	    test_bit(queue, priv->af_xdp_zc_qps))
2065  		napi_id = ch->rxtx_napi.napi_id;
2066  	else
2067  		napi_id = ch->rx_napi.napi_id;
2068  
2069  	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2070  			       rx_q->queue_index,
2071  			       napi_id);
2072  	if (ret) {
2073  		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2074  		return -EINVAL;
2075  	}
2076  
2077  	return 0;
2078  }
2079  
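/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it allocates the RX resources of every RX queue in use and
 * frees the already allocated queues if one allocation fails.
 */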
2080  static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2081  				       struct stmmac_dma_conf *dma_conf)
2082  {
2083  	u32 rx_count = priv->plat->rx_queues_to_use;
2084  	u32 queue;
2085  	int ret;
2086  
2087  	/* RX queues buffers and DMA */
2088  	for (queue = 0; queue < rx_count; queue++) {
2089  		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2090  		if (ret)
2091  			goto err_dma;
2092  	}
2093  
2094  	return 0;
2095  
2096  err_dma:
2097  	free_dma_rx_desc_resources(priv, dma_conf);
2098  
2099  	return ret;
2100  }
2101  
2102  /**
2103   * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2104   * @priv: private structure
2105   * @dma_conf: structure to take the dma data
2106   * @queue: TX queue index
2107   * Description: according to which descriptor can be used (extended or basic)
2108   * this function allocates the resources for the TX path: the descriptor
2109   * ring and the per-descriptor bookkeeping arrays used to track the
2110   * transmitted buffers.
2111   */
2112  static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2113  					 struct stmmac_dma_conf *dma_conf,
2114  					 u32 queue)
2115  {
2116  	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2117  	size_t size;
2118  	void *addr;
2119  
2120  	tx_q->queue_index = queue;
2121  	tx_q->priv_data = priv;
2122  
2123  	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2124  				      sizeof(*tx_q->tx_skbuff_dma),
2125  				      GFP_KERNEL);
2126  	if (!tx_q->tx_skbuff_dma)
2127  		return -ENOMEM;
2128  
2129  	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2130  				  sizeof(struct sk_buff *),
2131  				  GFP_KERNEL);
2132  	if (!tx_q->tx_skbuff)
2133  		return -ENOMEM;
2134  
2135  	if (priv->extend_desc)
2136  		size = sizeof(struct dma_extended_desc);
2137  	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2138  		size = sizeof(struct dma_edesc);
2139  	else
2140  		size = sizeof(struct dma_desc);
2141  
2142  	size *= dma_conf->dma_tx_size;
2143  
2144  	addr = dma_alloc_coherent(priv->device, size,
2145  				  &tx_q->dma_tx_phy, GFP_KERNEL);
2146  	if (!addr)
2147  		return -ENOMEM;
2148  
2149  	if (priv->extend_desc)
2150  		tx_q->dma_etx = addr;
2151  	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152  		tx_q->dma_entx = addr;
2153  	else
2154  		tx_q->dma_tx = addr;
2155  
2156  	return 0;
2157  }
2158  
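/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it allocates the TX resources of every TX queue in use and
 * frees the already allocated queues if one allocation fails.
 */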
2159  static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2160  				       struct stmmac_dma_conf *dma_conf)
2161  {
2162  	u32 tx_count = priv->plat->tx_queues_to_use;
2163  	u32 queue;
2164  	int ret;
2165  
2166  	/* TX queues buffers and DMA */
2167  	for (queue = 0; queue < tx_count; queue++) {
2168  		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2169  		if (ret)
2170  			goto err_dma;
2171  	}
2172  
2173  	return 0;
2174  
2175  err_dma:
2176  	free_dma_tx_desc_resources(priv, dma_conf);
2177  	return ret;
2178  }
2179  
2180  /**
2181   * alloc_dma_desc_resources - alloc TX/RX resources.
2182   * @priv: private structure
2183   * @dma_conf: structure to take the dma data
2184   * Description: according to which descriptor can be used (extended or basic)
2185   * this function allocates the resources for the TX and RX paths. In case of
2186   * reception, for example, it pre-allocates the RX socket buffers in order to
2187   * allow the zero-copy mechanism.
2188   */
2189  static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2190  				    struct stmmac_dma_conf *dma_conf)
2191  {
2192  	/* RX Allocation */
2193  	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2194  
2195  	if (ret)
2196  		return ret;
2197  
2198  	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2199  
2200  	return ret;
2201  }
2202  
2203  /**
2204   * free_dma_desc_resources - free dma desc resources
2205   * @priv: private structure
2206   * @dma_conf: structure to take the dma data
2207   */
2208  static void free_dma_desc_resources(struct stmmac_priv *priv,
2209  				    struct stmmac_dma_conf *dma_conf)
2210  {
2211  	/* Release the DMA TX socket buffers */
2212  	free_dma_tx_desc_resources(priv, dma_conf);
2213  
2214  	/* Release the DMA RX socket buffers later
2215  	 * to ensure all pending XDP_TX buffers are returned.
2216  	 */
2217  	free_dma_rx_desc_resources(priv, dma_conf);
2218  }
2219  
2220  /**
2221   *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2222   *  @priv: driver private structure
2223   *  Description: It is used for enabling the rx queues in the MAC
2224   */
2225  static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2226  {
2227  	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2228  	int queue;
2229  	u8 mode;
2230  
2231  	for (queue = 0; queue < rx_queues_count; queue++) {
2232  		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2233  		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2234  	}
2235  }
2236  
2237  /**
2238   * stmmac_start_rx_dma - start RX DMA channel
2239   * @priv: driver private structure
2240   * @chan: RX channel index
2241   * Description:
2242   * This starts a RX DMA channel
2243   */
2244  static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2245  {
2246  	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2247  	stmmac_start_rx(priv, priv->ioaddr, chan);
2248  }
2249  
2250  /**
2251   * stmmac_start_tx_dma - start TX DMA channel
2252   * @priv: driver private structure
2253   * @chan: TX channel index
2254   * Description:
2255   * This starts a TX DMA channel
2256   */
2257  static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2258  {
2259  	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2260  	stmmac_start_tx(priv, priv->ioaddr, chan);
2261  }
2262  
2263  /**
2264   * stmmac_stop_rx_dma - stop RX DMA channel
2265   * @priv: driver private structure
2266   * @chan: RX channel index
2267   * Description:
2268   * This stops a RX DMA channel
2269   */
2270  static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2271  {
2272  	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2273  	stmmac_stop_rx(priv, priv->ioaddr, chan);
2274  }
2275  
2276  /**
2277   * stmmac_stop_tx_dma - stop TX DMA channel
2278   * @priv: driver private structure
2279   * @chan: TX channel index
2280   * Description:
2281   * This stops a TX DMA channel
2282   */
2283  static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2284  {
2285  	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2286  	stmmac_stop_tx(priv, priv->ioaddr, chan);
2287  }
2288  
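/**
 * stmmac_enable_all_dma_irq - enable RX and TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: it re-enables the DMA interrupts of every channel while
 * holding the per-channel lock.
 */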
2289  static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2290  {
2291  	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2292  	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2293  	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2294  	u32 chan;
2295  
2296  	for (chan = 0; chan < dma_csr_ch; chan++) {
2297  		struct stmmac_channel *ch = &priv->channel[chan];
2298  		unsigned long flags;
2299  
2300  		spin_lock_irqsave(&ch->lock, flags);
2301  		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2302  		spin_unlock_irqrestore(&ch->lock, flags);
2303  	}
2304  }
2305  
2306  /**
2307   * stmmac_start_all_dma - start all RX and TX DMA channels
2308   * @priv: driver private structure
2309   * Description:
2310   * This starts all the RX and TX DMA channels
2311   */
2312  static void stmmac_start_all_dma(struct stmmac_priv *priv)
2313  {
2314  	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2315  	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2316  	u32 chan = 0;
2317  
2318  	for (chan = 0; chan < rx_channels_count; chan++)
2319  		stmmac_start_rx_dma(priv, chan);
2320  
2321  	for (chan = 0; chan < tx_channels_count; chan++)
2322  		stmmac_start_tx_dma(priv, chan);
2323  }
2324  
2325  /**
2326   * stmmac_stop_all_dma - stop all RX and TX DMA channels
2327   * @priv: driver private structure
2328   * Description:
2329   * This stops the RX and TX DMA channels
2330   */
2331  static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2332  {
2333  	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2334  	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2335  	u32 chan = 0;
2336  
2337  	for (chan = 0; chan < rx_channels_count; chan++)
2338  		stmmac_stop_rx_dma(priv, chan);
2339  
2340  	for (chan = 0; chan < tx_channels_count; chan++)
2341  		stmmac_stop_tx_dma(priv, chan);
2342  }
2343  
2344  /**
2345   *  stmmac_dma_operation_mode - HW DMA operation mode
2346   *  @priv: driver private structure
2347   *  Description: it is used for configuring the DMA operation mode register in
2348   *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2349   */
2350  static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2351  {
2352  	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2353  	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2354  	int rxfifosz = priv->plat->rx_fifo_size;
2355  	int txfifosz = priv->plat->tx_fifo_size;
2356  	u32 txmode = 0;
2357  	u32 rxmode = 0;
2358  	u32 chan = 0;
2359  	u8 qmode = 0;
2360  
2361  	if (rxfifosz == 0)
2362  		rxfifosz = priv->dma_cap.rx_fifo_size;
2363  	if (txfifosz == 0)
2364  		txfifosz = priv->dma_cap.tx_fifo_size;
2365  
2366  	/* Adjust for real per queue fifo size */
2367  	rxfifosz /= rx_channels_count;
2368  	txfifosz /= tx_channels_count;
2369  
2370  	if (priv->plat->force_thresh_dma_mode) {
2371  		txmode = tc;
2372  		rxmode = tc;
2373  	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2374  		/*
2375  		 * In case of GMAC, SF mode can be enabled
2376  		 * to perform the TX COE in HW. This depends on:
2377  		 * 1) TX COE is actually supported
2378  		 * 2) There is no buggy Jumbo frame support
2379  		 *    that requires not inserting the csum in the TDES.
2380  		 */
2381  		txmode = SF_DMA_MODE;
2382  		rxmode = SF_DMA_MODE;
2383  		priv->xstats.threshold = SF_DMA_MODE;
2384  	} else {
2385  		txmode = tc;
2386  		rxmode = SF_DMA_MODE;
2387  	}
2388  
2389  	/* configure all channels */
2390  	for (chan = 0; chan < rx_channels_count; chan++) {
2391  		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2392  		u32 buf_size;
2393  
2394  		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2395  
2396  		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2397  				rxfifosz, qmode);
2398  
2399  		if (rx_q->xsk_pool) {
2400  			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2401  			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2402  					      buf_size,
2403  					      chan);
2404  		} else {
2405  			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2406  					      priv->dma_conf.dma_buf_sz,
2407  					      chan);
2408  		}
2409  	}
2410  
2411  	for (chan = 0; chan < tx_channels_count; chan++) {
2412  		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2413  
2414  		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2415  				txfifosz, qmode);
2416  	}
2417  }
2418  
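/**
 * stmmac_xdp_xmit_zc - transmit frames from the XSK TX ring (zero-copy)
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: it peeks descriptors from the XSK pool, fills the TX ring
 * and kicks the DMA. Returns true when TX budget is still available and no
 * more XSK TX descriptors are pending.
 */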
2419  static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2420  {
2421  	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2422  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2423  	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2424  	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2425  	unsigned int entry = tx_q->cur_tx;
2426  	struct dma_desc *tx_desc = NULL;
2427  	struct xdp_desc xdp_desc;
2428  	bool work_done = true;
2429  	u32 tx_set_ic_bit = 0;
2430  
2431  	/* Avoids TX time-out as we are sharing with slow path */
2432  	txq_trans_cond_update(nq);
2433  
2434  	budget = min(budget, stmmac_tx_avail(priv, queue));
2435  
2436  	while (budget-- > 0) {
2437  		dma_addr_t dma_addr;
2438  		bool set_ic;
2439  
2440  		/* We are sharing with slow path and stop XSK TX desc submission when
2441  		 * the available TX ring space is below the threshold.
2442  		 */
2443  		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2444  		    !netif_carrier_ok(priv->dev)) {
2445  			work_done = false;
2446  			break;
2447  		}
2448  
2449  		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2450  			break;
2451  
2452  		if (likely(priv->extend_desc))
2453  			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2454  		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2455  			tx_desc = &tx_q->dma_entx[entry].basic;
2456  		else
2457  			tx_desc = tx_q->dma_tx + entry;
2458  
2459  		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2460  		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2461  
2462  		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2463  
2464  		/* To return the XDP buffer to the XSK pool, we simply call
2465  		 * xsk_tx_completed(), so we don't need to fill up
2466  		 * 'buf' and 'xdpf'.
2467  		 */
2468  		tx_q->tx_skbuff_dma[entry].buf = 0;
2469  		tx_q->xdpf[entry] = NULL;
2470  
2471  		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2472  		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2473  		tx_q->tx_skbuff_dma[entry].last_segment = true;
2474  		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2475  
2476  		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2477  
2478  		tx_q->tx_count_frames++;
2479  
2480  		if (!priv->tx_coal_frames[queue])
2481  			set_ic = false;
2482  		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2483  			set_ic = true;
2484  		else
2485  			set_ic = false;
2486  
2487  		if (set_ic) {
2488  			tx_q->tx_count_frames = 0;
2489  			stmmac_set_tx_ic(priv, tx_desc);
2490  			tx_set_ic_bit++;
2491  		}
2492  
2493  		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2494  				       true, priv->mode, true, true,
2495  				       xdp_desc.len);
2496  
2497  		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2498  
2499  		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2500  		entry = tx_q->cur_tx;
2501  	}
2502  	u64_stats_update_begin(&txq_stats->napi_syncp);
2503  	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2504  	u64_stats_update_end(&txq_stats->napi_syncp);
2505  
2506  	if (tx_desc) {
2507  		stmmac_flush_tx_descriptors(priv, queue);
2508  		xsk_tx_release(pool);
2509  	}
2510  
2511  	/* Return true if both of the following conditions are met
2512  	 *  a) TX Budget is still available
2513  	 *  b) work_done = true when XSK TX desc peek is empty (no more
2514  	 *     pending XSK TX for transmission)
2515  	 */
2516  	return !!budget && work_done;
2517  }
2518  
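/**
 * stmmac_bump_dma_threshold - raise the TX DMA threshold after an error
 * @priv: driver private structure
 * @chan: channel index
 * Description: when not running in Store-And-Forward mode, it increases the
 * threshold in steps of 64 (up to 256) and reprograms the DMA operation
 * mode for the channel.
 */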
2519  static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2520  {
2521  	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2522  		tc += 64;
2523  
2524  		if (priv->plat->force_thresh_dma_mode)
2525  			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2526  		else
2527  			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2528  						      chan);
2529  
2530  		priv->xstats.threshold = tc;
2531  	}
2532  }
2533  
2534  /**
2535   * stmmac_tx_clean - to manage the transmission completion
2536   * @priv: driver private structure
2537   * @budget: napi budget limiting this functions packet handling
2538   * @queue: TX queue index
2539   * Description: it reclaims the transmit resources after transmission completes.
2540   */
2541  static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2542  {
2543  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2544  	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2545  	unsigned int bytes_compl = 0, pkts_compl = 0;
2546  	unsigned int entry, xmits = 0, count = 0;
2547  	u32 tx_packets = 0, tx_errors = 0;
2548  
2549  	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2550  
2551  	tx_q->xsk_frames_done = 0;
2552  
2553  	entry = tx_q->dirty_tx;
2554  
2555  	/* Try to clean all completed TX frames in one shot */
2556  	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2557  		struct xdp_frame *xdpf;
2558  		struct sk_buff *skb;
2559  		struct dma_desc *p;
2560  		int status;
2561  
2562  		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2563  		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2564  			xdpf = tx_q->xdpf[entry];
2565  			skb = NULL;
2566  		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2567  			xdpf = NULL;
2568  			skb = tx_q->tx_skbuff[entry];
2569  		} else {
2570  			xdpf = NULL;
2571  			skb = NULL;
2572  		}
2573  
2574  		if (priv->extend_desc)
2575  			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2576  		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2577  			p = &tx_q->dma_entx[entry].basic;
2578  		else
2579  			p = tx_q->dma_tx + entry;
2580  
2581  		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2582  		/* Check if the descriptor is owned by the DMA */
2583  		if (unlikely(status & tx_dma_own))
2584  			break;
2585  
2586  		count++;
2587  
2588  		/* Make sure descriptor fields are read after reading
2589  		 * the own bit.
2590  		 */
2591  		dma_rmb();
2592  
2593  		/* Just consider the last segment and ...*/
2594  		if (likely(!(status & tx_not_ls))) {
2595  			/* ... verify the status error condition */
2596  			if (unlikely(status & tx_err)) {
2597  				tx_errors++;
2598  				if (unlikely(status & tx_err_bump_tc))
2599  					stmmac_bump_dma_threshold(priv, queue);
2600  			} else {
2601  				tx_packets++;
2602  			}
2603  			if (skb)
2604  				stmmac_get_tx_hwtstamp(priv, p, skb);
2605  		}
2606  
2607  		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2608  			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2609  			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2610  				dma_unmap_page(priv->device,
2611  					       tx_q->tx_skbuff_dma[entry].buf,
2612  					       tx_q->tx_skbuff_dma[entry].len,
2613  					       DMA_TO_DEVICE);
2614  			else
2615  				dma_unmap_single(priv->device,
2616  						 tx_q->tx_skbuff_dma[entry].buf,
2617  						 tx_q->tx_skbuff_dma[entry].len,
2618  						 DMA_TO_DEVICE);
2619  			tx_q->tx_skbuff_dma[entry].buf = 0;
2620  			tx_q->tx_skbuff_dma[entry].len = 0;
2621  			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2622  		}
2623  
2624  		stmmac_clean_desc3(priv, tx_q, p);
2625  
2626  		tx_q->tx_skbuff_dma[entry].last_segment = false;
2627  		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2628  
2629  		if (xdpf &&
2630  		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2631  			xdp_return_frame_rx_napi(xdpf);
2632  			tx_q->xdpf[entry] = NULL;
2633  		}
2634  
2635  		if (xdpf &&
2636  		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2637  			xdp_return_frame(xdpf);
2638  			tx_q->xdpf[entry] = NULL;
2639  		}
2640  
2641  		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2642  			tx_q->xsk_frames_done++;
2643  
2644  		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2645  			if (likely(skb)) {
2646  				pkts_compl++;
2647  				bytes_compl += skb->len;
2648  				dev_consume_skb_any(skb);
2649  				tx_q->tx_skbuff[entry] = NULL;
2650  			}
2651  		}
2652  
2653  		stmmac_release_tx_desc(priv, p, priv->mode);
2654  
2655  		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2656  	}
2657  	tx_q->dirty_tx = entry;
2658  
2659  	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2660  				  pkts_compl, bytes_compl);
2661  
2662  	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2663  								queue))) &&
2664  	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2665  
2666  		netif_dbg(priv, tx_done, priv->dev,
2667  			  "%s: restart transmit\n", __func__);
2668  		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2669  	}
2670  
2671  	if (tx_q->xsk_pool) {
2672  		bool work_done;
2673  
2674  		if (tx_q->xsk_frames_done)
2675  			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2676  
2677  		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2678  			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2679  
2680  		/* For XSK TX, we try to send as many as possible.
2681  		 * If XSK work done (XSK TX desc empty and budget still
2682  		 * available), return "budget - 1" to re-enable the TX IRQ.
2683  		 * Else, return "budget" to make NAPI continue polling.
2684  		 */
2685  		work_done = stmmac_xdp_xmit_zc(priv, queue,
2686  					       STMMAC_XSK_TX_BUDGET_MAX);
2687  		if (work_done)
2688  			xmits = budget - 1;
2689  		else
2690  			xmits = budget;
2691  	}
2692  
2693  	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2694  	    priv->eee_sw_timer_en) {
2695  		if (stmmac_enable_eee_mode(priv))
2696  			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2697  	}
2698  
2699  	/* We still have pending packets, let's call for a new scheduling */
2700  	if (tx_q->dirty_tx != tx_q->cur_tx)
2701  		stmmac_tx_timer_arm(priv, queue);
2702  
2703  	u64_stats_update_begin(&txq_stats->napi_syncp);
2704  	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2705  	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2706  	u64_stats_inc(&txq_stats->napi.tx_clean);
2707  	u64_stats_update_end(&txq_stats->napi_syncp);
2708  
2709  	priv->xstats.tx_errors += tx_errors;
2710  
2711  	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2712  
2713  	/* Combine decisions from TX clean and XSK TX */
2714  	return max(count, xmits);
2715  }
2716  
2717  /**
2718   * stmmac_tx_err - to manage the tx error
2719   * @priv: driver private structure
2720   * @chan: channel index
2721   * Description: it cleans the descriptors and restarts the transmission
2722   * in case of transmission errors.
2723   */
2724  static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2725  {
2726  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2727  
2728  	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2729  
2730  	stmmac_stop_tx_dma(priv, chan);
2731  	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2732  	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2733  	stmmac_reset_tx_queue(priv, chan);
2734  	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2735  			    tx_q->dma_tx_phy, chan);
2736  	stmmac_start_tx_dma(priv, chan);
2737  
2738  	priv->xstats.tx_errors++;
2739  	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2740  }
2741  
2742  /**
2743   *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2744   *  @priv: driver private structure
2745   *  @txmode: TX operating mode
2746   *  @rxmode: RX operating mode
2747   *  @chan: channel index
2748   *  Description: it is used for configuring the DMA operation mode at
2749   *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2750   *  mode.
2751   */
2752  static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2753  					  u32 rxmode, u32 chan)
2754  {
2755  	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2756  	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2757  	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2758  	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2759  	int rxfifosz = priv->plat->rx_fifo_size;
2760  	int txfifosz = priv->plat->tx_fifo_size;
2761  
2762  	if (rxfifosz == 0)
2763  		rxfifosz = priv->dma_cap.rx_fifo_size;
2764  	if (txfifosz == 0)
2765  		txfifosz = priv->dma_cap.tx_fifo_size;
2766  
2767  	/* Adjust for real per queue fifo size */
2768  	rxfifosz /= rx_channels_count;
2769  	txfifosz /= tx_channels_count;
2770  
2771  	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2772  	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2773  }
2774  
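/**
 * stmmac_safety_feat_interrupt - handle safety feature interrupts
 * @priv: driver private structure
 * Description: it reads the safety feature IRQ status and triggers the
 * global error handling when an error is reported. Returns true if an
 * error was handled.
 */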
2775  static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2776  {
2777  	int ret;
2778  
2779  	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2780  			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2781  	if (ret && (ret != -EINVAL)) {
2782  		stmmac_global_err(priv);
2783  		return true;
2784  	}
2785  
2786  	return false;
2787  }
2788  
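/**
 * stmmac_napi_check - check DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * @dir: interrupt direction (RX, TX or both)
 * Description: it reads the DMA interrupt status for the channel and, if
 * RX or TX work is pending, disables the corresponding DMA interrupt and
 * schedules the matching NAPI instance. Returns the interrupt status.
 */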
2789  static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2790  {
2791  	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2792  						 &priv->xstats, chan, dir);
2793  	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2794  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2795  	struct stmmac_channel *ch = &priv->channel[chan];
2796  	struct napi_struct *rx_napi;
2797  	struct napi_struct *tx_napi;
2798  	unsigned long flags;
2799  
2800  	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2801  	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2802  
2803  	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2804  		if (napi_schedule_prep(rx_napi)) {
2805  			spin_lock_irqsave(&ch->lock, flags);
2806  			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2807  			spin_unlock_irqrestore(&ch->lock, flags);
2808  			__napi_schedule(rx_napi);
2809  		}
2810  	}
2811  
2812  	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2813  		if (napi_schedule_prep(tx_napi)) {
2814  			spin_lock_irqsave(&ch->lock, flags);
2815  			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2816  			spin_unlock_irqrestore(&ch->lock, flags);
2817  			__napi_schedule(tx_napi);
2818  		}
2819  	}
2820  
2821  	return status;
2822  }
2823  
2824  /**
2825   * stmmac_dma_interrupt - DMA ISR
2826   * @priv: driver private structure
2827   * Description: this is the DMA ISR. It is called by the main ISR.
2828   * It calls the dwmac dma routine and schedules the poll method in case
2829   * some work can be done.
2830   */
2831  static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2832  {
2833  	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2834  	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2835  	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2836  				tx_channel_count : rx_channel_count;
2837  	u32 chan;
2838  	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2839  
2840  	/* Make sure we never check beyond our status buffer. */
2841  	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2842  		channels_to_check = ARRAY_SIZE(status);
2843  
2844  	for (chan = 0; chan < channels_to_check; chan++)
2845  		status[chan] = stmmac_napi_check(priv, chan,
2846  						 DMA_DIR_RXTX);
2847  
2848  	for (chan = 0; chan < tx_channel_count; chan++) {
2849  		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2850  			/* Try to bump up the dma threshold on this failure */
2851  			stmmac_bump_dma_threshold(priv, chan);
2852  		} else if (unlikely(status[chan] == tx_hard_error)) {
2853  			stmmac_tx_err(priv, chan);
2854  		}
2855  	}
2856  }
2857  
2858  /**
2859   * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2860   * @priv: driver private structure
2861   * Description: this masks the MMC irq; the counters are, in fact, managed in SW.
2862   */
2863  static void stmmac_mmc_setup(struct stmmac_priv *priv)
2864  {
2865  	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2866  			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2867  
2868  	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2869  
2870  	if (priv->dma_cap.rmon) {
2871  		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2872  		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2873  	} else
2874  		netdev_info(priv->dev, "No MAC Management Counters available\n");
2875  }
2876  
2877  /**
2878   * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2879   * @priv: driver private structure
2880   * Description:
2881   *  new GMAC chip generations have a register to indicate the
2882   *  presence of the optional features/functions.
2883   *  This can also be used to override the value passed through the
2884   *  platform and is necessary for old MAC10/100 and GMAC chips.
2885   */
2886  static int stmmac_get_hw_features(struct stmmac_priv *priv)
2887  {
2888  	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2889  }
2890  
2891  /**
2892   * stmmac_check_ether_addr - check if the MAC addr is valid
2893   * @priv: driver private structure
2894   * Description:
2895   * it verifies whether the MAC address is valid; in case of failure it
2896   * generates a random MAC address
2897   */
2898  static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2899  {
2900  	u8 addr[ETH_ALEN];
2901  
2902  	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2903  		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2904  		if (is_valid_ether_addr(addr))
2905  			eth_hw_addr_set(priv->dev, addr);
2906  		else
2907  			eth_hw_addr_random(priv->dev);
2908  		dev_info(priv->device, "device MAC address %pM\n",
2909  			 priv->dev->dev_addr);
2910  	}
2911  }
2912  
2913  /**
2914   * stmmac_init_dma_engine - DMA init.
2915   * @priv: driver private structure
2916   * Description:
2917   * It inits the DMA by invoking the specific MAC/GMAC callback.
2918   * Some DMA parameters can be passed from the platform;
2919   * in case these are not passed, a default is kept for the MAC or GMAC.
2920   */
2921  static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2922  {
2923  	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2924  	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2925  	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2926  	struct stmmac_rx_queue *rx_q;
2927  	struct stmmac_tx_queue *tx_q;
2928  	u32 chan = 0;
2929  	int atds = 0;
2930  	int ret = 0;
2931  
2932  	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2933  		dev_err(priv->device, "Invalid DMA configuration\n");
2934  		return -EINVAL;
2935  	}
2936  
2937  	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2938  		atds = 1;
2939  
2940  	ret = stmmac_reset(priv, priv->ioaddr);
2941  	if (ret) {
2942  		dev_err(priv->device, "Failed to reset the dma\n");
2943  		return ret;
2944  	}
2945  
2946  	/* DMA Configuration */
2947  	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2948  
2949  	if (priv->plat->axi)
2950  		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2951  
2952  	/* DMA CSR Channel configuration */
2953  	for (chan = 0; chan < dma_csr_ch; chan++) {
2954  		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2955  		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2956  	}
2957  
2958  	/* DMA RX Channel Configuration */
2959  	for (chan = 0; chan < rx_channels_count; chan++) {
2960  		rx_q = &priv->dma_conf.rx_queue[chan];
2961  
2962  		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2963  				    rx_q->dma_rx_phy, chan);
2964  
2965  		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2966  				     (rx_q->buf_alloc_num *
2967  				      sizeof(struct dma_desc));
2968  		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2969  				       rx_q->rx_tail_addr, chan);
2970  	}
2971  
2972  	/* DMA TX Channel Configuration */
2973  	for (chan = 0; chan < tx_channels_count; chan++) {
2974  		tx_q = &priv->dma_conf.tx_queue[chan];
2975  
2976  		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2977  				    tx_q->dma_tx_phy, chan);
2978  
2979  		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2980  		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2981  				       tx_q->tx_tail_addr, chan);
2982  	}
2983  
2984  	return ret;
2985  }
2986  
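/**
 * stmmac_tx_timer_arm - (re)arm the TX coalescing timer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it starts the per-queue hrtimer used for TX interrupt
 * mitigation, unless the coalescing timer is set to zero.
 */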
2987  static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2988  {
2989  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2990  	u32 tx_coal_timer = priv->tx_coal_timer[queue];
2991  
2992  	if (!tx_coal_timer)
2993  		return;
2994  
2995  	hrtimer_start(&tx_q->txtimer,
2996  		      STMMAC_COAL_TIMER(tx_coal_timer),
2997  		      HRTIMER_MODE_REL);
2998  }
2999  
3000  /**
3001   * stmmac_tx_timer - mitigation sw timer for tx.
3002   * @t: data pointer
3003   * Description:
3004   * This is the timer handler to directly invoke the stmmac_tx_clean.
3005   */
3006  static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3007  {
3008  	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3009  	struct stmmac_priv *priv = tx_q->priv_data;
3010  	struct stmmac_channel *ch;
3011  	struct napi_struct *napi;
3012  
3013  	ch = &priv->channel[tx_q->queue_index];
3014  	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3015  
3016  	if (likely(napi_schedule_prep(napi))) {
3017  		unsigned long flags;
3018  
3019  		spin_lock_irqsave(&ch->lock, flags);
3020  		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3021  		spin_unlock_irqrestore(&ch->lock, flags);
3022  		__napi_schedule(napi);
3023  	}
3024  
3025  	return HRTIMER_NORESTART;
3026  }
3027  
3028  /**
3029   * stmmac_init_coalesce - init mitigation options.
3030   * @priv: driver private structure
3031   * Description:
3032   * This inits the coalesce parameters: i.e. timer rate,
3033   * timer handler and default threshold used for enabling the
3034   * interrupt on completion bit.
3035   */
3036  static void stmmac_init_coalesce(struct stmmac_priv *priv)
3037  {
3038  	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3039  	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3040  	u32 chan;
3041  
3042  	for (chan = 0; chan < tx_channel_count; chan++) {
3043  		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3044  
3045  		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3046  		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3047  
3048  		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3049  		tx_q->txtimer.function = stmmac_tx_timer;
3050  	}
3051  
3052  	for (chan = 0; chan < rx_channel_count; chan++)
3053  		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3054  }
3055  
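/**
 * stmmac_set_rings_length - program the RX/TX descriptor ring lengths
 * @priv: driver private structure
 * Description: it writes the configured TX and RX ring sizes to every DMA
 * channel in use.
 */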
3056  static void stmmac_set_rings_length(struct stmmac_priv *priv)
3057  {
3058  	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3059  	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3060  	u32 chan;
3061  
3062  	/* set TX ring length */
3063  	for (chan = 0; chan < tx_channels_count; chan++)
3064  		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3065  				       (priv->dma_conf.dma_tx_size - 1), chan);
3066  
3067  	/* set RX ring length */
3068  	for (chan = 0; chan < rx_channels_count; chan++)
3069  		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3070  				       (priv->dma_conf.dma_rx_size - 1), chan);
3071  }
3072  
3073  /**
3074   *  stmmac_set_tx_queue_weight - Set TX queue weight
3075   *  @priv: driver private structure
3076   *  Description: It is used for setting the TX queue weights
3077   */
3078  static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3079  {
3080  	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3081  	u32 weight;
3082  	u32 queue;
3083  
3084  	for (queue = 0; queue < tx_queues_count; queue++) {
3085  		weight = priv->plat->tx_queues_cfg[queue].weight;
3086  		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3087  	}
3088  }
3089  
3090  /**
3091   *  stmmac_configure_cbs - Configure CBS in TX queue
3092   *  @priv: driver private structure
3093   *  Description: It is used for configuring CBS in AVB TX queues
3094   */
3095  static void stmmac_configure_cbs(struct stmmac_priv *priv)
3096  {
3097  	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3098  	u32 mode_to_use;
3099  	u32 queue;
3100  
3101  	/* queue 0 is reserved for legacy traffic */
3102  	for (queue = 1; queue < tx_queues_count; queue++) {
3103  		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3104  		if (mode_to_use == MTL_QUEUE_DCB)
3105  			continue;
3106  
3107  		stmmac_config_cbs(priv, priv->hw,
3108  				priv->plat->tx_queues_cfg[queue].send_slope,
3109  				priv->plat->tx_queues_cfg[queue].idle_slope,
3110  				priv->plat->tx_queues_cfg[queue].high_credit,
3111  				priv->plat->tx_queues_cfg[queue].low_credit,
3112  				queue);
3113  	}
3114  }
3115  
3116  /**
3117   *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3118   *  @priv: driver private structure
3119   *  Description: It is used for mapping RX queues to RX dma channels
3120   */
3121  static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3122  {
3123  	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3124  	u32 queue;
3125  	u32 chan;
3126  
3127  	for (queue = 0; queue < rx_queues_count; queue++) {
3128  		chan = priv->plat->rx_queues_cfg[queue].chan;
3129  		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3130  	}
3131  }
3132  
3133  /**
3134   *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3135   *  @priv: driver private structure
3136   *  Description: It is used for configuring the RX Queue Priority
3137   */
3138  static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3139  {
3140  	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3141  	u32 queue;
3142  	u32 prio;
3143  
3144  	for (queue = 0; queue < rx_queues_count; queue++) {
3145  		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3146  			continue;
3147  
3148  		prio = priv->plat->rx_queues_cfg[queue].prio;
3149  		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3150  	}
3151  }
3152  
3153  /**
3154   *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3155   *  @priv: driver private structure
3156   *  Description: It is used for configuring the TX Queue Priority
3157   */
3158  static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3159  {
3160  	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3161  	u32 queue;
3162  	u32 prio;
3163  
3164  	for (queue = 0; queue < tx_queues_count; queue++) {
3165  		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3166  			continue;
3167  
3168  		prio = priv->plat->tx_queues_cfg[queue].prio;
3169  		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3170  	}
3171  }
3172  
3173  /**
3174   *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3175   *  @priv: driver private structure
3176   *  Description: It is used for configuring the RX queue routing
3177   */
3178  static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3179  {
3180  	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3181  	u32 queue;
3182  	u8 packet;
3183  
3184  	for (queue = 0; queue < rx_queues_count; queue++) {
3185  		/* no specific packet type routing specified for the queue */
3186  		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3187  			continue;
3188  
3189  		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3190  		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3191  	}
3192  }
3193  
3194  static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3195  {
3196  	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3197  		priv->rss.enable = false;
3198  		return;
3199  	}
3200  
3201  	if (priv->dev->features & NETIF_F_RXHASH)
3202  		priv->rss.enable = true;
3203  	else
3204  		priv->rss.enable = false;
3205  
3206  	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3207  			     priv->plat->rx_queues_to_use);
3208  }
3209  
3210  /**
3211   *  stmmac_mtl_configuration - Configure MTL
3212   *  @priv: driver private structure
3213   *  Description: It is used for configuring MTL
3214   */
3215  static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3216  {
3217  	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3218  	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3219  
3220  	if (tx_queues_count > 1)
3221  		stmmac_set_tx_queue_weight(priv);
3222  
3223  	/* Configure MTL RX algorithms */
3224  	if (rx_queues_count > 1)
3225  		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3226  				priv->plat->rx_sched_algorithm);
3227  
3228  	/* Configure MTL TX algorithms */
3229  	if (tx_queues_count > 1)
3230  		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3231  				priv->plat->tx_sched_algorithm);
3232  
3233  	/* Configure CBS in AVB TX queues */
3234  	if (tx_queues_count > 1)
3235  		stmmac_configure_cbs(priv);
3236  
3237  	/* Map RX MTL to DMA channels */
3238  	stmmac_rx_queue_dma_chan_map(priv);
3239  
3240  	/* Enable MAC RX Queues */
3241  	stmmac_mac_enable_rx_queues(priv);
3242  
3243  	/* Set RX priorities */
3244  	if (rx_queues_count > 1)
3245  		stmmac_mac_config_rx_queues_prio(priv);
3246  
3247  	/* Set TX priorities */
3248  	if (tx_queues_count > 1)
3249  		stmmac_mac_config_tx_queues_prio(priv);
3250  
3251  	/* Set RX routing */
3252  	if (rx_queues_count > 1)
3253  		stmmac_mac_config_rx_queues_routing(priv);
3254  
3255  	/* Receive Side Scaling */
3256  	if (rx_queues_count > 1)
3257  		stmmac_mac_config_rss(priv);
3258  }
3259  
3260  static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3261  {
3262  	if (priv->dma_cap.asp) {
3263  		netdev_info(priv->dev, "Enabling Safety Features\n");
3264  		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3265  					  priv->plat->safety_feat_cfg);
3266  	} else {
3267  		netdev_info(priv->dev, "No Safety Features support found\n");
3268  	}
3269  }
3270  
3271  static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3272  {
3273  	char *name;
3274  
3275  	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3276  	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3277  
3278  	name = priv->wq_name;
3279  	sprintf(name, "%s-fpe", priv->dev->name);
3280  
3281  	priv->fpe_wq = create_singlethread_workqueue(name);
3282  	if (!priv->fpe_wq) {
3283  		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3284  
3285  		return -ENOMEM;
3286  	}
3287  	netdev_info(priv->dev, "FPE workqueue start");
3288  
3289  	return 0;
3290  }
3291  
3292  /**
3293   * stmmac_hw_setup - setup mac in a usable state.
3294   *  @dev : pointer to the device structure.
3295   *  @ptp_register: register PTP if set
3296   *  Description:
3297   *  this is the main function to setup the HW in a usable state: the DMA
3298   *  engine is reset, the core registers are configured (e.g. AXI,
3299   *  Checksum features, timers) and the DMA is ready to start receiving
3300   *  and transmitting.
3301   *  Return value:
3302   *  0 on success and an appropriate (-)ve integer as defined in errno.h
3303   *  file on failure.
3304   */
3305  static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3306  {
3307  	struct stmmac_priv *priv = netdev_priv(dev);
3308  	u32 rx_cnt = priv->plat->rx_queues_to_use;
3309  	u32 tx_cnt = priv->plat->tx_queues_to_use;
3310  	bool sph_en;
3311  	u32 chan;
3312  	int ret;
3313  
3314  	/* DMA initialization and SW reset */
3315  	ret = stmmac_init_dma_engine(priv);
3316  	if (ret < 0) {
3317  		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3318  			   __func__);
3319  		return ret;
3320  	}
3321  
3322  	/* Copy the MAC addr into the HW  */
3323  	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3324  
3325  	/* PS and related bits will be programmed according to the speed */
3326  	if (priv->hw->pcs) {
3327  		int speed = priv->plat->mac_port_sel_speed;
3328  
3329  		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3330  		    (speed == SPEED_1000)) {
3331  			priv->hw->ps = speed;
3332  		} else {
3333  			dev_warn(priv->device, "invalid port speed\n");
3334  			priv->hw->ps = 0;
3335  		}
3336  	}
3337  
3338  	/* Initialize the MAC Core */
3339  	stmmac_core_init(priv, priv->hw, dev);
3340  
3341  	/* Initialize MTL */
3342  	stmmac_mtl_configuration(priv);
3343  
3344  	/* Initialize Safety Features */
3345  	stmmac_safety_feat_configuration(priv);
3346  
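	/* If the core cannot enable RX IP checksum offload, clear the COE
	 * settings so the stack falls back to software checksumming.
	 */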
3347  	ret = stmmac_rx_ipc(priv, priv->hw);
3348  	if (!ret) {
3349  		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3350  		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3351  		priv->hw->rx_csum = 0;
3352  	}
3353  
3354  	/* Enable the MAC Rx/Tx */
3355  	stmmac_mac_set(priv, priv->ioaddr, true);
3356  
3357  	/* Set the HW DMA mode and the COE */
3358  	stmmac_dma_operation_mode(priv);
3359  
3360  	stmmac_mmc_setup(priv);
3361  
3362  	if (ptp_register) {
3363  		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3364  		if (ret < 0)
3365  			netdev_warn(priv->dev,
3366  				    "failed to enable PTP reference clock: %pe\n",
3367  				    ERR_PTR(ret));
3368  	}
3369  
3370  	ret = stmmac_init_ptp(priv);
3371  	if (ret == -EOPNOTSUPP)
3372  		netdev_info(priv->dev, "PTP not supported by HW\n");
3373  	else if (ret)
3374  		netdev_warn(priv->dev, "PTP init failed\n");
3375  	else if (ptp_register)
3376  		stmmac_ptp_register(priv);
3377  
3378  	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3379  
3380  	/* Convert the timer from msec to usec */
3381  	if (!priv->tx_lpi_timer)
3382  		priv->tx_lpi_timer = eee_timer * 1000;
3383  
3384  	if (priv->use_riwt) {
3385  		u32 queue;
3386  
3387  		for (queue = 0; queue < rx_cnt; queue++) {
3388  			if (!priv->rx_riwt[queue])
3389  				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3390  
3391  			stmmac_rx_watchdog(priv, priv->ioaddr,
3392  					   priv->rx_riwt[queue], queue);
3393  		}
3394  	}
3395  
3396  	if (priv->hw->pcs)
3397  		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3398  
3399  	/* set TX and RX rings length */
3400  	stmmac_set_rings_length(priv);
3401  
3402  	/* Enable TSO */
3403  	if (priv->tso) {
3404  		for (chan = 0; chan < tx_cnt; chan++) {
3405  			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3406  
3407  			/* TSO and TBS cannot co-exist */
3408  			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3409  				continue;
3410  
3411  			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3412  		}
3413  	}
3414  
3415  	/* Enable Split Header */
3416  	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3417  	for (chan = 0; chan < rx_cnt; chan++)
3418  		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3419  
3420  
3421  	/* VLAN Tag Insertion */
3422  	if (priv->dma_cap.vlins)
3423  		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3424  
3425  	/* TBS */
3426  	for (chan = 0; chan < tx_cnt; chan++) {
3427  		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3428  		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3429  
3430  		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3431  	}
3432  
3433  	/* Configure real RX and TX queues */
3434  	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3435  	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3436  
3437  	/* Start the ball rolling... */
3438  	stmmac_start_all_dma(priv);
3439  
3440  	if (priv->dma_cap.fpesel) {
3441  		stmmac_fpe_start_wq(priv);
3442  
3443  		if (priv->plat->fpe_cfg->enable)
3444  			stmmac_fpe_handshake(priv, true);
3445  	}
3446  
3447  	return 0;
3448  }
3449  
3450  static void stmmac_hw_teardown(struct net_device *dev)
3451  {
3452  	struct stmmac_priv *priv = netdev_priv(dev);
3453  
3454  	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3455  }
3456  
3457  static void stmmac_free_irq(struct net_device *dev,
3458  			    enum request_irq_err irq_err, int irq_idx)
3459  {
3460  	struct stmmac_priv *priv = netdev_priv(dev);
3461  	int j;
3462  
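	/* Tear down the IRQs in reverse order of how they were requested:
	 * the enum value selects the entry point and the fallthrough cases
	 * release everything that was set up before the failure.
	 */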
3463  	switch (irq_err) {
3464  	case REQ_IRQ_ERR_ALL:
3465  		irq_idx = priv->plat->tx_queues_to_use;
3466  		fallthrough;
3467  	case REQ_IRQ_ERR_TX:
3468  		for (j = irq_idx - 1; j >= 0; j--) {
3469  			if (priv->tx_irq[j] > 0) {
3470  				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3471  				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3472  			}
3473  		}
3474  		irq_idx = priv->plat->rx_queues_to_use;
3475  		fallthrough;
3476  	case REQ_IRQ_ERR_RX:
3477  		for (j = irq_idx - 1; j >= 0; j--) {
3478  			if (priv->rx_irq[j] > 0) {
3479  				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3480  				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3481  			}
3482  		}
3483  
3484  		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3485  			free_irq(priv->sfty_ue_irq, dev);
3486  		fallthrough;
3487  	case REQ_IRQ_ERR_SFTY_UE:
3488  		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3489  			free_irq(priv->sfty_ce_irq, dev);
3490  		fallthrough;
3491  	case REQ_IRQ_ERR_SFTY_CE:
3492  		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3493  			free_irq(priv->lpi_irq, dev);
3494  		fallthrough;
3495  	case REQ_IRQ_ERR_LPI:
3496  		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3497  			free_irq(priv->wol_irq, dev);
3498  		fallthrough;
3499  	case REQ_IRQ_ERR_WOL:
3500  		free_irq(dev->irq, dev);
3501  		fallthrough;
3502  	case REQ_IRQ_ERR_MAC:
3503  	case REQ_IRQ_ERR_NO:
3504  		/* If MAC IRQ request error, no more IRQ to free */
3505  		break;
3506  	}
3507  }
3508  
3509  static int stmmac_request_irq_multi_msi(struct net_device *dev)
3510  {
3511  	struct stmmac_priv *priv = netdev_priv(dev);
3512  	enum request_irq_err irq_err;
3513  	cpumask_t cpu_mask;
3514  	int irq_idx = 0;
3515  	char *int_name;
3516  	int ret;
3517  	int i;
3518  
3519  	/* For common interrupt */
3520  	int_name = priv->int_name_mac;
3521  	sprintf(int_name, "%s:%s", dev->name, "mac");
3522  	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3523  			  0, int_name, dev);
3524  	if (unlikely(ret < 0)) {
3525  		netdev_err(priv->dev,
3526  			   "%s: alloc mac MSI %d (error: %d)\n",
3527  			   __func__, dev->irq, ret);
3528  		irq_err = REQ_IRQ_ERR_MAC;
3529  		goto irq_error;
3530  	}
3531  
3532  	/* Request the Wake IRQ in case another line
3533  	 * is used for WoL
3534  	 */
3535  	priv->wol_irq_disabled = true;
3536  	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3537  		int_name = priv->int_name_wol;
3538  		sprintf(int_name, "%s:%s", dev->name, "wol");
3539  		ret = request_irq(priv->wol_irq,
3540  				  stmmac_mac_interrupt,
3541  				  0, int_name, dev);
3542  		if (unlikely(ret < 0)) {
3543  			netdev_err(priv->dev,
3544  				   "%s: alloc wol MSI %d (error: %d)\n",
3545  				   __func__, priv->wol_irq, ret);
3546  			irq_err = REQ_IRQ_ERR_WOL;
3547  			goto irq_error;
3548  		}
3549  	}
3550  
3551  	/* Request the LPI IRQ in case another line
3552  	 * is used for LPI
3553  	 */
3554  	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3555  		int_name = priv->int_name_lpi;
3556  		sprintf(int_name, "%s:%s", dev->name, "lpi");
3557  		ret = request_irq(priv->lpi_irq,
3558  				  stmmac_mac_interrupt,
3559  				  0, int_name, dev);
3560  		if (unlikely(ret < 0)) {
3561  			netdev_err(priv->dev,
3562  				   "%s: alloc lpi MSI %d (error: %d)\n",
3563  				   __func__, priv->lpi_irq, ret);
3564  			irq_err = REQ_IRQ_ERR_LPI;
3565  			goto irq_error;
3566  		}
3567  	}
3568  
3569  	/* Request the Safety Feature Correctable Error line in case
3570  	 * another line is used
3571  	 */
3572  	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3573  		int_name = priv->int_name_sfty_ce;
3574  		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3575  		ret = request_irq(priv->sfty_ce_irq,
3576  				  stmmac_safety_interrupt,
3577  				  0, int_name, dev);
3578  		if (unlikely(ret < 0)) {
3579  			netdev_err(priv->dev,
3580  				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3581  				   __func__, priv->sfty_ce_irq, ret);
3582  			irq_err = REQ_IRQ_ERR_SFTY_CE;
3583  			goto irq_error;
3584  		}
3585  	}
3586  
3587  	/* Request the Safety Feature Uncorrectable Error line in case
3588  	 * another line is used
3589  	 */
3590  	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3591  		int_name = priv->int_name_sfty_ue;
3592  		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3593  		ret = request_irq(priv->sfty_ue_irq,
3594  				  stmmac_safety_interrupt,
3595  				  0, int_name, dev);
3596  		if (unlikely(ret < 0)) {
3597  			netdev_err(priv->dev,
3598  				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3599  				   __func__, priv->sfty_ue_irq, ret);
3600  			irq_err = REQ_IRQ_ERR_SFTY_UE;
3601  			goto irq_error;
3602  		}
3603  	}
3604  
3605  	/* Request Rx MSI irq */
3606  	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3607  		if (i >= MTL_MAX_RX_QUEUES)
3608  			break;
3609  		if (priv->rx_irq[i] == 0)
3610  			continue;
3611  
3612  		int_name = priv->int_name_rx_irq[i];
3613  		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3614  		ret = request_irq(priv->rx_irq[i],
3615  				  stmmac_msi_intr_rx,
3616  				  0, int_name, &priv->dma_conf.rx_queue[i]);
3617  		if (unlikely(ret < 0)) {
3618  			netdev_err(priv->dev,
3619  				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3620  				   __func__, i, priv->rx_irq[i], ret);
3621  			irq_err = REQ_IRQ_ERR_RX;
3622  			irq_idx = i;
3623  			goto irq_error;
3624  		}
3625  		cpumask_clear(&cpu_mask);
3626  		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3627  		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3628  	}
3629  
3630  	/* Request Tx MSI irq */
3631  	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3632  		if (i >= MTL_MAX_TX_QUEUES)
3633  			break;
3634  		if (priv->tx_irq[i] == 0)
3635  			continue;
3636  
3637  		int_name = priv->int_name_tx_irq[i];
3638  		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3639  		ret = request_irq(priv->tx_irq[i],
3640  				  stmmac_msi_intr_tx,
3641  				  0, int_name, &priv->dma_conf.tx_queue[i]);
3642  		if (unlikely(ret < 0)) {
3643  			netdev_err(priv->dev,
3644  				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3645  				   __func__, i, priv->tx_irq[i], ret);
3646  			irq_err = REQ_IRQ_ERR_TX;
3647  			irq_idx = i;
3648  			goto irq_error;
3649  		}
3650  		cpumask_clear(&cpu_mask);
3651  		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3652  		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3653  	}
3654  
3655  	return 0;
3656  
3657  irq_error:
3658  	stmmac_free_irq(dev, irq_err, irq_idx);
3659  	return ret;
3660  }
3661  
3662  static int stmmac_request_irq_single(struct net_device *dev)
3663  {
3664  	struct stmmac_priv *priv = netdev_priv(dev);
3665  	enum request_irq_err irq_err;
3666  	int ret;
3667  
3668  	ret = request_irq(dev->irq, stmmac_interrupt,
3669  			  IRQF_SHARED, dev->name, dev);
3670  	if (unlikely(ret < 0)) {
3671  		netdev_err(priv->dev,
3672  			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3673  			   __func__, dev->irq, ret);
3674  		irq_err = REQ_IRQ_ERR_MAC;
3675  		goto irq_error;
3676  	}
3677  
3678  	/* Request the Wake IRQ in case another line
3679  	 * is used for WoL
3680  	 */
3681  	priv->wol_irq_disabled = true;
3682  	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3683  		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3684  				  IRQF_SHARED, dev->name, dev);
3685  		if (unlikely(ret < 0)) {
3686  			netdev_err(priv->dev,
3687  				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3688  				   __func__, priv->wol_irq, ret);
3689  			irq_err = REQ_IRQ_ERR_WOL;
3690  			goto irq_error;
3691  		}
3692  	}
3693  
3694  	/* Request the IRQ lines */
3695  	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3696  		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3697  				  IRQF_SHARED, dev->name, dev);
3698  		if (unlikely(ret < 0)) {
3699  			netdev_err(priv->dev,
3700  				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3701  				   __func__, priv->lpi_irq, ret);
3702  			irq_err = REQ_IRQ_ERR_LPI;
3703  			goto irq_error;
3704  		}
3705  	}
3706  
3707  	return 0;
3708  
3709  irq_error:
3710  	stmmac_free_irq(dev, irq_err, 0);
3711  	return ret;
3712  }
3713  
3714  static int stmmac_request_irq(struct net_device *dev)
3715  {
3716  	struct stmmac_priv *priv = netdev_priv(dev);
3717  	int ret;
3718  
3719  	/* Request the IRQ lines */
3720  	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3721  		ret = stmmac_request_irq_multi_msi(dev);
3722  	else
3723  		ret = stmmac_request_irq_single(dev);
3724  
3725  	return ret;
3726  }
3727  
3728  /**
3729   *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3730   *  @priv: driver private structure
3731   *  @mtu: MTU to setup the dma queue and buf with
3732   *  Description: Allocate and generate a dma_conf based on the provided MTU.
3733   *  Allocate the Tx/Rx DMA queues and init them.
3734   *  Return value:
3735   *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3736   */
3737  static struct stmmac_dma_conf *
3738  stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3739  {
3740  	struct stmmac_dma_conf *dma_conf;
3741  	int chan, bfsize, ret;
3742  
3743  	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3744  	if (!dma_conf) {
3745  		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3746  			   __func__);
3747  		return ERR_PTR(-ENOMEM);
3748  	}
3749  
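	/* Use a 16KiB buffer when the MTU requires it and the ring mode
	 * supports it, otherwise derive the buffer size from the MTU.
	 */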
3750  	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3751  	if (bfsize < 0)
3752  		bfsize = 0;
3753  
3754  	if (bfsize < BUF_SIZE_16KiB)
3755  		bfsize = stmmac_set_bfsize(mtu, 0);
3756  
3757  	dma_conf->dma_buf_sz = bfsize;
3758  	/* Choose the tx/rx ring size from the one already defined in the
3759  	 * priv struct, if any.
3760  	 */
3761  	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3762  	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3763  
3764  	if (!dma_conf->dma_tx_size)
3765  		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3766  	if (!dma_conf->dma_rx_size)
3767  		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3768  
3769  	/* Earlier check for TBS */
3770  	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3771  		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3772  		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3773  
3774  		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3775  		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3776  	}
3777  
3778  	ret = alloc_dma_desc_resources(priv, dma_conf);
3779  	if (ret < 0) {
3780  		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3781  			   __func__);
3782  		goto alloc_error;
3783  	}
3784  
3785  	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3786  	if (ret < 0) {
3787  		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3788  			   __func__);
3789  		goto init_error;
3790  	}
3791  
3792  	return dma_conf;
3793  
3794  init_error:
3795  	free_dma_desc_resources(priv, dma_conf);
3796  alloc_error:
3797  	kfree(dma_conf);
3798  	return ERR_PTR(ret);
3799  }
3800  
3801  /**
3802   *  __stmmac_open - open entry point of the driver
3803   *  @dev : pointer to the device structure.
3804   *  @dma_conf :  structure to take the dma data
3805   *  Description:
3806   *  This function is the open entry point of the driver.
3807   *  Return value:
3808   *  0 on success and an appropriate (-)ve integer as defined in errno.h
3809   *  file on failure.
3810   */
3811  static int __stmmac_open(struct net_device *dev,
3812  			 struct stmmac_dma_conf *dma_conf)
3813  {
3814  	struct stmmac_priv *priv = netdev_priv(dev);
3815  	int mode = priv->plat->phy_interface;
3816  	u32 chan;
3817  	int ret;
3818  
3819  	ret = pm_runtime_resume_and_get(priv->device);
3820  	if (ret < 0)
3821  		return ret;
3822  
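	/* Attach a PHY only when the link is not handled entirely by a PCS
	 * (TBI/RTBI, a C73 auto-negotiating XPCS or a Lynx PCS).
	 */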
3823  	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3824  	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3825  	    (!priv->hw->xpcs ||
3826  	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3827  	    !priv->hw->lynx_pcs) {
3828  		ret = stmmac_init_phy(dev);
3829  		if (ret) {
3830  			netdev_err(priv->dev,
3831  				   "%s: Cannot attach to PHY (error: %d)\n",
3832  				   __func__, ret);
3833  			goto init_phy_error;
3834  		}
3835  	}
3836  
3837  	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3838  
3839  	buf_sz = dma_conf->dma_buf_sz;
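	/* Carry the per-queue TBS enable state over from the current
	 * configuration before it is replaced below.
	 */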
3840  	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3841  		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3842  			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3843  	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3844  
3845  	stmmac_reset_queues_param(priv);
3846  
3847  	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3848  	    priv->plat->serdes_powerup) {
3849  		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3850  		if (ret < 0) {
3851  			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3852  				   __func__);
3853  			goto init_error;
3854  		}
3855  	}
3856  
3857  	ret = stmmac_hw_setup(dev, true);
3858  	if (ret < 0) {
3859  		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3860  		goto init_error;
3861  	}
3862  
3863  	stmmac_init_coalesce(priv);
3864  
3865  	phylink_start(priv->phylink);
3866  	/* We may have called phylink_speed_down before */
3867  	phylink_speed_up(priv->phylink);
3868  
3869  	ret = stmmac_request_irq(dev);
3870  	if (ret)
3871  		goto irq_error;
3872  
3873  	stmmac_enable_all_queues(priv);
3874  	netif_tx_start_all_queues(priv->dev);
3875  	stmmac_enable_all_dma_irq(priv);
3876  
3877  	return 0;
3878  
3879  irq_error:
3880  	phylink_stop(priv->phylink);
3881  
3882  	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3883  		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3884  
3885  	stmmac_hw_teardown(dev);
3886  init_error:
3887  	phylink_disconnect_phy(priv->phylink);
3888  init_phy_error:
3889  	pm_runtime_put(priv->device);
3890  	return ret;
3891  }
3892  
3893  static int stmmac_open(struct net_device *dev)
3894  {
3895  	struct stmmac_priv *priv = netdev_priv(dev);
3896  	struct stmmac_dma_conf *dma_conf;
3897  	int ret;
3898  
3899  	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3900  	if (IS_ERR(dma_conf))
3901  		return PTR_ERR(dma_conf);
3902  
3903  	ret = __stmmac_open(dev, dma_conf);
3904  	if (ret)
3905  		free_dma_desc_resources(priv, dma_conf);
3906  
3907  	kfree(dma_conf);
3908  	return ret;
3909  }
3910  
3911  static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3912  {
3913  	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3914  
3915  	if (priv->fpe_wq) {
3916  		destroy_workqueue(priv->fpe_wq);
3917  		priv->fpe_wq = NULL;
3918  	}
3919  
3920  	netdev_info(priv->dev, "FPE workqueue stop");
3921  }
3922  
3923  /**
3924   *  stmmac_release - close entry point of the driver
3925   *  @dev : device pointer.
3926   *  Description:
3927   *  This is the stop entry point of the driver.
3928   */
3929  static int stmmac_release(struct net_device *dev)
3930  {
3931  	struct stmmac_priv *priv = netdev_priv(dev);
3932  	u32 chan;
3933  
3934  	if (device_may_wakeup(priv->device))
3935  		phylink_speed_down(priv->phylink, false);
3936  	/* Stop and disconnect the PHY */
3937  	phylink_stop(priv->phylink);
3938  	phylink_disconnect_phy(priv->phylink);
3939  
3940  	stmmac_disable_all_queues(priv);
3941  
3942  	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3943  		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3944  
3945  	netif_tx_disable(dev);
3946  
3947  	/* Free the IRQ lines */
3948  	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3949  
3950  	if (priv->eee_enabled) {
3951  		priv->tx_path_in_lpi_mode = false;
3952  		del_timer_sync(&priv->eee_ctrl_timer);
3953  	}
3954  
3955  	/* Stop TX/RX DMA and clear the descriptors */
3956  	stmmac_stop_all_dma(priv);
3957  
3958  	/* Release and free the Rx/Tx resources */
3959  	free_dma_desc_resources(priv, &priv->dma_conf);
3960  
3961  	/* Disable the MAC Rx/Tx */
3962  	stmmac_mac_set(priv, priv->ioaddr, false);
3963  
3964  	/* Power down the SerDes if present */
3965  	if (priv->plat->serdes_powerdown)
3966  		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3967  
3968  	netif_carrier_off(dev);
3969  
3970  	stmmac_release_ptp(priv);
3971  
3972  	pm_runtime_put(priv->device);
3973  
3974  	if (priv->dma_cap.fpesel)
3975  		stmmac_fpe_stop_wq(priv);
3976  
3977  	return 0;
3978  }
3979  
3980  static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3981  			       struct stmmac_tx_queue *tx_q)
3982  {
3983  	u16 tag = 0x0, inner_tag = 0x0;
3984  	u32 inner_type = 0x0;
3985  	struct dma_desc *p;
3986  
3987  	if (!priv->dma_cap.vlins)
3988  		return false;
3989  	if (!skb_vlan_tag_present(skb))
3990  		return false;
3991  	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3992  		inner_tag = skb_vlan_tag_get(skb);
3993  		inner_type = STMMAC_VLAN_INSERT;
3994  	}
3995  
3996  	tag = skb_vlan_tag_get(skb);
3997  
3998  	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3999  		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4000  	else
4001  		p = &tx_q->dma_tx[tx_q->cur_tx];
4002  
4003  	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4004  		return false;
4005  
4006  	stmmac_set_tx_owner(priv, p);
4007  	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4008  	return true;
4009  }
4010  
4011  /**
4012   *  stmmac_tso_allocator - Fill TX descriptors with the TSO payload
4013   *  @priv: driver private structure
4014   *  @des: buffer start address
4015   *  @total_len: total length to fill in descriptors
4016   *  @last_segment: condition for the last descriptor
4017   *  @queue: TX queue index
4018   *  Description:
4019   *  This function fills descriptors and requests new descriptors according to
4020   *  the buffer length to fill
4021   */
4022  static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4023  				 int total_len, bool last_segment, u32 queue)
4024  {
4025  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4026  	struct dma_desc *desc;
4027  	u32 buff_size;
4028  	int tmp_len;
4029  
4030  	tmp_len = total_len;
4031  
4032  	while (tmp_len > 0) {
4033  		dma_addr_t curr_addr;
4034  
4035  		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4036  						priv->dma_conf.dma_tx_size);
4037  		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4038  
4039  		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4040  			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4041  		else
4042  			desc = &tx_q->dma_tx[tx_q->cur_tx];
4043  
4044  		curr_addr = des + (total_len - tmp_len);
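		/* Cores limited to 32-bit addressing take the buffer address
		 * directly in DES0; otherwise use the descriptor address helper.
		 */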
4045  		if (priv->dma_cap.addr64 <= 32)
4046  			desc->des0 = cpu_to_le32(curr_addr);
4047  		else
4048  			stmmac_set_desc_addr(priv, desc, curr_addr);
4049  
4050  		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4051  			    TSO_MAX_BUFF_SIZE : tmp_len;
4052  
4053  		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4054  				0, 1,
4055  				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4056  				0, 0);
4057  
4058  		tmp_len -= TSO_MAX_BUFF_SIZE;
4059  	}
4060  }
4061  
4062  static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4063  {
4064  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4065  	int desc_size;
4066  
4067  	if (likely(priv->extend_desc))
4068  		desc_size = sizeof(struct dma_extended_desc);
4069  	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4070  		desc_size = sizeof(struct dma_edesc);
4071  	else
4072  		desc_size = sizeof(struct dma_desc);
4073  
4074  	/* The own bit must be the latest setting done when preparing the
4075  	 * descriptor, and then a barrier is needed to make sure that
4076  	 * all is coherent before granting the DMA engine.
4077  	 */
4078  	wmb();
4079  
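	/* Move the TX tail pointer past the last prepared descriptor so the
	 * DMA engine fetches the new descriptors.
	 */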
4080  	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4081  	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4082  }
4083  
4084  /**
4085   *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4086   *  @skb : the socket buffer
4087   *  @dev : device pointer
4088   *  Description: this is the transmit function that is called on TSO frames
4089   *  (support available on GMAC4 and newer chips).
4090   *  The diagram below shows the ring programming in case of TSO frames:
4091   *
4092   *  First Descriptor
4093   *   --------
4094   *   | DES0 |---> buffer1 = L2/L3/L4 header
4095   *   | DES1 |---> TCP Payload (can continue on next descr...)
4096   *   | DES2 |---> buffer 1 and 2 len
4097   *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4098   *   --------
4099   *	|
4100   *     ...
4101   *	|
4102   *   --------
4103   *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4104   *   | DES1 | --|
4105   *   | DES2 | --> buffer 1 and 2 len
4106   *   | DES3 |
4107   *   --------
4108   *
4109   * MSS is fixed while TSO is enabled, so the TDES3 ctx field is not reprogrammed.
4110   */
4111  static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4112  {
4113  	struct dma_desc *desc, *first, *mss_desc = NULL;
4114  	struct stmmac_priv *priv = netdev_priv(dev);
4115  	int nfrags = skb_shinfo(skb)->nr_frags;
4116  	u32 queue = skb_get_queue_mapping(skb);
4117  	unsigned int first_entry, tx_packets;
4118  	struct stmmac_txq_stats *txq_stats;
4119  	int tmp_pay_len = 0, first_tx;
4120  	struct stmmac_tx_queue *tx_q;
4121  	bool has_vlan, set_ic;
4122  	dma_addr_t tso_des, des;
4123  	u8 proto_hdr_len, hdr;
4124  	u32 pay_len, mss;
4125  	int i;
4126  
4127  	tx_q = &priv->dma_conf.tx_queue[queue];
4128  	txq_stats = &priv->xstats.txq_stats[queue];
4129  	first_tx = tx_q->cur_tx;
4130  
4131  	/* Compute header lengths */
4132  	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4133  		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4134  		hdr = sizeof(struct udphdr);
4135  	} else {
4136  		proto_hdr_len = skb_tcp_all_headers(skb);
4137  		hdr = tcp_hdrlen(skb);
4138  	}
4139  
4140  	/* Desc availability based on the threshold should be safe enough */
4141  	if (unlikely(stmmac_tx_avail(priv, queue) <
4142  		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4143  		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4144  			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4145  								queue));
4146  			/* This is a hard error, log it. */
4147  			netdev_err(priv->dev,
4148  				   "%s: Tx Ring full when queue awake\n",
4149  				   __func__);
4150  		}
4151  		return NETDEV_TX_BUSY;
4152  	}
4153  
4154  	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4155  
4156  	mss = skb_shinfo(skb)->gso_size;
4157  
4158  	/* set new MSS value if needed */
4159  	if (mss != tx_q->mss) {
4160  		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4161  			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4162  		else
4163  			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4164  
4165  		stmmac_set_mss(priv, mss_desc, mss);
4166  		tx_q->mss = mss;
4167  		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4168  						priv->dma_conf.dma_tx_size);
4169  		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4170  	}
4171  
4172  	if (netif_msg_tx_queued(priv)) {
4173  		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4174  			__func__, hdr, proto_hdr_len, pay_len, mss);
4175  		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4176  			skb->data_len);
4177  	}
4178  
4179  	/* Check if VLAN can be inserted by HW */
4180  	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4181  
4182  	first_entry = tx_q->cur_tx;
4183  	WARN_ON(tx_q->tx_skbuff[first_entry]);
4184  
4185  	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4186  		desc = &tx_q->dma_entx[first_entry].basic;
4187  	else
4188  		desc = &tx_q->dma_tx[first_entry];
4189  	first = desc;
4190  
4191  	if (has_vlan)
4192  		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4193  
4194  	/* first descriptor: fill Headers on Buf1 */
4195  	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4196  			     DMA_TO_DEVICE);
4197  	if (dma_mapping_error(priv->device, des))
4198  		goto dma_map_err;
4199  
4200  	if (priv->dma_cap.addr64 <= 32) {
4201  		first->des0 = cpu_to_le32(des);
4202  
4203  		/* Fill start of payload in buff2 of first descriptor */
4204  		if (pay_len)
4205  			first->des1 = cpu_to_le32(des + proto_hdr_len);
4206  
4207  		/* If needed take extra descriptors to fill the remaining payload */
4208  		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4209  		tso_des = des;
4210  	} else {
4211  		stmmac_set_desc_addr(priv, first, des);
4212  		tmp_pay_len = pay_len;
4213  		tso_des = des + proto_hdr_len;
4214  		pay_len = 0;
4215  	}
4216  
4217  	stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
4218  
4219  	/* In case two or more DMA transmit descriptors are allocated for this
4220  	 * non-paged SKB data, the DMA buffer address should be saved to
4221  	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4222  	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4223  	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4224  	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4225  	 * sooner or later.
4226  	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4227  	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4228  	 * this DMA buffer right after the DMA engine completely finishes the
4229  	 * full buffer transmission.
4230  	 */
4231  	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4232  	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4233  	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4234  	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4235  
4236  	/* Prepare fragments */
4237  	for (i = 0; i < nfrags; i++) {
4238  		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4239  
4240  		des = skb_frag_dma_map(priv->device, frag, 0,
4241  				       skb_frag_size(frag),
4242  				       DMA_TO_DEVICE);
4243  		if (dma_mapping_error(priv->device, des))
4244  			goto dma_map_err;
4245  
4246  		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4247  				     (i == nfrags - 1), queue);
4248  
4249  		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4250  		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4251  		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4252  		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4253  	}
4254  
4255  	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4256  
4257  	/* Only the last descriptor gets to point to the skb. */
4258  	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4259  	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4260  
4261  	/* Manage tx mitigation */
4262  	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4263  	tx_q->tx_count_frames += tx_packets;
4264  
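	/* Decide whether to request a TX completion interrupt: always for
	 * HW-timestamped frames, never when frame coalescing is disabled,
	 * otherwise once the coalesce frame threshold has been crossed.
	 */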
4265  	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4266  		set_ic = true;
4267  	else if (!priv->tx_coal_frames[queue])
4268  		set_ic = false;
4269  	else if (tx_packets > priv->tx_coal_frames[queue])
4270  		set_ic = true;
4271  	else if ((tx_q->tx_count_frames %
4272  		  priv->tx_coal_frames[queue]) < tx_packets)
4273  		set_ic = true;
4274  	else
4275  		set_ic = false;
4276  
4277  	if (set_ic) {
4278  		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4279  			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4280  		else
4281  			desc = &tx_q->dma_tx[tx_q->cur_tx];
4282  
4283  		tx_q->tx_count_frames = 0;
4284  		stmmac_set_tx_ic(priv, desc);
4285  	}
4286  
4287  	/* We've used all descriptors we need for this skb, however,
4288  	 * advance cur_tx so that it references a fresh descriptor.
4289  	 * ndo_start_xmit will fill this descriptor the next time it's
4290  	 * called and stmmac_tx_clean may clean up to this descriptor.
4291  	 */
4292  	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4293  
4294  	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4295  		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4296  			  __func__);
4297  		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4298  	}
4299  
4300  	u64_stats_update_begin(&txq_stats->q_syncp);
4301  	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4302  	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4303  	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4304  	if (set_ic)
4305  		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4306  	u64_stats_update_end(&txq_stats->q_syncp);
4307  
4308  	if (priv->sarc_type)
4309  		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4310  
4311  	skb_tx_timestamp(skb);
4312  
4313  	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4314  		     priv->hwts_tx_en)) {
4315  		/* declare that device is doing timestamping */
4316  		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4317  		stmmac_enable_tx_timestamp(priv, first);
4318  	}
4319  
4320  	/* Complete the first descriptor before granting the DMA */
4321  	stmmac_prepare_tso_tx_desc(priv, first, 1,
4322  			proto_hdr_len,
4323  			pay_len,
4324  			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4325  			hdr / 4, (skb->len - proto_hdr_len));
4326  
4327  	/* If context desc is used to change MSS */
4328  	if (mss_desc) {
4329  		/* Make sure that first descriptor has been completely
4330  		 * written, including its own bit. This is because MSS is
4331  		 * actually before first descriptor, so we need to make
4332  		 * sure that MSS's own bit is the last thing written.
4333  		 */
4334  		dma_wmb();
4335  		stmmac_set_tx_owner(priv, mss_desc);
4336  	}
4337  
4338  	if (netif_msg_pktdata(priv)) {
4339  		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4340  			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4341  			tx_q->cur_tx, first, nfrags);
4342  		pr_info(">>> frame to be transmitted: ");
4343  		print_pkt(skb->data, skb_headlen(skb));
4344  	}
4345  
4346  	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4347  
4348  	stmmac_flush_tx_descriptors(priv, queue);
4349  	stmmac_tx_timer_arm(priv, queue);
4350  
4351  	return NETDEV_TX_OK;
4352  
4353  dma_map_err:
4354  	dev_err(priv->device, "Tx dma map failed\n");
4355  	dev_kfree_skb(skb);
4356  	priv->xstats.tx_dropped++;
4357  	return NETDEV_TX_OK;
4358  }
4359  
4360  /**
4361   * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4362   * @skb: socket buffer to check
4363   *
4364   * Check if a packet has an ethertype that will trigger the IP header checks
4365   * and IP/TCP checksum engine of the stmmac core.
4366   *
4367   * Return: true if the ethertype can trigger the checksum engine, false
4368   * otherwise
4369   */
4370  static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4371  {
4372  	int depth = 0;
4373  	__be16 proto;
4374  
4375  	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4376  				    &depth);
4377  
4378  	return (depth <= ETH_HLEN) &&
4379  		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4380  }
4381  
4382  /**
4383   *  stmmac_xmit - Tx entry point of the driver
4384   *  @skb : the socket buffer
4385   *  @dev : device pointer
4386   *  Description : this is the tx entry point of the driver.
4387   *  It programs the chain or the ring and supports oversized frames
4388   *  and SG feature.
4389   */
4390  static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4391  {
4392  	unsigned int first_entry, tx_packets, enh_desc;
4393  	struct stmmac_priv *priv = netdev_priv(dev);
4394  	unsigned int nopaged_len = skb_headlen(skb);
4395  	int i, csum_insertion = 0, is_jumbo = 0;
4396  	u32 queue = skb_get_queue_mapping(skb);
4397  	int nfrags = skb_shinfo(skb)->nr_frags;
4398  	int gso = skb_shinfo(skb)->gso_type;
4399  	struct stmmac_txq_stats *txq_stats;
4400  	struct dma_edesc *tbs_desc = NULL;
4401  	struct dma_desc *desc, *first;
4402  	struct stmmac_tx_queue *tx_q;
4403  	bool has_vlan, set_ic;
4404  	int entry, first_tx;
4405  	dma_addr_t des;
4406  
4407  	tx_q = &priv->dma_conf.tx_queue[queue];
4408  	txq_stats = &priv->xstats.txq_stats[queue];
4409  	first_tx = tx_q->cur_tx;
4410  
4411  	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4412  		stmmac_disable_eee_mode(priv);
4413  
4414  	/* Manage oversized TCP frames for GMAC4 device */
4415  	if (skb_is_gso(skb) && priv->tso) {
4416  		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4417  			return stmmac_tso_xmit(skb, dev);
4418  		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4419  			return stmmac_tso_xmit(skb, dev);
4420  	}
4421  
4422  	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4423  		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4424  			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4425  								queue));
4426  			/* This is a hard error, log it. */
4427  			netdev_err(priv->dev,
4428  				   "%s: Tx Ring full when queue awake\n",
4429  				   __func__);
4430  		}
4431  		return NETDEV_TX_BUSY;
4432  	}
4433  
4434  	/* Check if VLAN can be inserted by HW */
4435  	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4436  
4437  	entry = tx_q->cur_tx;
4438  	first_entry = entry;
4439  	WARN_ON(tx_q->tx_skbuff[first_entry]);
4440  
4441  	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4442  	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4443  	 * queues. In that case, checksum offloading for those queues that don't
4444  	 * support tx coe needs to fallback to software checksum calculation.
4445  	 *
4446  	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4447  	 * also have to be checksummed in software.
4448  	 */
4449  	if (csum_insertion &&
4450  	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4451  	     !stmmac_has_ip_ethertype(skb))) {
4452  		if (unlikely(skb_checksum_help(skb)))
4453  			goto dma_map_err;
4454  		csum_insertion = !csum_insertion;
4455  	}
4456  
4457  	if (likely(priv->extend_desc))
4458  		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4459  	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4460  		desc = &tx_q->dma_entx[entry].basic;
4461  	else
4462  		desc = tx_q->dma_tx + entry;
4463  
4464  	first = desc;
4465  
4466  	if (has_vlan)
4467  		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4468  
4469  	enh_desc = priv->plat->enh_desc;
4470  	/* To program the descriptors according to the size of the frame */
4471  	if (enh_desc)
4472  		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4473  
4474  	if (unlikely(is_jumbo)) {
4475  		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4476  		if (unlikely(entry < 0) && (entry != -EINVAL))
4477  			goto dma_map_err;
4478  	}
4479  
4480  	for (i = 0; i < nfrags; i++) {
4481  		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4482  		int len = skb_frag_size(frag);
4483  		bool last_segment = (i == (nfrags - 1));
4484  
4485  		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4486  		WARN_ON(tx_q->tx_skbuff[entry]);
4487  
4488  		if (likely(priv->extend_desc))
4489  			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4490  		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4491  			desc = &tx_q->dma_entx[entry].basic;
4492  		else
4493  			desc = tx_q->dma_tx + entry;
4494  
4495  		des = skb_frag_dma_map(priv->device, frag, 0, len,
4496  				       DMA_TO_DEVICE);
4497  		if (dma_mapping_error(priv->device, des))
4498  			goto dma_map_err; /* should reuse desc w/o issues */
4499  
4500  		tx_q->tx_skbuff_dma[entry].buf = des;
4501  
4502  		stmmac_set_desc_addr(priv, desc, des);
4503  
4504  		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4505  		tx_q->tx_skbuff_dma[entry].len = len;
4506  		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4507  		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4508  
4509  		/* Prepare the descriptor and set the own bit too */
4510  		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4511  				priv->mode, 1, last_segment, skb->len);
4512  	}
4513  
4514  	/* Only the last descriptor gets to point to the skb. */
4515  	tx_q->tx_skbuff[entry] = skb;
4516  	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4517  
4518  	/* According to the coalesce parameter the IC bit for the latest
4519  	 * segment is reset and the timer re-started to clean the tx status.
4520  	 * This approach takes care of the fragments: desc is the first
4521  	 * element in case of no SG.
4522  	 */
4523  	tx_packets = (entry + 1) - first_tx;
4524  	tx_q->tx_count_frames += tx_packets;
4525  
4526  	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4527  		set_ic = true;
4528  	else if (!priv->tx_coal_frames[queue])
4529  		set_ic = false;
4530  	else if (tx_packets > priv->tx_coal_frames[queue])
4531  		set_ic = true;
4532  	else if ((tx_q->tx_count_frames %
4533  		  priv->tx_coal_frames[queue]) < tx_packets)
4534  		set_ic = true;
4535  	else
4536  		set_ic = false;
4537  
4538  	if (set_ic) {
4539  		if (likely(priv->extend_desc))
4540  			desc = &tx_q->dma_etx[entry].basic;
4541  		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4542  			desc = &tx_q->dma_entx[entry].basic;
4543  		else
4544  			desc = &tx_q->dma_tx[entry];
4545  
4546  		tx_q->tx_count_frames = 0;
4547  		stmmac_set_tx_ic(priv, desc);
4548  	}
4549  
4550  	/* We've used all descriptors we need for this skb, however,
4551  	 * advance cur_tx so that it references a fresh descriptor.
4552  	 * ndo_start_xmit will fill this descriptor the next time it's
4553  	 * called and stmmac_tx_clean may clean up to this descriptor.
4554  	 */
4555  	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4556  	tx_q->cur_tx = entry;
4557  
4558  	if (netif_msg_pktdata(priv)) {
4559  		netdev_dbg(priv->dev,
4560  			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4561  			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4562  			   entry, first, nfrags);
4563  
4564  		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4565  		print_pkt(skb->data, skb->len);
4566  	}
4567  
4568  	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4569  		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4570  			  __func__);
4571  		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4572  	}
4573  
4574  	u64_stats_update_begin(&txq_stats->q_syncp);
4575  	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4576  	if (set_ic)
4577  		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4578  	u64_stats_update_end(&txq_stats->q_syncp);
4579  
4580  	if (priv->sarc_type)
4581  		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4582  
4583  	skb_tx_timestamp(skb);
4584  
4585  	/* Ready to fill the first descriptor and set the OWN bit w/o any
4586  	 * problems because all the descriptors are actually ready to be
4587  	 * passed to the DMA engine.
4588  	 */
4589  	if (likely(!is_jumbo)) {
4590  		bool last_segment = (nfrags == 0);
4591  
4592  		des = dma_map_single(priv->device, skb->data,
4593  				     nopaged_len, DMA_TO_DEVICE);
4594  		if (dma_mapping_error(priv->device, des))
4595  			goto dma_map_err;
4596  
4597  		tx_q->tx_skbuff_dma[first_entry].buf = des;
4598  		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4599  		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4600  
4601  		stmmac_set_desc_addr(priv, first, des);
4602  
4603  		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4604  		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4605  
4606  		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4607  			     priv->hwts_tx_en)) {
4608  			/* declare that device is doing timestamping */
4609  			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4610  			stmmac_enable_tx_timestamp(priv, first);
4611  		}
4612  
4613  		/* Prepare the first descriptor setting the OWN bit too */
4614  		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4615  				csum_insertion, priv->mode, 0, last_segment,
4616  				skb->len);
4617  	}
4618  
4619  	if (tx_q->tbs & STMMAC_TBS_EN) {
4620  		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4621  
4622  		tbs_desc = &tx_q->dma_entx[first_entry];
4623  		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4624  	}
4625  
4626  	stmmac_set_tx_owner(priv, first);
4627  
4628  	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4629  
4630  	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4631  
4632  	stmmac_flush_tx_descriptors(priv, queue);
4633  	stmmac_tx_timer_arm(priv, queue);
4634  
4635  	return NETDEV_TX_OK;
4636  
4637  dma_map_err:
4638  	netdev_err(priv->dev, "Tx DMA map failed\n");
4639  	dev_kfree_skb(skb);
4640  	priv->xstats.tx_dropped++;
4641  	return NETDEV_TX_OK;
4642  }
4643  
4644  static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4645  {
4646  	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4647  	__be16 vlan_proto = veth->h_vlan_proto;
4648  	u16 vlanid;
4649  
4650  	if ((vlan_proto == htons(ETH_P_8021Q) &&
4651  	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4652  	    (vlan_proto == htons(ETH_P_8021AD) &&
4653  	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4654  		/* pop the vlan tag */
4655  		vlanid = ntohs(veth->h_vlan_TCI);
4656  		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4657  		skb_pull(skb, VLAN_HLEN);
4658  		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4659  	}
4660  }
4661  
4662  /**
4663   * stmmac_rx_refill - refill used skb preallocated buffers
4664   * @priv: driver private structure
4665   * @queue: RX queue index
4666   * Description : this is to refill the RX buffers for the reception process
4667   * that is based on zero-copy.
4668   */
4669  static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4670  {
4671  	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4672  	int dirty = stmmac_rx_dirty(priv, queue);
4673  	unsigned int entry = rx_q->dirty_rx;
4674  	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4675  
4676  	if (priv->dma_cap.host_dma_width <= 32)
4677  		gfp |= GFP_DMA32;
4678  
4679  	while (dirty-- > 0) {
4680  		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4681  		struct dma_desc *p;
4682  		bool use_rx_wd;
4683  
4684  		if (priv->extend_desc)
4685  			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4686  		else
4687  			p = rx_q->dma_rx + entry;
4688  
4689  		if (!buf->page) {
4690  			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4691  			if (!buf->page)
4692  				break;
4693  		}
4694  
4695  		if (priv->sph && !buf->sec_page) {
4696  			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4697  			if (!buf->sec_page)
4698  				break;
4699  
4700  			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4701  		}
4702  
4703  		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4704  
4705  		stmmac_set_desc_addr(priv, p, buf->addr);
4706  		if (priv->sph)
4707  			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4708  		else
4709  			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4710  		stmmac_refill_desc3(priv, rx_q, p);
4711  
4712  		rx_q->rx_count_frames++;
4713  		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4714  		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4715  			rx_q->rx_count_frames = 0;
4716  
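		/* Arm the RX watchdog when frame coalescing is disabled or
		 * frames are still pending, but only if RIWT is in use.
		 */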
4717  		use_rx_wd = !priv->rx_coal_frames[queue];
4718  		use_rx_wd |= rx_q->rx_count_frames > 0;
4719  		if (!priv->use_riwt)
4720  			use_rx_wd = false;
4721  
4722  		dma_wmb();
4723  		stmmac_set_rx_owner(priv, p, use_rx_wd);
4724  
4725  		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4726  	}
4727  	rx_q->dirty_rx = entry;
4728  	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4729  			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4730  	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4731  }
4732  
4733  static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4734  				       struct dma_desc *p,
4735  				       int status, unsigned int len)
4736  {
4737  	unsigned int plen = 0, hlen = 0;
4738  	int coe = priv->hw->rx_csum;
4739  
4740  	/* Not first descriptor, buffer is always zero */
4741  	if (priv->sph && len)
4742  		return 0;
4743  
4744  	/* First descriptor, get split header length */
4745  	stmmac_get_rx_header_len(priv, p, &hlen);
4746  	if (priv->sph && hlen) {
4747  		priv->xstats.rx_split_hdr_pkt_n++;
4748  		return hlen;
4749  	}
4750  
4751  	/* First descriptor, not last descriptor and not split header */
4752  	if (status & rx_not_ls)
4753  		return priv->dma_conf.dma_buf_sz;
4754  
4755  	plen = stmmac_get_rx_frame_len(priv, p, coe);
4756  
4757  	/* First descriptor and last descriptor and not split header */
4758  	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4759  }
4760  
4761  static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4762  				       struct dma_desc *p,
4763  				       int status, unsigned int len)
4764  {
4765  	int coe = priv->hw->rx_csum;
4766  	unsigned int plen = 0;
4767  
4768  	/* Not split header, buffer is not available */
4769  	if (!priv->sph)
4770  		return 0;
4771  
4772  	/* Not last descriptor */
4773  	if (status & rx_not_ls)
4774  		return priv->dma_conf.dma_buf_sz;
4775  
4776  	plen = stmmac_get_rx_frame_len(priv, p, coe);
4777  
4778  	/* Last descriptor */
4779  	return plen - len;
4780  }
4781  
4782  static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4783  				struct xdp_frame *xdpf, bool dma_map)
4784  {
4785  	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4786  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4787  	unsigned int entry = tx_q->cur_tx;
4788  	struct dma_desc *tx_desc;
4789  	dma_addr_t dma_addr;
4790  	bool set_ic;
4791  
4792  	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4793  		return STMMAC_XDP_CONSUMED;
4794  
4795  	if (likely(priv->extend_desc))
4796  		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4797  	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4798  		tx_desc = &tx_q->dma_entx[entry].basic;
4799  	else
4800  		tx_desc = tx_q->dma_tx + entry;
4801  
4802  	if (dma_map) {
4803  		dma_addr = dma_map_single(priv->device, xdpf->data,
4804  					  xdpf->len, DMA_TO_DEVICE);
4805  		if (dma_mapping_error(priv->device, dma_addr))
4806  			return STMMAC_XDP_CONSUMED;
4807  
4808  		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4809  	} else {
4810  		struct page *page = virt_to_page(xdpf->data);
4811  
4812  		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4813  			   xdpf->headroom;
4814  		dma_sync_single_for_device(priv->device, dma_addr,
4815  					   xdpf->len, DMA_BIDIRECTIONAL);
4816  
4817  		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4818  	}
4819  
4820  	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4821  	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4822  	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4823  	tx_q->tx_skbuff_dma[entry].last_segment = true;
4824  	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4825  
4826  	tx_q->xdpf[entry] = xdpf;
4827  
4828  	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4829  
4830  	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4831  			       true, priv->mode, true, true,
4832  			       xdpf->len);
4833  
4834  	tx_q->tx_count_frames++;
4835  
4836  	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4837  		set_ic = true;
4838  	else
4839  		set_ic = false;
4840  
4841  	if (set_ic) {
4842  		tx_q->tx_count_frames = 0;
4843  		stmmac_set_tx_ic(priv, tx_desc);
4844  		u64_stats_update_begin(&txq_stats->q_syncp);
4845  		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4846  		u64_stats_update_end(&txq_stats->q_syncp);
4847  	}
4848  
4849  	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4850  
4851  	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4852  	tx_q->cur_tx = entry;
4853  
4854  	return STMMAC_XDP_TX;
4855  }
4856  
4857  static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4858  				   int cpu)
4859  {
4860  	int index = cpu;
4861  
4862  	if (unlikely(index < 0))
4863  		index = 0;
4864  
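	/* Fold the CPU id onto the available TX queues (equivalent to
	 * index % tx_queues_to_use), so XDP TX shares queues with the stack.
	 */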
4865  	while (index >= priv->plat->tx_queues_to_use)
4866  		index -= priv->plat->tx_queues_to_use;
4867  
4868  	return index;
4869  }
4870  
4871  static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4872  				struct xdp_buff *xdp)
4873  {
4874  	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4875  	int cpu = smp_processor_id();
4876  	struct netdev_queue *nq;
4877  	int queue;
4878  	int res;
4879  
4880  	if (unlikely(!xdpf))
4881  		return STMMAC_XDP_CONSUMED;
4882  
4883  	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4884  	nq = netdev_get_tx_queue(priv->dev, queue);
4885  
4886  	__netif_tx_lock(nq, cpu);
4887  	/* Avoids TX time-out as we are sharing with slow path */
4888  	txq_trans_cond_update(nq);
4889  
4890  	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4891  	if (res == STMMAC_XDP_TX)
4892  		stmmac_flush_tx_descriptors(priv, queue);
4893  
4894  	__netif_tx_unlock(nq);
4895  
4896  	return res;
4897  }
4898  
4899  static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4900  				 struct bpf_prog *prog,
4901  				 struct xdp_buff *xdp)
4902  {
4903  	u32 act;
4904  	int res;
4905  
4906  	act = bpf_prog_run_xdp(prog, xdp);
4907  	switch (act) {
4908  	case XDP_PASS:
4909  		res = STMMAC_XDP_PASS;
4910  		break;
4911  	case XDP_TX:
4912  		res = stmmac_xdp_xmit_back(priv, xdp);
4913  		break;
4914  	case XDP_REDIRECT:
4915  		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4916  			res = STMMAC_XDP_CONSUMED;
4917  		else
4918  			res = STMMAC_XDP_REDIRECT;
4919  		break;
4920  	default:
4921  		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4922  		fallthrough;
4923  	case XDP_ABORTED:
4924  		trace_xdp_exception(priv->dev, prog, act);
4925  		fallthrough;
4926  	case XDP_DROP:
4927  		res = STMMAC_XDP_CONSUMED;
4928  		break;
4929  	}
4930  
4931  	return res;
4932  }
4933  
4934  static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4935  					   struct xdp_buff *xdp)
4936  {
4937  	struct bpf_prog *prog;
4938  	int res;
4939  
4940  	prog = READ_ONCE(priv->xdp_prog);
4941  	if (!prog) {
4942  		res = STMMAC_XDP_PASS;
4943  		goto out;
4944  	}
4945  
4946  	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4947  out:
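	/* Encode the XDP verdict in an ERR_PTR: XDP_PASS (0) becomes NULL so
	 * the caller builds an skb, any other verdict is decoded via -PTR_ERR().
	 */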
4948  	return ERR_PTR(-res);
4949  }
4950  
4951  static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4952  				   int xdp_status)
4953  {
4954  	int cpu = smp_processor_id();
4955  	int queue;
4956  
4957  	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4958  
4959  	if (xdp_status & STMMAC_XDP_TX)
4960  		stmmac_tx_timer_arm(priv, queue);
4961  
4962  	if (xdp_status & STMMAC_XDP_REDIRECT)
4963  		xdp_do_flush();
4964  }
4965  
4966  static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4967  					       struct xdp_buff *xdp)
4968  {
4969  	unsigned int metasize = xdp->data - xdp->data_meta;
4970  	unsigned int datasize = xdp->data_end - xdp->data;
4971  	struct sk_buff *skb;
4972  
4973  	skb = __napi_alloc_skb(&ch->rxtx_napi,
4974  			       xdp->data_end - xdp->data_hard_start,
4975  			       GFP_ATOMIC | __GFP_NOWARN);
4976  	if (unlikely(!skb))
4977  		return NULL;
4978  
4979  	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4980  	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4981  	if (metasize)
4982  		skb_metadata_set(skb, metasize);
4983  
4984  	return skb;
4985  }
4986  
4987  static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4988  				   struct dma_desc *p, struct dma_desc *np,
4989  				   struct xdp_buff *xdp)
4990  {
4991  	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4992  	struct stmmac_channel *ch = &priv->channel[queue];
4993  	unsigned int len = xdp->data_end - xdp->data;
4994  	enum pkt_hash_types hash_type;
4995  	int coe = priv->hw->rx_csum;
4996  	struct sk_buff *skb;
4997  	u32 hash;
4998  
4999  	skb = stmmac_construct_skb_zc(ch, xdp);
5000  	if (!skb) {
5001  		priv->xstats.rx_dropped++;
5002  		return;
5003  	}
5004  
5005  	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5006  	stmmac_rx_vlan(priv->dev, skb);
5007  	skb->protocol = eth_type_trans(skb, priv->dev);
5008  
5009  	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5010  		skb_checksum_none_assert(skb);
5011  	else
5012  		skb->ip_summed = CHECKSUM_UNNECESSARY;
5013  
5014  	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5015  		skb_set_hash(skb, hash, hash_type);
5016  
5017  	skb_record_rx_queue(skb, queue);
5018  	napi_gro_receive(&ch->rxtx_napi, skb);
5019  
5020  	u64_stats_update_begin(&rxq_stats->napi_syncp);
5021  	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5022  	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5023  	u64_stats_update_end(&rxq_stats->napi_syncp);
5024  }
5025  
5026  static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5027  {
5028  	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5029  	unsigned int entry = rx_q->dirty_rx;
5030  	struct dma_desc *rx_desc = NULL;
5031  	bool ret = true;
5032  
5033  	budget = min(budget, stmmac_rx_dirty(priv, queue));
5034  
5035  	while (budget-- > 0 && entry != rx_q->cur_rx) {
5036  		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5037  		dma_addr_t dma_addr;
5038  		bool use_rx_wd;
5039  
5040  		if (!buf->xdp) {
5041  			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5042  			if (!buf->xdp) {
5043  				ret = false;
5044  				break;
5045  			}
5046  		}
5047  
5048  		if (priv->extend_desc)
5049  			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5050  		else
5051  			rx_desc = rx_q->dma_rx + entry;
5052  
5053  		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5054  		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5055  		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5056  		stmmac_refill_desc3(priv, rx_q, rx_desc);
5057  
5058  		rx_q->rx_count_frames++;
5059  		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5060  		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5061  			rx_q->rx_count_frames = 0;
5062  
5063  		use_rx_wd = !priv->rx_coal_frames[queue];
5064  		use_rx_wd |= rx_q->rx_count_frames > 0;
5065  		if (!priv->use_riwt)
5066  			use_rx_wd = false;
5067  
5068  		dma_wmb();
5069  		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5070  
5071  		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5072  	}
5073  
5074  	if (rx_desc) {
5075  		rx_q->dirty_rx = entry;
5076  		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5077  				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5078  		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5079  	}
5080  
5081  	return ret;
5082  }
5083  
5084  static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5085  {
5086  	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5087  	 * to represent incoming packet, whereas cb field in the same structure
5088  	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5089  	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5090  	 */
5091  	return (struct stmmac_xdp_buff *)xdp;
5092  }
5093  
5094  static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5095  {
5096  	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5097  	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5098  	unsigned int count = 0, error = 0, len = 0;
5099  	int dirty = stmmac_rx_dirty(priv, queue);
5100  	unsigned int next_entry = rx_q->cur_rx;
5101  	u32 rx_errors = 0, rx_dropped = 0;
5102  	unsigned int desc_size;
5103  	struct bpf_prog *prog;
5104  	bool failure = false;
5105  	int xdp_status = 0;
5106  	int status = 0;
5107  
5108  	if (netif_msg_rx_status(priv)) {
5109  		void *rx_head;
5110  
5111  		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5112  		if (priv->extend_desc) {
5113  			rx_head = (void *)rx_q->dma_erx;
5114  			desc_size = sizeof(struct dma_extended_desc);
5115  		} else {
5116  			rx_head = (void *)rx_q->dma_rx;
5117  			desc_size = sizeof(struct dma_desc);
5118  		}
5119  
5120  		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5121  				    rx_q->dma_rx_phy, desc_size);
5122  	}
5123  	while (count < limit) {
5124  		struct stmmac_rx_buffer *buf;
5125  		struct stmmac_xdp_buff *ctx;
5126  		unsigned int buf1_len = 0;
5127  		struct dma_desc *np, *p;
5128  		int entry;
5129  		int res;
5130  
5131  		if (!count && rx_q->state_saved) {
5132  			error = rx_q->state.error;
5133  			len = rx_q->state.len;
5134  		} else {
5135  			rx_q->state_saved = false;
5136  			error = 0;
5137  			len = 0;
5138  		}
5139  
5140  		if (count >= limit)
5141  			break;
5142  
5143  read_again:
5144  		buf1_len = 0;
5145  		entry = next_entry;
5146  		buf = &rx_q->buf_pool[entry];
5147  
5148  		if (dirty >= STMMAC_RX_FILL_BATCH) {
5149  			failure = failure ||
5150  				  !stmmac_rx_refill_zc(priv, queue, dirty);
5151  			dirty = 0;
5152  		}
5153  
5154  		if (priv->extend_desc)
5155  			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5156  		else
5157  			p = rx_q->dma_rx + entry;
5158  
5159  		/* read the status of the incoming frame */
5160  		status = stmmac_rx_status(priv, &priv->xstats, p);
5161  		/* check if managed by the DMA otherwise go ahead */
5162  		if (unlikely(status & dma_own))
5163  			break;
5164  
5165  		/* Prefetch the next RX descriptor */
5166  		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5167  						priv->dma_conf.dma_rx_size);
5168  		next_entry = rx_q->cur_rx;
5169  
5170  		if (priv->extend_desc)
5171  			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5172  		else
5173  			np = rx_q->dma_rx + next_entry;
5174  
5175  		prefetch(np);
5176  
5177  		/* Ensure a valid XSK buffer before proceeding */
5178  		if (!buf->xdp)
5179  			break;
5180  
5181  		if (priv->extend_desc)
5182  			stmmac_rx_extended_status(priv, &priv->xstats,
5183  						  rx_q->dma_erx + entry);
5184  		if (unlikely(status == discard_frame)) {
5185  			xsk_buff_free(buf->xdp);
5186  			buf->xdp = NULL;
5187  			dirty++;
5188  			error = 1;
5189  			if (!priv->hwts_rx_en)
5190  				rx_errors++;
5191  		}
5192  
5193  		if (unlikely(error && (status & rx_not_ls)))
5194  			goto read_again;
5195  		if (unlikely(error)) {
5196  			count++;
5197  			continue;
5198  		}
5199  
5200  		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5201  		if (likely(status & rx_not_ls)) {
5202  			xsk_buff_free(buf->xdp);
5203  			buf->xdp = NULL;
5204  			dirty++;
5205  			count++;
5206  			goto read_again;
5207  		}
5208  
5209  		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5210  		ctx->priv = priv;
5211  		ctx->desc = p;
5212  		ctx->ndesc = np;
5213  
5214  		/* XDP ZC frames only support primary buffers for now */
5215  		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5216  		len += buf1_len;
5217  
5218  		/* ACS is disabled; strip manually. */
5219  		if (likely(!(status & rx_not_ls))) {
5220  			buf1_len -= ETH_FCS_LEN;
5221  			len -= ETH_FCS_LEN;
5222  		}
5223  
5224  		/* RX buffer is good and fit into a XSK pool buffer */
5225  		buf->xdp->data_end = buf->xdp->data + buf1_len;
5226  		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5227  
5228  		prog = READ_ONCE(priv->xdp_prog);
5229  		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5230  
5231  		switch (res) {
5232  		case STMMAC_XDP_PASS:
5233  			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5234  			xsk_buff_free(buf->xdp);
5235  			break;
5236  		case STMMAC_XDP_CONSUMED:
5237  			xsk_buff_free(buf->xdp);
5238  			rx_dropped++;
5239  			break;
5240  		case STMMAC_XDP_TX:
5241  		case STMMAC_XDP_REDIRECT:
5242  			xdp_status |= res;
5243  			break;
5244  		}
5245  
5246  		buf->xdp = NULL;
5247  		dirty++;
5248  		count++;
5249  	}
5250  
5251  	if (status & rx_not_ls) {
5252  		rx_q->state_saved = true;
5253  		rx_q->state.error = error;
5254  		rx_q->state.len = len;
5255  	}
5256  
5257  	stmmac_finalize_xdp_rx(priv, xdp_status);
5258  
5259  	u64_stats_update_begin(&rxq_stats->napi_syncp);
5260  	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5261  	u64_stats_update_end(&rxq_stats->napi_syncp);
5262  
5263  	priv->xstats.rx_dropped += rx_dropped;
5264  	priv->xstats.rx_errors += rx_errors;
5265  
5266  	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5267  		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5268  			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5269  		else
5270  			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5271  
5272  		return (int)count;
5273  	}
5274  
5275  	return failure ? limit : (int)count;
5276  }
5277  
5278  /**
5279   * stmmac_rx - manage the receive process
5280   * @priv: driver private structure
5281   * @limit: napi budget
5282   * @queue: RX queue index.
5283   * Description: this is the function called by the napi poll method.
5284   * It gets all the frames inside the ring.
5285   */
5286  static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5287  {
5288  	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5289  	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5290  	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5291  	struct stmmac_channel *ch = &priv->channel[queue];
5292  	unsigned int count = 0, error = 0, len = 0;
5293  	int status = 0, coe = priv->hw->rx_csum;
5294  	unsigned int next_entry = rx_q->cur_rx;
5295  	enum dma_data_direction dma_dir;
5296  	unsigned int desc_size;
5297  	struct sk_buff *skb = NULL;
5298  	struct stmmac_xdp_buff ctx;
5299  	int xdp_status = 0;
5300  	int buf_sz;
5301  
5302  	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
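	/* Round the XDP frame size up to whole pages for xdp_init_buff() below */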
5303  	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5304  	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5305  
5306  	if (netif_msg_rx_status(priv)) {
5307  		void *rx_head;
5308  
5309  		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5310  		if (priv->extend_desc) {
5311  			rx_head = (void *)rx_q->dma_erx;
5312  			desc_size = sizeof(struct dma_extended_desc);
5313  		} else {
5314  			rx_head = (void *)rx_q->dma_rx;
5315  			desc_size = sizeof(struct dma_desc);
5316  		}
5317  
5318  		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5319  				    rx_q->dma_rx_phy, desc_size);
5320  	}
5321  	while (count < limit) {
5322  		unsigned int buf1_len = 0, buf2_len = 0;
5323  		enum pkt_hash_types hash_type;
5324  		struct stmmac_rx_buffer *buf;
5325  		struct dma_desc *np, *p;
5326  		int entry;
5327  		u32 hash;
5328  
5329  		if (!count && rx_q->state_saved) {
5330  			skb = rx_q->state.skb;
5331  			error = rx_q->state.error;
5332  			len = rx_q->state.len;
5333  		} else {
5334  			rx_q->state_saved = false;
5335  			skb = NULL;
5336  			error = 0;
5337  			len = 0;
5338  		}
5339  
5340  read_again:
5341  		if (count >= limit)
5342  			break;
5343  
5344  		buf1_len = 0;
5345  		buf2_len = 0;
5346  		entry = next_entry;
5347  		buf = &rx_q->buf_pool[entry];
5348  
5349  		if (priv->extend_desc)
5350  			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5351  		else
5352  			p = rx_q->dma_rx + entry;
5353  
5354  		/* read the status of the incoming frame */
5355  		status = stmmac_rx_status(priv, &priv->xstats, p);
5356  		/* check if managed by the DMA otherwise go ahead */
5357  		if (unlikely(status & dma_own))
5358  			break;
5359  
5360  		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5361  						priv->dma_conf.dma_rx_size);
5362  		next_entry = rx_q->cur_rx;
5363  
5364  		if (priv->extend_desc)
5365  			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5366  		else
5367  			np = rx_q->dma_rx + next_entry;
5368  
5369  		prefetch(np);
5370  
5371  		if (priv->extend_desc)
5372  			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5373  		if (unlikely(status == discard_frame)) {
5374  			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5375  			buf->page = NULL;
5376  			error = 1;
5377  			if (!priv->hwts_rx_en)
5378  				rx_errors++;
5379  		}
5380  
5381  		if (unlikely(error && (status & rx_not_ls)))
5382  			goto read_again;
5383  		if (unlikely(error)) {
5384  			dev_kfree_skb(skb);
5385  			skb = NULL;
5386  			count++;
5387  			continue;
5388  		}
5389  
5390  		/* Buffer is good. Go on. */
5391  
5392  		prefetch(page_address(buf->page) + buf->page_offset);
5393  		if (buf->sec_page)
5394  			prefetch(page_address(buf->sec_page));
5395  
5396  		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5397  		len += buf1_len;
5398  		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5399  		len += buf2_len;
5400  
5401  		/* ACS is disabled; strip manually. */
5402  		if (likely(!(status & rx_not_ls))) {
5403  			if (buf2_len) {
5404  				buf2_len -= ETH_FCS_LEN;
5405  				len -= ETH_FCS_LEN;
5406  			} else if (buf1_len) {
5407  				buf1_len -= ETH_FCS_LEN;
5408  				len -= ETH_FCS_LEN;
5409  			}
5410  		}
5411  
5412  		if (!skb) {
5413  			unsigned int pre_len, sync_len;
5414  
5415  			dma_sync_single_for_cpu(priv->device, buf->addr,
5416  						buf1_len, dma_dir);
5417  
5418  			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5419  			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5420  					 buf->page_offset, buf1_len, true);
5421  
5422  			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5423  				  buf->page_offset;
5424  
5425  			ctx.priv = priv;
5426  			ctx.desc = p;
5427  			ctx.ndesc = np;
5428  
5429  			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5430  			/* Due to xdp_adjust_tail: the DMA sync for_device
5431  			 * must cover the max length the CPU touched
5432  			 */
5433  			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5434  				   buf->page_offset;
5435  			sync_len = max(sync_len, pre_len);
5436  
5437  			/* For any verdict other than XDP_PASS */
5438  			if (IS_ERR(skb)) {
5439  				unsigned int xdp_res = -PTR_ERR(skb);
5440  
5441  				if (xdp_res & STMMAC_XDP_CONSUMED) {
5442  					page_pool_put_page(rx_q->page_pool,
5443  							   virt_to_head_page(ctx.xdp.data),
5444  							   sync_len, true);
5445  					buf->page = NULL;
5446  					rx_dropped++;
5447  
5448  					/* Clear skb as it was set as
5449  					 * a status by the XDP program.
5450  					 */
5451  					skb = NULL;
5452  
5453  					if (unlikely((status & rx_not_ls)))
5454  						goto read_again;
5455  
5456  					count++;
5457  					continue;
5458  				} else if (xdp_res & (STMMAC_XDP_TX |
5459  						      STMMAC_XDP_REDIRECT)) {
5460  					xdp_status |= xdp_res;
5461  					buf->page = NULL;
5462  					skb = NULL;
5463  					count++;
5464  					continue;
5465  				}
5466  			}
5467  		}
5468  
5469  		if (!skb) {
5470  			/* XDP program may expand or reduce tail */
5471  			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5472  
5473  			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5474  			if (!skb) {
5475  				rx_dropped++;
5476  				count++;
5477  				goto drain_data;
5478  			}
5479  
5480  			/* XDP program may adjust header */
5481  			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5482  			skb_put(skb, buf1_len);
5483  
5484  			/* Data payload copied into SKB, page ready for recycle */
5485  			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5486  			buf->page = NULL;
5487  		} else if (buf1_len) {
5488  			dma_sync_single_for_cpu(priv->device, buf->addr,
5489  						buf1_len, dma_dir);
5490  			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5491  					buf->page, buf->page_offset, buf1_len,
5492  					priv->dma_conf.dma_buf_sz);
5493  
5494  			/* Data payload appended into SKB */
5495  			skb_mark_for_recycle(skb);
5496  			buf->page = NULL;
5497  		}
5498  
5499  		if (buf2_len) {
5500  			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5501  						buf2_len, dma_dir);
5502  			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5503  					buf->sec_page, 0, buf2_len,
5504  					priv->dma_conf.dma_buf_sz);
5505  
5506  			/* Data payload appended into SKB */
5507  			skb_mark_for_recycle(skb);
5508  			buf->sec_page = NULL;
5509  		}
5510  
5511  drain_data:
5512  		if (likely(status & rx_not_ls))
5513  			goto read_again;
5514  		if (!skb)
5515  			continue;
5516  
5517  		/* Got entire packet into SKB. Finish it. */
5518  
5519  		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5520  		stmmac_rx_vlan(priv->dev, skb);
5521  		skb->protocol = eth_type_trans(skb, priv->dev);
5522  
5523  		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5524  			skb_checksum_none_assert(skb);
5525  		else
5526  			skb->ip_summed = CHECKSUM_UNNECESSARY;
5527  
5528  		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5529  			skb_set_hash(skb, hash, hash_type);
5530  
5531  		skb_record_rx_queue(skb, queue);
5532  		napi_gro_receive(&ch->rx_napi, skb);
5533  		skb = NULL;
5534  
5535  		rx_packets++;
5536  		rx_bytes += len;
5537  		count++;
5538  	}
5539  
5540  	if (status & rx_not_ls || skb) {
5541  		rx_q->state_saved = true;
5542  		rx_q->state.skb = skb;
5543  		rx_q->state.error = error;
5544  		rx_q->state.len = len;
5545  	}
5546  
5547  	stmmac_finalize_xdp_rx(priv, xdp_status);
5548  
5549  	stmmac_rx_refill(priv, queue);
5550  
5551  	u64_stats_update_begin(&rxq_stats->napi_syncp);
5552  	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5553  	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5554  	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5555  	u64_stats_update_end(&rxq_stats->napi_syncp);
5556  
5557  	priv->xstats.rx_dropped += rx_dropped;
5558  	priv->xstats.rx_errors += rx_errors;
5559  
5560  	return count;
5561  }
5562  
5563  static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5564  {
5565  	struct stmmac_channel *ch =
5566  		container_of(napi, struct stmmac_channel, rx_napi);
5567  	struct stmmac_priv *priv = ch->priv_data;
5568  	struct stmmac_rxq_stats *rxq_stats;
5569  	u32 chan = ch->index;
5570  	int work_done;
5571  
5572  	rxq_stats = &priv->xstats.rxq_stats[chan];
5573  	u64_stats_update_begin(&rxq_stats->napi_syncp);
5574  	u64_stats_inc(&rxq_stats->napi.poll);
5575  	u64_stats_update_end(&rxq_stats->napi_syncp);
5576  
5577  	work_done = stmmac_rx(priv, budget, chan);
5578  	if (work_done < budget && napi_complete_done(napi, work_done)) {
5579  		unsigned long flags;
5580  
5581  		spin_lock_irqsave(&ch->lock, flags);
5582  		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5583  		spin_unlock_irqrestore(&ch->lock, flags);
5584  	}
5585  
5586  	return work_done;
5587  }
5588  
5589  static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5590  {
5591  	struct stmmac_channel *ch =
5592  		container_of(napi, struct stmmac_channel, tx_napi);
5593  	struct stmmac_priv *priv = ch->priv_data;
5594  	struct stmmac_txq_stats *txq_stats;
5595  	u32 chan = ch->index;
5596  	int work_done;
5597  
5598  	txq_stats = &priv->xstats.txq_stats[chan];
5599  	u64_stats_update_begin(&txq_stats->napi_syncp);
5600  	u64_stats_inc(&txq_stats->napi.poll);
5601  	u64_stats_update_end(&txq_stats->napi_syncp);
5602  
5603  	work_done = stmmac_tx_clean(priv, budget, chan);
5604  	work_done = min(work_done, budget);
5605  
5606  	if (work_done < budget && napi_complete_done(napi, work_done)) {
5607  		unsigned long flags;
5608  
5609  		spin_lock_irqsave(&ch->lock, flags);
5610  		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5611  		spin_unlock_irqrestore(&ch->lock, flags);
5612  	}
5613  
5614  	return work_done;
5615  }
5616  
5617  static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5618  {
5619  	struct stmmac_channel *ch =
5620  		container_of(napi, struct stmmac_channel, rxtx_napi);
5621  	struct stmmac_priv *priv = ch->priv_data;
5622  	int rx_done, tx_done, rxtx_done;
5623  	struct stmmac_rxq_stats *rxq_stats;
5624  	struct stmmac_txq_stats *txq_stats;
5625  	u32 chan = ch->index;
5626  
5627  	rxq_stats = &priv->xstats.rxq_stats[chan];
5628  	u64_stats_update_begin(&rxq_stats->napi_syncp);
5629  	u64_stats_inc(&rxq_stats->napi.poll);
5630  	u64_stats_update_end(&rxq_stats->napi_syncp);
5631  
5632  	txq_stats = &priv->xstats.txq_stats[chan];
5633  	u64_stats_update_begin(&txq_stats->napi_syncp);
5634  	u64_stats_inc(&txq_stats->napi.poll);
5635  	u64_stats_update_end(&txq_stats->napi_syncp);
5636  
5637  	tx_done = stmmac_tx_clean(priv, budget, chan);
5638  	tx_done = min(tx_done, budget);
5639  
5640  	rx_done = stmmac_rx_zc(priv, budget, chan);
5641  
5642  	rxtx_done = max(tx_done, rx_done);
5643  
5644  	/* If either TX or RX work is not complete, return budget
5645  	 * and keep polling
5646  	 */
5647  	if (rxtx_done >= budget)
5648  		return budget;
5649  
5650  	/* all work done, exit the polling mode */
5651  	if (napi_complete_done(napi, rxtx_done)) {
5652  		unsigned long flags;
5653  
5654  		spin_lock_irqsave(&ch->lock, flags);
5655  		/* Both RX and TX work are complete,
5656  		 * so enable both RX & TX IRQs.
5657  		 */
5658  		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5659  		spin_unlock_irqrestore(&ch->lock, flags);
5660  	}
5661  
5662  	return min(rxtx_done, budget - 1);
5663  }
5664  
5665  /**
5666   *  stmmac_tx_timeout
5667   *  @dev : Pointer to net device structure
5668   *  @txqueue: the index of the hanging transmit queue
5669   *  Description: this function is called when a packet transmission fails to
5670   *   complete within a reasonable time. The driver will mark the error in the
5671   *   netdev structure and arrange for the device to be reset to a sane state
5672   *   in order to transmit a new packet.
5673   */
5674  static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5675  {
5676  	struct stmmac_priv *priv = netdev_priv(dev);
5677  
5678  	stmmac_global_err(priv);
5679  }
5680  
5681  /**
5682   *  stmmac_set_rx_mode - entry point for multicast addressing
5683   *  @dev : pointer to the device structure
5684   *  Description:
5685   *  This function is a driver entry point which gets called by the kernel
5686   *  whenever multicast addresses must be enabled/disabled.
5687   *  Return value:
5688   *  void.
5689   */
5690  static void stmmac_set_rx_mode(struct net_device *dev)
5691  {
5692  	struct stmmac_priv *priv = netdev_priv(dev);
5693  
5694  	stmmac_set_filter(priv, priv->hw, dev);
5695  }
5696  
5697  /**
5698   *  stmmac_change_mtu - entry point to change MTU size for the device.
5699   *  @dev : device pointer.
5700   *  @new_mtu : the new MTU size for the device.
5701   *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5702   *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5703   *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5704   *  Return value:
5705   *  0 on success and an appropriate (-)ve integer as defined in errno.h
5706   *  file on failure.
5707   */
5708  static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5709  {
5710  	struct stmmac_priv *priv = netdev_priv(dev);
5711  	int txfifosz = priv->plat->tx_fifo_size;
5712  	struct stmmac_dma_conf *dma_conf;
5713  	const int mtu = new_mtu;
5714  	int ret;
5715  
5716  	if (txfifosz == 0)
5717  		txfifosz = priv->dma_cap.tx_fifo_size;
5718  
5719  	txfifosz /= priv->plat->tx_queues_to_use;
5720  
5721  	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5722  		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5723  		return -EINVAL;
5724  	}
5725  
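	/* The aligned value is only used for the FIFO/buffer-size checks below;
	 * the device keeps the MTU actually requested (mtu).
	 */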
5726  	new_mtu = STMMAC_ALIGN(new_mtu);
5727  
5728  	/* If condition true, FIFO is too small or MTU too large */
5729  	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5730  		return -EINVAL;
5731  
5732  	if (netif_running(dev)) {
5733  		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5734  		/* Try to allocate the new DMA conf with the new mtu */
5735  		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5736  		if (IS_ERR(dma_conf)) {
5737  			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5738  				   mtu);
5739  			return PTR_ERR(dma_conf);
5740  		}
5741  
5742  		stmmac_release(dev);
5743  
5744  		ret = __stmmac_open(dev, dma_conf);
5745  		if (ret) {
5746  			free_dma_desc_resources(priv, dma_conf);
5747  			kfree(dma_conf);
5748  			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5749  			return ret;
5750  		}
5751  
5752  		kfree(dma_conf);
5753  
5754  		stmmac_set_rx_mode(dev);
5755  	}
5756  
5757  	dev->mtu = mtu;
5758  	netdev_update_features(dev);
5759  
5760  	return 0;
5761  }
5762  
5763  static netdev_features_t stmmac_fix_features(struct net_device *dev,
5764  					     netdev_features_t features)
5765  {
5766  	struct stmmac_priv *priv = netdev_priv(dev);
5767  
5768  	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5769  		features &= ~NETIF_F_RXCSUM;
5770  
5771  	if (!priv->plat->tx_coe)
5772  		features &= ~NETIF_F_CSUM_MASK;
5773  
5774  	/* Some GMAC devices have a bugged Jumbo frame support that
5775  	 * needs to have the Tx COE disabled for oversized frames
5776  	 * (due to limited buffer sizes). In this case we disable
5777  	 * the TX csum insertion in the TDES and not use SF.
5778  	 */
5779  	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5780  		features &= ~NETIF_F_CSUM_MASK;
5781  
5782  	/* Disable tso if asked by ethtool */
5783  	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5784  		if (features & NETIF_F_TSO)
5785  			priv->tso = true;
5786  		else
5787  			priv->tso = false;
5788  	}
5789  
5790  	return features;
5791  }
5792  
5793  static int stmmac_set_features(struct net_device *netdev,
5794  			       netdev_features_t features)
5795  {
5796  	struct stmmac_priv *priv = netdev_priv(netdev);
5797  
5798  	/* Keep the COE type if checksum offload is supported */
5799  	if (features & NETIF_F_RXCSUM)
5800  		priv->hw->rx_csum = priv->plat->rx_coe;
5801  	else
5802  		priv->hw->rx_csum = 0;
5803  	/* No check needed because rx_coe has been set before and it will be
5804  	 * fixed in case of issue.
5805  	 */
5806  	stmmac_rx_ipc(priv, priv->hw);
5807  
5808  	if (priv->sph_cap) {
5809  		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5810  		u32 chan;
5811  
5812  		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5813  			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5814  	}
5815  
5816  	return 0;
5817  }
5818  
5819  static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5820  {
5821  	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5822  	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5823  	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5824  	bool *hs_enable = &fpe_cfg->hs_enable;
5825  
5826  	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5827  		return;
5828  
5829  	/* If LP has sent verify mPacket, LP is FPE capable */
5830  	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5831  		if (*lp_state < FPE_STATE_CAPABLE)
5832  			*lp_state = FPE_STATE_CAPABLE;
5833  
5834  		/* If the user has requested FPE enable, respond quickly */
5835  		if (*hs_enable)
5836  			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5837  						fpe_cfg,
5838  						MPACKET_RESPONSE);
5839  	}
5840  
5841  	/* If Local has sent verify mPacket, Local is FPE capable */
5842  	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5843  		if (*lo_state < FPE_STATE_CAPABLE)
5844  			*lo_state = FPE_STATE_CAPABLE;
5845  	}
5846  
5847  	/* If LP has sent response mPacket, LP is entering FPE ON */
5848  	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5849  		*lp_state = FPE_STATE_ENTERING_ON;
5850  
5851  	/* If Local has sent response mPacket, Local is entering FPE ON */
5852  	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5853  		*lo_state = FPE_STATE_ENTERING_ON;
5854  
5855  	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5856  	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5857  	    priv->fpe_wq) {
5858  		queue_work(priv->fpe_wq, &priv->fpe_task);
5859  	}
5860  }
5861  
5862  static void stmmac_common_interrupt(struct stmmac_priv *priv)
5863  {
5864  	u32 rx_cnt = priv->plat->rx_queues_to_use;
5865  	u32 tx_cnt = priv->plat->tx_queues_to_use;
5866  	u32 queues_count;
5867  	u32 queue;
5868  	bool xmac;
5869  
5870  	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5871  	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5872  
5873  	if (priv->irq_wake)
5874  		pm_wakeup_event(priv->device, 0);
5875  
5876  	if (priv->dma_cap.estsel)
5877  		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5878  				      &priv->xstats, tx_cnt);
5879  
5880  	if (priv->dma_cap.fpesel) {
5881  		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5882  						   priv->dev);
5883  
5884  		stmmac_fpe_event_status(priv, status);
5885  	}
5886  
5887  	/* To handle GMAC own interrupts */
5888  	if ((priv->plat->has_gmac) || xmac) {
5889  		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5890  
5891  		if (unlikely(status)) {
5892  			/* For LPI we need to save the tx status */
5893  			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5894  				priv->tx_path_in_lpi_mode = true;
5895  			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5896  				priv->tx_path_in_lpi_mode = false;
5897  		}
5898  
5899  		for (queue = 0; queue < queues_count; queue++) {
5900  			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5901  							    queue);
5902  		}
5903  
5904  		/* PCS link status */
5905  		if (priv->hw->pcs &&
5906  		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5907  			if (priv->xstats.pcs_link)
5908  				netif_carrier_on(priv->dev);
5909  			else
5910  				netif_carrier_off(priv->dev);
5911  		}
5912  
5913  		stmmac_timestamp_interrupt(priv, priv);
5914  	}
5915  }
5916  
5917  /**
5918   *  stmmac_interrupt - main ISR
5919   *  @irq: interrupt number.
5920   *  @dev_id: to pass the net device pointer.
5921   *  Description: this is the main driver interrupt service routine.
5922   *  It can call:
5923   *  o DMA service routine (to manage incoming frame reception and transmission
5924   *    status)
5925   *  o Core interrupts to manage: remote wake-up, management counter, LPI
5926   *    interrupts.
5927   */
5928  static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5929  {
5930  	struct net_device *dev = (struct net_device *)dev_id;
5931  	struct stmmac_priv *priv = netdev_priv(dev);
5932  
5933  	/* Check if adapter is up */
5934  	if (test_bit(STMMAC_DOWN, &priv->state))
5935  		return IRQ_HANDLED;
5936  
5937  	/* Check if a fatal error happened */
5938  	if (stmmac_safety_feat_interrupt(priv))
5939  		return IRQ_HANDLED;
5940  
5941  	/* To handle Common interrupts */
5942  	stmmac_common_interrupt(priv);
5943  
5944  	/* To handle DMA interrupts */
5945  	stmmac_dma_interrupt(priv);
5946  
5947  	return IRQ_HANDLED;
5948  }
5949  
5950  static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5951  {
5952  	struct net_device *dev = (struct net_device *)dev_id;
5953  	struct stmmac_priv *priv = netdev_priv(dev);
5954  
5955  	/* Check if adapter is up */
5956  	if (test_bit(STMMAC_DOWN, &priv->state))
5957  		return IRQ_HANDLED;
5958  
5959  	/* To handle Common interrupts */
5960  	stmmac_common_interrupt(priv);
5961  
5962  	return IRQ_HANDLED;
5963  }
5964  
5965  static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5966  {
5967  	struct net_device *dev = (struct net_device *)dev_id;
5968  	struct stmmac_priv *priv = netdev_priv(dev);
5969  
5970  	/* Check if adapter is up */
5971  	if (test_bit(STMMAC_DOWN, &priv->state))
5972  		return IRQ_HANDLED;
5973  
5974  	/* Check if a fatal error happened */
5975  	stmmac_safety_feat_interrupt(priv);
5976  
5977  	return IRQ_HANDLED;
5978  }
5979  
5980  static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5981  {
5982  	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5983  	struct stmmac_dma_conf *dma_conf;
5984  	int chan = tx_q->queue_index;
5985  	struct stmmac_priv *priv;
5986  	int status;
5987  
5988  	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5989  	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5990  
5991  	/* Check if adapter is up */
5992  	if (test_bit(STMMAC_DOWN, &priv->state))
5993  		return IRQ_HANDLED;
5994  
5995  	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5996  
5997  	if (unlikely(status & tx_hard_error_bump_tc)) {
5998  		/* Try to bump up the dma threshold on this failure */
5999  		stmmac_bump_dma_threshold(priv, chan);
6000  	} else if (unlikely(status == tx_hard_error)) {
6001  		stmmac_tx_err(priv, chan);
6002  	}
6003  
6004  	return IRQ_HANDLED;
6005  }
6006  
6007  static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6008  {
6009  	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6010  	struct stmmac_dma_conf *dma_conf;
6011  	int chan = rx_q->queue_index;
6012  	struct stmmac_priv *priv;
6013  
6014  	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6015  	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6016  
6017  	/* Check if adapter is up */
6018  	if (test_bit(STMMAC_DOWN, &priv->state))
6019  		return IRQ_HANDLED;
6020  
6021  	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6022  
6023  	return IRQ_HANDLED;
6024  }
6025  
6026  /**
6027   *  stmmac_ioctl - Entry point for the Ioctl
6028   *  @dev: Device pointer.
6029   *  @rq: An IOCTL-specific structure that can contain a pointer to
6030   *  a proprietary structure used to pass information to the driver.
6031   *  @cmd: IOCTL command
6032   *  Description:
6033   *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6034   */
6035  static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6036  {
6037  	struct stmmac_priv *priv = netdev_priv(dev);
6038  	int ret = -EOPNOTSUPP;
6039  
6040  	if (!netif_running(dev))
6041  		return -EINVAL;
6042  
6043  	switch (cmd) {
6044  	case SIOCGMIIPHY:
6045  	case SIOCGMIIREG:
6046  	case SIOCSMIIREG:
6047  		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6048  		break;
6049  	case SIOCSHWTSTAMP:
6050  		ret = stmmac_hwtstamp_set(dev, rq);
6051  		break;
6052  	case SIOCGHWTSTAMP:
6053  		ret = stmmac_hwtstamp_get(dev, rq);
6054  		break;
6055  	default:
6056  		break;
6057  	}
6058  
6059  	return ret;
6060  }
6061  
6062  static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6063  				    void *cb_priv)
6064  {
6065  	struct stmmac_priv *priv = cb_priv;
6066  	int ret = -EOPNOTSUPP;
6067  
6068  	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6069  		return ret;
6070  
6071  	__stmmac_disable_all_queues(priv);
6072  
6073  	switch (type) {
6074  	case TC_SETUP_CLSU32:
6075  		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6076  		break;
6077  	case TC_SETUP_CLSFLOWER:
6078  		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6079  		break;
6080  	default:
6081  		break;
6082  	}
6083  
6084  	stmmac_enable_all_queues(priv);
6085  	return ret;
6086  }
6087  
6088  static LIST_HEAD(stmmac_block_cb_list);
6089  
6090  static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6091  			   void *type_data)
6092  {
6093  	struct stmmac_priv *priv = netdev_priv(ndev);
6094  
6095  	switch (type) {
6096  	case TC_QUERY_CAPS:
6097  		return stmmac_tc_query_caps(priv, priv, type_data);
6098  	case TC_SETUP_BLOCK:
6099  		return flow_block_cb_setup_simple(type_data,
6100  						  &stmmac_block_cb_list,
6101  						  stmmac_setup_tc_block_cb,
6102  						  priv, priv, true);
6103  	case TC_SETUP_QDISC_CBS:
6104  		return stmmac_tc_setup_cbs(priv, priv, type_data);
6105  	case TC_SETUP_QDISC_TAPRIO:
6106  		return stmmac_tc_setup_taprio(priv, priv, type_data);
6107  	case TC_SETUP_QDISC_ETF:
6108  		return stmmac_tc_setup_etf(priv, priv, type_data);
6109  	default:
6110  		return -EOPNOTSUPP;
6111  	}
6112  }
6113  
6114  static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6115  			       struct net_device *sb_dev)
6116  {
6117  	int gso = skb_shinfo(skb)->gso_type;
6118  
6119  	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6120  		/*
6121  		 * There is no way to determine the number of TSO/USO
6122  		 * capable Queues. Let's always use Queue 0
6123  		 * because if TSO/USO is supported then at least this
6124  		 * one will be capable.
6125  		 */
6126  		return 0;
6127  	}
6128  
6129  	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6130  }
6131  
6132  static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6133  {
6134  	struct stmmac_priv *priv = netdev_priv(ndev);
6135  	int ret = 0;
6136  
6137  	ret = pm_runtime_resume_and_get(priv->device);
6138  	if (ret < 0)
6139  		return ret;
6140  
6141  	ret = eth_mac_addr(ndev, addr);
6142  	if (ret)
6143  		goto set_mac_error;
6144  
6145  	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6146  
6147  set_mac_error:
6148  	pm_runtime_put(priv->device);
6149  
6150  	return ret;
6151  }
6152  
6153  #ifdef CONFIG_DEBUG_FS
6154  static struct dentry *stmmac_fs_dir;
6155  
6156  static void sysfs_display_ring(void *head, int size, int extend_desc,
6157  			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6158  {
6159  	int i;
6160  	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6161  	struct dma_desc *p = (struct dma_desc *)head;
6162  	dma_addr_t dma_addr;
6163  
6164  	for (i = 0; i < size; i++) {
6165  		if (extend_desc) {
6166  			dma_addr = dma_phy_addr + i * sizeof(*ep);
6167  			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6168  				   i, &dma_addr,
6169  				   le32_to_cpu(ep->basic.des0),
6170  				   le32_to_cpu(ep->basic.des1),
6171  				   le32_to_cpu(ep->basic.des2),
6172  				   le32_to_cpu(ep->basic.des3));
6173  			ep++;
6174  		} else {
6175  			dma_addr = dma_phy_addr + i * sizeof(*p);
6176  			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6177  				   i, &dma_addr,
6178  				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6179  				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6180  			p++;
6181  		}
6182  		seq_printf(seq, "\n");
6183  	}
6184  }
6185  
6186  static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6187  {
6188  	struct net_device *dev = seq->private;
6189  	struct stmmac_priv *priv = netdev_priv(dev);
6190  	u32 rx_count = priv->plat->rx_queues_to_use;
6191  	u32 tx_count = priv->plat->tx_queues_to_use;
6192  	u32 queue;
6193  
6194  	if ((dev->flags & IFF_UP) == 0)
6195  		return 0;
6196  
6197  	for (queue = 0; queue < rx_count; queue++) {
6198  		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6199  
6200  		seq_printf(seq, "RX Queue %d:\n", queue);
6201  
6202  		if (priv->extend_desc) {
6203  			seq_printf(seq, "Extended descriptor ring:\n");
6204  			sysfs_display_ring((void *)rx_q->dma_erx,
6205  					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6206  		} else {
6207  			seq_printf(seq, "Descriptor ring:\n");
6208  			sysfs_display_ring((void *)rx_q->dma_rx,
6209  					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6210  		}
6211  	}
6212  
6213  	for (queue = 0; queue < tx_count; queue++) {
6214  		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6215  
6216  		seq_printf(seq, "TX Queue %d:\n", queue);
6217  
6218  		if (priv->extend_desc) {
6219  			seq_printf(seq, "Extended descriptor ring:\n");
6220  			sysfs_display_ring((void *)tx_q->dma_etx,
6221  					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6222  		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6223  			seq_printf(seq, "Descriptor ring:\n");
6224  			sysfs_display_ring((void *)tx_q->dma_tx,
6225  					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6226  		}
6227  	}
6228  
6229  	return 0;
6230  }
6231  DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6232  
6233  static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6234  {
6235  	static const char * const dwxgmac_timestamp_source[] = {
6236  		"None",
6237  		"Internal",
6238  		"External",
6239  		"Both",
6240  	};
6241  	static const char * const dwxgmac_safety_feature_desc[] = {
6242  		"No",
6243  		"All Safety Features with ECC and Parity",
6244  		"All Safety Features without ECC or Parity",
6245  		"All Safety Features with Parity Only",
6246  		"ECC Only",
6247  		"UNDEFINED",
6248  		"UNDEFINED",
6249  		"UNDEFINED",
6250  	};
6251  	struct net_device *dev = seq->private;
6252  	struct stmmac_priv *priv = netdev_priv(dev);
6253  
6254  	if (!priv->hw_cap_support) {
6255  		seq_printf(seq, "DMA HW features not supported\n");
6256  		return 0;
6257  	}
6258  
6259  	seq_printf(seq, "==============================\n");
6260  	seq_printf(seq, "\tDMA HW features\n");
6261  	seq_printf(seq, "==============================\n");
6262  
6263  	seq_printf(seq, "\t10/100 Mbps: %s\n",
6264  		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6265  	seq_printf(seq, "\t1000 Mbps: %s\n",
6266  		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6267  	seq_printf(seq, "\tHalf duplex: %s\n",
6268  		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6269  	if (priv->plat->has_xgmac) {
6270  		seq_printf(seq,
6271  			   "\tNumber of Additional MAC address registers: %d\n",
6272  			   priv->dma_cap.multi_addr);
6273  	} else {
6274  		seq_printf(seq, "\tHash Filter: %s\n",
6275  			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6276  		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6277  			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6278  	}
6279  	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6280  		   (priv->dma_cap.pcs) ? "Y" : "N");
6281  	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6282  		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6283  	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6284  		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6285  	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6286  		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6287  	seq_printf(seq, "\tRMON module: %s\n",
6288  		   (priv->dma_cap.rmon) ? "Y" : "N");
6289  	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6290  		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6291  	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6292  		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6293  	if (priv->plat->has_xgmac)
6294  		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6295  			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6296  	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6297  		   (priv->dma_cap.eee) ? "Y" : "N");
6298  	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6299  	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6300  		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6301  	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6302  	    priv->plat->has_xgmac) {
6303  		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6304  			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6305  	} else {
6306  		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6307  			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6308  		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6309  			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6310  		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6311  			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6312  	}
6313  	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6314  		   priv->dma_cap.number_rx_channel);
6315  	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6316  		   priv->dma_cap.number_tx_channel);
6317  	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6318  		   priv->dma_cap.number_rx_queues);
6319  	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6320  		   priv->dma_cap.number_tx_queues);
6321  	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6322  		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6323  	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6324  	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6325  	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6326  		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6327  	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6328  	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6329  		   priv->dma_cap.pps_out_num);
6330  	seq_printf(seq, "\tSafety Features: %s\n",
6331  		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6332  	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6333  		   priv->dma_cap.frpsel ? "Y" : "N");
6334  	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6335  		   priv->dma_cap.host_dma_width);
6336  	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6337  		   priv->dma_cap.rssen ? "Y" : "N");
6338  	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6339  		   priv->dma_cap.vlhash ? "Y" : "N");
6340  	seq_printf(seq, "\tSplit Header: %s\n",
6341  		   priv->dma_cap.sphen ? "Y" : "N");
6342  	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6343  		   priv->dma_cap.vlins ? "Y" : "N");
6344  	seq_printf(seq, "\tDouble VLAN: %s\n",
6345  		   priv->dma_cap.dvlan ? "Y" : "N");
6346  	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6347  		   priv->dma_cap.l3l4fnum);
6348  	seq_printf(seq, "\tARP Offloading: %s\n",
6349  		   priv->dma_cap.arpoffsel ? "Y" : "N");
6350  	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6351  		   priv->dma_cap.estsel ? "Y" : "N");
6352  	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6353  		   priv->dma_cap.fpesel ? "Y" : "N");
6354  	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6355  		   priv->dma_cap.tbssel ? "Y" : "N");
6356  	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6357  		   priv->dma_cap.tbs_ch_num);
6358  	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6359  		   priv->dma_cap.sgfsel ? "Y" : "N");
6360  	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6361  		   BIT(priv->dma_cap.ttsfd) >> 1);
6362  	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6363  		   priv->dma_cap.numtc);
6364  	seq_printf(seq, "\tDCB Feature: %s\n",
6365  		   priv->dma_cap.dcben ? "Y" : "N");
6366  	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6367  		   priv->dma_cap.advthword ? "Y" : "N");
6368  	seq_printf(seq, "\tPTP Offload: %s\n",
6369  		   priv->dma_cap.ptoen ? "Y" : "N");
6370  	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6371  		   priv->dma_cap.osten ? "Y" : "N");
6372  	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6373  		   priv->dma_cap.pfcen ? "Y" : "N");
6374  	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6375  		   BIT(priv->dma_cap.frpes) << 6);
6376  	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6377  		   BIT(priv->dma_cap.frpbs) << 6);
6378  	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6379  		   priv->dma_cap.frppipe_num);
6380  	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6381  		   priv->dma_cap.nrvf_num ?
6382  		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6383  	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6384  		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6385  	seq_printf(seq, "\tDepth of GCL: %lu\n",
6386  		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6387  	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6388  		   priv->dma_cap.cbtisel ? "Y" : "N");
6389  	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6390  		   priv->dma_cap.aux_snapshot_n);
6391  	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6392  		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6393  	seq_printf(seq, "\tEnhanced DMA: %s\n",
6394  		   priv->dma_cap.edma ? "Y" : "N");
6395  	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6396  		   priv->dma_cap.ediffc ? "Y" : "N");
6397  	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6398  		   priv->dma_cap.vxn ? "Y" : "N");
6399  	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6400  		   priv->dma_cap.dbgmem ? "Y" : "N");
6401  	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6402  		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6403  	return 0;
6404  }
6405  DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6406  
6407  /* Use network device events to rename debugfs file entries.
6408   */
6409  static int stmmac_device_event(struct notifier_block *unused,
6410  			       unsigned long event, void *ptr)
6411  {
6412  	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6413  	struct stmmac_priv *priv = netdev_priv(dev);
6414  
6415  	if (dev->netdev_ops != &stmmac_netdev_ops)
6416  		goto done;
6417  
6418  	switch (event) {
6419  	case NETDEV_CHANGENAME:
6420  		if (priv->dbgfs_dir)
6421  			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6422  							 priv->dbgfs_dir,
6423  							 stmmac_fs_dir,
6424  							 dev->name);
6425  		break;
6426  	}
6427  done:
6428  	return NOTIFY_DONE;
6429  }
6430  
6431  static struct notifier_block stmmac_notifier = {
6432  	.notifier_call = stmmac_device_event,
6433  };
6434  
6435  static void stmmac_init_fs(struct net_device *dev)
6436  {
6437  	struct stmmac_priv *priv = netdev_priv(dev);
6438  
6439  	rtnl_lock();
6440  
6441  	/* Create per netdev entries */
6442  	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6443  
6444  	/* Entry to report DMA RX/TX rings */
6445  	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6446  			    &stmmac_rings_status_fops);
6447  
6448  	/* Entry to report the DMA HW features */
6449  	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6450  			    &stmmac_dma_cap_fops);
6451  
6452  	rtnl_unlock();
6453  }
6454  
6455  static void stmmac_exit_fs(struct net_device *dev)
6456  {
6457  	struct stmmac_priv *priv = netdev_priv(dev);
6458  
6459  	debugfs_remove_recursive(priv->dbgfs_dir);
6460  }
6461  #endif /* CONFIG_DEBUG_FS */
6462  
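/* Bit-serial CRC-32 (little-endian, polynomial 0xEDB88320) over the 12 valid
 * VID bits (get_bitmask_order(VLAN_VID_MASK) == 12). The top four bits of the
 * bit-reversed result are used by stmmac_vlan_update() below as the index
 * into the 16-bit VLAN hash filter.
 */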
6463  static u32 stmmac_vid_crc32_le(__le16 vid_le)
6464  {
6465  	unsigned char *data = (unsigned char *)&vid_le;
6466  	unsigned char data_byte = 0;
6467  	u32 crc = ~0x0;
6468  	u32 temp = 0;
6469  	int i, bits;
6470  
6471  	bits = get_bitmask_order(VLAN_VID_MASK);
6472  	for (i = 0; i < bits; i++) {
6473  		if ((i % 8) == 0)
6474  			data_byte = data[i / 8];
6475  
6476  		temp = ((crc & 1) ^ data_byte) & 1;
6477  		crc >>= 1;
6478  		data_byte >>= 1;
6479  
6480  		if (temp)
6481  			crc ^= 0xedb88320;
6482  	}
6483  
6484  	return crc;
6485  }
6486  
6487  static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6488  {
6489  	u32 crc, hash = 0;
6490  	u16 pmatch = 0;
6491  	int count = 0;
6492  	u16 vid = 0;
6493  
6494  	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6495  		__le16 vid_le = cpu_to_le16(vid);
6496  		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6497  		hash |= (1 << crc);
6498  		count++;
6499  	}
6500  
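	/* Without VLAN hash filtering the hardware can only perfect-match a
	 * single VID, so anything beyond VID 0 plus one other active VID
	 * cannot be programmed and is rejected below.
	 */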
6501  	if (!priv->dma_cap.vlhash) {
6502  		if (count > 2) /* VID = 0 always passes filter */
6503  			return -EOPNOTSUPP;
6504  
6505  		pmatch = vid;
6506  		hash = 0;
6507  	}
6508  
6509  	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6510  }
6511  
6512  static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6513  {
6514  	struct stmmac_priv *priv = netdev_priv(ndev);
6515  	bool is_double = false;
6516  	int ret;
6517  
6518  	ret = pm_runtime_resume_and_get(priv->device);
6519  	if (ret < 0)
6520  		return ret;
6521  
6522  	if (be16_to_cpu(proto) == ETH_P_8021AD)
6523  		is_double = true;
6524  
6525  	set_bit(vid, priv->active_vlans);
6526  	ret = stmmac_vlan_update(priv, is_double);
6527  	if (ret) {
6528  		clear_bit(vid, priv->active_vlans);
6529  		goto err_pm_put;
6530  	}
6531  
6532  	if (priv->hw->num_vlan) {
6533  		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6534  		if (ret)
6535  			goto err_pm_put;
6536  	}
6537  err_pm_put:
6538  	pm_runtime_put(priv->device);
6539  
6540  	return ret;
6541  }
6542  
6543  static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6544  {
6545  	struct stmmac_priv *priv = netdev_priv(ndev);
6546  	bool is_double = false;
6547  	int ret;
6548  
6549  	ret = pm_runtime_resume_and_get(priv->device);
6550  	if (ret < 0)
6551  		return ret;
6552  
6553  	if (be16_to_cpu(proto) == ETH_P_8021AD)
6554  		is_double = true;
6555  
6556  	clear_bit(vid, priv->active_vlans);
6557  
6558  	if (priv->hw->num_vlan) {
6559  		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6560  		if (ret)
6561  			goto del_vlan_error;
6562  	}
6563  
6564  	ret = stmmac_vlan_update(priv, is_double);
6565  
6566  del_vlan_error:
6567  	pm_runtime_put(priv->device);
6568  
6569  	return ret;
6570  }
6571  
6572  static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6573  {
6574  	struct stmmac_priv *priv = netdev_priv(dev);
6575  
6576  	switch (bpf->command) {
6577  	case XDP_SETUP_PROG:
6578  		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6579  	case XDP_SETUP_XSK_POOL:
6580  		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6581  					     bpf->xsk.queue_id);
6582  	default:
6583  		return -EOPNOTSUPP;
6584  	}
6585  }
6586  
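/* .ndo_xdp_xmit callback: queues up to @num_frames XDP frames on the TX
 * channel mapped to the current CPU and returns how many were accepted.
 * XDP_XMIT_FLUSH flushes the TX descriptors (tail pointer write) and
 * re-arms the TX coalesce timer.
 */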
6587  static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6588  			   struct xdp_frame **frames, u32 flags)
6589  {
6590  	struct stmmac_priv *priv = netdev_priv(dev);
6591  	int cpu = smp_processor_id();
6592  	struct netdev_queue *nq;
6593  	int i, nxmit = 0;
6594  	int queue;
6595  
6596  	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6597  		return -ENETDOWN;
6598  
6599  	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6600  		return -EINVAL;
6601  
6602  	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6603  	nq = netdev_get_tx_queue(priv->dev, queue);
6604  
6605  	__netif_tx_lock(nq, cpu);
6606  	/* Avoids TX time-out as we are sharing with slow path */
6607  	txq_trans_cond_update(nq);
6608  
6609  	for (i = 0; i < num_frames; i++) {
6610  		int res;
6611  
6612  		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6613  		if (res == STMMAC_XDP_CONSUMED)
6614  			break;
6615  
6616  		nxmit++;
6617  	}
6618  
6619  	if (flags & XDP_XMIT_FLUSH) {
6620  		stmmac_flush_tx_descriptors(priv, queue);
6621  		stmmac_tx_timer_arm(priv, queue);
6622  	}
6623  
6624  	__netif_tx_unlock(nq);
6625  
6626  	return nxmit;
6627  }
6628  
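/* Per-queue teardown/re-init helpers. These stop one DMA channel and release
 * its descriptor resources without bringing the whole interface down; they
 * are typically used when an AF_XDP buffer pool is attached to or detached
 * from a single queue.
 */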
6629  void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6630  {
6631  	struct stmmac_channel *ch = &priv->channel[queue];
6632  	unsigned long flags;
6633  
6634  	spin_lock_irqsave(&ch->lock, flags);
6635  	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6636  	spin_unlock_irqrestore(&ch->lock, flags);
6637  
6638  	stmmac_stop_rx_dma(priv, queue);
6639  	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6640  }
6641  
6642  void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6643  {
6644  	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6645  	struct stmmac_channel *ch = &priv->channel[queue];
6646  	unsigned long flags;
6647  	u32 buf_size;
6648  	int ret;
6649  
6650  	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6651  	if (ret) {
6652  		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6653  		return;
6654  	}
6655  
6656  	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6657  	if (ret) {
6658  		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6659  		netdev_err(priv->dev, "Failed to init RX desc.\n");
6660  		return;
6661  	}
6662  
6663  	stmmac_reset_rx_queue(priv, queue);
6664  	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6665  
6666  	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6667  			    rx_q->dma_rx_phy, rx_q->queue_index);
6668  
6669  	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6670  			     sizeof(struct dma_desc));
6671  	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6672  			       rx_q->rx_tail_addr, rx_q->queue_index);
6673  
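	/* Program the RX DMA buffer size: use the XSK pool frame size when a
	 * zero-copy pool is attached, otherwise fall back to the default
	 * dma_buf_sz.
	 */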
6674  	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6675  		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6676  		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6677  				      buf_size,
6678  				      rx_q->queue_index);
6679  	} else {
6680  		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6681  				      priv->dma_conf.dma_buf_sz,
6682  				      rx_q->queue_index);
6683  	}
6684  
6685  	stmmac_start_rx_dma(priv, queue);
6686  
6687  	spin_lock_irqsave(&ch->lock, flags);
6688  	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6689  	spin_unlock_irqrestore(&ch->lock, flags);
6690  }
6691  
6692  void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6693  {
6694  	struct stmmac_channel *ch = &priv->channel[queue];
6695  	unsigned long flags;
6696  
6697  	spin_lock_irqsave(&ch->lock, flags);
6698  	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6699  	spin_unlock_irqrestore(&ch->lock, flags);
6700  
6701  	stmmac_stop_tx_dma(priv, queue);
6702  	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6703  }
6704  
6705  void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6706  {
6707  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6708  	struct stmmac_channel *ch = &priv->channel[queue];
6709  	unsigned long flags;
6710  	int ret;
6711  
6712  	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6713  	if (ret) {
6714  		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6715  		return;
6716  	}
6717  
6718  	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6719  	if (ret) {
6720  		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6721  		netdev_err(priv->dev, "Failed to init TX desc.\n");
6722  		return;
6723  	}
6724  
6725  	stmmac_reset_tx_queue(priv, queue);
6726  	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6727  
6728  	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6729  			    tx_q->dma_tx_phy, tx_q->queue_index);
6730  
6731  	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6732  		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6733  
6734  	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6735  	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6736  			       tx_q->tx_tail_addr, tx_q->queue_index);
6737  
6738  	stmmac_start_tx_dma(priv, queue);
6739  
6740  	spin_lock_irqsave(&ch->lock, flags);
6741  	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6742  	spin_unlock_irqrestore(&ch->lock, flags);
6743  }
6744  
6745  void stmmac_xdp_release(struct net_device *dev)
6746  {
6747  	struct stmmac_priv *priv = netdev_priv(dev);
6748  	u32 chan;
6749  
6750  	/* Ensure tx function is not running */
6751  	netif_tx_disable(dev);
6752  
6753  	/* Disable NAPI process */
6754  	stmmac_disable_all_queues(priv);
6755  
6756  	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6757  		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6758  
6759  	/* Free the IRQ lines */
6760  	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6761  
6762  	/* Stop TX/RX DMA channels */
6763  	stmmac_stop_all_dma(priv);
6764  
6765  	/* Release and free the Rx/Tx resources */
6766  	free_dma_desc_resources(priv, &priv->dma_conf);
6767  
6768  	/* Disable the MAC Rx/Tx */
6769  	stmmac_mac_set(priv, priv->ioaddr, false);
6770  
6771  	/* set trans_start so we don't get spurious
6772  	 * watchdogs during reset
6773  	 */
6774  	netif_trans_update(dev);
6775  	netif_carrier_off(dev);
6776  }
6777  
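/* Reopen the data path with freshly allocated descriptor rings. This mirrors
 * the DMA/IRQ setup done in stmmac_open() and is meant to pair with
 * stmmac_xdp_release() when an XDP program or XSK pool change requires the
 * rings to be rebuilt.
 */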
6778  int stmmac_xdp_open(struct net_device *dev)
6779  {
6780  	struct stmmac_priv *priv = netdev_priv(dev);
6781  	u32 rx_cnt = priv->plat->rx_queues_to_use;
6782  	u32 tx_cnt = priv->plat->tx_queues_to_use;
6783  	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6784  	struct stmmac_rx_queue *rx_q;
6785  	struct stmmac_tx_queue *tx_q;
6786  	u32 buf_size;
6787  	bool sph_en;
6788  	u32 chan;
6789  	int ret;
6790  
6791  	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6792  	if (ret < 0) {
6793  		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6794  			   __func__);
6795  		goto dma_desc_error;
6796  	}
6797  
6798  	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6799  	if (ret < 0) {
6800  		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6801  			   __func__);
6802  		goto init_error;
6803  	}
6804  
6805  	stmmac_reset_queues_param(priv);
6806  
6807  	/* DMA CSR Channel configuration */
6808  	for (chan = 0; chan < dma_csr_ch; chan++) {
6809  		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6810  		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6811  	}
6812  
6813  	/* Adjust Split header */
6814  	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6815  
6816  	/* DMA RX Channel Configuration */
6817  	for (chan = 0; chan < rx_cnt; chan++) {
6818  		rx_q = &priv->dma_conf.rx_queue[chan];
6819  
6820  		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6821  				    rx_q->dma_rx_phy, chan);
6822  
6823  		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6824  				     (rx_q->buf_alloc_num *
6825  				      sizeof(struct dma_desc));
6826  		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6827  				       rx_q->rx_tail_addr, chan);
6828  
6829  		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6830  			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6831  			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6832  					      buf_size,
6833  					      rx_q->queue_index);
6834  		} else {
6835  			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6836  					      priv->dma_conf.dma_buf_sz,
6837  					      rx_q->queue_index);
6838  		}
6839  
6840  		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6841  	}
6842  
6843  	/* DMA TX Channel Configuration */
6844  	for (chan = 0; chan < tx_cnt; chan++) {
6845  		tx_q = &priv->dma_conf.tx_queue[chan];
6846  
6847  		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6848  				    tx_q->dma_tx_phy, chan);
6849  
6850  		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6851  		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6852  				       tx_q->tx_tail_addr, chan);
6853  
6854  		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6855  		tx_q->txtimer.function = stmmac_tx_timer;
6856  	}
6857  
6858  	/* Enable the MAC Rx/Tx */
6859  	stmmac_mac_set(priv, priv->ioaddr, true);
6860  
6861  	/* Start Rx & Tx DMA Channels */
6862  	stmmac_start_all_dma(priv);
6863  
6864  	ret = stmmac_request_irq(dev);
6865  	if (ret)
6866  		goto irq_error;
6867  
6868  	/* Enable NAPI process */
6869  	stmmac_enable_all_queues(priv);
6870  	netif_carrier_on(dev);
6871  	netif_tx_start_all_queues(dev);
6872  	stmmac_enable_all_dma_irq(priv);
6873  
6874  	return 0;
6875  
6876  irq_error:
6877  	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6878  		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6879  
6880  	stmmac_hw_teardown(dev);
6881  init_error:
6882  	free_dma_desc_resources(priv, &priv->dma_conf);
6883  dma_desc_error:
6884  	return ret;
6885  }
6886  
6887  int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6888  {
6889  	struct stmmac_priv *priv = netdev_priv(dev);
6890  	struct stmmac_rx_queue *rx_q;
6891  	struct stmmac_tx_queue *tx_q;
6892  	struct stmmac_channel *ch;
6893  
6894  	if (test_bit(STMMAC_DOWN, &priv->state) ||
6895  	    !netif_carrier_ok(priv->dev))
6896  		return -ENETDOWN;
6897  
6898  	if (!stmmac_xdp_is_enabled(priv))
6899  		return -EINVAL;
6900  
6901  	if (queue >= priv->plat->rx_queues_to_use ||
6902  	    queue >= priv->plat->tx_queues_to_use)
6903  		return -EINVAL;
6904  
6905  	rx_q = &priv->dma_conf.rx_queue[queue];
6906  	tx_q = &priv->dma_conf.tx_queue[queue];
6907  	ch = &priv->channel[queue];
6908  
6909  	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6910  		return -EINVAL;
6911  
6912  	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6913  		/* EQoS does not have per-DMA channel SW interrupt,
6914  		 * so we schedule the RX/TX NAPI straight away.
6915  		 */
6916  		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6917  			__napi_schedule(&ch->rxtx_napi);
6918  	}
6919  
6920  	return 0;
6921  }
6922  
6923  static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6924  {
6925  	struct stmmac_priv *priv = netdev_priv(dev);
6926  	u32 tx_cnt = priv->plat->tx_queues_to_use;
6927  	u32 rx_cnt = priv->plat->rx_queues_to_use;
6928  	unsigned int start;
6929  	int q;
6930  
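	/* Per-queue counters are read under u64_stats seqcount retry loops so
	 * that the 64-bit values stay consistent on 32-bit hosts while the
	 * hot path keeps updating them.
	 */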
6931  	for (q = 0; q < tx_cnt; q++) {
6932  		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6933  		u64 tx_packets;
6934  		u64 tx_bytes;
6935  
6936  		do {
6937  			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
6938  			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
6939  		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
6940  		do {
6941  			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
6942  			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
6943  		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
6944  
6945  		stats->tx_packets += tx_packets;
6946  		stats->tx_bytes += tx_bytes;
6947  	}
6948  
6949  	for (q = 0; q < rx_cnt; q++) {
6950  		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6951  		u64 rx_packets;
6952  		u64 rx_bytes;
6953  
6954  		do {
6955  			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
6956  			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
6957  			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
6958  		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
6959  
6960  		stats->rx_packets += rx_packets;
6961  		stats->rx_bytes += rx_bytes;
6962  	}
6963  
6964  	stats->rx_dropped = priv->xstats.rx_dropped;
6965  	stats->rx_errors = priv->xstats.rx_errors;
6966  	stats->tx_dropped = priv->xstats.tx_dropped;
6967  	stats->tx_errors = priv->xstats.tx_errors;
6968  	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6969  	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6970  	stats->rx_length_errors = priv->xstats.rx_length;
6971  	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6972  	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6973  	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6974  }
6975  
6976  static const struct net_device_ops stmmac_netdev_ops = {
6977  	.ndo_open = stmmac_open,
6978  	.ndo_start_xmit = stmmac_xmit,
6979  	.ndo_stop = stmmac_release,
6980  	.ndo_change_mtu = stmmac_change_mtu,
6981  	.ndo_fix_features = stmmac_fix_features,
6982  	.ndo_set_features = stmmac_set_features,
6983  	.ndo_set_rx_mode = stmmac_set_rx_mode,
6984  	.ndo_tx_timeout = stmmac_tx_timeout,
6985  	.ndo_eth_ioctl = stmmac_ioctl,
6986  	.ndo_get_stats64 = stmmac_get_stats64,
6987  	.ndo_setup_tc = stmmac_setup_tc,
6988  	.ndo_select_queue = stmmac_select_queue,
6989  	.ndo_set_mac_address = stmmac_set_mac_address,
6990  	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6991  	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6992  	.ndo_bpf = stmmac_bpf,
6993  	.ndo_xdp_xmit = stmmac_xdp_xmit,
6994  	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6995  };
6996  
6997  static void stmmac_reset_subtask(struct stmmac_priv *priv)
6998  {
6999  	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7000  		return;
7001  	if (test_bit(STMMAC_DOWN, &priv->state))
7002  		return;
7003  
7004  	netdev_err(priv->dev, "Reset adapter.\n");
7005  
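	/* Recover by running a full dev_close()/dev_open() cycle under RTNL.
	 * STMMAC_RESETING serializes concurrent reset requests and
	 * STMMAC_DOWN marks the interface as being torn down meanwhile.
	 */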
7006  	rtnl_lock();
7007  	netif_trans_update(priv->dev);
7008  	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7009  		usleep_range(1000, 2000);
7010  
7011  	set_bit(STMMAC_DOWN, &priv->state);
7012  	dev_close(priv->dev);
7013  	dev_open(priv->dev, NULL);
7014  	clear_bit(STMMAC_DOWN, &priv->state);
7015  	clear_bit(STMMAC_RESETING, &priv->state);
7016  	rtnl_unlock();
7017  }
7018  
7019  static void stmmac_service_task(struct work_struct *work)
7020  {
7021  	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7022  			service_task);
7023  
7024  	stmmac_reset_subtask(priv);
7025  	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7026  }
7027  
7028  /**
7029   *  stmmac_hw_init - Init the MAC device
7030   *  @priv: driver private structure
7031   *  Description: this function is to configure the MAC device according to
7032   *  some platform parameters or the HW capability register. It prepares the
7033   *  driver to use either ring or chain modes and to setup either enhanced or
7034   *  normal descriptors.
7035   */
7036  static int stmmac_hw_init(struct stmmac_priv *priv)
7037  {
7038  	int ret;
7039  
7040  	/* dwmac-sun8i only works in chain mode */
7041  	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7042  		chain_mode = 1;
7043  	priv->chain_mode = chain_mode;
7044  
7045  	/* Initialize HW Interface */
7046  	ret = stmmac_hwif_init(priv);
7047  	if (ret)
7048  		return ret;
7049  
7050  	/* Get the HW capability (GMAC cores newer than 3.50a) */
7051  	priv->hw_cap_support = stmmac_get_hw_features(priv);
7052  	if (priv->hw_cap_support) {
7053  		dev_info(priv->device, "DMA HW capability register supported\n");
7054  
7055  		/* We can override some gmac/dma configuration fields: e.g.
7056  		 * enh_desc, tx_coe (e.g. that are passed through the
7057  		 * platform) with the values from the HW capability
7058  		 * register (if supported).
7059  		 */
7060  		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7061  		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7062  				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7063  		priv->hw->pmt = priv->plat->pmt;
7064  		if (priv->dma_cap.hash_tb_sz) {
7065  			priv->hw->multicast_filter_bins =
7066  					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7067  			priv->hw->mcast_bits_log2 =
7068  					ilog2(priv->hw->multicast_filter_bins);
7069  		}
7070  
7071  		/* TXCOE doesn't work in thresh DMA mode */
7072  		if (priv->plat->force_thresh_dma_mode)
7073  			priv->plat->tx_coe = 0;
7074  		else
7075  			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7076  
7077  		/* In case of GMAC4 rx_coe is from HW cap register. */
7078  		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7079  
7080  		if (priv->dma_cap.rx_coe_type2)
7081  			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7082  		else if (priv->dma_cap.rx_coe_type1)
7083  			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7084  
7085  	} else {
7086  		dev_info(priv->device, "No HW DMA feature register supported\n");
7087  	}
7088  
7089  	if (priv->plat->rx_coe) {
7090  		priv->hw->rx_csum = priv->plat->rx_coe;
7091  		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7092  		if (priv->synopsys_id < DWMAC_CORE_4_00)
7093  			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7094  	}
7095  	if (priv->plat->tx_coe)
7096  		dev_info(priv->device, "TX Checksum insertion supported\n");
7097  
7098  	if (priv->plat->pmt) {
7099  		dev_info(priv->device, "Wake-Up On Lan supported\n");
7100  		device_set_wakeup_capable(priv->device, 1);
7101  	}
7102  
7103  	if (priv->dma_cap.tsoen)
7104  		dev_info(priv->device, "TSO supported\n");
7105  
7106  	if (priv->dma_cap.number_rx_queues &&
7107  	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7108  		dev_warn(priv->device,
7109  			 "Number of Rx queues (%u) exceeds dma capability\n",
7110  			 priv->plat->rx_queues_to_use);
7111  		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7112  	}
7113  	if (priv->dma_cap.number_tx_queues &&
7114  	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7115  		dev_warn(priv->device,
7116  			 "Number of Tx queues (%u) exceeds dma capability\n",
7117  			 priv->plat->tx_queues_to_use);
7118  		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7119  	}
7120  
7121  	if (priv->dma_cap.rx_fifo_size &&
7122  	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7123  		dev_warn(priv->device,
7124  			 "Rx FIFO size (%u) exceeds dma capability\n",
7125  			 priv->plat->rx_fifo_size);
7126  		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7127  	}
7128  	if (priv->dma_cap.tx_fifo_size &&
7129  	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7130  		dev_warn(priv->device,
7131  			 "Tx FIFO size (%u) exceeds dma capability\n",
7132  			 priv->plat->tx_fifo_size);
7133  		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7134  	}
7135  
7136  	priv->hw->vlan_fail_q_en =
7137  		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7138  	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7139  
7140  	/* Run HW quirks, if any */
7141  	if (priv->hwif_quirks) {
7142  		ret = priv->hwif_quirks(priv);
7143  		if (ret)
7144  			return ret;
7145  	}
7146  
7147  	/* Rx Watchdog is available in cores newer than 3.40.
7148  	 * In some cases, for example on buggy HW, this feature
7149  	 * has to be disabled; this can be done by passing the
7150  	 * riwt_off field from the platform.
7151  	 */
7152  	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7153  	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7154  		priv->use_riwt = 1;
7155  		dev_info(priv->device,
7156  			 "Enable RX Mitigation via HW Watchdog Timer\n");
7157  	}
7158  
7159  	return 0;
7160  }
7161  
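/* Register up to three NAPI contexts per channel: rx_napi and tx_napi for the
 * normal datapath, plus a combined rxtx_napi (added only when the channel has
 * both an RX and a TX queue) that the XSK zero-copy wakeup path schedules.
 */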
7162  static void stmmac_napi_add(struct net_device *dev)
7163  {
7164  	struct stmmac_priv *priv = netdev_priv(dev);
7165  	u32 queue, maxq;
7166  
7167  	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7168  
7169  	for (queue = 0; queue < maxq; queue++) {
7170  		struct stmmac_channel *ch = &priv->channel[queue];
7171  
7172  		ch->priv_data = priv;
7173  		ch->index = queue;
7174  		spin_lock_init(&ch->lock);
7175  
7176  		if (queue < priv->plat->rx_queues_to_use) {
7177  			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7178  		}
7179  		if (queue < priv->plat->tx_queues_to_use) {
7180  			netif_napi_add_tx(dev, &ch->tx_napi,
7181  					  stmmac_napi_poll_tx);
7182  		}
7183  		if (queue < priv->plat->rx_queues_to_use &&
7184  		    queue < priv->plat->tx_queues_to_use) {
7185  			netif_napi_add(dev, &ch->rxtx_napi,
7186  				       stmmac_napi_poll_rxtx);
7187  		}
7188  	}
7189  }
7190  
7191  static void stmmac_napi_del(struct net_device *dev)
7192  {
7193  	struct stmmac_priv *priv = netdev_priv(dev);
7194  	u32 queue, maxq;
7195  
7196  	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7197  
7198  	for (queue = 0; queue < maxq; queue++) {
7199  		struct stmmac_channel *ch = &priv->channel[queue];
7200  
7201  		if (queue < priv->plat->rx_queues_to_use)
7202  			netif_napi_del(&ch->rx_napi);
7203  		if (queue < priv->plat->tx_queues_to_use)
7204  			netif_napi_del(&ch->tx_napi);
7205  		if (queue < priv->plat->rx_queues_to_use &&
7206  		    queue < priv->plat->tx_queues_to_use) {
7207  			netif_napi_del(&ch->rxtx_napi);
7208  		}
7209  	}
7210  }
7211  
7212  int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7213  {
7214  	struct stmmac_priv *priv = netdev_priv(dev);
7215  	int ret = 0, i;
7216  	int max_speed;
7217  
7218  	if (netif_running(dev))
7219  		stmmac_release(dev);
7220  
7221  	stmmac_napi_del(dev);
7222  
7223  	priv->plat->rx_queues_to_use = rx_cnt;
7224  	priv->plat->tx_queues_to_use = tx_cnt;
7225  	if (!netif_is_rxfh_configured(dev))
7226  		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7227  			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7228  									rx_cnt);
7229  
7230  	stmmac_mac_phylink_get_caps(priv);
7231  
7232  	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
7233  
7234  	max_speed = priv->plat->max_speed;
7235  	if (max_speed)
7236  		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
7237  
7238  	stmmac_napi_add(dev);
7239  
7240  	if (netif_running(dev))
7241  		ret = stmmac_open(dev);
7242  
7243  	return ret;
7244  }
7245  
7246  int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7247  {
7248  	struct stmmac_priv *priv = netdev_priv(dev);
7249  	int ret = 0;
7250  
7251  	if (netif_running(dev))
7252  		stmmac_release(dev);
7253  
7254  	priv->dma_conf.dma_rx_size = rx_size;
7255  	priv->dma_conf.dma_tx_size = tx_size;
7256  
7257  	if (netif_running(dev))
7258  		ret = stmmac_open(dev);
7259  
7260  	return ret;
7261  }
7262  
7263  #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
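/* Link-partner FPE handshake worker: polls for up to 20 x 500 ms. It keeps
 * sending verify mPackets while the link partner has not yet reached the ON
 * state and, once both lo_state and lp_state reach ENTERING_ON, programs FPE
 * in hardware and latches both states to ON.
 */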
7264  static void stmmac_fpe_lp_task(struct work_struct *work)
7265  {
7266  	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7267  						fpe_task);
7268  	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7269  	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7270  	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7271  	bool *hs_enable = &fpe_cfg->hs_enable;
7272  	bool *enable = &fpe_cfg->enable;
7273  	int retries = 20;
7274  
7275  	while (retries-- > 0) {
7276  		/* Bail out immediately if FPE handshake is OFF */
7277  		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7278  			break;
7279  
7280  		if (*lo_state == FPE_STATE_ENTERING_ON &&
7281  		    *lp_state == FPE_STATE_ENTERING_ON) {
7282  			stmmac_fpe_configure(priv, priv->ioaddr,
7283  					     fpe_cfg,
7284  					     priv->plat->tx_queues_to_use,
7285  					     priv->plat->rx_queues_to_use,
7286  					     *enable);
7287  
7288  			netdev_info(priv->dev, "configured FPE\n");
7289  
7290  			*lo_state = FPE_STATE_ON;
7291  			*lp_state = FPE_STATE_ON;
7292  			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7293  			break;
7294  		}
7295  
7296  		if ((*lo_state == FPE_STATE_CAPABLE ||
7297  		     *lo_state == FPE_STATE_ENTERING_ON) &&
7298  		     *lp_state != FPE_STATE_ON) {
7299  			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7300  				    *lo_state, *lp_state);
7301  			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7302  						fpe_cfg,
7303  						MPACKET_VERIFY);
7304  		}
7305  		/* Sleep then retry */
7306  		msleep(500);
7307  	}
7308  
7309  	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7310  }
7311  
7312  void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7313  {
7314  	if (priv->plat->fpe_cfg->hs_enable != enable) {
7315  		if (enable) {
7316  			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7317  						priv->plat->fpe_cfg,
7318  						MPACKET_VERIFY);
7319  		} else {
7320  			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7321  			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7322  		}
7323  
7324  		priv->plat->fpe_cfg->hs_enable = enable;
7325  	}
7326  }
7327  
7328  static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7329  {
7330  	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7331  	struct dma_desc *desc_contains_ts = ctx->desc;
7332  	struct stmmac_priv *priv = ctx->priv;
7333  	struct dma_desc *ndesc = ctx->ndesc;
7334  	struct dma_desc *desc = ctx->desc;
7335  	u64 ns = 0;
7336  
7337  	if (!priv->hwts_rx_en)
7338  		return -ENODATA;
7339  
7340  	/* For GMAC4, the valid timestamp is from CTX next desc. */
7341  	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7342  		desc_contains_ts = ndesc;
7343  
7344  	/* Check if timestamp is available */
7345  	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7346  		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7347  		ns -= priv->plat->cdc_error_adj;
7348  		*timestamp = ns_to_ktime(ns);
7349  		return 0;
7350  	}
7351  
7352  	return -ENODATA;
7353  }
7354  
7355  static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7356  	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7357  };
7358  
7359  /**
7360   * stmmac_dvr_probe
7361   * @device: device pointer
7362   * @plat_dat: platform data pointer
7363   * @res: stmmac resource pointer
7364   * Description: this is the main probe function used to
7365   * call alloc_etherdev and allocate the priv structure.
7366   * Return:
7367   * returns 0 on success, otherwise errno.
7368   */
7369  int stmmac_dvr_probe(struct device *device,
7370  		     struct plat_stmmacenet_data *plat_dat,
7371  		     struct stmmac_resources *res)
7372  {
7373  	struct net_device *ndev = NULL;
7374  	struct stmmac_priv *priv;
7375  	u32 rxq;
7376  	int i, ret = 0;
7377  
7378  	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7379  				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7380  	if (!ndev)
7381  		return -ENOMEM;
7382  
7383  	SET_NETDEV_DEV(ndev, device);
7384  
7385  	priv = netdev_priv(ndev);
7386  	priv->device = device;
7387  	priv->dev = ndev;
7388  
7389  	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7390  		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7391  	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7392  		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7393  		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7394  	}
7395  
7396  	priv->xstats.pcpu_stats =
7397  		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7398  	if (!priv->xstats.pcpu_stats)
7399  		return -ENOMEM;
7400  
7401  	stmmac_set_ethtool_ops(ndev);
7402  	priv->pause = pause;
7403  	priv->plat = plat_dat;
7404  	priv->ioaddr = res->addr;
7405  	priv->dev->base_addr = (unsigned long)res->addr;
7406  	priv->plat->dma_cfg->multi_msi_en =
7407  		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7408  
7409  	priv->dev->irq = res->irq;
7410  	priv->wol_irq = res->wol_irq;
7411  	priv->lpi_irq = res->lpi_irq;
7412  	priv->sfty_ce_irq = res->sfty_ce_irq;
7413  	priv->sfty_ue_irq = res->sfty_ue_irq;
7414  	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7415  		priv->rx_irq[i] = res->rx_irq[i];
7416  	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7417  		priv->tx_irq[i] = res->tx_irq[i];
7418  
7419  	if (!is_zero_ether_addr(res->mac))
7420  		eth_hw_addr_set(priv->dev, res->mac);
7421  
7422  	dev_set_drvdata(device, priv->dev);
7423  
7424  	/* Verify driver arguments */
7425  	stmmac_verify_args();
7426  
7427  	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7428  	if (!priv->af_xdp_zc_qps)
7429  		return -ENOMEM;
7430  
7431  	/* Allocate workqueue */
7432  	priv->wq = create_singlethread_workqueue("stmmac_wq");
7433  	if (!priv->wq) {
7434  		dev_err(priv->device, "failed to create workqueue\n");
7435  		ret = -ENOMEM;
7436  		goto error_wq_init;
7437  	}
7438  
7439  	INIT_WORK(&priv->service_task, stmmac_service_task);
7440  
7441  	/* Initialize Link Partner FPE workqueue */
7442  	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7443  
7444  	/* Override with kernel parameters if supplied XXX CRS XXX
7445  	 * this needs to have multiple instances
7446  	 */
7447  	if ((phyaddr >= 0) && (phyaddr <= 31))
7448  		priv->plat->phy_addr = phyaddr;
7449  
7450  	if (priv->plat->stmmac_rst) {
7451  		ret = reset_control_assert(priv->plat->stmmac_rst);
7452  		reset_control_deassert(priv->plat->stmmac_rst);
7453  		/* Some reset controllers have only reset callback instead of
7454  		 * assert + deassert callbacks pair.
7455  		 */
7456  		if (ret == -ENOTSUPP)
7457  			reset_control_reset(priv->plat->stmmac_rst);
7458  	}
7459  
7460  	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7461  	if (ret == -ENOTSUPP)
7462  		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7463  			ERR_PTR(ret));
7464  
7465  	/* Wait a bit for the reset to take effect */
7466  	udelay(10);
7467  
7468  	/* Init MAC and get the capabilities */
7469  	ret = stmmac_hw_init(priv);
7470  	if (ret)
7471  		goto error_hw_init;
7472  
7473  	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7474  	 */
7475  	if (priv->synopsys_id < DWMAC_CORE_5_20)
7476  		priv->plat->dma_cfg->dche = false;
7477  
7478  	stmmac_check_ether_addr(priv);
7479  
7480  	ndev->netdev_ops = &stmmac_netdev_ops;
7481  
7482  	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7483  
7484  	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7485  			    NETIF_F_RXCSUM;
7486  	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7487  			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7488  
7489  	ret = stmmac_tc_init(priv, priv);
7490  	if (!ret) {
7491  		ndev->hw_features |= NETIF_F_HW_TC;
7492  	}
7493  
7494  	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7495  		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7496  		if (priv->plat->has_gmac4)
7497  			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7498  		priv->tso = true;
7499  		dev_info(priv->device, "TSO feature enabled\n");
7500  	}
7501  
7502  	if (priv->dma_cap.sphen &&
7503  	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7504  		ndev->hw_features |= NETIF_F_GRO;
7505  		priv->sph_cap = true;
7506  		priv->sph = priv->sph_cap;
7507  		dev_info(priv->device, "SPH feature enabled\n");
7508  	}
7509  
7510  	/* Ideally our host DMA address width is the same as for the
7511  	 * device. However, it may differ and then we have to use our
7512  	 * host DMA width for allocation and the device DMA width for
7513  	 * register handling.
7514  	 */
7515  	if (priv->plat->host_dma_width)
7516  		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7517  	else
7518  		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7519  
7520  	if (priv->dma_cap.host_dma_width) {
7521  		ret = dma_set_mask_and_coherent(device,
7522  				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7523  		if (!ret) {
7524  			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7525  				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7526  
7527  			/*
7528  			 * If more than 32 bits can be addressed, make sure to
7529  			 * enable enhanced addressing mode.
7530  			 */
7531  			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7532  				priv->plat->dma_cfg->eame = true;
7533  		} else {
7534  			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7535  			if (ret) {
7536  				dev_err(priv->device, "Failed to set DMA Mask\n");
7537  				goto error_hw_init;
7538  			}
7539  
7540  			priv->dma_cap.host_dma_width = 32;
7541  		}
7542  	}
7543  
7544  	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7545  	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7546  #ifdef STMMAC_VLAN_TAG_USED
7547  	/* Both mac100 and gmac support receive VLAN tag detection */
7548  	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7549  	if (priv->dma_cap.vlhash) {
7550  		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7551  		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7552  	}
7553  	if (priv->dma_cap.vlins) {
7554  		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7555  		if (priv->dma_cap.dvlan)
7556  			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7557  	}
7558  #endif
7559  	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7560  
7561  	priv->xstats.threshold = tc;
7562  
7563  	/* Initialize RSS */
7564  	rxq = priv->plat->rx_queues_to_use;
7565  	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7566  	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7567  		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7568  
7569  	if (priv->dma_cap.rssen && priv->plat->rss_en)
7570  		ndev->features |= NETIF_F_RXHASH;
7571  
7572  	ndev->vlan_features |= ndev->features;
7573  	/* TSO doesn't work on VLANs yet */
7574  	ndev->vlan_features &= ~NETIF_F_TSO;
7575  
7576  	/* MTU range: 46 - hw-specific max */
7577  	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7578  	if (priv->plat->has_xgmac)
7579  		ndev->max_mtu = XGMAC_JUMBO_LEN;
7580  	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7581  		ndev->max_mtu = JUMBO_LEN;
7582  	else
7583  		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7584  	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7585  	 * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
7586  	 */
7587  	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7588  	    (priv->plat->maxmtu >= ndev->min_mtu))
7589  		ndev->max_mtu = priv->plat->maxmtu;
7590  	else if (priv->plat->maxmtu < ndev->min_mtu)
7591  		dev_warn(priv->device,
7592  			 "%s: warning: maxmtu having invalid value (%d)\n",
7593  			 __func__, priv->plat->maxmtu);
7594  
7595  	if (flow_ctrl)
7596  		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7597  
7598  	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7599  
7600  	/* Setup channels NAPI */
7601  	stmmac_napi_add(ndev);
7602  
7603  	mutex_init(&priv->lock);
7604  
7605  	/* If a specific clk_csr value is passed from the platform
7606  	 * this means that the CSR Clock Range selection cannot be
7607  	 * changed at run-time and it is fixed. Otherwise the driver will try to
7608  	 * set the MDC clock dynamically according to the actual csr
7609  	 * clock input.
7610  	 */
7611  	if (priv->plat->clk_csr >= 0)
7612  		priv->clk_csr = priv->plat->clk_csr;
7613  	else
7614  		stmmac_clk_csr_set(priv);
7615  
7616  	stmmac_check_pcs_mode(priv);
7617  
7618  	pm_runtime_get_noresume(device);
7619  	pm_runtime_set_active(device);
7620  	if (!pm_runtime_enabled(device))
7621  		pm_runtime_enable(device);
7622  
7623  	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7624  	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7625  		/* MDIO bus Registration */
7626  		ret = stmmac_mdio_register(ndev);
7627  		if (ret < 0) {
7628  			dev_err_probe(priv->device, ret,
7629  				      "%s: MDIO bus (id: %d) registration failed\n",
7630  				      __func__, priv->plat->bus_id);
7631  			goto error_mdio_register;
7632  		}
7633  	}
7634  
7635  	if (priv->plat->speed_mode_2500)
7636  		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7637  
7638  	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7639  		ret = stmmac_xpcs_setup(priv->mii);
7640  		if (ret)
7641  			goto error_xpcs_setup;
7642  	}
7643  
7644  	ret = stmmac_phy_setup(priv);
7645  	if (ret) {
7646  		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7647  		goto error_phy_setup;
7648  	}
7649  
7650  	ret = register_netdev(ndev);
7651  	if (ret) {
7652  		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7653  			__func__, ret);
7654  		goto error_netdev_register;
7655  	}
7656  
7657  #ifdef CONFIG_DEBUG_FS
7658  	stmmac_init_fs(ndev);
7659  #endif
7660  
7661  	if (priv->plat->dump_debug_regs)
7662  		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7663  
7664  	/* Let pm_runtime_put() disable the clocks.
7665  	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7666  	 */
7667  	pm_runtime_put(device);
7668  
7669  	return ret;
7670  
7671  error_netdev_register:
7672  	phylink_destroy(priv->phylink);
7673  error_xpcs_setup:
7674  error_phy_setup:
7675  	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7676  	    priv->hw->pcs != STMMAC_PCS_RTBI)
7677  		stmmac_mdio_unregister(ndev);
7678  error_mdio_register:
7679  	stmmac_napi_del(ndev);
7680  error_hw_init:
7681  	destroy_workqueue(priv->wq);
7682  error_wq_init:
7683  	bitmap_free(priv->af_xdp_zc_qps);
7684  
7685  	return ret;
7686  }
7687  EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7688  
7689  /**
7690   * stmmac_dvr_remove
7691   * @dev: device pointer
7692   * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7693   * changes the link status, and releases the DMA descriptor rings.
7694   */
7695  void stmmac_dvr_remove(struct device *dev)
7696  {
7697  	struct net_device *ndev = dev_get_drvdata(dev);
7698  	struct stmmac_priv *priv = netdev_priv(ndev);
7699  
7700  	netdev_info(priv->dev, "%s: removing driver", __func__);
7701  
7702  	pm_runtime_get_sync(dev);
7703  
7704  	stmmac_stop_all_dma(priv);
7705  	stmmac_mac_set(priv, priv->ioaddr, false);
7706  	netif_carrier_off(ndev);
7707  	unregister_netdev(ndev);
7708  
7709  #ifdef CONFIG_DEBUG_FS
7710  	stmmac_exit_fs(ndev);
7711  #endif
7712  	phylink_destroy(priv->phylink);
7713  	if (priv->plat->stmmac_rst)
7714  		reset_control_assert(priv->plat->stmmac_rst);
7715  	reset_control_assert(priv->plat->stmmac_ahb_rst);
7716  	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7717  	    priv->hw->pcs != STMMAC_PCS_RTBI)
7718  		stmmac_mdio_unregister(ndev);
7719  	destroy_workqueue(priv->wq);
7720  	mutex_destroy(&priv->lock);
7721  	bitmap_free(priv->af_xdp_zc_qps);
7722  
7723  	pm_runtime_disable(dev);
7724  	pm_runtime_put_noidle(dev);
7725  }
7726  EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7727  
7728  /**
7729   * stmmac_suspend - suspend callback
7730   * @dev: device pointer
7731   * Description: this is the function to suspend the device and it is called
7732   * by the platform driver to stop the network queue, release the resources,
7733   * program the PMT register (for WoL), clean and release driver resources.
7734   */
7735  int stmmac_suspend(struct device *dev)
7736  {
7737  	struct net_device *ndev = dev_get_drvdata(dev);
7738  	struct stmmac_priv *priv = netdev_priv(ndev);
7739  	u32 chan;
7740  
7741  	if (!ndev || !netif_running(ndev))
7742  		return 0;
7743  
7744  	mutex_lock(&priv->lock);
7745  
7746  	netif_device_detach(ndev);
7747  
7748  	stmmac_disable_all_queues(priv);
7749  
7750  	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7751  		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7752  
7753  	if (priv->eee_enabled) {
7754  		priv->tx_path_in_lpi_mode = false;
7755  		del_timer_sync(&priv->eee_ctrl_timer);
7756  	}
7757  
7758  	/* Stop TX/RX DMA */
7759  	stmmac_stop_all_dma(priv);
7760  
7761  	if (priv->plat->serdes_powerdown)
7762  		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7763  
7764  	/* Enable Power down mode by programming the PMT regs */
7765  	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7766  		stmmac_pmt(priv, priv->hw, priv->wolopts);
7767  		priv->irq_wake = 1;
7768  	} else {
7769  		stmmac_mac_set(priv, priv->ioaddr, false);
7770  		pinctrl_pm_select_sleep_state(priv->device);
7771  	}
7772  
7773  	mutex_unlock(&priv->lock);
7774  
7775  	rtnl_lock();
7776  	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7777  		phylink_suspend(priv->phylink, true);
7778  	} else {
7779  		if (device_may_wakeup(priv->device))
7780  			phylink_speed_down(priv->phylink, false);
7781  		phylink_suspend(priv->phylink, false);
7782  	}
7783  	rtnl_unlock();
7784  
7785  	if (priv->dma_cap.fpesel) {
7786  		/* Disable FPE */
7787  		stmmac_fpe_configure(priv, priv->ioaddr,
7788  				     priv->plat->fpe_cfg,
7789  				     priv->plat->tx_queues_to_use,
7790  				     priv->plat->rx_queues_to_use, false);
7791  
7792  		stmmac_fpe_handshake(priv, false);
7793  		stmmac_fpe_stop_wq(priv);
7794  	}
7795  
7796  	priv->speed = SPEED_UNKNOWN;
7797  	return 0;
7798  }
7799  EXPORT_SYMBOL_GPL(stmmac_suspend);
7800  
7801  static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7802  {
7803  	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7804  
7805  	rx_q->cur_rx = 0;
7806  	rx_q->dirty_rx = 0;
7807  }
7808  
7809  static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7810  {
7811  	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7812  
7813  	tx_q->cur_tx = 0;
7814  	tx_q->dirty_tx = 0;
7815  	tx_q->mss = 0;
7816  
7817  	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7818  }
7819  
7820  /**
7821   * stmmac_reset_queues_param - reset queue parameters
7822   * @priv: device pointer
7823   */
7824  static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7825  {
7826  	u32 rx_cnt = priv->plat->rx_queues_to_use;
7827  	u32 tx_cnt = priv->plat->tx_queues_to_use;
7828  	u32 queue;
7829  
7830  	for (queue = 0; queue < rx_cnt; queue++)
7831  		stmmac_reset_rx_queue(priv, queue);
7832  
7833  	for (queue = 0; queue < tx_cnt; queue++)
7834  		stmmac_reset_tx_queue(priv, queue);
7835  }
7836  
7837  /**
7838   * stmmac_resume - resume callback
7839   * @dev: device pointer
7840   * Description: when resume this function is invoked to setup the DMA and CORE
7841   * in a usable state.
7842   */
7843  int stmmac_resume(struct device *dev)
7844  {
7845  	struct net_device *ndev = dev_get_drvdata(dev);
7846  	struct stmmac_priv *priv = netdev_priv(ndev);
7847  	int ret;
7848  
7849  	if (!netif_running(ndev))
7850  		return 0;
7851  
7852  	/* The Power Down bit in the PM register is cleared
7853  	 * automatically as soon as a magic packet or a Wake-up frame
7854  	 * is received. Even so, it's better to clear this bit manually
7855  	 * because it can cause problems when resuming from other
7856  	 * devices (e.g. a serial console).
7857  	 */
7858  	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7859  		mutex_lock(&priv->lock);
7860  		stmmac_pmt(priv, priv->hw, 0);
7861  		mutex_unlock(&priv->lock);
7862  		priv->irq_wake = 0;
7863  	} else {
7864  		pinctrl_pm_select_default_state(priv->device);
7865  		/* reset the phy so that it's ready */
7866  		if (priv->mii)
7867  			stmmac_mdio_reset(priv->mii);
7868  	}
7869  
7870  	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7871  	    priv->plat->serdes_powerup) {
7872  		ret = priv->plat->serdes_powerup(ndev,
7873  						 priv->plat->bsp_priv);
7874  
7875  		if (ret < 0)
7876  			return ret;
7877  	}
7878  
7879  	rtnl_lock();
7880  	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7881  		phylink_resume(priv->phylink);
7882  	} else {
7883  		phylink_resume(priv->phylink);
7884  		if (device_may_wakeup(priv->device))
7885  			phylink_speed_up(priv->phylink);
7886  	}
7887  	rtnl_unlock();
7888  
7889  	rtnl_lock();
7890  	mutex_lock(&priv->lock);
7891  
7892  	stmmac_reset_queues_param(priv);
7893  
7894  	stmmac_free_tx_skbufs(priv);
7895  	stmmac_clear_descriptors(priv, &priv->dma_conf);
7896  
7897  	stmmac_hw_setup(ndev, false);
7898  	stmmac_init_coalesce(priv);
7899  	stmmac_set_rx_mode(ndev);
7900  
7901  	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7902  
7903  	stmmac_enable_all_queues(priv);
7904  	stmmac_enable_all_dma_irq(priv);
7905  
7906  	mutex_unlock(&priv->lock);
7907  	rtnl_unlock();
7908  
7909  	netif_device_attach(ndev);
7910  
7911  	return 0;
7912  }
7913  EXPORT_SYMBOL_GPL(stmmac_resume);
7914  
7915  #ifndef MODULE
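/* Parse the legacy "stmmaceth=" boot parameter: a comma-separated list of
 * key:value pairs, e.g. stmmaceth=debug:16,phyaddr:1,watchdog:4000
 * (illustrative values); only the keys handled below are recognized.
 */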
7916  static int __init stmmac_cmdline_opt(char *str)
7917  {
7918  	char *opt;
7919  
7920  	if (!str || !*str)
7921  		return 1;
7922  	while ((opt = strsep(&str, ",")) != NULL) {
7923  		if (!strncmp(opt, "debug:", 6)) {
7924  			if (kstrtoint(opt + 6, 0, &debug))
7925  				goto err;
7926  		} else if (!strncmp(opt, "phyaddr:", 8)) {
7927  			if (kstrtoint(opt + 8, 0, &phyaddr))
7928  				goto err;
7929  		} else if (!strncmp(opt, "buf_sz:", 7)) {
7930  			if (kstrtoint(opt + 7, 0, &buf_sz))
7931  				goto err;
7932  		} else if (!strncmp(opt, "tc:", 3)) {
7933  			if (kstrtoint(opt + 3, 0, &tc))
7934  				goto err;
7935  		} else if (!strncmp(opt, "watchdog:", 9)) {
7936  			if (kstrtoint(opt + 9, 0, &watchdog))
7937  				goto err;
7938  		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7939  			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7940  				goto err;
7941  		} else if (!strncmp(opt, "pause:", 6)) {
7942  			if (kstrtoint(opt + 6, 0, &pause))
7943  				goto err;
7944  		} else if (!strncmp(opt, "eee_timer:", 10)) {
7945  			if (kstrtoint(opt + 10, 0, &eee_timer))
7946  				goto err;
7947  		} else if (!strncmp(opt, "chain_mode:", 11)) {
7948  			if (kstrtoint(opt + 11, 0, &chain_mode))
7949  				goto err;
7950  		}
7951  	}
7952  	return 1;
7953  
7954  err:
7955  	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7956  	return 1;
7957  }
7958  
7959  __setup("stmmaceth=", stmmac_cmdline_opt);
7960  #endif /* MODULE */
7961  
7962  static int __init stmmac_init(void)
7963  {
7964  #ifdef CONFIG_DEBUG_FS
7965  	/* Create debugfs main directory if it doesn't exist yet */
7966  	if (!stmmac_fs_dir)
7967  		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7968  	register_netdevice_notifier(&stmmac_notifier);
7969  #endif
7970  
7971  	return 0;
7972  }
7973  
7974  static void __exit stmmac_exit(void)
7975  {
7976  #ifdef CONFIG_DEBUG_FS
7977  	unregister_netdevice_notifier(&stmmac_notifier);
7978  	debugfs_remove_recursive(stmmac_fs_dir);
7979  #endif
7980  }
7981  
7982  module_init(stmmac_init)
7983  module_exit(stmmac_exit)
7984  
7985  MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7986  MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7987  MODULE_LICENSE("GPL");
7988