1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56 
57 #define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
59 
60 /* Module parameters */
61 #define TX_TIMEO	5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65 
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69 
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73 
74 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
76 
77 static int flow_ctrl = FLOW_AUTO;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80 
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84 
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89 
90 #define	DEFAULT_BUFSIZE	1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94 
95 #define	STMMAC_RX_COPYBREAK	256
96 
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
99 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100 
101 #define STMMAC_DEFAULT_LPI_TIMER	1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106 
/* By default the driver uses ring mode to manage tx and rx descriptors,
 * but the user can force use of chain mode instead of ring mode.
 */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113 
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115 
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120 
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122 
123 /**
124  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130 	if (unlikely(watchdog < 0))
131 		watchdog = TX_TIMEO;
132 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133 		buf_sz = DEFAULT_BUFSIZE;
134 	if (unlikely(flow_ctrl > 1))
135 		flow_ctrl = FLOW_AUTO;
136 	else if (likely(flow_ctrl < 0))
137 		flow_ctrl = FLOW_OFF;
138 	if (unlikely((pause < 0) || (pause > 0xffff)))
139 		pause = PAUSE_TIME;
140 	if (eee_timer < 0)
141 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143 
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153 	u32 queue;
154 
155 	for (queue = 0; queue < maxq; queue++) {
156 		struct stmmac_channel *ch = &priv->channel[queue];
157 
158 		if (queue < rx_queues_cnt)
159 			napi_disable(&ch->rx_napi);
160 		if (queue < tx_queues_cnt)
161 			napi_disable(&ch->tx_napi);
162 	}
163 }
164 
165 /**
166  * stmmac_enable_all_queues - Enable all queues
167  * @priv: driver private structure
168  */
169 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
170 {
171 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
172 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
173 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
174 	u32 queue;
175 
176 	for (queue = 0; queue < maxq; queue++) {
177 		struct stmmac_channel *ch = &priv->channel[queue];
178 
179 		if (queue < rx_queues_cnt)
180 			napi_enable(&ch->rx_napi);
181 		if (queue < tx_queues_cnt)
182 			napi_enable(&ch->tx_napi);
183 	}
184 }
185 
186 /**
187  * stmmac_stop_all_queues - Stop all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_start_all_queues - Start all queues
201  * @priv: driver private structure
202  */
203 static void stmmac_start_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206 	u32 queue;
207 
208 	for (queue = 0; queue < tx_queues_cnt; queue++)
209 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
210 }
211 
212 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
213 {
214 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
215 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
216 		queue_work(priv->wq, &priv->service_task);
217 }
218 
219 static void stmmac_global_err(struct stmmac_priv *priv)
220 {
221 	netif_carrier_off(priv->dev);
222 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
223 	stmmac_service_event_schedule(priv);
224 }
225 
226 /**
227  * stmmac_clk_csr_set - dynamically set the MDC clock
228  * @priv: driver private structure
229  * Description: this is to dynamically set the MDC clock according to the csr
230  * clock input.
231  * Note:
 *	If a specific clk_csr value is passed from the platform,
 *	the CSR Clock Range selection cannot be changed at run-time
 *	and is fixed (as reported in the driver documentation).
 *	Otherwise, the driver tries to set the MDC clock dynamically
 *	according to the actual clock input.
237  */
238 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
239 {
240 	u32 clk_rate;
241 
242 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
243 
	/* The platform-provided default clk_csr is assumed valid
	 * except for the cases handled below. For clock rates above
	 * the ranges specified by IEEE 802.3 we cannot estimate the
	 * proper divider, since the frequency of clk_csr_i is unknown,
	 * so the default divider is left unchanged.
	 */
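	/* For example, a 125 MHz csr clock falls in the 100-150 MHz range
	 * (STMMAC_CSR_100_150M, i.e. MDC = csr_clk / 62, roughly 2 MHz),
	 * which keeps MDC below the 2.5 MHz limit of IEEE 802.3.
	 */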
251 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
252 		if (clk_rate < CSR_F_35M)
253 			priv->clk_csr = STMMAC_CSR_20_35M;
254 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
255 			priv->clk_csr = STMMAC_CSR_35_60M;
256 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
257 			priv->clk_csr = STMMAC_CSR_60_100M;
258 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
259 			priv->clk_csr = STMMAC_CSR_100_150M;
260 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
261 			priv->clk_csr = STMMAC_CSR_150_250M;
262 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
263 			priv->clk_csr = STMMAC_CSR_250_300M;
264 	}
265 
266 	if (priv->plat->has_sun8i) {
267 		if (clk_rate > 160000000)
268 			priv->clk_csr = 0x03;
269 		else if (clk_rate > 80000000)
270 			priv->clk_csr = 0x02;
271 		else if (clk_rate > 40000000)
272 			priv->clk_csr = 0x01;
273 		else
274 			priv->clk_csr = 0;
275 	}
276 
277 	if (priv->plat->has_xgmac) {
278 		if (clk_rate > 400000000)
279 			priv->clk_csr = 0x5;
280 		else if (clk_rate > 350000000)
281 			priv->clk_csr = 0x4;
282 		else if (clk_rate > 300000000)
283 			priv->clk_csr = 0x3;
284 		else if (clk_rate > 250000000)
285 			priv->clk_csr = 0x2;
286 		else if (clk_rate > 150000000)
287 			priv->clk_csr = 0x1;
288 		else
289 			priv->clk_csr = 0x0;
290 	}
291 }
292 
293 static void print_pkt(unsigned char *buf, int len)
294 {
295 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
296 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
297 }
298 
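/**
 * stmmac_tx_avail - Get number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: one ring entry is always kept unused so that cur_tx == dirty_tx
 * unambiguously means "ring empty". For example, assuming DMA_TX_SIZE is 512,
 * cur_tx = 510 and dirty_tx = 5 give 512 - 510 + 5 - 1 = 6 usable entries.
 */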
299 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
300 {
301 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
302 	u32 avail;
303 
304 	if (tx_q->dirty_tx > tx_q->cur_tx)
305 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
306 	else
307 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
308 
309 	return avail;
310 }
311 
312 /**
313  * stmmac_rx_dirty - Get RX queue dirty
314  * @priv: driver private structure
315  * @queue: RX queue index
316  */
317 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
318 {
319 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
320 	u32 dirty;
321 
322 	if (rx_q->dirty_rx <= rx_q->cur_rx)
323 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
324 	else
325 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
326 
327 	return dirty;
328 }
329 
330 /**
331  * stmmac_hw_fix_mac_speed - callback for speed selection
332  * @priv: driver private structure
333  * Description: on some platforms (e.g. ST), some HW system configuration
334  * registers have to be set according to the link speed negotiated.
335  */
336 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
337 {
338 	struct net_device *ndev = priv->dev;
339 	struct phy_device *phydev = ndev->phydev;
340 
341 	if (likely(priv->plat->fix_mac_speed))
342 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
343 }
344 
345 /**
346  * stmmac_enable_eee_mode - check and enter in LPI mode
347  * @priv: driver private structure
 * Description: verify that all TX queues have finished their work and, if
 * so, enter LPI mode for EEE.
350  */
351 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
352 {
353 	u32 tx_cnt = priv->plat->tx_queues_to_use;
354 	u32 queue;
355 
356 	/* check if all TX queues have the work finished */
357 	for (queue = 0; queue < tx_cnt; queue++) {
358 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
359 
360 		if (tx_q->dirty_tx != tx_q->cur_tx)
361 			return; /* still unfinished work */
362 	}
363 
364 	/* Check and enter in LPI mode */
365 	if (!priv->tx_path_in_lpi_mode)
366 		stmmac_set_eee_mode(priv, priv->hw,
367 				priv->plat->en_tx_lpi_clockgating);
368 }
369 
370 /**
371  * stmmac_disable_eee_mode - disable and exit from LPI mode
372  * @priv: driver private structure
 * Description: exit LPI mode and disable EEE when the LPI state is active.
 * This is called from the xmit path.
375  */
376 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
377 {
378 	stmmac_reset_eee_mode(priv, priv->hw);
379 	del_timer_sync(&priv->eee_ctrl_timer);
380 	priv->tx_path_in_lpi_mode = false;
381 }
382 
383 /**
384  * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: pointer to the timer_list embedded in the driver private structure
 * Description:
 *  if there is no data transfer and we are not already in LPI state,
 *  then the MAC transmitter can be moved to LPI state.
389  */
390 static void stmmac_eee_ctrl_timer(struct timer_list *t)
391 {
392 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
393 
394 	stmmac_enable_eee_mode(priv);
395 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
396 }
397 
398 /**
399  * stmmac_eee_init - init EEE
400  * @priv: driver private structure
401  * Description:
402  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
405  */
406 bool stmmac_eee_init(struct stmmac_priv *priv)
407 {
408 	struct net_device *ndev = priv->dev;
409 	int interface = priv->plat->interface;
410 	bool ret = false;
411 
412 	if ((interface != PHY_INTERFACE_MODE_MII) &&
413 	    (interface != PHY_INTERFACE_MODE_GMII) &&
414 	    !phy_interface_mode_is_rgmii(interface))
415 		goto out;
416 
	/* When using the PCS we cannot deal with the PHY registers at this
	 * stage, so we do not support extra features like EEE.
419 	 */
420 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
421 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
422 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
423 		goto out;
424 
425 	/* MAC core supports the EEE feature. */
426 	if (priv->dma_cap.eee) {
427 		int tx_lpi_timer = priv->tx_lpi_timer;
428 
429 		/* Check if the PHY supports EEE */
430 		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE can no longer be supported
			 * at run-time (for example because the link partner
			 * caps have changed): in that case the driver disables
			 * its own timers.
435 			 */
436 			mutex_lock(&priv->lock);
437 			if (priv->eee_active) {
438 				netdev_dbg(priv->dev, "disable EEE\n");
439 				del_timer_sync(&priv->eee_ctrl_timer);
440 				stmmac_set_eee_timer(priv, priv->hw, 0,
441 						tx_lpi_timer);
442 			}
443 			priv->eee_active = 0;
444 			mutex_unlock(&priv->lock);
445 			goto out;
446 		}
447 		/* Activate the EEE and start timers */
448 		mutex_lock(&priv->lock);
449 		if (!priv->eee_active) {
450 			priv->eee_active = 1;
451 			timer_setup(&priv->eee_ctrl_timer,
452 				    stmmac_eee_ctrl_timer, 0);
453 			mod_timer(&priv->eee_ctrl_timer,
454 				  STMMAC_LPI_T(eee_timer));
455 
456 			stmmac_set_eee_timer(priv, priv->hw,
457 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
458 		}
459 		/* Set HW EEE according to the speed */
460 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
461 
462 		ret = true;
463 		mutex_unlock(&priv->lock);
464 
465 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
466 	}
467 out:
468 	return ret;
469 }
470 
471 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
472  * @priv: driver private structure
473  * @p : descriptor pointer
474  * @skb : the socket buffer
475  * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
478  */
479 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
480 				   struct dma_desc *p, struct sk_buff *skb)
481 {
482 	struct skb_shared_hwtstamps shhwtstamp;
483 	u64 ns = 0;
484 
485 	if (!priv->hwts_tx_en)
486 		return;
487 
488 	/* exit if skb doesn't support hw tstamp */
489 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
490 		return;
491 
492 	/* check tx tstamp status */
493 	if (stmmac_get_tx_timestamp_status(priv, p)) {
494 		/* get the valid tstamp */
495 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
496 
497 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
498 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
499 
500 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
501 		/* pass tstamp to stack */
502 		skb_tstamp_tx(skb, &shhwtstamp);
503 	}
506 }
507 
508 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
509  * @priv: driver private structure
510  * @p : descriptor pointer
511  * @np : next descriptor pointer
512  * @skb : the socket buffer
513  * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
516  */
517 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
518 				   struct dma_desc *np, struct sk_buff *skb)
519 {
520 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
521 	struct dma_desc *desc = p;
522 	u64 ns = 0;
523 
524 	if (!priv->hwts_rx_en)
525 		return;
526 	/* For GMAC4, the valid timestamp is from CTX next desc. */
527 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
528 		desc = np;
529 
530 	/* Check if timestamp is available */
531 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
532 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
533 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
534 		shhwtstamp = skb_hwtstamps(skb);
535 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
536 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
537 	} else  {
538 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
539 	}
540 }
541 
542 /**
543  *  stmmac_hwtstamp_set - control hardware timestamping.
544  *  @dev: device pointer.
545  *  @ifr: An IOCTL specific structure, that can contain a pointer to
546  *  a proprietary structure used to pass information to the driver.
547  *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
550  *  Return Value:
551  *  0 on success and an appropriate -ve integer on failure.
552  */
553 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
554 {
555 	struct stmmac_priv *priv = netdev_priv(dev);
556 	struct hwtstamp_config config;
557 	struct timespec64 now;
558 	u64 temp = 0;
559 	u32 ptp_v2 = 0;
560 	u32 tstamp_all = 0;
561 	u32 ptp_over_ipv4_udp = 0;
562 	u32 ptp_over_ipv6_udp = 0;
563 	u32 ptp_over_ethernet = 0;
564 	u32 snap_type_sel = 0;
565 	u32 ts_master_en = 0;
566 	u32 ts_event_en = 0;
567 	u32 sec_inc = 0;
568 	u32 value = 0;
569 	bool xmac;
570 
571 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
572 
573 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
574 		netdev_alert(priv->dev, "No support for HW time stamping\n");
575 		priv->hwts_tx_en = 0;
576 		priv->hwts_rx_en = 0;
577 
578 		return -EOPNOTSUPP;
579 	}
580 
581 	if (copy_from_user(&config, ifr->ifr_data,
582 			   sizeof(config)))
583 		return -EFAULT;
584 
585 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
586 		   __func__, config.flags, config.tx_type, config.rx_filter);
587 
588 	/* reserved for future extensions */
589 	if (config.flags)
590 		return -EINVAL;
591 
592 	if (config.tx_type != HWTSTAMP_TX_OFF &&
593 	    config.tx_type != HWTSTAMP_TX_ON)
594 		return -ERANGE;
595 
596 	if (priv->adv_ts) {
597 		switch (config.rx_filter) {
598 		case HWTSTAMP_FILTER_NONE:
599 			/* time stamp no incoming packet at all */
600 			config.rx_filter = HWTSTAMP_FILTER_NONE;
601 			break;
602 
603 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
604 			/* PTP v1, UDP, any kind of event packet */
605 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
606 			/* 'xmac' hardware can support Sync, Pdelay_Req and
607 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
608 			 * This leaves Delay_Req timestamps out.
609 			 * Enable all events *and* general purpose message
610 			 * timestamping
611 			 */
612 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
613 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
614 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
615 			break;
616 
617 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
618 			/* PTP v1, UDP, Sync packet */
619 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
620 			/* take time stamp for SYNC messages only */
621 			ts_event_en = PTP_TCR_TSEVNTENA;
622 
623 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625 			break;
626 
627 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
628 			/* PTP v1, UDP, Delay_req packet */
629 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
630 			/* take time stamp for Delay_Req messages only */
631 			ts_master_en = PTP_TCR_TSMSTRENA;
632 			ts_event_en = PTP_TCR_TSEVNTENA;
633 
634 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636 			break;
637 
638 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
639 			/* PTP v2, UDP, any kind of event packet */
640 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
641 			ptp_v2 = PTP_TCR_TSVER2ENA;
642 			/* take time stamp for all event messages */
643 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
644 
645 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
646 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
647 			break;
648 
649 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
650 			/* PTP v2, UDP, Sync packet */
651 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
652 			ptp_v2 = PTP_TCR_TSVER2ENA;
653 			/* take time stamp for SYNC messages only */
654 			ts_event_en = PTP_TCR_TSEVNTENA;
655 
656 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
657 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
658 			break;
659 
660 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
661 			/* PTP v2, UDP, Delay_req packet */
662 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
663 			ptp_v2 = PTP_TCR_TSVER2ENA;
664 			/* take time stamp for Delay_Req messages only */
665 			ts_master_en = PTP_TCR_TSMSTRENA;
666 			ts_event_en = PTP_TCR_TSEVNTENA;
667 
668 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
669 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
670 			break;
671 
672 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
674 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
675 			ptp_v2 = PTP_TCR_TSVER2ENA;
676 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			ptp_over_ethernet = PTP_TCR_TSIPENA;
680 			break;
681 
682 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
684 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
685 			ptp_v2 = PTP_TCR_TSVER2ENA;
686 			/* take time stamp for SYNC messages only */
687 			ts_event_en = PTP_TCR_TSEVNTENA;
688 
689 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
690 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
691 			ptp_over_ethernet = PTP_TCR_TSIPENA;
692 			break;
693 
694 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
696 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
697 			ptp_v2 = PTP_TCR_TSVER2ENA;
698 			/* take time stamp for Delay_Req messages only */
699 			ts_master_en = PTP_TCR_TSMSTRENA;
700 			ts_event_en = PTP_TCR_TSEVNTENA;
701 
702 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
703 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
704 			ptp_over_ethernet = PTP_TCR_TSIPENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_NTP_ALL:
708 		case HWTSTAMP_FILTER_ALL:
709 			/* time stamp any incoming packet */
710 			config.rx_filter = HWTSTAMP_FILTER_ALL;
711 			tstamp_all = PTP_TCR_TSENALL;
712 			break;
713 
714 		default:
715 			return -ERANGE;
716 		}
717 	} else {
718 		switch (config.rx_filter) {
719 		case HWTSTAMP_FILTER_NONE:
720 			config.rx_filter = HWTSTAMP_FILTER_NONE;
721 			break;
722 		default:
723 			/* PTP v1, UDP, any kind of event packet */
724 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
725 			break;
726 		}
727 	}
728 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
729 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
730 
731 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
732 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
733 	else {
734 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
735 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
736 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
737 			 ts_master_en | snap_type_sel);
738 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
739 
740 		/* program Sub Second Increment reg */
741 		stmmac_config_sub_second_increment(priv,
742 				priv->ptpaddr, priv->plat->clk_ptp_rate,
743 				xmac, &sec_inc);
744 		temp = div_u64(1000000000ULL, sec_inc);
745 
746 		/* Store sub second increment and flags for later use */
747 		priv->sub_second_inc = sec_inc;
748 		priv->systime_flags = value;
749 
		/* Calculate the default addend. The formula is:
		 * addend = (2^32) / freq_div_ratio,
		 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc).
754 		 */
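		/* For example, assuming a 62.5 MHz clk_ptp_ref and a 20 ns
		 * sub-second increment: addend = (2^32 * 50 MHz) / 62.5 MHz
		 * = 0xCCCCCCCC, so the 32-bit accumulator overflows (and the
		 * system time advances by sec_inc) 50 million times a second.
		 */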
755 		temp = (u64)(temp << 32);
756 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
757 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
758 
759 		/* initialize system time */
760 		ktime_get_real_ts64(&now);
761 
762 		/* lower 32 bits of tv_sec are safe until y2106 */
763 		stmmac_init_systime(priv, priv->ptpaddr,
764 				(u32)now.tv_sec, now.tv_nsec);
765 	}
766 
767 	memcpy(&priv->tstamp_config, &config, sizeof(config));
768 
769 	return copy_to_user(ifr->ifr_data, &config,
770 			    sizeof(config)) ? -EFAULT : 0;
771 }
772 
773 /**
774  *  stmmac_hwtstamp_get - read hardware timestamping.
775  *  @dev: device pointer.
776  *  @ifr: An IOCTL specific structure, that can contain a pointer to
777  *  a proprietary structure used to pass information to the driver.
778  *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
781  */
782 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
783 {
784 	struct stmmac_priv *priv = netdev_priv(dev);
785 	struct hwtstamp_config *config = &priv->tstamp_config;
786 
787 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
788 		return -EOPNOTSUPP;
789 
790 	return copy_to_user(ifr->ifr_data, config,
791 			    sizeof(*config)) ? -EFAULT : 0;
792 }
793 
794 /**
795  * stmmac_init_ptp - init PTP
796  * @priv: driver private structure
797  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
798  * This is done by looking at the HW cap. register.
799  * This function also registers the ptp driver.
800  */
801 static int stmmac_init_ptp(struct stmmac_priv *priv)
802 {
803 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
804 
805 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
806 		return -EOPNOTSUPP;
807 
808 	priv->adv_ts = 0;
809 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
810 	if (xmac && priv->dma_cap.atime_stamp)
811 		priv->adv_ts = 1;
812 	/* Dwmac 3.x core with extend_desc can support adv_ts */
813 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
814 		priv->adv_ts = 1;
815 
816 	if (priv->dma_cap.time_stamp)
817 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
818 
819 	if (priv->adv_ts)
820 		netdev_info(priv->dev,
821 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
822 
823 	priv->hwts_tx_en = 0;
824 	priv->hwts_rx_en = 0;
825 
826 	stmmac_ptp_register(priv);
827 
828 	return 0;
829 }
830 
831 static void stmmac_release_ptp(struct stmmac_priv *priv)
832 {
833 	if (priv->plat->clk_ptp_ref)
834 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
835 	stmmac_ptp_unregister(priv);
836 }
837 
838 /**
839  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated on the link
 *  Description: It is used for configuring the flow control in all queues
842  */
843 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
844 {
845 	u32 tx_cnt = priv->plat->tx_queues_to_use;
846 
847 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
848 			priv->pause, tx_cnt);
849 }
850 
851 /**
852  * stmmac_adjust_link - adjusts the link parameters
853  * @dev: net device structure
854  * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex, this driver can also invoke registered glue-logic.
 * It also invokes the EEE initialization because the link may be switched
 * across different networks (that are EEE capable).
859  */
860 static void stmmac_adjust_link(struct net_device *dev)
861 {
862 	struct stmmac_priv *priv = netdev_priv(dev);
863 	struct phy_device *phydev = dev->phydev;
864 	bool new_state = false;
865 
866 	if (!phydev)
867 		return;
868 
869 	mutex_lock(&priv->lock);
870 
871 	if (phydev->link) {
872 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
873 
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
876 		if (phydev->duplex != priv->oldduplex) {
877 			new_state = true;
878 			if (!phydev->duplex)
879 				ctrl &= ~priv->hw->link.duplex;
880 			else
881 				ctrl |= priv->hw->link.duplex;
882 			priv->oldduplex = phydev->duplex;
883 		}
884 		/* Flow Control operation */
885 		if (phydev->pause)
886 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
887 
888 		if (phydev->speed != priv->speed) {
889 			new_state = true;
890 			ctrl &= ~priv->hw->link.speed_mask;
891 			switch (phydev->speed) {
892 			case SPEED_1000:
893 				ctrl |= priv->hw->link.speed1000;
894 				break;
895 			case SPEED_100:
896 				ctrl |= priv->hw->link.speed100;
897 				break;
898 			case SPEED_10:
899 				ctrl |= priv->hw->link.speed10;
900 				break;
901 			default:
902 				netif_warn(priv, link, priv->dev,
903 					   "broken speed: %d\n", phydev->speed);
904 				phydev->speed = SPEED_UNKNOWN;
905 				break;
906 			}
907 			if (phydev->speed != SPEED_UNKNOWN)
908 				stmmac_hw_fix_mac_speed(priv);
909 			priv->speed = phydev->speed;
910 		}
911 
912 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
913 
914 		if (!priv->oldlink) {
915 			new_state = true;
916 			priv->oldlink = true;
917 		}
918 	} else if (priv->oldlink) {
919 		new_state = true;
920 		priv->oldlink = false;
921 		priv->speed = SPEED_UNKNOWN;
922 		priv->oldduplex = DUPLEX_UNKNOWN;
923 	}
924 
925 	if (new_state && netif_msg_link(priv))
926 		phy_print_status(phydev);
927 
928 	mutex_unlock(&priv->lock);
929 
930 	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the hook to adjust the link
		 * in case a switch is attached to the stmmac driver.
		 */
934 		phydev->irq = PHY_IGNORE_INTERRUPT;
935 	else
936 		/* At this stage, init the EEE if supported.
937 		 * Never called in case of fixed_link.
938 		 */
939 		priv->eee_enabled = stmmac_eee_init(priv);
940 }
941 
942 /**
943  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
944  * @priv: driver private structure
945  * Description: this is to verify if the HW supports the PCS.
946  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
947  * configured for the TBI, RTBI, or SGMII PHY interface.
948  */
949 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
950 {
951 	int interface = priv->plat->interface;
952 
953 	if (priv->dma_cap.pcs) {
954 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
955 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
956 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
957 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
958 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
959 			priv->hw->pcs = STMMAC_PCS_RGMII;
960 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
961 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
962 			priv->hw->pcs = STMMAC_PCS_SGMII;
963 		}
964 	}
965 }
966 
967 /**
968  * stmmac_init_phy - PHY initialization
969  * @dev: net device structure
970  * Description: it initializes the driver's PHY state, and attaches the PHY
971  * to the mac driver.
972  *  Return value:
973  *  0 on success
974  */
975 static int stmmac_init_phy(struct net_device *dev)
976 {
977 	struct stmmac_priv *priv = netdev_priv(dev);
978 	u32 tx_cnt = priv->plat->tx_queues_to_use;
979 	struct phy_device *phydev;
980 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
981 	char bus_id[MII_BUS_ID_SIZE];
982 	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

984 	priv->oldlink = false;
985 	priv->speed = SPEED_UNKNOWN;
986 	priv->oldduplex = DUPLEX_UNKNOWN;
987 
988 	if (priv->plat->phy_node) {
989 		phydev = of_phy_connect(dev, priv->plat->phy_node,
990 					&stmmac_adjust_link, 0, interface);
991 	} else {
992 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
993 			 priv->plat->bus_id);
994 
995 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
996 			 priv->plat->phy_addr);
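		/* e.g. bus_id 0 and phy_addr 1 yield "stmmac-0:01" */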
997 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
998 			   phy_id_fmt);
999 
1000 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
1001 				     interface);
1002 	}
1003 
1004 	if (IS_ERR_OR_NULL(phydev)) {
1005 		netdev_err(priv->dev, "Could not attach to PHY\n");
1006 		if (!phydev)
1007 			return -ENODEV;
1008 
1009 		return PTR_ERR(phydev);
1010 	}
1011 
	/* Stop advertising 1000BASE-T capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
1016 		phy_set_max_speed(phydev, SPEED_100);
1017 
1018 	/*
	 * Half-duplex mode is not supported with multiple queues:
	 * half-duplex can only work with a single queue.
1021 	 */
1022 	if (tx_cnt > 1) {
1023 		phy_remove_link_mode(phydev,
1024 				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1025 		phy_remove_link_mode(phydev,
1026 				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1027 		phy_remove_link_mode(phydev,
1028 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1029 	}
1030 
1031 	/*
1032 	 * Broken HW is sometimes missing the pull-up resistor on the
1033 	 * MDIO line, which results in reads to non-existent devices returning
1034 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1035 	 * device as well.
1036 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
1037 	 */
1038 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
1039 		phy_disconnect(phydev);
1040 		return -ENODEV;
1041 	}
1042 
1043 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1044 	 * subsequent PHY polling, make sure we force a link transition if
1045 	 * we have a UP/DOWN/UP transition
1046 	 */
1047 	if (phydev->is_pseudo_fixed_link)
1048 		phydev->irq = PHY_POLL;
1049 
1050 	phy_attached_info(phydev);
1051 	return 0;
1052 }
1053 
1054 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1055 {
1056 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1057 	void *head_rx;
1058 	u32 queue;
1059 
1060 	/* Display RX rings */
1061 	for (queue = 0; queue < rx_cnt; queue++) {
1062 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1063 
1064 		pr_info("\tRX Queue %u rings\n", queue);
1065 
1066 		if (priv->extend_desc)
1067 			head_rx = (void *)rx_q->dma_erx;
1068 		else
1069 			head_rx = (void *)rx_q->dma_rx;
1070 
1071 		/* Display RX ring */
1072 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1073 	}
1074 }
1075 
1076 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1077 {
1078 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1079 	void *head_tx;
1080 	u32 queue;
1081 
1082 	/* Display TX rings */
1083 	for (queue = 0; queue < tx_cnt; queue++) {
1084 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1085 
1086 		pr_info("\tTX Queue %d rings\n", queue);
1087 
1088 		if (priv->extend_desc)
1089 			head_tx = (void *)tx_q->dma_etx;
1090 		else
1091 			head_tx = (void *)tx_q->dma_tx;
1092 
1093 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1094 	}
1095 }
1096 
1097 static void stmmac_display_rings(struct stmmac_priv *priv)
1098 {
1099 	/* Display RX ring */
1100 	stmmac_display_rx_rings(priv);
1101 
1102 	/* Display TX ring */
1103 	stmmac_display_tx_rings(priv);
1104 }
1105 
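/**
 * stmmac_set_bfsize - map the MTU to a suitable DMA buffer size
 * @mtu: interface MTU
 * @bufsize: current buffer size
 * Description: for example, an MTU of 3000 selects BUF_SIZE_4KiB, while the
 * default 1500-byte MTU keeps the 1536-byte DEFAULT_BUFSIZE.
 */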
1106 static int stmmac_set_bfsize(int mtu, int bufsize)
1107 {
1108 	int ret = bufsize;
1109 
1110 	if (mtu >= BUF_SIZE_4KiB)
1111 		ret = BUF_SIZE_8KiB;
1112 	else if (mtu >= BUF_SIZE_2KiB)
1113 		ret = BUF_SIZE_4KiB;
1114 	else if (mtu > DEFAULT_BUFSIZE)
1115 		ret = BUF_SIZE_2KiB;
1116 	else
1117 		ret = DEFAULT_BUFSIZE;
1118 
1119 	return ret;
1120 }
1121 
1122 /**
1123  * stmmac_clear_rx_descriptors - clear RX descriptors
1124  * @priv: driver private structure
1125  * @queue: RX queue index
1126  * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are used.
1128  */
1129 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1130 {
1131 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1132 	int i;
1133 
1134 	/* Clear the RX descriptors */
1135 	for (i = 0; i < DMA_RX_SIZE; i++)
1136 		if (priv->extend_desc)
1137 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1138 					priv->use_riwt, priv->mode,
1139 					(i == DMA_RX_SIZE - 1),
1140 					priv->dma_buf_sz);
1141 		else
1142 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1143 					priv->use_riwt, priv->mode,
1144 					(i == DMA_RX_SIZE - 1),
1145 					priv->dma_buf_sz);
1146 }
1147 
1148 /**
1149  * stmmac_clear_tx_descriptors - clear tx descriptors
1150  * @priv: driver private structure
1151  * @queue: TX queue index.
1152  * Description: this function is called to clear the TX descriptors
 * whether basic or extended descriptors are used.
1154  */
1155 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1156 {
1157 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1158 	int i;
1159 
1160 	/* Clear the TX descriptors */
1161 	for (i = 0; i < DMA_TX_SIZE; i++)
1162 		if (priv->extend_desc)
1163 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1164 					priv->mode, (i == DMA_TX_SIZE - 1));
1165 		else
1166 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1167 					priv->mode, (i == DMA_TX_SIZE - 1));
1168 }
1169 
1170 /**
1171  * stmmac_clear_descriptors - clear descriptors
1172  * @priv: driver private structure
1173  * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are used.
1175  */
1176 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1177 {
1178 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1179 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1180 	u32 queue;
1181 
1182 	/* Clear the RX descriptors */
1183 	for (queue = 0; queue < rx_queue_cnt; queue++)
1184 		stmmac_clear_rx_descriptors(priv, queue);
1185 
1186 	/* Clear the TX descriptors */
1187 	for (queue = 0; queue < tx_queue_cnt; queue++)
1188 		stmmac_clear_tx_descriptors(priv, queue);
1189 }
1190 
1191 /**
1192  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1193  * @priv: driver private structure
1194  * @p: descriptor pointer
1195  * @i: descriptor index
1196  * @flags: gfp flag
1197  * @queue: RX queue index
1198  * Description: this function is called to allocate a receive buffer, perform
1199  * the DMA mapping and init the descriptor.
1200  */
1201 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1202 				  int i, gfp_t flags, u32 queue)
1203 {
1204 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1205 	struct sk_buff *skb;
1206 
1207 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1208 	if (!skb) {
1209 		netdev_err(priv->dev,
1210 			   "%s: Rx init fails; skb is NULL\n", __func__);
1211 		return -ENOMEM;
1212 	}
1213 	rx_q->rx_skbuff[i] = skb;
1214 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1215 						priv->dma_buf_sz,
1216 						DMA_FROM_DEVICE);
1217 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1218 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1219 		dev_kfree_skb_any(skb);
1220 		return -EINVAL;
1221 	}
1222 
1223 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1224 
1225 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1226 		stmmac_init_desc3(priv, p);
1227 
1228 	return 0;
1229 }
1230 
1231 /**
 * stmmac_free_rx_buffer - free an RX dma buffer
1233  * @priv: private structure
1234  * @queue: RX queue index
1235  * @i: buffer index.
1236  */
1237 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1238 {
1239 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1240 
1241 	if (rx_q->rx_skbuff[i]) {
1242 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1243 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1244 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1245 	}
1246 	rx_q->rx_skbuff[i] = NULL;
1247 }
1248 
1249 /**
 * stmmac_free_tx_buffer - free a TX dma buffer
 * @priv: private structure
 * @queue: TX queue index
1253  * @i: buffer index.
1254  */
1255 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1256 {
1257 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1258 
1259 	if (tx_q->tx_skbuff_dma[i].buf) {
1260 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1261 			dma_unmap_page(priv->device,
1262 				       tx_q->tx_skbuff_dma[i].buf,
1263 				       tx_q->tx_skbuff_dma[i].len,
1264 				       DMA_TO_DEVICE);
1265 		else
1266 			dma_unmap_single(priv->device,
1267 					 tx_q->tx_skbuff_dma[i].buf,
1268 					 tx_q->tx_skbuff_dma[i].len,
1269 					 DMA_TO_DEVICE);
1270 	}
1271 
1272 	if (tx_q->tx_skbuff[i]) {
1273 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1274 		tx_q->tx_skbuff[i] = NULL;
1275 		tx_q->tx_skbuff_dma[i].buf = 0;
1276 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1277 	}
1278 }
1279 
1280 /**
1281  * init_dma_rx_desc_rings - init the RX descriptor rings
1282  * @dev: net device structure
1283  * @flags: gfp flag.
1284  * Description: this function initializes the DMA RX descriptors
1285  * and allocates the socket buffers. It supports the chained and ring
1286  * modes.
1287  */
1288 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1289 {
1290 	struct stmmac_priv *priv = netdev_priv(dev);
1291 	u32 rx_count = priv->plat->rx_queues_to_use;
1292 	int ret = -ENOMEM;
1293 	int bfsize = 0;
1294 	int queue;
1295 	int i;
1296 
1297 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1298 	if (bfsize < 0)
1299 		bfsize = 0;
1300 
1301 	if (bfsize < BUF_SIZE_16KiB)
1302 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1303 
1304 	priv->dma_buf_sz = bfsize;
1305 
1306 	/* RX INITIALIZATION */
1307 	netif_dbg(priv, probe, priv->dev,
1308 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1309 
1310 	for (queue = 0; queue < rx_count; queue++) {
1311 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1312 
1313 		netif_dbg(priv, probe, priv->dev,
1314 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1315 			  (u32)rx_q->dma_rx_phy);
1316 
1317 		for (i = 0; i < DMA_RX_SIZE; i++) {
1318 			struct dma_desc *p;
1319 
1320 			if (priv->extend_desc)
1321 				p = &((rx_q->dma_erx + i)->basic);
1322 			else
1323 				p = rx_q->dma_rx + i;
1324 
1325 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1326 						     queue);
1327 			if (ret)
1328 				goto err_init_rx_buffers;
1329 
1330 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1331 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1332 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1333 		}
1334 
1335 		rx_q->cur_rx = 0;
1336 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1337 
1338 		stmmac_clear_rx_descriptors(priv, queue);
1339 
1340 		/* Setup the chained descriptor addresses */
1341 		if (priv->mode == STMMAC_CHAIN_MODE) {
1342 			if (priv->extend_desc)
1343 				stmmac_mode_init(priv, rx_q->dma_erx,
1344 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1345 			else
1346 				stmmac_mode_init(priv, rx_q->dma_rx,
1347 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1348 		}
1349 	}
1350 
1351 	buf_sz = bfsize;
1352 
1353 	return 0;
1354 
1355 err_init_rx_buffers:
1356 	while (queue >= 0) {
1357 		while (--i >= 0)
1358 			stmmac_free_rx_buffer(priv, queue, i);
1359 
1360 		if (queue == 0)
1361 			break;
1362 
1363 		i = DMA_RX_SIZE;
1364 		queue--;
1365 	}
1366 
1367 	return ret;
1368 }
1369 
1370 /**
1371  * init_dma_tx_desc_rings - init the TX descriptor rings
1372  * @dev: net device structure.
1373  * Description: this function initializes the DMA TX descriptors
1374  * and allocates the socket buffers. It supports the chained and ring
1375  * modes.
1376  */
1377 static int init_dma_tx_desc_rings(struct net_device *dev)
1378 {
1379 	struct stmmac_priv *priv = netdev_priv(dev);
1380 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1381 	u32 queue;
1382 	int i;
1383 
1384 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1385 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1386 
1387 		netif_dbg(priv, probe, priv->dev,
1388 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1389 			 (u32)tx_q->dma_tx_phy);
1390 
1391 		/* Setup the chained descriptor addresses */
1392 		if (priv->mode == STMMAC_CHAIN_MODE) {
1393 			if (priv->extend_desc)
1394 				stmmac_mode_init(priv, tx_q->dma_etx,
1395 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1396 			else
1397 				stmmac_mode_init(priv, tx_q->dma_tx,
1398 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1399 		}
1400 
1401 		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

1403 			if (priv->extend_desc)
1404 				p = &((tx_q->dma_etx + i)->basic);
1405 			else
1406 				p = tx_q->dma_tx + i;
1407 
1408 			stmmac_clear_desc(priv, p);
1409 
1410 			tx_q->tx_skbuff_dma[i].buf = 0;
1411 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1412 			tx_q->tx_skbuff_dma[i].len = 0;
1413 			tx_q->tx_skbuff_dma[i].last_segment = false;
1414 			tx_q->tx_skbuff[i] = NULL;
1415 		}
1416 
1417 		tx_q->dirty_tx = 0;
1418 		tx_q->cur_tx = 0;
1419 		tx_q->mss = 0;
1420 
1421 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 /**
1428  * init_dma_desc_rings - init the RX/TX descriptor rings
1429  * @dev: net device structure
1430  * @flags: gfp flag.
1431  * Description: this function initializes the DMA RX/TX descriptors
1432  * and allocates the socket buffers. It supports the chained and ring
1433  * modes.
1434  */
1435 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1436 {
1437 	struct stmmac_priv *priv = netdev_priv(dev);
1438 	int ret;
1439 
1440 	ret = init_dma_rx_desc_rings(dev, flags);
1441 	if (ret)
1442 		return ret;
1443 
1444 	ret = init_dma_tx_desc_rings(dev);
1445 
1446 	stmmac_clear_descriptors(priv);
1447 
1448 	if (netif_msg_hw(priv))
1449 		stmmac_display_rings(priv);
1450 
1451 	return ret;
1452 }
1453 
1454 /**
1455  * dma_free_rx_skbufs - free RX dma buffers
1456  * @priv: private structure
1457  * @queue: RX queue index
1458  */
1459 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1460 {
1461 	int i;
1462 
1463 	for (i = 0; i < DMA_RX_SIZE; i++)
1464 		stmmac_free_rx_buffer(priv, queue, i);
1465 }
1466 
1467 /**
1468  * dma_free_tx_skbufs - free TX dma buffers
1469  * @priv: private structure
1470  * @queue: TX queue index
1471  */
1472 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1473 {
1474 	int i;
1475 
1476 	for (i = 0; i < DMA_TX_SIZE; i++)
1477 		stmmac_free_tx_buffer(priv, queue, i);
1478 }
1479 
1480 /**
1481  * free_dma_rx_desc_resources - free RX dma desc resources
1482  * @priv: private structure
1483  */
1484 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1485 {
1486 	u32 rx_count = priv->plat->rx_queues_to_use;
1487 	u32 queue;
1488 
1489 	/* Free RX queue resources */
1490 	for (queue = 0; queue < rx_count; queue++) {
1491 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1492 
1493 		/* Release the DMA RX socket buffers */
1494 		dma_free_rx_skbufs(priv, queue);
1495 
1496 		/* Free DMA regions of consistent memory previously allocated */
1497 		if (!priv->extend_desc)
1498 			dma_free_coherent(priv->device,
1499 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1500 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1501 		else
1502 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1503 					  sizeof(struct dma_extended_desc),
1504 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1505 
1506 		kfree(rx_q->rx_skbuff_dma);
1507 		kfree(rx_q->rx_skbuff);
1508 	}
1509 }
1510 
1511 /**
1512  * free_dma_tx_desc_resources - free TX dma desc resources
1513  * @priv: private structure
1514  */
1515 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1516 {
1517 	u32 tx_count = priv->plat->tx_queues_to_use;
1518 	u32 queue;
1519 
1520 	/* Free TX queue resources */
1521 	for (queue = 0; queue < tx_count; queue++) {
1522 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1523 
1524 		/* Release the DMA TX socket buffers */
1525 		dma_free_tx_skbufs(priv, queue);
1526 
1527 		/* Free DMA regions of consistent memory previously allocated */
1528 		if (!priv->extend_desc)
1529 			dma_free_coherent(priv->device,
1530 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1531 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1532 		else
1533 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1534 					  sizeof(struct dma_extended_desc),
1535 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1536 
1537 		kfree(tx_q->tx_skbuff_dma);
1538 		kfree(tx_q->tx_skbuff);
1539 	}
1540 }
1541 
1542 /**
1543  * alloc_dma_rx_desc_resources - alloc RX resources.
1544  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the RX path. The RX socket
 * buffers are pre-allocated in order to allow a zero-copy mechanism.
1549  */
1550 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1551 {
1552 	u32 rx_count = priv->plat->rx_queues_to_use;
1553 	int ret = -ENOMEM;
1554 	u32 queue;
1555 
1556 	/* RX queues buffers and DMA */
1557 	for (queue = 0; queue < rx_count; queue++) {
1558 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1559 
1560 		rx_q->queue_index = queue;
1561 		rx_q->priv_data = priv;
1562 
1563 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1564 						    sizeof(dma_addr_t),
1565 						    GFP_KERNEL);
1566 		if (!rx_q->rx_skbuff_dma)
1567 			goto err_dma;
1568 
1569 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1570 						sizeof(struct sk_buff *),
1571 						GFP_KERNEL);
1572 		if (!rx_q->rx_skbuff)
1573 			goto err_dma;
1574 
1575 		if (priv->extend_desc) {
1576 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1577 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1578 							   &rx_q->dma_rx_phy,
1579 							   GFP_KERNEL);
1580 			if (!rx_q->dma_erx)
1581 				goto err_dma;
1582 
1583 		} else {
1584 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1585 							  DMA_RX_SIZE * sizeof(struct dma_desc),
1586 							  &rx_q->dma_rx_phy,
1587 							  GFP_KERNEL);
1588 			if (!rx_q->dma_rx)
1589 				goto err_dma;
1590 		}
1591 	}
1592 
1593 	return 0;
1594 
1595 err_dma:
1596 	free_dma_rx_desc_resources(priv);
1597 
1598 	return ret;
1599 }
1600 
1601 /**
1602  * alloc_dma_tx_desc_resources - alloc TX resources.
1603  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources (descriptors and buffer bookkeeping)
 * for the TX path.
1608  */
1609 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1610 {
1611 	u32 tx_count = priv->plat->tx_queues_to_use;
1612 	int ret = -ENOMEM;
1613 	u32 queue;
1614 
1615 	/* TX queues buffers and DMA */
1616 	for (queue = 0; queue < tx_count; queue++) {
1617 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1618 
1619 		tx_q->queue_index = queue;
1620 		tx_q->priv_data = priv;
1621 
1622 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1623 						    sizeof(*tx_q->tx_skbuff_dma),
1624 						    GFP_KERNEL);
1625 		if (!tx_q->tx_skbuff_dma)
1626 			goto err_dma;
1627 
1628 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1629 						sizeof(struct sk_buff *),
1630 						GFP_KERNEL);
1631 		if (!tx_q->tx_skbuff)
1632 			goto err_dma;
1633 
1634 		if (priv->extend_desc) {
1635 			tx_q->dma_etx = dma_alloc_coherent(priv->device,
1636 							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1637 							   &tx_q->dma_tx_phy,
1638 							   GFP_KERNEL);
1639 			if (!tx_q->dma_etx)
1640 				goto err_dma;
1641 		} else {
1642 			tx_q->dma_tx = dma_alloc_coherent(priv->device,
1643 							  DMA_TX_SIZE * sizeof(struct dma_desc),
1644 							  &tx_q->dma_tx_phy,
1645 							  GFP_KERNEL);
1646 			if (!tx_q->dma_tx)
1647 				goto err_dma;
1648 		}
1649 	}
1650 
1651 	return 0;
1652 
1653 err_dma:
1654 	free_dma_tx_desc_resources(priv);
1655 
1656 	return ret;
1657 }
1658 
1659 /**
1660  * alloc_dma_desc_resources - alloc TX/RX resources.
1661  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, the RX socket buffers are pre-allocated in order
 * to allow a zero-copy mechanism.
1666  */
1667 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1668 {
1669 	/* RX Allocation */
1670 	int ret = alloc_dma_rx_desc_resources(priv);
1671 
1672 	if (ret)
1673 		return ret;
1674 
1675 	ret = alloc_dma_tx_desc_resources(priv);
1676 
1677 	return ret;
1678 }
1679 
1680 /**
1681  * free_dma_desc_resources - free dma desc resources
1682  * @priv: private structure
1683  */
1684 static void free_dma_desc_resources(struct stmmac_priv *priv)
1685 {
1686 	/* Release the DMA RX socket buffers */
1687 	free_dma_rx_desc_resources(priv);
1688 
1689 	/* Release the DMA TX socket buffers */
1690 	free_dma_tx_desc_resources(priv);
1691 }
1692 
1693 /**
1694  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1695  *  @priv: driver private structure
1696  *  Description: It is used for enabling the rx queues in the MAC
1697  */
1698 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1699 {
1700 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1701 	int queue;
1702 	u8 mode;
1703 
1704 	for (queue = 0; queue < rx_queues_count; queue++) {
1705 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1706 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1707 	}
1708 }
1709 
1710 /**
1711  * stmmac_start_rx_dma - start RX DMA channel
1712  * @priv: driver private structure
1713  * @chan: RX channel index
1714  * Description:
1715  * This starts a RX DMA channel
1716  */
1717 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1718 {
1719 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1720 	stmmac_start_rx(priv, priv->ioaddr, chan);
1721 }
1722 
1723 /**
1724  * stmmac_start_tx_dma - start TX DMA channel
1725  * @priv: driver private structure
1726  * @chan: TX channel index
1727  * Description:
1728  * This starts a TX DMA channel
1729  */
1730 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1731 {
1732 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1733 	stmmac_start_tx(priv, priv->ioaddr, chan);
1734 }
1735 
1736 /**
1737  * stmmac_stop_rx_dma - stop RX DMA channel
1738  * @priv: driver private structure
1739  * @chan: RX channel index
1740  * Description:
1741  * This stops a RX DMA channel
1742  */
1743 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1744 {
1745 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1746 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1747 }
1748 
1749 /**
1750  * stmmac_stop_tx_dma - stop TX DMA channel
1751  * @priv: driver private structure
1752  * @chan: TX channel index
1753  * Description:
1754  * This stops a TX DMA channel
1755  */
1756 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1757 {
1758 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1759 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1760 }
1761 
1762 /**
1763  * stmmac_start_all_dma - start all RX and TX DMA channels
1764  * @priv: driver private structure
1765  * Description:
1766  * This starts all the RX and TX DMA channels
1767  */
1768 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1769 {
1770 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1771 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1772 	u32 chan = 0;
1773 
1774 	for (chan = 0; chan < rx_channels_count; chan++)
1775 		stmmac_start_rx_dma(priv, chan);
1776 
1777 	for (chan = 0; chan < tx_channels_count; chan++)
1778 		stmmac_start_tx_dma(priv, chan);
1779 }
1780 
1781 /**
1782  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1783  * @priv: driver private structure
1784  * Description:
1785  * This stops the RX and TX DMA channels
1786  */
1787 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1788 {
1789 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1790 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1791 	u32 chan = 0;
1792 
1793 	for (chan = 0; chan < rx_channels_count; chan++)
1794 		stmmac_stop_rx_dma(priv, chan);
1795 
1796 	for (chan = 0; chan < tx_channels_count; chan++)
1797 		stmmac_stop_tx_dma(priv, chan);
1798 }
1799 
1800 /**
1801  *  stmmac_dma_operation_mode - HW DMA operation mode
1802  *  @priv: driver private structure
1803  *  Description: it is used for configuring the DMA operation mode register in
1804  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1805  */
1806 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1807 {
1808 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1809 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1810 	int rxfifosz = priv->plat->rx_fifo_size;
1811 	int txfifosz = priv->plat->tx_fifo_size;
1812 	u32 txmode = 0;
1813 	u32 rxmode = 0;
1814 	u32 chan = 0;
1815 	u8 qmode = 0;
1816 
1817 	if (rxfifosz == 0)
1818 		rxfifosz = priv->dma_cap.rx_fifo_size;
1819 	if (txfifosz == 0)
1820 		txfifosz = priv->dma_cap.tx_fifo_size;
1821 
1822 	/* Adjust for real per queue fifo size */
1823 	rxfifosz /= rx_channels_count;
1824 	txfifosz /= tx_channels_count;
1825 
1826 	if (priv->plat->force_thresh_dma_mode) {
1827 		txmode = tc;
1828 		rxmode = tc;
1829 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1830 		/*
1831 		 * In case of GMAC, SF mode can be enabled
1832 		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported
		 * 2) there being no bugged Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
1836 		 */
1837 		txmode = SF_DMA_MODE;
1838 		rxmode = SF_DMA_MODE;
1839 		priv->xstats.threshold = SF_DMA_MODE;
1840 	} else {
1841 		txmode = tc;
1842 		rxmode = SF_DMA_MODE;
1843 	}
1844 
1845 	/* configure all channels */
1846 	for (chan = 0; chan < rx_channels_count; chan++) {
1847 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1848 
1849 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1850 				rxfifosz, qmode);
1851 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1852 				chan);
1853 	}
1854 
1855 	for (chan = 0; chan < tx_channels_count; chan++) {
1856 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1857 
1858 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1859 				txfifosz, qmode);
1860 	}
1861 }
1862 
1863 /**
1864  * stmmac_tx_clean - to manage the transmission completion
1865  * @priv: driver private structure
 * @budget: napi budget limiting the amount of work done in one call
 * @queue: TX queue index
1867  * Description: it reclaims the transmit resources after transmission completes.
1868  */
1869 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1870 {
1871 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1872 	unsigned int bytes_compl = 0, pkts_compl = 0;
1873 	unsigned int entry, count = 0;
1874 
1875 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1876 
1877 	priv->xstats.tx_clean++;
1878 
1879 	entry = tx_q->dirty_tx;
1880 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1881 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1882 		struct dma_desc *p;
1883 		int status;
1884 
1885 		if (priv->extend_desc)
1886 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1887 		else
1888 			p = tx_q->dma_tx + entry;
1889 
1890 		status = stmmac_tx_status(priv, &priv->dev->stats,
1891 				&priv->xstats, p, priv->ioaddr);
1892 		/* Check if the descriptor is owned by the DMA */
1893 		if (unlikely(status & tx_dma_own))
1894 			break;
1895 
1896 		count++;
1897 
1898 		/* Make sure descriptor fields are read after reading
1899 		 * the own bit.
1900 		 */
1901 		dma_rmb();
1902 
1903 		/* Just consider the last segment and ...*/
1904 		if (likely(!(status & tx_not_ls))) {
1905 			/* ... verify the status error condition */
1906 			if (unlikely(status & tx_err)) {
1907 				priv->dev->stats.tx_errors++;
1908 			} else {
1909 				priv->dev->stats.tx_packets++;
1910 				priv->xstats.tx_pkt_n++;
1911 			}
1912 			stmmac_get_tx_hwtstamp(priv, p, skb);
1913 		}
1914 
1915 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1916 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1917 				dma_unmap_page(priv->device,
1918 					       tx_q->tx_skbuff_dma[entry].buf,
1919 					       tx_q->tx_skbuff_dma[entry].len,
1920 					       DMA_TO_DEVICE);
1921 			else
1922 				dma_unmap_single(priv->device,
1923 						 tx_q->tx_skbuff_dma[entry].buf,
1924 						 tx_q->tx_skbuff_dma[entry].len,
1925 						 DMA_TO_DEVICE);
1926 			tx_q->tx_skbuff_dma[entry].buf = 0;
1927 			tx_q->tx_skbuff_dma[entry].len = 0;
1928 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1929 		}
1930 
1931 		stmmac_clean_desc3(priv, tx_q, p);
1932 
1933 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1934 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1935 
1936 		if (likely(skb != NULL)) {
1937 			pkts_compl++;
1938 			bytes_compl += skb->len;
1939 			dev_consume_skb_any(skb);
1940 			tx_q->tx_skbuff[entry] = NULL;
1941 		}
1942 
1943 		stmmac_release_tx_desc(priv, p, priv->mode);
1944 
1945 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1946 	}
1947 	tx_q->dirty_tx = entry;
1948 
1949 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1950 				  pkts_compl, bytes_compl);
1951 
1952 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1953 								queue))) &&
1954 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1955 
1956 		netif_dbg(priv, tx_done, priv->dev,
1957 			  "%s: restart transmit\n", __func__);
1958 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1959 	}
1960 
1961 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1962 		stmmac_enable_eee_mode(priv);
1963 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1964 	}
1965 
1966 	/* We still have pending packets, let's call for a new scheduling */
1967 	if (tx_q->dirty_tx != tx_q->cur_tx)
1968 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1969 
1970 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1971 
1972 	return count;
1973 }
1974 
1975 /**
1976  * stmmac_tx_err - to manage the tx error
1977  * @priv: driver private structure
1978  * @chan: channel index
1979  * Description: it cleans the descriptors and restarts the transmission
1980  * in case of transmission errors.
1981  */
1982 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1983 {
1984 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1985 	int i;
1986 
1987 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1988 
1989 	stmmac_stop_tx_dma(priv, chan);
1990 	dma_free_tx_skbufs(priv, chan);
1991 	for (i = 0; i < DMA_TX_SIZE; i++)
1992 		if (priv->extend_desc)
1993 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1994 					priv->mode, (i == DMA_TX_SIZE - 1));
1995 		else
1996 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1997 					priv->mode, (i == DMA_TX_SIZE - 1));
1998 	tx_q->dirty_tx = 0;
1999 	tx_q->cur_tx = 0;
2000 	tx_q->mss = 0;
2001 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2002 	stmmac_start_tx_dma(priv, chan);
2003 
2004 	priv->dev->stats.tx_errors++;
2005 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2006 }
2007 
2008 /**
2009  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2010  *  @priv: driver private structure
2011  *  @txmode: TX operating mode
2012  *  @rxmode: RX operating mode
2013  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
2015  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2016  *  mode.
2017  */
2018 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2019 					  u32 rxmode, u32 chan)
2020 {
2021 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2022 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2023 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2024 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2025 	int rxfifosz = priv->plat->rx_fifo_size;
2026 	int txfifosz = priv->plat->tx_fifo_size;
2027 
2028 	if (rxfifosz == 0)
2029 		rxfifosz = priv->dma_cap.rx_fifo_size;
2030 	if (txfifosz == 0)
2031 		txfifosz = priv->dma_cap.tx_fifo_size;
2032 
2033 	/* Adjust for real per queue fifo size */
2034 	rxfifosz /= rx_channels_count;
2035 	txfifosz /= tx_channels_count;
2036 
2037 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2038 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2039 }
2040 
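/* Check the safety feature interrupt status: a non-zero return other than
 * -EINVAL (feature not supported) triggers the driver's global error
 * handling and makes this helper return true.
 */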
2041 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2042 {
2043 	int ret;
2044 
2045 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2046 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2047 	if (ret && (ret != -EINVAL)) {
2048 		stmmac_global_err(priv);
2049 		return true;
2050 	}
2051 
2052 	return false;
2053 }
2054 
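/* Read the DMA interrupt status for a given channel and, when RX and/or TX
 * work is pending, mask the channel interrupt and schedule the matching
 * NAPI instance; the interrupt is re-enabled once the poll routine is done.
 */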
2055 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2056 {
2057 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2058 						 &priv->xstats, chan);
2059 	struct stmmac_channel *ch = &priv->channel[chan];
2060 
2061 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2062 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2063 		napi_schedule_irqoff(&ch->rx_napi);
2064 	}
2065 
2066 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2067 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2068 		napi_schedule_irqoff(&ch->tx_napi);
2069 	}
2070 
2071 	return status;
2072 }
2073 
2074 /**
2075  * stmmac_dma_interrupt - DMA ISR
2076  * @priv: driver private structure
2077  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case some
 * work can be done.
2080  */
2081 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2082 {
2083 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2084 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2085 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2086 				tx_channel_count : rx_channel_count;
2087 	u32 chan;
2088 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2089 
2090 	/* Make sure we never check beyond our status buffer. */
2091 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2092 		channels_to_check = ARRAY_SIZE(status);
2093 
2094 	for (chan = 0; chan < channels_to_check; chan++)
2095 		status[chan] = stmmac_napi_check(priv, chan);
2096 
2097 	for (chan = 0; chan < tx_channel_count; chan++) {
2098 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2099 			/* Try to bump up the dma threshold on this failure */
2100 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2101 			    (tc <= 256)) {
2102 				tc += 64;
2103 				if (priv->plat->force_thresh_dma_mode)
2104 					stmmac_set_dma_operation_mode(priv,
2105 								      tc,
2106 								      tc,
2107 								      chan);
2108 				else
2109 					stmmac_set_dma_operation_mode(priv,
2110 								    tc,
2111 								    SF_DMA_MODE,
2112 								    chan);
2113 				priv->xstats.threshold = tc;
2114 			}
2115 		} else if (unlikely(status[chan] == tx_hard_error)) {
2116 			stmmac_tx_err(priv, chan);
2117 		}
2118 	}
2119 }
2120 
2121 /**
2122  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2123  * @priv: driver private structure
 * Description: this masks the MMC irq since the counters are managed in SW.
2125  */
2126 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2127 {
2128 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2129 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2130 
2131 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2132 
2133 	if (priv->dma_cap.rmon) {
2134 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2135 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2136 	} else
2137 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2138 }
2139 
2140 /**
2141  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2142  * @priv: driver private structure
2143  * Description:
 *  new GMAC chip generations have a dedicated register to indicate the
 *  presence of the optional features/functions.
 *  This can also be used to override the values passed through the
 *  platform, which remain necessary for old MAC10/100 and GMAC chips.
2148  */
2149 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2150 {
2151 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2152 }
2153 
2154 /**
2155  * stmmac_check_ether_addr - check if the MAC addr is valid
2156  * @priv: driver private structure
2157  * Description:
 * it verifies that the MAC address is valid; if it is not, a random
 * MAC address is generated.
2160  */
2161 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2162 {
2163 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2164 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2165 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2166 			eth_hw_addr_random(priv->dev);
2167 		netdev_info(priv->dev, "device MAC address %pM\n",
2168 			    priv->dev->dev_addr);
2169 	}
2170 }
2171 
2172 /**
2173  * stmmac_init_dma_engine - DMA init.
2174  * @priv: driver private structure
2175  * Description:
2176  * It inits the DMA invoking the specific MAC/GMAC callback.
2177  * Some DMA parameters can be passed from the platform;
 * if these are not passed, a default is kept for the MAC or GMAC.
2179  */
2180 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2181 {
2182 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2183 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2184 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2185 	struct stmmac_rx_queue *rx_q;
2186 	struct stmmac_tx_queue *tx_q;
2187 	u32 chan = 0;
2188 	int atds = 0;
2189 	int ret = 0;
2190 
2191 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2192 		dev_err(priv->device, "Invalid DMA configuration\n");
2193 		return -EINVAL;
2194 	}
2195 
2196 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2197 		atds = 1;
2198 
2199 	ret = stmmac_reset(priv, priv->ioaddr);
2200 	if (ret) {
2201 		dev_err(priv->device, "Failed to reset the dma\n");
2202 		return ret;
2203 	}
2204 
2205 	/* DMA Configuration */
2206 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2207 
2208 	if (priv->plat->axi)
2209 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2210 
2211 	/* DMA CSR Channel configuration */
2212 	for (chan = 0; chan < dma_csr_ch; chan++)
2213 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2214 
2215 	/* DMA RX Channel Configuration */
2216 	for (chan = 0; chan < rx_channels_count; chan++) {
2217 		rx_q = &priv->rx_queue[chan];
2218 
2219 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2220 				    rx_q->dma_rx_phy, chan);
2221 
2222 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2223 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2224 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2225 				       rx_q->rx_tail_addr, chan);
2226 	}
2227 
2228 	/* DMA TX Channel Configuration */
2229 	for (chan = 0; chan < tx_channels_count; chan++) {
2230 		tx_q = &priv->tx_queue[chan];
2231 
2232 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2233 				    tx_q->dma_tx_phy, chan);
2234 
2235 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2236 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2237 				       tx_q->tx_tail_addr, chan);
2238 	}
2239 
2240 	return ret;
2241 }
2242 
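/* Re-arm the per-queue TX coalescing timer so that completed descriptors
 * get cleaned even when the frame count threshold is not reached.
 */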
2243 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2244 {
2245 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2246 
2247 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2248 }
2249 
2250 /**
2251  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the timer_list structure embedded in the TX queue
 * Description:
 * This is the timer handler that schedules the TX NAPI instance, which in
 * turn invokes stmmac_tx_clean.
2255  */
2256 static void stmmac_tx_timer(struct timer_list *t)
2257 {
2258 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2259 	struct stmmac_priv *priv = tx_q->priv_data;
2260 	struct stmmac_channel *ch;
2261 
2262 	ch = &priv->channel[tx_q->queue_index];
2263 
2264 	/*
2265 	 * If NAPI is already running we can miss some events. Let's rearm
2266 	 * the timer and try again.
2267 	 */
2268 	if (likely(napi_schedule_prep(&ch->tx_napi)))
2269 		__napi_schedule(&ch->tx_napi);
2270 	else
2271 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
2272 }
2273 
2274 /**
2275  * stmmac_init_tx_coalesce - init tx mitigation options.
2276  * @priv: driver private structure
2277  * Description:
2278  * This inits the transmit coalesce parameters: i.e. timer rate,
2279  * timer handler and default threshold used for enabling the
2280  * interrupt on completion bit.
2281  */
2282 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2283 {
2284 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2285 	u32 chan;
2286 
2287 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2288 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2289 
2290 	for (chan = 0; chan < tx_channel_count; chan++) {
2291 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2292 
2293 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2294 	}
2295 }
2296 
2297 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2298 {
2299 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2300 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2301 	u32 chan;
2302 
2303 	/* set TX ring length */
2304 	for (chan = 0; chan < tx_channels_count; chan++)
2305 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2306 				(DMA_TX_SIZE - 1), chan);
2307 
2308 	/* set RX ring length */
2309 	for (chan = 0; chan < rx_channels_count; chan++)
2310 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2311 				(DMA_RX_SIZE - 1), chan);
2312 }
2313 
2314 /**
2315  *  stmmac_set_tx_queue_weight - Set TX queue weight
2316  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
2318  */
2319 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2320 {
2321 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2322 	u32 weight;
2323 	u32 queue;
2324 
2325 	for (queue = 0; queue < tx_queues_count; queue++) {
2326 		weight = priv->plat->tx_queues_cfg[queue].weight;
2327 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2328 	}
2329 }
2330 
2331 /**
2332  *  stmmac_configure_cbs - Configure CBS in TX queue
2333  *  @priv: driver private structure
2334  *  Description: It is used for configuring CBS in AVB TX queues
2335  */
2336 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2337 {
2338 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2339 	u32 mode_to_use;
2340 	u32 queue;
2341 
2342 	/* queue 0 is reserved for legacy traffic */
2343 	for (queue = 1; queue < tx_queues_count; queue++) {
2344 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2345 		if (mode_to_use == MTL_QUEUE_DCB)
2346 			continue;
2347 
2348 		stmmac_config_cbs(priv, priv->hw,
2349 				priv->plat->tx_queues_cfg[queue].send_slope,
2350 				priv->plat->tx_queues_cfg[queue].idle_slope,
2351 				priv->plat->tx_queues_cfg[queue].high_credit,
2352 				priv->plat->tx_queues_cfg[queue].low_credit,
2353 				queue);
2354 	}
2355 }
2356 
2357 /**
2358  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2359  *  @priv: driver private structure
2360  *  Description: It is used for mapping RX queues to RX dma channels
2361  */
2362 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2363 {
2364 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2365 	u32 queue;
2366 	u32 chan;
2367 
2368 	for (queue = 0; queue < rx_queues_count; queue++) {
2369 		chan = priv->plat->rx_queues_cfg[queue].chan;
2370 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2371 	}
2372 }
2373 
2374 /**
2375  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2376  *  @priv: driver private structure
2377  *  Description: It is used for configuring the RX Queue Priority
2378  */
2379 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2380 {
2381 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2382 	u32 queue;
2383 	u32 prio;
2384 
2385 	for (queue = 0; queue < rx_queues_count; queue++) {
2386 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2387 			continue;
2388 
2389 		prio = priv->plat->rx_queues_cfg[queue].prio;
2390 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2391 	}
2392 }
2393 
2394 /**
2395  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2396  *  @priv: driver private structure
2397  *  Description: It is used for configuring the TX Queue Priority
2398  */
2399 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2400 {
2401 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2402 	u32 queue;
2403 	u32 prio;
2404 
2405 	for (queue = 0; queue < tx_queues_count; queue++) {
2406 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2407 			continue;
2408 
2409 		prio = priv->plat->tx_queues_cfg[queue].prio;
2410 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2411 	}
2412 }
2413 
2414 /**
2415  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2416  *  @priv: driver private structure
2417  *  Description: It is used for configuring the RX queue routing
2418  */
2419 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2420 {
2421 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2422 	u32 queue;
2423 	u8 packet;
2424 
2425 	for (queue = 0; queue < rx_queues_count; queue++) {
2426 		/* no specific packet type routing specified for the queue */
2427 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2428 			continue;
2429 
2430 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2431 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2432 	}
2433 }
2434 
2435 /**
2436  *  stmmac_mtl_configuration - Configure MTL
2437  *  @priv: driver private structure
 *  Description: It is used for configuring MTL
2439  */
2440 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2441 {
2442 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2443 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2444 
2445 	if (tx_queues_count > 1)
2446 		stmmac_set_tx_queue_weight(priv);
2447 
2448 	/* Configure MTL RX algorithms */
2449 	if (rx_queues_count > 1)
2450 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2451 				priv->plat->rx_sched_algorithm);
2452 
2453 	/* Configure MTL TX algorithms */
2454 	if (tx_queues_count > 1)
2455 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2456 				priv->plat->tx_sched_algorithm);
2457 
2458 	/* Configure CBS in AVB TX queues */
2459 	if (tx_queues_count > 1)
2460 		stmmac_configure_cbs(priv);
2461 
2462 	/* Map RX MTL to DMA channels */
2463 	stmmac_rx_queue_dma_chan_map(priv);
2464 
2465 	/* Enable MAC RX Queues */
2466 	stmmac_mac_enable_rx_queues(priv);
2467 
2468 	/* Set RX priorities */
2469 	if (rx_queues_count > 1)
2470 		stmmac_mac_config_rx_queues_prio(priv);
2471 
2472 	/* Set TX priorities */
2473 	if (tx_queues_count > 1)
2474 		stmmac_mac_config_tx_queues_prio(priv);
2475 
2476 	/* Set RX routing */
2477 	if (rx_queues_count > 1)
2478 		stmmac_mac_config_rx_queues_routing(priv);
2479 }
2480 
2481 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2482 {
2483 	if (priv->dma_cap.asp) {
2484 		netdev_info(priv->dev, "Enabling Safety Features\n");
2485 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2486 	} else {
2487 		netdev_info(priv->dev, "No Safety Features support found\n");
2488 	}
2489 }
2490 
2491 /**
2492  * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP (timestamping) support if true
 *  Description:
 *  this is the main function to setup the HW in a usable state: the
2496  *  dma engine is reset, the core registers are configured (e.g. AXI,
2497  *  Checksum features, timers). The DMA is ready to start receiving and
2498  *  transmitting.
2499  *  Return value:
2500  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2501  *  file on failure.
2502  */
2503 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2504 {
2505 	struct stmmac_priv *priv = netdev_priv(dev);
2506 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2507 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2508 	u32 chan;
2509 	int ret;
2510 
2511 	/* DMA initialization and SW reset */
2512 	ret = stmmac_init_dma_engine(priv);
2513 	if (ret < 0) {
2514 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2515 			   __func__);
2516 		return ret;
2517 	}
2518 
2519 	/* Copy the MAC addr into the HW  */
2520 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2521 
2522 	/* PS and related bits will be programmed according to the speed */
2523 	if (priv->hw->pcs) {
2524 		int speed = priv->plat->mac_port_sel_speed;
2525 
2526 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2527 		    (speed == SPEED_1000)) {
2528 			priv->hw->ps = speed;
2529 		} else {
2530 			dev_warn(priv->device, "invalid port speed\n");
2531 			priv->hw->ps = 0;
2532 		}
2533 	}
2534 
2535 	/* Initialize the MAC Core */
2536 	stmmac_core_init(priv, priv->hw, dev);
2537 
2538 	/* Initialize MTL*/
2539 	stmmac_mtl_configuration(priv);
2540 
2541 	/* Initialize Safety Features */
2542 	stmmac_safety_feat_configuration(priv);
2543 
2544 	ret = stmmac_rx_ipc(priv, priv->hw);
2545 	if (!ret) {
2546 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2547 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2548 		priv->hw->rx_csum = 0;
2549 	}
2550 
2551 	/* Enable the MAC Rx/Tx */
2552 	stmmac_mac_set(priv, priv->ioaddr, true);
2553 
2554 	/* Set the HW DMA mode and the COE */
2555 	stmmac_dma_operation_mode(priv);
2556 
2557 	stmmac_mmc_setup(priv);
2558 
2559 	if (init_ptp) {
2560 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2561 		if (ret < 0)
2562 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2563 
2564 		ret = stmmac_init_ptp(priv);
2565 		if (ret == -EOPNOTSUPP)
2566 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2567 		else if (ret)
2568 			netdev_warn(priv->dev, "PTP init failed\n");
2569 	}
2570 
2571 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2572 
2573 	if (priv->use_riwt) {
2574 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2575 		if (!ret)
2576 			priv->rx_riwt = MAX_DMA_RIWT;
2577 	}
2578 
2579 	if (priv->hw->pcs)
2580 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2581 
2582 	/* set TX and RX rings length */
2583 	stmmac_set_rings_length(priv);
2584 
2585 	/* Enable TSO */
2586 	if (priv->tso) {
2587 		for (chan = 0; chan < tx_cnt; chan++)
2588 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2589 	}
2590 
2591 	/* Start the ball rolling... */
2592 	stmmac_start_all_dma(priv);
2593 
2594 	return 0;
2595 }
2596 
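/* Undo the clock setup done by stmmac_hw_setup(): disable the PTP
 * reference clock.
 */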
2597 static void stmmac_hw_teardown(struct net_device *dev)
2598 {
2599 	struct stmmac_priv *priv = netdev_priv(dev);
2600 
2601 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2602 }
2603 
2604 /**
2605  *  stmmac_open - open entry point of the driver
2606  *  @dev : pointer to the device structure.
2607  *  Description:
2608  *  This function is the open entry point of the driver.
2609  *  Return value:
2610  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2611  *  file on failure.
2612  */
2613 static int stmmac_open(struct net_device *dev)
2614 {
2615 	struct stmmac_priv *priv = netdev_priv(dev);
2616 	u32 chan;
2617 	int ret;
2618 
2619 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2620 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2621 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2622 		ret = stmmac_init_phy(dev);
2623 		if (ret) {
2624 			netdev_err(priv->dev,
2625 				   "%s: Cannot attach to PHY (error: %d)\n",
2626 				   __func__, ret);
2627 			return ret;
2628 		}
2629 	}
2630 
2631 	/* Extra statistics */
2632 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2633 	priv->xstats.threshold = tc;
2634 
2635 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2636 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2637 
2638 	ret = alloc_dma_desc_resources(priv);
2639 	if (ret < 0) {
2640 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2641 			   __func__);
2642 		goto dma_desc_error;
2643 	}
2644 
2645 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2646 	if (ret < 0) {
2647 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2648 			   __func__);
2649 		goto init_error;
2650 	}
2651 
2652 	ret = stmmac_hw_setup(dev, true);
2653 	if (ret < 0) {
2654 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2655 		goto init_error;
2656 	}
2657 
2658 	stmmac_init_tx_coalesce(priv);
2659 
2660 	if (dev->phydev)
2661 		phy_start(dev->phydev);
2662 
2663 	/* Request the IRQ lines */
2664 	ret = request_irq(dev->irq, stmmac_interrupt,
2665 			  IRQF_SHARED, dev->name, dev);
2666 	if (unlikely(ret < 0)) {
2667 		netdev_err(priv->dev,
2668 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2669 			   __func__, dev->irq, ret);
2670 		goto irq_error;
2671 	}
2672 
2673 	/* Request the Wake IRQ in case of another line is used for WoL */
2674 	if (priv->wol_irq != dev->irq) {
2675 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2676 				  IRQF_SHARED, dev->name, dev);
2677 		if (unlikely(ret < 0)) {
2678 			netdev_err(priv->dev,
2679 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2680 				   __func__, priv->wol_irq, ret);
2681 			goto wolirq_error;
2682 		}
2683 	}
2684 
2685 	/* Request the IRQ lines */
2686 	if (priv->lpi_irq > 0) {
2687 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2688 				  dev->name, dev);
2689 		if (unlikely(ret < 0)) {
2690 			netdev_err(priv->dev,
2691 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2692 				   __func__, priv->lpi_irq, ret);
2693 			goto lpiirq_error;
2694 		}
2695 	}
2696 
2697 	stmmac_enable_all_queues(priv);
2698 	stmmac_start_all_queues(priv);
2699 
2700 	return 0;
2701 
2702 lpiirq_error:
2703 	if (priv->wol_irq != dev->irq)
2704 		free_irq(priv->wol_irq, dev);
2705 wolirq_error:
2706 	free_irq(dev->irq, dev);
2707 irq_error:
2708 	if (dev->phydev)
2709 		phy_stop(dev->phydev);
2710 
2711 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2712 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2713 
2714 	stmmac_hw_teardown(dev);
2715 init_error:
2716 	free_dma_desc_resources(priv);
2717 dma_desc_error:
2718 	if (dev->phydev)
2719 		phy_disconnect(dev->phydev);
2720 
2721 	return ret;
2722 }
2723 
2724 /**
2725  *  stmmac_release - close entry point of the driver
2726  *  @dev : device pointer.
2727  *  Description:
2728  *  This is the stop entry point of the driver.
2729  */
2730 static int stmmac_release(struct net_device *dev)
2731 {
2732 	struct stmmac_priv *priv = netdev_priv(dev);
2733 	u32 chan;
2734 
2735 	if (priv->eee_enabled)
2736 		del_timer_sync(&priv->eee_ctrl_timer);
2737 
2738 	/* Stop and disconnect the PHY */
2739 	if (dev->phydev) {
2740 		phy_stop(dev->phydev);
2741 		phy_disconnect(dev->phydev);
2742 	}
2743 
2744 	stmmac_stop_all_queues(priv);
2745 
2746 	stmmac_disable_all_queues(priv);
2747 
2748 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2749 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2750 
2751 	/* Free the IRQ lines */
2752 	free_irq(dev->irq, dev);
2753 	if (priv->wol_irq != dev->irq)
2754 		free_irq(priv->wol_irq, dev);
2755 	if (priv->lpi_irq > 0)
2756 		free_irq(priv->lpi_irq, dev);
2757 
2758 	/* Stop TX/RX DMA and clear the descriptors */
2759 	stmmac_stop_all_dma(priv);
2760 
2761 	/* Release and free the Rx/Tx resources */
2762 	free_dma_desc_resources(priv);
2763 
2764 	/* Disable the MAC Rx/Tx */
2765 	stmmac_mac_set(priv, priv->ioaddr, false);
2766 
2767 	netif_carrier_off(dev);
2768 
2769 	stmmac_release_ptp(priv);
2770 
2771 	return 0;
2772 }
2773 
2774 /**
 *  stmmac_tso_allocator - allocate and fill TSO descriptors for a buffer
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills the descriptors and requests new descriptors
 *  according to the buffer length to fill.
2784  */
2785 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2786 				 int total_len, bool last_segment, u32 queue)
2787 {
2788 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2789 	struct dma_desc *desc;
2790 	u32 buff_size;
2791 	int tmp_len;
2792 
2793 	tmp_len = total_len;
2794 
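	/* Consume one descriptor for each TSO_MAX_BUFF_SIZE chunk of the
	 * remaining payload.
	 */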
2795 	while (tmp_len > 0) {
2796 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2797 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2798 		desc = tx_q->dma_tx + tx_q->cur_tx;
2799 
2800 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2801 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2802 			    TSO_MAX_BUFF_SIZE : tmp_len;
2803 
2804 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2805 				0, 1,
2806 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2807 				0, 0);
2808 
2809 		tmp_len -= TSO_MAX_BUFF_SIZE;
2810 	}
2811 }
2812 
2813 /**
2814  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2815  *  @skb : the socket buffer
2816  *  @dev : device pointer
2817  *  Description: this is the transmit function that is called on TSO frames
2818  *  (support available on GMAC4 and newer chips).
2819  *  Diagram below show the ring programming in case of TSO frames:
2820  *
2821  *  First Descriptor
2822  *   --------
2823  *   | DES0 |---> buffer1 = L2/L3/L4 header
2824  *   | DES1 |---> TCP Payload (can continue on next descr...)
2825  *   | DES2 |---> buffer 1 and 2 len
2826  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2827  *   --------
2828  *	|
2829  *     ...
2830  *	|
2831  *   --------
2832  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2833  *   | DES1 | --|
2834  *   | DES2 | --> buffer 1 and 2 len
2835  *   | DES3 |
2836  *   --------
2837  *
 * The MSS is fixed while TSO is enabled, so the TDES3 context field only
 * needs to be programmed when the MSS value changes.
2839  */
2840 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2841 {
2842 	struct dma_desc *desc, *first, *mss_desc = NULL;
2843 	struct stmmac_priv *priv = netdev_priv(dev);
2844 	int nfrags = skb_shinfo(skb)->nr_frags;
2845 	u32 queue = skb_get_queue_mapping(skb);
2846 	unsigned int first_entry, des;
2847 	struct stmmac_tx_queue *tx_q;
2848 	int tmp_pay_len = 0;
2849 	u32 pay_len, mss;
2850 	u8 proto_hdr_len;
2851 	int i;
2852 
2853 	tx_q = &priv->tx_queue[queue];
2854 
2855 	/* Compute header lengths */
2856 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2857 
	/* Desc availability based on threshold should be safe enough */
2859 	if (unlikely(stmmac_tx_avail(priv, queue) <
2860 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2861 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2862 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2863 								queue));
2864 			/* This is a hard error, log it. */
2865 			netdev_err(priv->dev,
2866 				   "%s: Tx Ring full when queue awake\n",
2867 				   __func__);
2868 		}
2869 		return NETDEV_TX_BUSY;
2870 	}
2871 
2872 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2873 
2874 	mss = skb_shinfo(skb)->gso_size;
2875 
2876 	/* set new MSS value if needed */
2877 	if (mss != tx_q->mss) {
2878 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2879 		stmmac_set_mss(priv, mss_desc, mss);
2880 		tx_q->mss = mss;
2881 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2882 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2883 	}
2884 
2885 	if (netif_msg_tx_queued(priv)) {
2886 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2887 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2888 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2889 			skb->data_len);
2890 	}
2891 
2892 	first_entry = tx_q->cur_tx;
2893 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2894 
2895 	desc = tx_q->dma_tx + first_entry;
2896 	first = desc;
2897 
2898 	/* first descriptor: fill Headers on Buf1 */
2899 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2900 			     DMA_TO_DEVICE);
2901 	if (dma_mapping_error(priv->device, des))
2902 		goto dma_map_err;
2903 
2904 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2905 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2906 
2907 	first->des0 = cpu_to_le32(des);
2908 
2909 	/* Fill start of payload in buff2 of first descriptor */
2910 	if (pay_len)
2911 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2912 
2913 	/* If needed take extra descriptors to fill the remaining payload */
2914 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2915 
2916 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2917 
2918 	/* Prepare fragments */
2919 	for (i = 0; i < nfrags; i++) {
2920 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2921 
2922 		des = skb_frag_dma_map(priv->device, frag, 0,
2923 				       skb_frag_size(frag),
2924 				       DMA_TO_DEVICE);
2925 		if (dma_mapping_error(priv->device, des))
2926 			goto dma_map_err;
2927 
2928 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2929 				     (i == nfrags - 1), queue);
2930 
2931 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2932 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2933 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2934 	}
2935 
2936 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2937 
2938 	/* Only the last descriptor gets to point to the skb. */
2939 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2940 
2941 	/* We've used all descriptors we need for this skb, however,
2942 	 * advance cur_tx so that it references a fresh descriptor.
2943 	 * ndo_start_xmit will fill this descriptor the next time it's
2944 	 * called and stmmac_tx_clean may clean up to this descriptor.
2945 	 */
2946 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2947 
2948 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2949 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2950 			  __func__);
2951 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2952 	}
2953 
2954 	dev->stats.tx_bytes += skb->len;
2955 	priv->xstats.tx_tso_frames++;
2956 	priv->xstats.tx_tso_nfrags += nfrags;
2957 
2958 	/* Manage tx mitigation */
2959 	tx_q->tx_count_frames += nfrags + 1;
2960 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2961 		stmmac_set_tx_ic(priv, desc);
2962 		priv->xstats.tx_set_ic_bit++;
2963 		tx_q->tx_count_frames = 0;
2964 	} else {
2965 		stmmac_tx_timer_arm(priv, queue);
2966 	}
2967 
2968 	skb_tx_timestamp(skb);
2969 
2970 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2971 		     priv->hwts_tx_en)) {
2972 		/* declare that device is doing timestamping */
2973 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2974 		stmmac_enable_tx_timestamp(priv, first);
2975 	}
2976 
2977 	/* Complete the first descriptor before granting the DMA */
2978 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2979 			proto_hdr_len,
2980 			pay_len,
2981 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2982 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2983 
2984 	/* If context desc is used to change MSS */
2985 	if (mss_desc) {
2986 		/* Make sure that first descriptor has been completely
2987 		 * written, including its own bit. This is because MSS is
2988 		 * actually before first descriptor, so we need to make
2989 		 * sure that MSS's own bit is the last thing written.
2990 		 */
2991 		dma_wmb();
2992 		stmmac_set_tx_owner(priv, mss_desc);
2993 	}
2994 
2995 	/* The own bit must be the latest setting done when prepare the
2996 	 * descriptor and then barrier is needed to make sure that
2997 	 * all is coherent before granting the DMA engine.
2998 	 */
2999 	wmb();
3000 
3001 	if (netif_msg_pktdata(priv)) {
3002 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3003 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3004 			tx_q->cur_tx, first, nfrags);
3005 
3006 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3007 
3008 		pr_info(">>> frame to be transmitted: ");
3009 		print_pkt(skb->data, skb_headlen(skb));
3010 	}
3011 
3012 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3013 
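	/* Advance the tail pointer past the last prepared descriptor so the
	 * DMA engine fetches the newly queued entries.
	 */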
3014 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3015 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3016 
3017 	return NETDEV_TX_OK;
3018 
3019 dma_map_err:
3020 	dev_err(priv->device, "Tx dma map failed\n");
3021 	dev_kfree_skb(skb);
3022 	priv->dev->stats.tx_dropped++;
3023 	return NETDEV_TX_OK;
3024 }
3025 
3026 /**
3027  *  stmmac_xmit - Tx entry point of the driver
3028  *  @skb : the socket buffer
3029  *  @dev : device pointer
3030  *  Description : this is the tx entry point of the driver.
3031  *  It programs the chain or the ring and supports oversized frames
3032  *  and SG feature.
3033  */
3034 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3035 {
3036 	struct stmmac_priv *priv = netdev_priv(dev);
3037 	unsigned int nopaged_len = skb_headlen(skb);
3038 	int i, csum_insertion = 0, is_jumbo = 0;
3039 	u32 queue = skb_get_queue_mapping(skb);
3040 	int nfrags = skb_shinfo(skb)->nr_frags;
3041 	int entry;
3042 	unsigned int first_entry;
3043 	struct dma_desc *desc, *first;
3044 	struct stmmac_tx_queue *tx_q;
3045 	unsigned int enh_desc;
3046 	unsigned int des;
3047 
3048 	tx_q = &priv->tx_queue[queue];
3049 
3050 	if (priv->tx_path_in_lpi_mode)
3051 		stmmac_disable_eee_mode(priv);
3052 
3053 	/* Manage oversized TCP frames for GMAC4 device */
3054 	if (skb_is_gso(skb) && priv->tso) {
3055 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3056 			/*
3057 			 * There is no way to determine the number of TSO
			 * capable Queues. Let's always use Queue 0
3059 			 * because if TSO is supported then at least this
3060 			 * one will be capable.
3061 			 */
3062 			skb_set_queue_mapping(skb, 0);
3063 
3064 			return stmmac_tso_xmit(skb, dev);
3065 		}
3066 	}
3067 
3068 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3069 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3070 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3071 								queue));
3072 			/* This is a hard error, log it. */
3073 			netdev_err(priv->dev,
3074 				   "%s: Tx Ring full when queue awake\n",
3075 				   __func__);
3076 		}
3077 		return NETDEV_TX_BUSY;
3078 	}
3079 
3080 	entry = tx_q->cur_tx;
3081 	first_entry = entry;
3082 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3083 
3084 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3085 
3086 	if (likely(priv->extend_desc))
3087 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3088 	else
3089 		desc = tx_q->dma_tx + entry;
3090 
3091 	first = desc;
3092 
3093 	enh_desc = priv->plat->enh_desc;
3094 	/* To program the descriptors according to the size of the frame */
3095 	if (enh_desc)
3096 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3097 
3098 	if (unlikely(is_jumbo)) {
3099 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3100 		if (unlikely(entry < 0) && (entry != -EINVAL))
3101 			goto dma_map_err;
3102 	}
3103 
3104 	for (i = 0; i < nfrags; i++) {
3105 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3106 		int len = skb_frag_size(frag);
3107 		bool last_segment = (i == (nfrags - 1));
3108 
3109 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3110 		WARN_ON(tx_q->tx_skbuff[entry]);
3111 
3112 		if (likely(priv->extend_desc))
3113 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3114 		else
3115 			desc = tx_q->dma_tx + entry;
3116 
3117 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3118 				       DMA_TO_DEVICE);
3119 		if (dma_mapping_error(priv->device, des))
3120 			goto dma_map_err; /* should reuse desc w/o issues */
3121 
3122 		tx_q->tx_skbuff_dma[entry].buf = des;
3123 
3124 		stmmac_set_desc_addr(priv, desc, des);
3125 
3126 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3127 		tx_q->tx_skbuff_dma[entry].len = len;
3128 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3129 
3130 		/* Prepare the descriptor and set the own bit too */
3131 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3132 				priv->mode, 1, last_segment, skb->len);
3133 	}
3134 
3135 	/* Only the last descriptor gets to point to the skb. */
3136 	tx_q->tx_skbuff[entry] = skb;
3137 
3138 	/* We've used all descriptors we need for this skb, however,
3139 	 * advance cur_tx so that it references a fresh descriptor.
3140 	 * ndo_start_xmit will fill this descriptor the next time it's
3141 	 * called and stmmac_tx_clean may clean up to this descriptor.
3142 	 */
3143 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3144 	tx_q->cur_tx = entry;
3145 
3146 	if (netif_msg_pktdata(priv)) {
3147 		void *tx_head;
3148 
3149 		netdev_dbg(priv->dev,
3150 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3151 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3152 			   entry, first, nfrags);
3153 
3154 		if (priv->extend_desc)
3155 			tx_head = (void *)tx_q->dma_etx;
3156 		else
3157 			tx_head = (void *)tx_q->dma_tx;
3158 
3159 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3160 
3161 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3162 		print_pkt(skb->data, skb->len);
3163 	}
3164 
3165 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3166 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3167 			  __func__);
3168 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3169 	}
3170 
3171 	dev->stats.tx_bytes += skb->len;
3172 
3173 	/* According to the coalesce parameter the IC bit for the latest
3174 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
3176 	 * element in case of no SG.
3177 	 */
3178 	tx_q->tx_count_frames += nfrags + 1;
3179 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3180 		stmmac_set_tx_ic(priv, desc);
3181 		priv->xstats.tx_set_ic_bit++;
3182 		tx_q->tx_count_frames = 0;
3183 	} else {
3184 		stmmac_tx_timer_arm(priv, queue);
3185 	}
3186 
3187 	skb_tx_timestamp(skb);
3188 
3189 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3190 	 * problems because all the descriptors are actually ready to be
3191 	 * passed to the DMA engine.
3192 	 */
3193 	if (likely(!is_jumbo)) {
3194 		bool last_segment = (nfrags == 0);
3195 
3196 		des = dma_map_single(priv->device, skb->data,
3197 				     nopaged_len, DMA_TO_DEVICE);
3198 		if (dma_mapping_error(priv->device, des))
3199 			goto dma_map_err;
3200 
3201 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3202 
3203 		stmmac_set_desc_addr(priv, first, des);
3204 
3205 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3206 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3207 
3208 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3209 			     priv->hwts_tx_en)) {
3210 			/* declare that device is doing timestamping */
3211 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3212 			stmmac_enable_tx_timestamp(priv, first);
3213 		}
3214 
3215 		/* Prepare the first descriptor setting the OWN bit too */
3216 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3217 				csum_insertion, priv->mode, 1, last_segment,
3218 				skb->len);
3219 	} else {
3220 		stmmac_set_tx_owner(priv, first);
3221 	}
3222 
3223 	/* The own bit must be the latest setting done when prepare the
3224 	 * descriptor and then barrier is needed to make sure that
3225 	 * all is coherent before granting the DMA engine.
3226 	 */
3227 	wmb();
3228 
3229 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3230 
3231 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3232 
3233 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3234 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3235 
3236 	return NETDEV_TX_OK;
3237 
3238 dma_map_err:
3239 	netdev_err(priv->dev, "Tx DMA map failed\n");
3240 	dev_kfree_skb(skb);
3241 	priv->dev->stats.tx_dropped++;
3242 	return NETDEV_TX_OK;
3243 }
3244 
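/* If the corresponding VLAN RX offload feature is enabled, strip the
 * 802.1Q/802.1AD header from the received frame and record the VLAN ID in
 * the skb via the HW-accelerated tag helpers.
 */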
3245 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3246 {
3247 	struct vlan_ethhdr *veth;
3248 	__be16 vlan_proto;
3249 	u16 vlanid;
3250 
3251 	veth = (struct vlan_ethhdr *)skb->data;
3252 	vlan_proto = veth->h_vlan_proto;
3253 
3254 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3255 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3256 	    (vlan_proto == htons(ETH_P_8021AD) &&
3257 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3258 		/* pop the vlan tag */
3259 		vlanid = ntohs(veth->h_vlan_TCI);
3260 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3261 		skb_pull(skb, VLAN_HLEN);
3262 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3263 	}
3264 }
3265 
3266 
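/* Return true when zero-copy RX should be temporarily avoided (e.g. after
 * skb allocation failures in the refill path) so that the copy-based RX
 * path is used instead.
 */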
3267 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3268 {
3269 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3270 		return 0;
3271 
3272 	return 1;
3273 }
3274 
3275 /**
3276  * stmmac_rx_refill - refill used skb preallocated buffers
3277  * @priv: driver private structure
3278  * @queue: RX queue index
 * Description : this reallocates the skbs for the reception process
 * that is based on zero-copy.
3281  */
3282 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3283 {
3284 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3285 	int dirty = stmmac_rx_dirty(priv, queue);
3286 	unsigned int entry = rx_q->dirty_rx;
3287 
3288 	int bfsize = priv->dma_buf_sz;
3289 
3290 	while (dirty-- > 0) {
3291 		struct dma_desc *p;
3292 
3293 		if (priv->extend_desc)
3294 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3295 		else
3296 			p = rx_q->dma_rx + entry;
3297 
3298 		if (likely(!rx_q->rx_skbuff[entry])) {
3299 			struct sk_buff *skb;
3300 
3301 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3302 			if (unlikely(!skb)) {
3303 				/* so for a while no zero-copy! */
3304 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3305 				if (unlikely(net_ratelimit()))
3306 					dev_err(priv->device,
3307 						"fail to alloc skb entry %d\n",
3308 						entry);
3309 				break;
3310 			}
3311 
3312 			rx_q->rx_skbuff[entry] = skb;
3313 			rx_q->rx_skbuff_dma[entry] =
3314 			    dma_map_single(priv->device, skb->data, bfsize,
3315 					   DMA_FROM_DEVICE);
3316 			if (dma_mapping_error(priv->device,
3317 					      rx_q->rx_skbuff_dma[entry])) {
3318 				netdev_err(priv->dev, "Rx DMA map failed\n");
3319 				dev_kfree_skb(skb);
3320 				break;
3321 			}
3322 
3323 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3324 			stmmac_refill_desc3(priv, rx_q, p);
3325 
3326 			if (rx_q->rx_zeroc_thresh > 0)
3327 				rx_q->rx_zeroc_thresh--;
3328 
3329 			netif_dbg(priv, rx_status, priv->dev,
3330 				  "refill entry #%d\n", entry);
3331 		}
3332 		dma_wmb();
3333 
3334 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3335 
3336 		dma_wmb();
3337 
3338 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3339 	}
3340 	rx_q->dirty_rx = entry;
3341 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3342 }
3343 
3344 /**
3345  * stmmac_rx - manage the receive process
3346  * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description :  this is the function called by the napi poll method.
3350  * It gets all the frames inside the ring.
3351  */
3352 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3353 {
3354 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3355 	struct stmmac_channel *ch = &priv->channel[queue];
3356 	unsigned int next_entry = rx_q->cur_rx;
3357 	int coe = priv->hw->rx_csum;
3358 	unsigned int count = 0;
3359 	bool xmac;
3360 
3361 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3362 
3363 	if (netif_msg_rx_status(priv)) {
3364 		void *rx_head;
3365 
3366 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3367 		if (priv->extend_desc)
3368 			rx_head = (void *)rx_q->dma_erx;
3369 		else
3370 			rx_head = (void *)rx_q->dma_rx;
3371 
3372 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3373 	}
3374 	while (count < limit) {
3375 		int entry, status;
3376 		struct dma_desc *p;
3377 		struct dma_desc *np;
3378 
3379 		entry = next_entry;
3380 
3381 		if (priv->extend_desc)
3382 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3383 		else
3384 			p = rx_q->dma_rx + entry;
3385 
3386 		/* read the status of the incoming frame */
3387 		status = stmmac_rx_status(priv, &priv->dev->stats,
3388 				&priv->xstats, p);
3389 		/* check if managed by the DMA otherwise go ahead */
3390 		if (unlikely(status & dma_own))
3391 			break;
3392 
3393 		count++;
3394 
3395 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3396 		next_entry = rx_q->cur_rx;
3397 
3398 		if (priv->extend_desc)
3399 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3400 		else
3401 			np = rx_q->dma_rx + next_entry;
3402 
3403 		prefetch(np);
3404 
3405 		if (priv->extend_desc)
3406 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3407 					&priv->xstats, rx_q->dma_erx + entry);
3408 		if (unlikely(status == discard_frame)) {
3409 			priv->dev->stats.rx_errors++;
3410 			if (priv->hwts_rx_en && !priv->extend_desc) {
3411 				/* DESC2 & DESC3 will be overwritten by device
3412 				 * with timestamp value, hence reinitialize
3413 				 * them in stmmac_rx_refill() function so that
3414 				 * device can reuse it.
3415 				 */
3416 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3417 				rx_q->rx_skbuff[entry] = NULL;
3418 				dma_unmap_single(priv->device,
3419 						 rx_q->rx_skbuff_dma[entry],
3420 						 priv->dma_buf_sz,
3421 						 DMA_FROM_DEVICE);
3422 			}
3423 		} else {
3424 			struct sk_buff *skb;
3425 			int frame_len;
3426 			unsigned int des;
3427 
3428 			stmmac_get_desc_addr(priv, p, &des);
3429 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3430 
3431 			/*  If frame length is greater than skb buffer size
3432 			 *  (preallocated during init) then the packet is
3433 			 *  ignored
3434 			 */
3435 			if (frame_len > priv->dma_buf_sz) {
3436 				if (net_ratelimit())
3437 					netdev_err(priv->dev,
3438 						   "len %d larger than size (%d)\n",
3439 						   frame_len, priv->dma_buf_sz);
3440 				priv->dev->stats.rx_length_errors++;
3441 				continue;
3442 			}
3443 
3444 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3445 			 * Type frames (LLC/LLC-SNAP)
3446 			 *
3447 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3448 			 * feature is always disabled and packets need to be
3449 			 * stripped manually.
3450 			 */
3451 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3452 			    unlikely(status != llc_snap))
3453 				frame_len -= ETH_FCS_LEN;
3454 
3455 			if (netif_msg_rx_status(priv)) {
3456 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3457 					   p, entry, des);
3458 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3459 					   frame_len, status);
3460 			}
3461 
			/* Zero-copy is always used for all sizes in case of
			 * GMAC4/XGMAC because the used descriptors always
			 * need to be refilled.
			 */
3466 			if (unlikely(!xmac &&
3467 				     ((frame_len < priv->rx_copybreak) ||
3468 				     stmmac_rx_threshold_count(rx_q)))) {
3469 				skb = netdev_alloc_skb_ip_align(priv->dev,
3470 								frame_len);
3471 				if (unlikely(!skb)) {
3472 					if (net_ratelimit())
3473 						dev_warn(priv->device,
3474 							 "packet dropped\n");
3475 					priv->dev->stats.rx_dropped++;
3476 					continue;
3477 				}
3478 
3479 				dma_sync_single_for_cpu(priv->device,
3480 							rx_q->rx_skbuff_dma
3481 							[entry], frame_len,
3482 							DMA_FROM_DEVICE);
3483 				skb_copy_to_linear_data(skb,
3484 							rx_q->
3485 							rx_skbuff[entry]->data,
3486 							frame_len);
3487 
3488 				skb_put(skb, frame_len);
3489 				dma_sync_single_for_device(priv->device,
3490 							   rx_q->rx_skbuff_dma
3491 							   [entry], frame_len,
3492 							   DMA_FROM_DEVICE);
3493 			} else {
3494 				skb = rx_q->rx_skbuff[entry];
3495 				if (unlikely(!skb)) {
3496 					if (net_ratelimit())
3497 						netdev_err(priv->dev,
3498 							   "%s: Inconsistent Rx chain\n",
3499 							   priv->dev->name);
3500 					priv->dev->stats.rx_dropped++;
3501 					continue;
3502 				}
3503 				prefetch(skb->data - NET_IP_ALIGN);
3504 				rx_q->rx_skbuff[entry] = NULL;
3505 				rx_q->rx_zeroc_thresh++;
3506 
3507 				skb_put(skb, frame_len);
3508 				dma_unmap_single(priv->device,
3509 						 rx_q->rx_skbuff_dma[entry],
3510 						 priv->dma_buf_sz,
3511 						 DMA_FROM_DEVICE);
3512 			}
3513 
3514 			if (netif_msg_pktdata(priv)) {
3515 				netdev_dbg(priv->dev, "frame received (%d bytes)",
3516 					   frame_len);
3517 				print_pkt(skb->data, frame_len);
3518 			}
3519 
3520 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3521 
3522 			stmmac_rx_vlan(priv->dev, skb);
3523 
3524 			skb->protocol = eth_type_trans(skb, priv->dev);
3525 
3526 			if (unlikely(!coe))
3527 				skb_checksum_none_assert(skb);
3528 			else
3529 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3530 
3531 			napi_gro_receive(&ch->rx_napi, skb);
3532 
3533 			priv->dev->stats.rx_packets++;
3534 			priv->dev->stats.rx_bytes += frame_len;
3535 		}
3536 	}
3537 
3538 	stmmac_rx_refill(priv, queue);
3539 
3540 	priv->xstats.rx_pkt_n += count;
3541 
3542 	return count;
3543 }
3544 
3545 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3546 {
3547 	struct stmmac_channel *ch =
3548 		container_of(napi, struct stmmac_channel, rx_napi);
3549 	struct stmmac_priv *priv = ch->priv_data;
3550 	u32 chan = ch->index;
3551 	int work_done;
3552 
3553 	priv->xstats.napi_poll++;
3554 
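	/* Process up to 'budget' frames; re-enable the channel DMA interrupt
	 * only once NAPI has completed with budget to spare.
	 */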
3555 	work_done = stmmac_rx(priv, budget, chan);
3556 	if (work_done < budget && napi_complete_done(napi, work_done))
3557 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3558 	return work_done;
3559 }
3560 
3561 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3562 {
3563 	struct stmmac_channel *ch =
3564 		container_of(napi, struct stmmac_channel, tx_napi);
3565 	struct stmmac_priv *priv = ch->priv_data;
3566 	struct stmmac_tx_queue *tx_q;
3567 	u32 chan = ch->index;
3568 	int work_done;
3569 
3570 	priv->xstats.napi_poll++;
3571 
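	/* TX completion may clean up to the whole ring (DMA_TX_SIZE entries);
	 * clamp the result to the NAPI budget so the napi_complete_done()
	 * accounting below stays valid.
	 */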
3572 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3573 	work_done = min(work_done, budget);
3574 
3575 	if (work_done < budget && napi_complete_done(napi, work_done))
3576 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3577 
3578 	/* Force transmission restart */
3579 	tx_q = &priv->tx_queue[chan];
3580 	if (tx_q->cur_tx != tx_q->dirty_tx) {
3581 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
3582 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3583 				       chan);
3584 	}
3585 
3586 	return work_done;
3587 }
3588 
3589 /**
3590  *  stmmac_tx_timeout
3591  *  @dev : Pointer to net device structure
3592  *  Description: this function is called when a packet transmission fails to
3593  *   complete within a reasonable time. The driver will mark the error in the
3594  *   netdev structure and arrange for the device to be reset to a sane state
3595  *   in order to transmit a new packet.
3596  */
3597 static void stmmac_tx_timeout(struct net_device *dev)
3598 {
3599 	struct stmmac_priv *priv = netdev_priv(dev);
3600 
3601 	stmmac_global_err(priv);
3602 }
3603 
3604 /**
3605  *  stmmac_set_rx_mode - entry point for multicast addressing
3606  *  @dev : pointer to the device structure
3607  *  Description:
3608  *  This function is a driver entry point which gets called by the kernel
3609  *  whenever multicast addresses must be enabled/disabled.
3610  *  Return value:
3611  *  void.
3612  */
3613 static void stmmac_set_rx_mode(struct net_device *dev)
3614 {
3615 	struct stmmac_priv *priv = netdev_priv(dev);
3616 
3617 	stmmac_set_filter(priv, priv->hw, dev);
3618 }
3619 
3620 /**
3621  *  stmmac_change_mtu - entry point to change MTU size for the device.
3622  *  @dev : device pointer.
3623  *  @new_mtu : the new MTU size for the device.
3624  *  Description: the Maximum Transmission Unit (MTU) is used by the network
3625  *  layer to drive packet transmission. Ethernet has a default MTU of 1500
3626  *  octets (ETH_DATA_LEN). This value can be changed with ip link or ifconfig.
3627  *  Return value:
3628  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3629  *  file on failure.
3630  */
3631 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3632 {
3633 	struct stmmac_priv *priv = netdev_priv(dev);
3634 
3635 	if (netif_running(dev)) {
3636 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3637 		return -EBUSY;
3638 	}
3639 
3640 	dev->mtu = new_mtu;
3641 
3642 	netdev_update_features(dev);
3643 
3644 	return 0;
3645 }
3646 
3647 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3648 					     netdev_features_t features)
3649 {
3650 	struct stmmac_priv *priv = netdev_priv(dev);
3651 
3652 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3653 		features &= ~NETIF_F_RXCSUM;
3654 
3655 	if (!priv->plat->tx_coe)
3656 		features &= ~NETIF_F_CSUM_MASK;
3657 
3658 	/* Some GMAC devices have bugged Jumbo frame support that
3659 	 * requires the Tx COE to be disabled for oversized frames
3660 	 * (due to limited buffer sizes). In this case we disable
3661 	 * the TX csum insertion in the TDES and do not use SF.
3662 	 */
3663 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3664 		features &= ~NETIF_F_CSUM_MASK;
3665 
3666 	/* Disable TSO if requested by ethtool */
3667 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3668 		if (features & NETIF_F_TSO)
3669 			priv->tso = true;
3670 		else
3671 			priv->tso = false;
3672 	}
3673 
3674 	return features;
3675 }
3676 
3677 static int stmmac_set_features(struct net_device *netdev,
3678 			       netdev_features_t features)
3679 {
3680 	struct stmmac_priv *priv = netdev_priv(netdev);
3681 
3682 	/* Keep the COE Type if RX checksum offload is supported */
3683 	if (features & NETIF_F_RXCSUM)
3684 		priv->hw->rx_csum = priv->plat->rx_coe;
3685 	else
3686 		priv->hw->rx_csum = 0;
3687 	/* No check needed because rx_coe has already been set and will be
3688 	 * fixed up if there is an issue.
3689 	 */
3690 	stmmac_rx_ipc(priv, priv->hw);
3691 
3692 	return 0;
3693 }
3694 
3695 /**
3696  *  stmmac_interrupt - main ISR
3697  *  @irq: interrupt number.
3698  *  @dev_id: to pass the net device pointer.
3699  *  Description: this is the main driver interrupt service routine.
3700  *  It can call:
3701  *  o DMA service routine (to manage incoming frame reception and transmission
3702  *    status)
3703  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3704  *    interrupts.
3705  */
3706 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3707 {
3708 	struct net_device *dev = (struct net_device *)dev_id;
3709 	struct stmmac_priv *priv = netdev_priv(dev);
3710 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3711 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3712 	u32 queues_count;
3713 	u32 queue;
3714 	bool xmac;
3715 
3716 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3717 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3718 
3719 	if (priv->irq_wake)
3720 		pm_wakeup_event(priv->device, 0);
3721 
3722 	if (unlikely(!dev)) {
3723 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3724 		return IRQ_NONE;
3725 	}
3726 
3727 	/* Check if adapter is up */
3728 	if (test_bit(STMMAC_DOWN, &priv->state))
3729 		return IRQ_HANDLED;
3730 	/* Check if a fatal error happened */
3731 	if (stmmac_safety_feat_interrupt(priv))
3732 		return IRQ_HANDLED;
3733 
3734 	/* To handle GMAC own interrupts */
3735 	if ((priv->plat->has_gmac) || xmac) {
3736 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3737 		int mtl_status;
3738 
3739 		if (unlikely(status)) {
3740 			/* For LPI we need to save the tx status */
3741 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3742 				priv->tx_path_in_lpi_mode = true;
3743 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3744 				priv->tx_path_in_lpi_mode = false;
3745 		}
3746 
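		/* Check the MTL interrupt status of each queue and, on RX
		 * FIFO overflow, restart the RX DMA by re-writing the tail
		 * pointer.
		 */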
3747 		for (queue = 0; queue < queues_count; queue++) {
3748 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3749 
3750 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3751 								queue);
3752 			if (mtl_status != -EINVAL)
3753 				status |= mtl_status;
3754 
3755 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3756 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3757 						       rx_q->rx_tail_addr,
3758 						       queue);
3759 		}
3760 
3761 		/* PCS link status */
3762 		if (priv->hw->pcs) {
3763 			if (priv->xstats.pcs_link)
3764 				netif_carrier_on(dev);
3765 			else
3766 				netif_carrier_off(dev);
3767 		}
3768 	}
3769 
3770 	/* To handle DMA interrupts */
3771 	stmmac_dma_interrupt(priv);
3772 
3773 	return IRQ_HANDLED;
3774 }
3775 
3776 #ifdef CONFIG_NET_POLL_CONTROLLER
3777 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3778  * to allow network I/O with interrupts disabled.
3779  */
3780 static void stmmac_poll_controller(struct net_device *dev)
3781 {
3782 	disable_irq(dev->irq);
3783 	stmmac_interrupt(dev->irq, dev);
3784 	enable_irq(dev->irq);
3785 }
3786 #endif
3787 
3788 /**
3789  *  stmmac_ioctl - Entry point for the Ioctl
3790  *  @dev: Device pointer.
3791  *  @rq: An IOCTL-specific structure that can contain a pointer to
3792  *  a proprietary structure used to pass information to the driver.
3793  *  @cmd: IOCTL command
3794  *  Description:
3795  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3796  */
3797 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3798 {
3799 	int ret = -EOPNOTSUPP;
3800 
3801 	if (!netif_running(dev))
3802 		return -EINVAL;
3803 
3804 	switch (cmd) {
3805 	case SIOCGMIIPHY:
3806 	case SIOCGMIIREG:
3807 	case SIOCSMIIREG:
3808 		if (!dev->phydev)
3809 			return -EINVAL;
3810 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3811 		break;
3812 	case SIOCSHWTSTAMP:
3813 		ret = stmmac_hwtstamp_set(dev, rq);
3814 		break;
3815 	case SIOCGHWTSTAMP:
3816 		ret = stmmac_hwtstamp_get(dev, rq);
3817 		break;
3818 	default:
3819 		break;
3820 	}
3821 
3822 	return ret;
3823 }
3824 
3825 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3826 				    void *cb_priv)
3827 {
3828 	struct stmmac_priv *priv = cb_priv;
3829 	int ret = -EOPNOTSUPP;
3830 
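	/* Disable NAPI on all queues while the classifier is reconfigured. */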
3831 	stmmac_disable_all_queues(priv);
3832 
3833 	switch (type) {
3834 	case TC_SETUP_CLSU32:
3835 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3836 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3837 		break;
3838 	default:
3839 		break;
3840 	}
3841 
3842 	stmmac_enable_all_queues(priv);
3843 	return ret;
3844 }
3845 
3846 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3847 				 struct tc_block_offload *f)
3848 {
3849 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3850 		return -EOPNOTSUPP;
3851 
3852 	switch (f->command) {
3853 	case TC_BLOCK_BIND:
3854 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3855 				priv, priv, f->extack);
3856 	case TC_BLOCK_UNBIND:
3857 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3858 		return 0;
3859 	default:
3860 		return -EOPNOTSUPP;
3861 	}
3862 }
3863 
3864 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3865 			   void *type_data)
3866 {
3867 	struct stmmac_priv *priv = netdev_priv(ndev);
3868 
3869 	switch (type) {
3870 	case TC_SETUP_BLOCK:
3871 		return stmmac_setup_tc_block(priv, type_data);
3872 	case TC_SETUP_QDISC_CBS:
3873 		return stmmac_tc_setup_cbs(priv, priv, type_data);
3874 	default:
3875 		return -EOPNOTSUPP;
3876 	}
3877 }
3878 
3879 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3880 {
3881 	struct stmmac_priv *priv = netdev_priv(ndev);
3882 	int ret = 0;
3883 
3884 	ret = eth_mac_addr(ndev, addr);
3885 	if (ret)
3886 		return ret;
3887 
3888 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3889 
3890 	return ret;
3891 }
3892 
3893 #ifdef CONFIG_DEBUG_FS
3894 static struct dentry *stmmac_fs_dir;
3895 
3896 static void sysfs_display_ring(void *head, int size, int extend_desc,
3897 			       struct seq_file *seq)
3898 {
3899 	int i;
3900 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3901 	struct dma_desc *p = (struct dma_desc *)head;
3902 
3903 	for (i = 0; i < size; i++) {
3904 		if (extend_desc) {
3905 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3906 				   i, (unsigned int)virt_to_phys(ep),
3907 				   le32_to_cpu(ep->basic.des0),
3908 				   le32_to_cpu(ep->basic.des1),
3909 				   le32_to_cpu(ep->basic.des2),
3910 				   le32_to_cpu(ep->basic.des3));
3911 			ep++;
3912 		} else {
3913 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3914 				   i, (unsigned int)virt_to_phys(p),
3915 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3916 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3917 			p++;
3918 		}
3919 		seq_printf(seq, "\n");
3920 	}
3921 }
3922 
3923 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3924 {
3925 	struct net_device *dev = seq->private;
3926 	struct stmmac_priv *priv = netdev_priv(dev);
3927 	u32 rx_count = priv->plat->rx_queues_to_use;
3928 	u32 tx_count = priv->plat->tx_queues_to_use;
3929 	u32 queue;
3930 
3931 	if ((dev->flags & IFF_UP) == 0)
3932 		return 0;
3933 
3934 	for (queue = 0; queue < rx_count; queue++) {
3935 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3936 
3937 		seq_printf(seq, "RX Queue %d:\n", queue);
3938 
3939 		if (priv->extend_desc) {
3940 			seq_printf(seq, "Extended descriptor ring:\n");
3941 			sysfs_display_ring((void *)rx_q->dma_erx,
3942 					   DMA_RX_SIZE, 1, seq);
3943 		} else {
3944 			seq_printf(seq, "Descriptor ring:\n");
3945 			sysfs_display_ring((void *)rx_q->dma_rx,
3946 					   DMA_RX_SIZE, 0, seq);
3947 		}
3948 	}
3949 
3950 	for (queue = 0; queue < tx_count; queue++) {
3951 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3952 
3953 		seq_printf(seq, "TX Queue %d:\n", queue);
3954 
3955 		if (priv->extend_desc) {
3956 			seq_printf(seq, "Extended descriptor ring:\n");
3957 			sysfs_display_ring((void *)tx_q->dma_etx,
3958 					   DMA_TX_SIZE, 1, seq);
3959 		} else {
3960 			seq_printf(seq, "Descriptor ring:\n");
3961 			sysfs_display_ring((void *)tx_q->dma_tx,
3962 					   DMA_TX_SIZE, 0, seq);
3963 		}
3964 	}
3965 
3966 	return 0;
3967 }
3968 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
3969 
3970 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3971 {
3972 	struct net_device *dev = seq->private;
3973 	struct stmmac_priv *priv = netdev_priv(dev);
3974 
3975 	if (!priv->hw_cap_support) {
3976 		seq_printf(seq, "DMA HW features not supported\n");
3977 		return 0;
3978 	}
3979 
3980 	seq_printf(seq, "==============================\n");
3981 	seq_printf(seq, "\tDMA HW features\n");
3982 	seq_printf(seq, "==============================\n");
3983 
3984 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3985 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3986 	seq_printf(seq, "\t1000 Mbps: %s\n",
3987 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3988 	seq_printf(seq, "\tHalf duplex: %s\n",
3989 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3990 	seq_printf(seq, "\tHash Filter: %s\n",
3991 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3992 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3993 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3994 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3995 		   (priv->dma_cap.pcs) ? "Y" : "N");
3996 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3997 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3998 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3999 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4000 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4001 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4002 	seq_printf(seq, "\tRMON module: %s\n",
4003 		   (priv->dma_cap.rmon) ? "Y" : "N");
4004 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4005 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4006 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4007 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
4008 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4009 		   (priv->dma_cap.eee) ? "Y" : "N");
4010 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4011 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4012 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4013 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4014 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4015 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4016 	} else {
4017 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4018 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4019 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4020 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4021 	}
4022 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4023 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4024 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4025 		   priv->dma_cap.number_rx_channel);
4026 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4027 		   priv->dma_cap.number_tx_channel);
4028 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4029 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4030 
4031 	return 0;
4032 }
4033 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4034 
4035 static int stmmac_init_fs(struct net_device *dev)
4036 {
4037 	struct stmmac_priv *priv = netdev_priv(dev);
4038 
4039 	/* Create per netdev entries */
4040 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4041 
4042 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4043 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4044 
4045 		return -ENOMEM;
4046 	}
4047 
4048 	/* Entry to report DMA RX/TX rings */
4049 	priv->dbgfs_rings_status =
4050 		debugfs_create_file("descriptors_status", 0444,
4051 				    priv->dbgfs_dir, dev,
4052 				    &stmmac_rings_status_fops);
4053 
4054 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4055 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4056 		debugfs_remove_recursive(priv->dbgfs_dir);
4057 
4058 		return -ENOMEM;
4059 	}
4060 
4061 	/* Entry to report the DMA HW features */
4062 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4063 						  priv->dbgfs_dir,
4064 						  dev, &stmmac_dma_cap_fops);
4065 
4066 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4067 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4068 		debugfs_remove_recursive(priv->dbgfs_dir);
4069 
4070 		return -ENOMEM;
4071 	}
4072 
4073 	return 0;
4074 }
4075 
4076 static void stmmac_exit_fs(struct net_device *dev)
4077 {
4078 	struct stmmac_priv *priv = netdev_priv(dev);
4079 
4080 	debugfs_remove_recursive(priv->dbgfs_dir);
4081 }
4082 #endif /* CONFIG_DEBUG_FS */
4083 
4084 static const struct net_device_ops stmmac_netdev_ops = {
4085 	.ndo_open = stmmac_open,
4086 	.ndo_start_xmit = stmmac_xmit,
4087 	.ndo_stop = stmmac_release,
4088 	.ndo_change_mtu = stmmac_change_mtu,
4089 	.ndo_fix_features = stmmac_fix_features,
4090 	.ndo_set_features = stmmac_set_features,
4091 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4092 	.ndo_tx_timeout = stmmac_tx_timeout,
4093 	.ndo_do_ioctl = stmmac_ioctl,
4094 	.ndo_setup_tc = stmmac_setup_tc,
4095 #ifdef CONFIG_NET_POLL_CONTROLLER
4096 	.ndo_poll_controller = stmmac_poll_controller,
4097 #endif
4098 	.ndo_set_mac_address = stmmac_set_mac_address,
4099 };
4100 
4101 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4102 {
4103 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4104 		return;
4105 	if (test_bit(STMMAC_DOWN, &priv->state))
4106 		return;
4107 
4108 	netdev_err(priv->dev, "Reset adapter.\n");
4109 
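	/* Take rtnl and serialize against concurrent resets before the
	 * device is closed and re-opened.
	 */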
4110 	rtnl_lock();
4111 	netif_trans_update(priv->dev);
4112 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4113 		usleep_range(1000, 2000);
4114 
4115 	set_bit(STMMAC_DOWN, &priv->state);
4116 	dev_close(priv->dev);
4117 	dev_open(priv->dev, NULL);
4118 	clear_bit(STMMAC_DOWN, &priv->state);
4119 	clear_bit(STMMAC_RESETING, &priv->state);
4120 	rtnl_unlock();
4121 }
4122 
4123 static void stmmac_service_task(struct work_struct *work)
4124 {
4125 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4126 			service_task);
4127 
4128 	stmmac_reset_subtask(priv);
4129 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4130 }
4131 
4132 /**
4133  *  stmmac_hw_init - Init the MAC device
4134  *  @priv: driver private structure
4135  *  Description: this function is to configure the MAC device according to
4136  *  Description: this function configures the MAC device according to
4137  *  platform parameters or the HW capability register. It prepares the
4138  *  driver to use either ring or chain mode and to set up either enhanced or
4139  */
4140 static int stmmac_hw_init(struct stmmac_priv *priv)
4141 {
4142 	int ret;
4143 
4144 	/* dwmac-sun8i only works in chain mode */
4145 	if (priv->plat->has_sun8i)
4146 		chain_mode = 1;
4147 	priv->chain_mode = chain_mode;
4148 
4149 	/* Initialize HW Interface */
4150 	ret = stmmac_hwif_init(priv);
4151 	if (ret)
4152 		return ret;
4153 
4154 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4155 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4156 	if (priv->hw_cap_support) {
4157 		dev_info(priv->device, "DMA HW capability register supported\n");
4158 
4159 		/* We can override some gmac/dma configuration fields
4160 		 * (e.g. enh_desc, tx_coe) that are passed through the
4161 		 * platform with the values from the HW capability
4162 		 * register (if supported).
4163 		 */
4164 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4165 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4166 		priv->hw->pmt = priv->plat->pmt;
4167 
4168 		/* TXCOE doesn't work in thresh DMA mode */
4169 		if (priv->plat->force_thresh_dma_mode)
4170 			priv->plat->tx_coe = 0;
4171 		else
4172 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4173 
4174 		/* In case of GMAC4 rx_coe is from HW cap register. */
4175 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4176 
4177 		if (priv->dma_cap.rx_coe_type2)
4178 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4179 		else if (priv->dma_cap.rx_coe_type1)
4180 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4181 
4182 	} else {
4183 		dev_info(priv->device, "No HW DMA feature register supported\n");
4184 	}
4185 
4186 	if (priv->plat->rx_coe) {
4187 		priv->hw->rx_csum = priv->plat->rx_coe;
4188 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4189 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4190 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4191 	}
4192 	if (priv->plat->tx_coe)
4193 		dev_info(priv->device, "TX Checksum insertion supported\n");
4194 
4195 	if (priv->plat->pmt) {
4196 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4197 		device_set_wakeup_capable(priv->device, 1);
4198 	}
4199 
4200 	if (priv->dma_cap.tsoen)
4201 		dev_info(priv->device, "TSO supported\n");
4202 
4203 	/* Run HW quirks, if any */
4204 	if (priv->hwif_quirks) {
4205 		ret = priv->hwif_quirks(priv);
4206 		if (ret)
4207 			return ret;
4208 	}
4209 
4210 	/* The Rx Watchdog is available on cores newer than 3.40.
4211 	 * In some cases, for example on bugged HW, this feature
4212 	 * has to be disabled; this can be done by passing the
4213 	 * riwt_off field from the platform.
4214 	 */
4215 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4216 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4217 		priv->use_riwt = 1;
4218 		dev_info(priv->device,
4219 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4220 	}
4221 
4222 	return 0;
4223 }
4224 
4225 /**
4226  * stmmac_dvr_probe
4227  * @device: device pointer
4228  * @plat_dat: platform data pointer
4229  * @res: stmmac resource pointer
4230  * Description: this is the main probe function: it allocates the
4231  * net_device via alloc_etherdev and sets up the private structure.
4232  * Return:
4233  * returns 0 on success, otherwise errno.
4234  */
4235 int stmmac_dvr_probe(struct device *device,
4236 		     struct plat_stmmacenet_data *plat_dat,
4237 		     struct stmmac_resources *res)
4238 {
4239 	struct net_device *ndev = NULL;
4240 	struct stmmac_priv *priv;
4241 	u32 queue, maxq;
4242 	int ret = 0;
4243 
4244 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4245 				  MTL_MAX_TX_QUEUES,
4246 				  MTL_MAX_RX_QUEUES);
4247 	if (!ndev)
4248 		return -ENOMEM;
4249 
4250 	SET_NETDEV_DEV(ndev, device);
4251 
4252 	priv = netdev_priv(ndev);
4253 	priv->device = device;
4254 	priv->dev = ndev;
4255 
4256 	stmmac_set_ethtool_ops(ndev);
4257 	priv->pause = pause;
4258 	priv->plat = plat_dat;
4259 	priv->ioaddr = res->addr;
4260 	priv->dev->base_addr = (unsigned long)res->addr;
4261 
4262 	priv->dev->irq = res->irq;
4263 	priv->wol_irq = res->wol_irq;
4264 	priv->lpi_irq = res->lpi_irq;
4265 
4266 	if (!IS_ERR_OR_NULL(res->mac))
4267 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4268 
4269 	dev_set_drvdata(device, priv->dev);
4270 
4271 	/* Verify driver arguments */
4272 	stmmac_verify_args();
4273 
4274 	/* Allocate workqueue */
4275 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4276 	if (!priv->wq) {
4277 		dev_err(priv->device, "failed to create workqueue\n");
4278 		ret = -ENOMEM;
4279 		goto error_wq;
4280 	}
4281 
4282 	INIT_WORK(&priv->service_task, stmmac_service_task);
4283 
4284 	/* Override with kernel parameters if supplied XXX CRS XXX
4285 	 * this needs to have multiple instances
4286 	 */
4287 	if ((phyaddr >= 0) && (phyaddr <= 31))
4288 		priv->plat->phy_addr = phyaddr;
4289 
4290 	if (priv->plat->stmmac_rst) {
4291 		ret = reset_control_assert(priv->plat->stmmac_rst);
4292 		reset_control_deassert(priv->plat->stmmac_rst);
4293 		/* Some reset controllers have only a reset callback instead
4294 		 * of an assert + deassert callback pair.
4295 		 */
4296 		if (ret == -ENOTSUPP)
4297 			reset_control_reset(priv->plat->stmmac_rst);
4298 	}
4299 
4300 	/* Init MAC and get the capabilities */
4301 	ret = stmmac_hw_init(priv);
4302 	if (ret)
4303 		goto error_hw_init;
4304 
4305 	stmmac_check_ether_addr(priv);
4306 
4307 	/* Configure real RX and TX queues */
4308 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4309 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4310 
4311 	ndev->netdev_ops = &stmmac_netdev_ops;
4312 
4313 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4314 			    NETIF_F_RXCSUM;
4315 
4316 	ret = stmmac_tc_init(priv, priv);
4317 	if (!ret) {
4318 		ndev->hw_features |= NETIF_F_HW_TC;
4319 	}
4320 
4321 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4322 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4323 		priv->tso = true;
4324 		dev_info(priv->device, "TSO feature enabled\n");
4325 	}
4326 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4327 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4328 #ifdef STMMAC_VLAN_TAG_USED
4329 	/* Both mac100 and gmac support receive VLAN tag detection */
4330 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4331 #endif
4332 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4333 
4334 	/* MTU range: 46 - hw-specific max */
4335 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4336 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4337 		ndev->max_mtu = JUMBO_LEN;
4338 	else if (priv->plat->has_xgmac)
4339 		ndev->max_mtu = XGMAC_JUMBO_LEN;
4340 	else
4341 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4342 	/* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
4343 	 * ndev->max_mtu or less than ndev->min_mtu, which is an invalid range.
4344 	 */
4345 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4346 	    (priv->plat->maxmtu >= ndev->min_mtu))
4347 		ndev->max_mtu = priv->plat->maxmtu;
4348 	else if (priv->plat->maxmtu < ndev->min_mtu)
4349 		dev_warn(priv->device,
4350 			 "%s: warning: maxmtu having invalid value (%d)\n",
4351 			 __func__, priv->plat->maxmtu);
4352 
4353 	if (flow_ctrl)
4354 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4355 
4356 	/* Setup channels NAPI */
4357 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4358 
4359 	for (queue = 0; queue < maxq; queue++) {
4360 		struct stmmac_channel *ch = &priv->channel[queue];
4361 
4362 		ch->priv_data = priv;
4363 		ch->index = queue;
4364 
4365 		if (queue < priv->plat->rx_queues_to_use) {
4366 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4367 				       NAPI_POLL_WEIGHT);
4368 		}
4369 		if (queue < priv->plat->tx_queues_to_use) {
4370 			netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
4371 				       NAPI_POLL_WEIGHT);
4372 		}
4373 	}
4374 
4375 	mutex_init(&priv->lock);
4376 
4377 	/* If a specific clk_csr value is passed from the platform
4378 	 * this means that the CSR Clock Range selection cannot be
4379 	 * changed at run-time and is fixed. Otherwise the driver tries to
4380 	 * set the MDC clock dynamically according to the actual csr
4381 	 * clock input.
4382 	 */
4383 	if (priv->plat->clk_csr >= 0)
4384 		priv->clk_csr = priv->plat->clk_csr;
4385 	else
4386 		stmmac_clk_csr_set(priv);
4387 
4388 	stmmac_check_pcs_mode(priv);
4389 
4390 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4391 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4392 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4393 		/* MDIO bus Registration */
4394 		ret = stmmac_mdio_register(ndev);
4395 		if (ret < 0) {
4396 			dev_err(priv->device,
4397 				"%s: MDIO bus (id: %d) registration failed",
4398 				__func__, priv->plat->bus_id);
4399 			goto error_mdio_register;
4400 		}
4401 	}
4402 
4403 	ret = register_netdev(ndev);
4404 	if (ret) {
4405 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4406 			__func__, ret);
4407 		goto error_netdev_register;
4408 	}
4409 
4410 #ifdef CONFIG_DEBUG_FS
4411 	ret = stmmac_init_fs(ndev);
4412 	if (ret < 0)
4413 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4414 			    __func__);
4415 #endif
4416 
4417 	return ret;
4418 
4419 error_netdev_register:
4420 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4421 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4422 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4423 		stmmac_mdio_unregister(ndev);
4424 error_mdio_register:
4425 	for (queue = 0; queue < maxq; queue++) {
4426 		struct stmmac_channel *ch = &priv->channel[queue];
4427 
4428 		if (queue < priv->plat->rx_queues_to_use)
4429 			netif_napi_del(&ch->rx_napi);
4430 		if (queue < priv->plat->tx_queues_to_use)
4431 			netif_napi_del(&ch->tx_napi);
4432 	}
4433 error_hw_init:
4434 	destroy_workqueue(priv->wq);
4435 error_wq:
4436 	free_netdev(ndev);
4437 
4438 	return ret;
4439 }
4440 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4441 
4442 /**
4443  * stmmac_dvr_remove
4444  * @dev: device pointer
4445  * Description: this function resets the TX/RX processes, disables the MAC
4446  * RX/TX, changes the link status and releases the DMA descriptor rings.
4447  */
4448 int stmmac_dvr_remove(struct device *dev)
4449 {
4450 	struct net_device *ndev = dev_get_drvdata(dev);
4451 	struct stmmac_priv *priv = netdev_priv(ndev);
4452 
4453 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4454 
4455 #ifdef CONFIG_DEBUG_FS
4456 	stmmac_exit_fs(ndev);
4457 #endif
4458 	stmmac_stop_all_dma(priv);
4459 
4460 	stmmac_mac_set(priv, priv->ioaddr, false);
4461 	netif_carrier_off(ndev);
4462 	unregister_netdev(ndev);
4463 	if (priv->plat->stmmac_rst)
4464 		reset_control_assert(priv->plat->stmmac_rst);
4465 	clk_disable_unprepare(priv->plat->pclk);
4466 	clk_disable_unprepare(priv->plat->stmmac_clk);
4467 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4468 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4469 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4470 		stmmac_mdio_unregister(ndev);
4471 	destroy_workqueue(priv->wq);
4472 	mutex_destroy(&priv->lock);
4473 	free_netdev(ndev);
4474 
4475 	return 0;
4476 }
4477 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4478 
4479 /**
4480  * stmmac_suspend - suspend callback
4481  * @dev: device pointer
4482  * Description: this function suspends the device. It is called by the
4483  * platform driver to stop the network queues, release the resources,
4484  * program the PMT register (for WoL) and clean up driver resources.
4485  */
4486 int stmmac_suspend(struct device *dev)
4487 {
4488 	struct net_device *ndev = dev_get_drvdata(dev);
4489 	struct stmmac_priv *priv = netdev_priv(ndev);
4490 
4491 	if (!ndev || !netif_running(ndev))
4492 		return 0;
4493 
4494 	if (ndev->phydev)
4495 		phy_stop(ndev->phydev);
4496 
4497 	mutex_lock(&priv->lock);
4498 
4499 	netif_device_detach(ndev);
4500 	stmmac_stop_all_queues(priv);
4501 
4502 	stmmac_disable_all_queues(priv);
4503 
4504 	/* Stop TX/RX DMA */
4505 	stmmac_stop_all_dma(priv);
4506 
4507 	/* Enable Power down mode by programming the PMT regs */
4508 	if (device_may_wakeup(priv->device)) {
4509 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4510 		priv->irq_wake = 1;
4511 	} else {
4512 		stmmac_mac_set(priv, priv->ioaddr, false);
4513 		pinctrl_pm_select_sleep_state(priv->device);
4514 		/* Disable the clocks since wake-up is not used */
4515 		clk_disable(priv->plat->pclk);
4516 		clk_disable(priv->plat->stmmac_clk);
4517 	}
4518 	mutex_unlock(&priv->lock);
4519 
4520 	priv->oldlink = false;
4521 	priv->speed = SPEED_UNKNOWN;
4522 	priv->oldduplex = DUPLEX_UNKNOWN;
4523 	return 0;
4524 }
4525 EXPORT_SYMBOL_GPL(stmmac_suspend);
4526 
4527 /**
4528  * stmmac_reset_queues_param - reset queue parameters
4529  * @dev: device pointer
4530  */
4531 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4532 {
4533 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4534 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4535 	u32 queue;
4536 
4537 	for (queue = 0; queue < rx_cnt; queue++) {
4538 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4539 
4540 		rx_q->cur_rx = 0;
4541 		rx_q->dirty_rx = 0;
4542 	}
4543 
4544 	for (queue = 0; queue < tx_cnt; queue++) {
4545 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4546 
4547 		tx_q->cur_tx = 0;
4548 		tx_q->dirty_tx = 0;
4549 		tx_q->mss = 0;
4550 	}
4551 }
4552 
4553 /**
4554  * stmmac_resume - resume callback
4555  * @dev: device pointer
4556  * Description: on resume this function is invoked to set up the DMA and CORE
4557  * in a usable state.
4558  */
4559 int stmmac_resume(struct device *dev)
4560 {
4561 	struct net_device *ndev = dev_get_drvdata(dev);
4562 	struct stmmac_priv *priv = netdev_priv(ndev);
4563 
4564 	if (!netif_running(ndev))
4565 		return 0;
4566 
4567 	/* The Power-Down bit in the PMT register is cleared
4568 	 * automatically as soon as a magic packet or a Wake-up frame
4569 	 * is received. Anyway, it's better to manually clear
4570 	 * this bit because it can generate problems while resuming
4571 	 * from other devices (e.g. the serial console).
4572 	 */
4573 	if (device_may_wakeup(priv->device)) {
4574 		mutex_lock(&priv->lock);
4575 		stmmac_pmt(priv, priv->hw, 0);
4576 		mutex_unlock(&priv->lock);
4577 		priv->irq_wake = 0;
4578 	} else {
4579 		pinctrl_pm_select_default_state(priv->device);
4580 		/* enable the clk previously disabled */
4581 		clk_enable(priv->plat->stmmac_clk);
4582 		clk_enable(priv->plat->pclk);
4583 		/* reset the phy so that it's ready */
4584 		if (priv->mii)
4585 			stmmac_mdio_reset(priv->mii);
4586 	}
4587 
4588 	netif_device_attach(ndev);
4589 
4590 	mutex_lock(&priv->lock);
4591 
4592 	stmmac_reset_queues_param(priv);
4593 
4594 	stmmac_clear_descriptors(priv);
4595 
4596 	stmmac_hw_setup(ndev, false);
4597 	stmmac_init_tx_coalesce(priv);
4598 	stmmac_set_rx_mode(ndev);
4599 
4600 	stmmac_enable_all_queues(priv);
4601 
4602 	stmmac_start_all_queues(priv);
4603 
4604 	mutex_unlock(&priv->lock);
4605 
4606 	if (ndev->phydev)
4607 		phy_start(ndev->phydev);
4608 
4609 	return 0;
4610 }
4611 EXPORT_SYMBOL_GPL(stmmac_resume);
4612 
4613 #ifndef MODULE
4614 static int __init stmmac_cmdline_opt(char *str)
4615 {
4616 	char *opt;
4617 
4618 	if (!str || !*str)
4619 		return -EINVAL;
4620 	while ((opt = strsep(&str, ",")) != NULL) {
4621 		if (!strncmp(opt, "debug:", 6)) {
4622 			if (kstrtoint(opt + 6, 0, &debug))
4623 				goto err;
4624 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4625 			if (kstrtoint(opt + 8, 0, &phyaddr))
4626 				goto err;
4627 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4628 			if (kstrtoint(opt + 7, 0, &buf_sz))
4629 				goto err;
4630 		} else if (!strncmp(opt, "tc:", 3)) {
4631 			if (kstrtoint(opt + 3, 0, &tc))
4632 				goto err;
4633 		} else if (!strncmp(opt, "watchdog:", 9)) {
4634 			if (kstrtoint(opt + 9, 0, &watchdog))
4635 				goto err;
4636 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4637 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4638 				goto err;
4639 		} else if (!strncmp(opt, "pause:", 6)) {
4640 			if (kstrtoint(opt + 6, 0, &pause))
4641 				goto err;
4642 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4643 			if (kstrtoint(opt + 10, 0, &eee_timer))
4644 				goto err;
4645 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4646 			if (kstrtoint(opt + 11, 0, &chain_mode))
4647 				goto err;
4648 		}
4649 	}
4650 	return 0;
4651 
4652 err:
4653 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4654 	return -EINVAL;
4655 }
4656 
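/* Example boot-time usage (values are illustrative only):
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,buf_sz:2048
 */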
4657 __setup("stmmaceth=", stmmac_cmdline_opt);
4658 #endif /* MODULE */
4659 
4660 static int __init stmmac_init(void)
4661 {
4662 #ifdef CONFIG_DEBUG_FS
4663 	/* Create debugfs main directory if it doesn't exist yet */
4664 	if (!stmmac_fs_dir) {
4665 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4666 
4667 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4668 			pr_err("ERROR %s, debugfs create directory failed\n",
4669 			       STMMAC_RESOURCE_NAME);
4670 
4671 			return -ENOMEM;
4672 		}
4673 	}
4674 #endif
4675 
4676 	return 0;
4677 }
4678 
4679 static void __exit stmmac_exit(void)
4680 {
4681 #ifdef CONFIG_DEBUG_FS
4682 	debugfs_remove_recursive(stmmac_fs_dir);
4683 #endif
4684 }
4685 
4686 module_init(stmmac_init)
4687 module_exit(stmmac_exit)
4688 
4689 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4690 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4691 MODULE_LICENSE("GPL");
4692