xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 023e41632e065d49bcbe31b3c4b336217f96a271)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56 
57 #define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
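/* Largest buffer size that can be programmed into a single TSO descriptor */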
58 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
59 
60 /* Module parameters */
61 #define TX_TIMEO	5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65 
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69 
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73 
74 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
76 
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80 
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84 
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89 
90 #define	DEFAULT_BUFSIZE	1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94 
95 #define	STMMAC_RX_COPYBREAK	256
96 
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
99 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100 
101 #define STMMAC_DEFAULT_LPI_TIMER	1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
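/* Expiry of the EEE SW timer, in jiffies, computed from the eee_timer value (ms) */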
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106 
107 /* By default the driver will use the ring mode to manage tx and rx descriptors,
108  * but allows the user to force the use of chain mode instead of ring mode
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113 
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115 
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120 
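/* Expiry of the TX coalescing SW timer, in jiffies, from a microsecond interval */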
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122 
123 /**
124  * stmmac_verify_args - verify the driver parameters.
125  * Description: it checks the driver parameters and sets a default in case of
126  * errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130 	if (unlikely(watchdog < 0))
131 		watchdog = TX_TIMEO;
132 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133 		buf_sz = DEFAULT_BUFSIZE;
134 	if (unlikely(flow_ctrl > 1))
135 		flow_ctrl = FLOW_AUTO;
136 	else if (likely(flow_ctrl < 0))
137 		flow_ctrl = FLOW_OFF;
138 	if (unlikely((pause < 0) || (pause > 0xffff)))
139 		pause = PAUSE_TIME;
140 	if (eee_timer < 0)
141 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143 
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153 	u32 queue;
154 
155 	for (queue = 0; queue < maxq; queue++) {
156 		struct stmmac_channel *ch = &priv->channel[queue];
157 
158 		if (queue < rx_queues_cnt)
159 			napi_disable(&ch->rx_napi);
160 		if (queue < tx_queues_cnt)
161 			napi_disable(&ch->tx_napi);
162 	}
163 }
164 
165 /**
166  * stmmac_enable_all_queues - Enable all queues
167  * @priv: driver private structure
168  */
169 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
170 {
171 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
172 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
173 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
174 	u32 queue;
175 
176 	for (queue = 0; queue < maxq; queue++) {
177 		struct stmmac_channel *ch = &priv->channel[queue];
178 
179 		if (queue < rx_queues_cnt)
180 			napi_enable(&ch->rx_napi);
181 		if (queue < tx_queues_cnt)
182 			napi_enable(&ch->tx_napi);
183 	}
184 }
185 
186 /**
187  * stmmac_stop_all_queues - Stop all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_start_all_queues - Start all queues
201  * @priv: driver private structure
202  */
203 static void stmmac_start_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206 	u32 queue;
207 
208 	for (queue = 0; queue < tx_queues_cnt; queue++)
209 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
210 }
211 
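/* Schedule the error/reset service task unless the interface is going down
 * or the task has already been scheduled.
 */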
212 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
213 {
214 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
215 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
216 		queue_work(priv->wq, &priv->service_task);
217 }
218 
219 static void stmmac_global_err(struct stmmac_priv *priv)
220 {
221 	netif_carrier_off(priv->dev);
222 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
223 	stmmac_service_event_schedule(priv);
224 }
225 
226 /**
227  * stmmac_clk_csr_set - dynamically set the MDC clock
228  * @priv: driver private structure
229  * Description: this is to dynamically set the MDC clock according to the csr
230  * clock input.
231  * Note:
232  *	If a specific clk_csr value is passed from the platform
233  *	this means that the CSR Clock Range selection cannot be
234  *	changed at run-time and it is fixed (as reported in the driver
235  * documentation). Otherwise, the driver will try to set the MDC
236  *	clock dynamically according to the actual clock input.
237  */
238 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
239 {
240 	u32 clk_rate;
241 
242 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
243 
244 	/* The platform-provided default clk_csr is assumed valid
245 	 * for all cases except the ones mentioned below.
246 	 * For values higher than the IEEE 802.3 specified frequency
247 	 * range we cannot estimate the proper divider, as the
248 	 * frequency of clk_csr_i is not known. So we do not change
249 	 * the default divider.
250 	 */
251 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
252 		if (clk_rate < CSR_F_35M)
253 			priv->clk_csr = STMMAC_CSR_20_35M;
254 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
255 			priv->clk_csr = STMMAC_CSR_35_60M;
256 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
257 			priv->clk_csr = STMMAC_CSR_60_100M;
258 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
259 			priv->clk_csr = STMMAC_CSR_100_150M;
260 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
261 			priv->clk_csr = STMMAC_CSR_150_250M;
262 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
263 			priv->clk_csr = STMMAC_CSR_250_300M;
264 	}
265 
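	/* dwmac-sun8i glue uses its own CSR divider encoding, chosen from the clock rate */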
266 	if (priv->plat->has_sun8i) {
267 		if (clk_rate > 160000000)
268 			priv->clk_csr = 0x03;
269 		else if (clk_rate > 80000000)
270 			priv->clk_csr = 0x02;
271 		else if (clk_rate > 40000000)
272 			priv->clk_csr = 0x01;
273 		else
274 			priv->clk_csr = 0;
275 	}
276 
277 	if (priv->plat->has_xgmac) {
278 		if (clk_rate > 400000000)
279 			priv->clk_csr = 0x5;
280 		else if (clk_rate > 350000000)
281 			priv->clk_csr = 0x4;
282 		else if (clk_rate > 300000000)
283 			priv->clk_csr = 0x3;
284 		else if (clk_rate > 250000000)
285 			priv->clk_csr = 0x2;
286 		else if (clk_rate > 150000000)
287 			priv->clk_csr = 0x1;
288 		else
289 			priv->clk_csr = 0x0;
290 	}
291 }
292 
293 static void print_pkt(unsigned char *buf, int len)
294 {
295 	pr_debug("len = %d bytes, buf addr: 0x%p\n", len, buf);
296 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
297 }
298 
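/**
 * stmmac_tx_avail - get the number of free descriptors in the TX ring
 * @priv: driver private structure
 * @queue: TX queue index
 */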
299 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
300 {
301 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
302 	u32 avail;
303 
304 	if (tx_q->dirty_tx > tx_q->cur_tx)
305 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
306 	else
307 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
308 
309 	return avail;
310 }
311 
312 /**
313  * stmmac_rx_dirty - Get RX queue dirty
314  * @priv: driver private structure
315  * @queue: RX queue index
316  */
317 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
318 {
319 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
320 	u32 dirty;
321 
322 	if (rx_q->dirty_rx <= rx_q->cur_rx)
323 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
324 	else
325 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
326 
327 	return dirty;
328 }
329 
330 /**
331  * stmmac_hw_fix_mac_speed - callback for speed selection
332  * @priv: driver private structure
333  * Description: on some platforms (e.g. ST), some HW system configuration
334  * registers have to be set according to the link speed negotiated.
335  */
336 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
337 {
338 	struct net_device *ndev = priv->dev;
339 	struct phy_device *phydev = ndev->phydev;
340 
341 	if (likely(priv->plat->fix_mac_speed))
342 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
343 }
344 
345 /**
346  * stmmac_enable_eee_mode - check and enter LPI mode
347  * @priv: driver private structure
348  * Description: this function verifies whether all TX queues are idle and, if
349  * so, enters LPI mode when EEE is enabled.
350  */
351 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
352 {
353 	u32 tx_cnt = priv->plat->tx_queues_to_use;
354 	u32 queue;
355 
356 	/* check if all TX queues have the work finished */
357 	for (queue = 0; queue < tx_cnt; queue++) {
358 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
359 
360 		if (tx_q->dirty_tx != tx_q->cur_tx)
361 			return; /* still unfinished work */
362 	}
363 
364 	/* Check and enter in LPI mode */
365 	if (!priv->tx_path_in_lpi_mode)
366 		stmmac_set_eee_mode(priv, priv->hw,
367 				priv->plat->en_tx_lpi_clockgating);
368 }
369 
370 /**
371  * stmmac_disable_eee_mode - disable and exit from LPI mode
372  * @priv: driver private structure
373  * Description: this function exits LPI mode and disables EEE when the TX
374  * path is in LPI state. It is called from the xmit path.
375  */
376 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
377 {
378 	stmmac_reset_eee_mode(priv, priv->hw);
379 	del_timer_sync(&priv->eee_ctrl_timer);
380 	priv->tx_path_in_lpi_mode = false;
381 }
382 
383 /**
384  * stmmac_eee_ctrl_timer - EEE TX SW timer.
385  * @t: timer_list pointer embedded in the private structure
386  * Description:
387  *  if there is no data transfer and we are not already in LPI state,
388  *  then the MAC transmitter can be moved to LPI state.
389  */
390 static void stmmac_eee_ctrl_timer(struct timer_list *t)
391 {
392 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
393 
394 	stmmac_enable_eee_mode(priv);
395 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
396 }
397 
398 /**
399  * stmmac_eee_init - init EEE
400  * @priv: driver private structure
401  * Description:
402  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
403  *  can also manage EEE, this function enables the LPI state and starts the
404  *  related timer.
405  */
406 bool stmmac_eee_init(struct stmmac_priv *priv)
407 {
408 	struct net_device *ndev = priv->dev;
409 	int interface = priv->plat->interface;
410 	bool ret = false;
411 
412 	if ((interface != PHY_INTERFACE_MODE_MII) &&
413 	    (interface != PHY_INTERFACE_MODE_GMII) &&
414 	    !phy_interface_mode_is_rgmii(interface))
415 		goto out;
416 
417 	/* Using the PCS we cannot deal with the PHY registers at this stage,
418 	 * so we do not support extra features like EEE.
419 	 */
420 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
421 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
422 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
423 		goto out;
424 
425 	/* MAC core supports the EEE feature. */
426 	if (priv->dma_cap.eee) {
427 		int tx_lpi_timer = priv->tx_lpi_timer;
428 
429 		/* Check if the PHY supports EEE */
430 		if (phy_init_eee(ndev->phydev, 1)) {
431 			/* Handle the case where EEE can no longer be supported
432 			 * at run-time (for example because the link partner
433 			 * caps have changed).
434 			 * In that case the driver disables its own timers.
435 			 */
436 			mutex_lock(&priv->lock);
437 			if (priv->eee_active) {
438 				netdev_dbg(priv->dev, "disable EEE\n");
439 				del_timer_sync(&priv->eee_ctrl_timer);
440 				stmmac_set_eee_timer(priv, priv->hw, 0,
441 						tx_lpi_timer);
442 			}
443 			priv->eee_active = 0;
444 			mutex_unlock(&priv->lock);
445 			goto out;
446 		}
447 		/* Activate the EEE and start timers */
448 		mutex_lock(&priv->lock);
449 		if (!priv->eee_active) {
450 			priv->eee_active = 1;
451 			timer_setup(&priv->eee_ctrl_timer,
452 				    stmmac_eee_ctrl_timer, 0);
453 			mod_timer(&priv->eee_ctrl_timer,
454 				  STMMAC_LPI_T(eee_timer));
455 
456 			stmmac_set_eee_timer(priv, priv->hw,
457 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
458 		}
459 		/* Set HW EEE according to the speed */
460 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
461 
462 		ret = true;
463 		mutex_unlock(&priv->lock);
464 
465 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
466 	}
467 out:
468 	return ret;
469 }
470 
471 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
472  * @priv: driver private structure
473  * @p : descriptor pointer
474  * @skb : the socket buffer
475  * Description :
476  * This function reads the timestamp from the descriptor, performs some
477  * sanity checks and passes it to the stack.
478  */
479 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
480 				   struct dma_desc *p, struct sk_buff *skb)
481 {
482 	struct skb_shared_hwtstamps shhwtstamp;
483 	u64 ns = 0;
484 
485 	if (!priv->hwts_tx_en)
486 		return;
487 
488 	/* exit if skb doesn't support hw tstamp */
489 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
490 		return;
491 
492 	/* check tx tstamp status */
493 	if (stmmac_get_tx_timestamp_status(priv, p)) {
494 		/* get the valid tstamp */
495 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
496 
497 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
498 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
499 
500 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
501 		/* pass tstamp to stack */
502 		skb_tstamp_tx(skb, &shhwtstamp);
503 	}
506 }
507 
508 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
509  * @priv: driver private structure
510  * @p : descriptor pointer
511  * @np : next descriptor pointer
512  * @skb : the socket buffer
513  * Description :
514  * This function will read received packet's timestamp from the descriptor
515  * and pass it to the stack. It also performs some sanity checks.
516  */
517 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
518 				   struct dma_desc *np, struct sk_buff *skb)
519 {
520 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
521 	struct dma_desc *desc = p;
522 	u64 ns = 0;
523 
524 	if (!priv->hwts_rx_en)
525 		return;
526 	/* For GMAC4, the valid timestamp is from CTX next desc. */
527 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
528 		desc = np;
529 
530 	/* Check if timestamp is available */
531 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
532 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
533 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
534 		shhwtstamp = skb_hwtstamps(skb);
535 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
536 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
537 	} else  {
538 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
539 	}
540 }
541 
542 /**
543  *  stmmac_hwtstamp_set - control hardware timestamping.
544  *  @dev: device pointer.
545  *  @ifr: An IOCTL specific structure, that can contain a pointer to
546  *  a proprietary structure used to pass information to the driver.
547  *  Description:
548  *  This function configures the MAC to enable/disable both outgoing(TX)
549  *  and incoming(RX) packets time stamping based on user input.
550  *  Return Value:
551  *  0 on success and an appropriate -ve integer on failure.
552  */
553 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
554 {
555 	struct stmmac_priv *priv = netdev_priv(dev);
556 	struct hwtstamp_config config;
557 	struct timespec64 now;
558 	u64 temp = 0;
559 	u32 ptp_v2 = 0;
560 	u32 tstamp_all = 0;
561 	u32 ptp_over_ipv4_udp = 0;
562 	u32 ptp_over_ipv6_udp = 0;
563 	u32 ptp_over_ethernet = 0;
564 	u32 snap_type_sel = 0;
565 	u32 ts_master_en = 0;
566 	u32 ts_event_en = 0;
567 	u32 sec_inc = 0;
568 	u32 value = 0;
569 	bool xmac;
570 
571 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
572 
573 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
574 		netdev_alert(priv->dev, "No support for HW time stamping\n");
575 		priv->hwts_tx_en = 0;
576 		priv->hwts_rx_en = 0;
577 
578 		return -EOPNOTSUPP;
579 	}
580 
581 	if (copy_from_user(&config, ifr->ifr_data,
582 			   sizeof(config)))
583 		return -EFAULT;
584 
585 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
586 		   __func__, config.flags, config.tx_type, config.rx_filter);
587 
588 	/* reserved for future extensions */
589 	if (config.flags)
590 		return -EINVAL;
591 
592 	if (config.tx_type != HWTSTAMP_TX_OFF &&
593 	    config.tx_type != HWTSTAMP_TX_ON)
594 		return -ERANGE;
595 
596 	if (priv->adv_ts) {
597 		switch (config.rx_filter) {
598 		case HWTSTAMP_FILTER_NONE:
599 			/* time stamp no incoming packet at all */
600 			config.rx_filter = HWTSTAMP_FILTER_NONE;
601 			break;
602 
603 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
604 			/* PTP v1, UDP, any kind of event packet */
605 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
606 			/* 'xmac' hardware can support Sync, Pdelay_Req and
607 			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
608 			 * This leaves Delay_Req timestamps out.
609 			 * Enable all events *and* general purpose message
610 			 * timestamping
611 			 */
612 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
613 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
614 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
615 			break;
616 
617 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
618 			/* PTP v1, UDP, Sync packet */
619 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
620 			/* take time stamp for SYNC messages only */
621 			ts_event_en = PTP_TCR_TSEVNTENA;
622 
623 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625 			break;
626 
627 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
628 			/* PTP v1, UDP, Delay_req packet */
629 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
630 			/* take time stamp for Delay_Req messages only */
631 			ts_master_en = PTP_TCR_TSMSTRENA;
632 			ts_event_en = PTP_TCR_TSEVNTENA;
633 
634 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636 			break;
637 
638 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
639 			/* PTP v2, UDP, any kind of event packet */
640 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
641 			ptp_v2 = PTP_TCR_TSVER2ENA;
642 			/* take time stamp for all event messages */
643 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
644 
645 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
646 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
647 			break;
648 
649 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
650 			/* PTP v2, UDP, Sync packet */
651 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
652 			ptp_v2 = PTP_TCR_TSVER2ENA;
653 			/* take time stamp for SYNC messages only */
654 			ts_event_en = PTP_TCR_TSEVNTENA;
655 
656 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
657 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
658 			break;
659 
660 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
661 			/* PTP v2, UDP, Delay_req packet */
662 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
663 			ptp_v2 = PTP_TCR_TSVER2ENA;
664 			/* take time stamp for Delay_Req messages only */
665 			ts_master_en = PTP_TCR_TSMSTRENA;
666 			ts_event_en = PTP_TCR_TSEVNTENA;
667 
668 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
669 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
670 			break;
671 
672 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
673 			/* PTP v2/802.1AS, any layer, any kind of event packet */
674 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
675 			ptp_v2 = PTP_TCR_TSVER2ENA;
676 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			ptp_over_ethernet = PTP_TCR_TSIPENA;
680 			break;
681 
682 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
683 			/* PTP v2/802.1AS, any layer, Sync packet */
684 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
685 			ptp_v2 = PTP_TCR_TSVER2ENA;
686 			/* take time stamp for SYNC messages only */
687 			ts_event_en = PTP_TCR_TSEVNTENA;
688 
689 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
690 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
691 			ptp_over_ethernet = PTP_TCR_TSIPENA;
692 			break;
693 
694 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
695 			/* PTP v2/802.1AS, any layer, Delay_req packet */
696 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
697 			ptp_v2 = PTP_TCR_TSVER2ENA;
698 			/* take time stamp for Delay_Req messages only */
699 			ts_master_en = PTP_TCR_TSMSTRENA;
700 			ts_event_en = PTP_TCR_TSEVNTENA;
701 
702 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
703 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
704 			ptp_over_ethernet = PTP_TCR_TSIPENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_NTP_ALL:
708 		case HWTSTAMP_FILTER_ALL:
709 			/* time stamp any incoming packet */
710 			config.rx_filter = HWTSTAMP_FILTER_ALL;
711 			tstamp_all = PTP_TCR_TSENALL;
712 			break;
713 
714 		default:
715 			return -ERANGE;
716 		}
717 	} else {
718 		switch (config.rx_filter) {
719 		case HWTSTAMP_FILTER_NONE:
720 			config.rx_filter = HWTSTAMP_FILTER_NONE;
721 			break;
722 		default:
723 			/* PTP v1, UDP, any kind of event packet */
724 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
725 			break;
726 		}
727 	}
728 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
729 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
730 
731 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
732 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
733 	else {
734 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
735 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
736 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
737 			 ts_master_en | snap_type_sel);
738 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
739 
740 		/* program Sub Second Increment reg */
741 		stmmac_config_sub_second_increment(priv,
742 				priv->ptpaddr, priv->plat->clk_ptp_rate,
743 				xmac, &sec_inc);
744 		temp = div_u64(1000000000ULL, sec_inc);
745 
746 		/* Store sub second increment and flags for later use */
747 		priv->sub_second_inc = sec_inc;
748 		priv->systime_flags = value;
749 
750 		/* Calculate the default addend value:
751 		 * addend = 2^32 / freq_div_ratio
752 		 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc),
753 		 * i.e. addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate
754 		 */
755 		temp = (u64)(temp << 32);
756 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
757 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
758 
759 		/* initialize system time */
760 		ktime_get_real_ts64(&now);
761 
762 		/* lower 32 bits of tv_sec are safe until y2106 */
763 		stmmac_init_systime(priv, priv->ptpaddr,
764 				(u32)now.tv_sec, now.tv_nsec);
765 	}
766 
767 	memcpy(&priv->tstamp_config, &config, sizeof(config));
768 
769 	return copy_to_user(ifr->ifr_data, &config,
770 			    sizeof(config)) ? -EFAULT : 0;
771 }
772 
773 /**
774  *  stmmac_hwtstamp_get - read hardware timestamping.
775  *  @dev: device pointer.
776  *  @ifr: An IOCTL specific structure, that can contain a pointer to
777  *  a proprietary structure used to pass information to the driver.
778  *  Description:
779  *  This function obtains the current hardware timestamping settings
780  *  as requested.
781  */
782 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
783 {
784 	struct stmmac_priv *priv = netdev_priv(dev);
785 	struct hwtstamp_config *config = &priv->tstamp_config;
786 
787 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
788 		return -EOPNOTSUPP;
789 
790 	return copy_to_user(ifr->ifr_data, config,
791 			    sizeof(*config)) ? -EFAULT : 0;
792 }
793 
794 /**
795  * stmmac_init_ptp - init PTP
796  * @priv: driver private structure
797  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
798  * This is done by looking at the HW cap. register.
799  * This function also registers the ptp driver.
800  */
801 static int stmmac_init_ptp(struct stmmac_priv *priv)
802 {
803 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
804 
805 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
806 		return -EOPNOTSUPP;
807 
808 	priv->adv_ts = 0;
809 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
810 	if (xmac && priv->dma_cap.atime_stamp)
811 		priv->adv_ts = 1;
812 	/* Dwmac 3.x core with extend_desc can support adv_ts */
813 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
814 		priv->adv_ts = 1;
815 
816 	if (priv->dma_cap.time_stamp)
817 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
818 
819 	if (priv->adv_ts)
820 		netdev_info(priv->dev,
821 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
822 
823 	priv->hwts_tx_en = 0;
824 	priv->hwts_rx_en = 0;
825 
826 	stmmac_ptp_register(priv);
827 
828 	return 0;
829 }
830 
831 static void stmmac_release_ptp(struct stmmac_priv *priv)
832 {
833 	if (priv->plat->clk_ptp_ref)
834 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
835 	stmmac_ptp_unregister(priv);
836 }
837 
838 /**
839  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
840  *  @priv: driver private structure
 *  @duplex: duplex mode negotiated on the link
841  *  Description: It is used for configuring the flow control in all queues
842  */
843 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
844 {
845 	u32 tx_cnt = priv->plat->tx_queues_to_use;
846 
847 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
848 			priv->pause, tx_cnt);
849 }
850 
851 /**
852  * stmmac_adjust_link - adjusts the link parameters
853  * @dev: net device structure
854  * Description: this is the helper called by the physical abstraction layer
855  * drivers to communicate the PHY link status. According to the speed and
856  * duplex, this driver can invoke the registered glue-logic as well.
857  * It also invokes the EEE initialization because the link may come up on a
858  * different network (that is EEE capable).
859  */
860 static void stmmac_adjust_link(struct net_device *dev)
861 {
862 	struct stmmac_priv *priv = netdev_priv(dev);
863 	struct phy_device *phydev = dev->phydev;
864 	bool new_state = false;
865 
866 	if (!phydev)
867 		return;
868 
869 	mutex_lock(&priv->lock);
870 
871 	if (phydev->link) {
872 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
873 
874 		/* Now we make sure that we can be in full duplex mode.
875 		 * If not, we operate in half-duplex mode. */
876 		if (phydev->duplex != priv->oldduplex) {
877 			new_state = true;
878 			if (!phydev->duplex)
879 				ctrl &= ~priv->hw->link.duplex;
880 			else
881 				ctrl |= priv->hw->link.duplex;
882 			priv->oldduplex = phydev->duplex;
883 		}
884 		/* Flow Control operation */
885 		if (phydev->pause)
886 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
887 
888 		if (phydev->speed != priv->speed) {
889 			new_state = true;
890 			ctrl &= ~priv->hw->link.speed_mask;
891 			switch (phydev->speed) {
892 			case SPEED_1000:
893 				ctrl |= priv->hw->link.speed1000;
894 				break;
895 			case SPEED_100:
896 				ctrl |= priv->hw->link.speed100;
897 				break;
898 			case SPEED_10:
899 				ctrl |= priv->hw->link.speed10;
900 				break;
901 			default:
902 				netif_warn(priv, link, priv->dev,
903 					   "broken speed: %d\n", phydev->speed);
904 				phydev->speed = SPEED_UNKNOWN;
905 				break;
906 			}
907 			if (phydev->speed != SPEED_UNKNOWN)
908 				stmmac_hw_fix_mac_speed(priv);
909 			priv->speed = phydev->speed;
910 		}
911 
912 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
913 
914 		if (!priv->oldlink) {
915 			new_state = true;
916 			priv->oldlink = true;
917 		}
918 	} else if (priv->oldlink) {
919 		new_state = true;
920 		priv->oldlink = false;
921 		priv->speed = SPEED_UNKNOWN;
922 		priv->oldduplex = DUPLEX_UNKNOWN;
923 	}
924 
925 	if (new_state && netif_msg_link(priv))
926 		phy_print_status(phydev);
927 
928 	mutex_unlock(&priv->lock);
929 
930 	if (phydev->is_pseudo_fixed_link)
931 		/* Stop the PHY layer from calling the adjust_link hook in case
932 		 * a switch is attached to the stmmac driver.
933 		 */
934 		phydev->irq = PHY_IGNORE_INTERRUPT;
935 	else
936 		/* At this stage, init the EEE if supported.
937 		 * Never called in case of fixed_link.
938 		 */
939 		priv->eee_enabled = stmmac_eee_init(priv);
940 }
941 
942 /**
943  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
944  * @priv: driver private structure
945  * Description: this is to verify if the HW supports the PCS.
946  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
947  * configured for the TBI, RTBI, or SGMII PHY interface.
948  */
949 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
950 {
951 	int interface = priv->plat->interface;
952 
953 	if (priv->dma_cap.pcs) {
954 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
955 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
956 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
957 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
958 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
959 			priv->hw->pcs = STMMAC_PCS_RGMII;
960 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
961 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
962 			priv->hw->pcs = STMMAC_PCS_SGMII;
963 		}
964 	}
965 }
966 
967 /**
968  * stmmac_init_phy - PHY initialization
969  * @dev: net device structure
970  * Description: it initializes the driver's PHY state, and attaches the PHY
971  * to the mac driver.
972  *  Return value:
973  *  0 on success
974  */
975 static int stmmac_init_phy(struct net_device *dev)
976 {
977 	struct stmmac_priv *priv = netdev_priv(dev);
978 	u32 tx_cnt = priv->plat->tx_queues_to_use;
979 	struct phy_device *phydev;
980 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
981 	char bus_id[MII_BUS_ID_SIZE];
982 	int interface = priv->plat->interface;
983 	int max_speed = priv->plat->max_speed;
984 	priv->oldlink = false;
985 	priv->speed = SPEED_UNKNOWN;
986 	priv->oldduplex = DUPLEX_UNKNOWN;
987 
988 	if (priv->plat->phy_node) {
989 		phydev = of_phy_connect(dev, priv->plat->phy_node,
990 					&stmmac_adjust_link, 0, interface);
991 	} else {
992 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
993 			 priv->plat->bus_id);
994 
995 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
996 			 priv->plat->phy_addr);
997 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
998 			   phy_id_fmt);
999 
1000 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
1001 				     interface);
1002 	}
1003 
1004 	if (IS_ERR_OR_NULL(phydev)) {
1005 		netdev_err(priv->dev, "Could not attach to PHY\n");
1006 		if (!phydev)
1007 			return -ENODEV;
1008 
1009 		return PTR_ERR(phydev);
1010 	}
1011 
1012 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
1013 	if ((interface == PHY_INTERFACE_MODE_MII) ||
1014 	    (interface == PHY_INTERFACE_MODE_RMII) ||
1015 		(max_speed < 1000 && max_speed > 0))
1016 		phy_set_max_speed(phydev, SPEED_100);
1017 
1018 	/*
1019 	 * Half-duplex mode is not supported with multiple queues;
1020 	 * half-duplex can only work with a single queue.
1021 	 */
1022 	if (tx_cnt > 1) {
1023 		phy_remove_link_mode(phydev,
1024 				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1025 		phy_remove_link_mode(phydev,
1026 				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1027 		phy_remove_link_mode(phydev,
1028 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1029 	}
1030 
1031 	/*
1032 	 * Broken HW is sometimes missing the pull-up resistor on the
1033 	 * MDIO line, which results in reads to non-existent devices returning
1034 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1035 	 * device as well.
1036 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
1037 	 */
1038 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
1039 		phy_disconnect(phydev);
1040 		return -ENODEV;
1041 	}
1042 
1043 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1044 	 * subsequent PHY polling, make sure we force a link transition if
1045 	 * we have a UP/DOWN/UP transition
1046 	 */
1047 	if (phydev->is_pseudo_fixed_link)
1048 		phydev->irq = PHY_POLL;
1049 
1050 	phy_attached_info(phydev);
1051 	return 0;
1052 }
1053 
1054 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1055 {
1056 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1057 	void *head_rx;
1058 	u32 queue;
1059 
1060 	/* Display RX rings */
1061 	for (queue = 0; queue < rx_cnt; queue++) {
1062 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1063 
1064 		pr_info("\tRX Queue %u rings\n", queue);
1065 
1066 		if (priv->extend_desc)
1067 			head_rx = (void *)rx_q->dma_erx;
1068 		else
1069 			head_rx = (void *)rx_q->dma_rx;
1070 
1071 		/* Display RX ring */
1072 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1073 	}
1074 }
1075 
1076 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1077 {
1078 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1079 	void *head_tx;
1080 	u32 queue;
1081 
1082 	/* Display TX rings */
1083 	for (queue = 0; queue < tx_cnt; queue++) {
1084 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1085 
1086 		pr_info("\tTX Queue %u rings\n", queue);
1087 
1088 		if (priv->extend_desc)
1089 			head_tx = (void *)tx_q->dma_etx;
1090 		else
1091 			head_tx = (void *)tx_q->dma_tx;
1092 
1093 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1094 	}
1095 }
1096 
1097 static void stmmac_display_rings(struct stmmac_priv *priv)
1098 {
1099 	/* Display RX ring */
1100 	stmmac_display_rx_rings(priv);
1101 
1102 	/* Display TX ring */
1103 	stmmac_display_tx_rings(priv);
1104 }
1105 
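/**
 * stmmac_set_bfsize - pick the DMA buffer size for a given MTU
 * @mtu: device MTU
 * @bufsize: current buffer size
 * Description: returns the smallest supported buffer size (2K, 4K, 8K or the
 * default 1536 bytes) large enough for frames of the given MTU.
 */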
1106 static int stmmac_set_bfsize(int mtu, int bufsize)
1107 {
1108 	int ret = bufsize;
1109 
1110 	if (mtu >= BUF_SIZE_4KiB)
1111 		ret = BUF_SIZE_8KiB;
1112 	else if (mtu >= BUF_SIZE_2KiB)
1113 		ret = BUF_SIZE_4KiB;
1114 	else if (mtu > DEFAULT_BUFSIZE)
1115 		ret = BUF_SIZE_2KiB;
1116 	else
1117 		ret = DEFAULT_BUFSIZE;
1118 
1119 	return ret;
1120 }
1121 
1122 /**
1123  * stmmac_clear_rx_descriptors - clear RX descriptors
1124  * @priv: driver private structure
1125  * @queue: RX queue index
1126  * Description: this function is called to clear the RX descriptors
1127  * whether basic or extended descriptors are in use.
1128  */
1129 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1130 {
1131 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1132 	int i;
1133 
1134 	/* Clear the RX descriptors */
1135 	for (i = 0; i < DMA_RX_SIZE; i++)
1136 		if (priv->extend_desc)
1137 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1138 					priv->use_riwt, priv->mode,
1139 					(i == DMA_RX_SIZE - 1));
1140 		else
1141 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1142 					priv->use_riwt, priv->mode,
1143 					(i == DMA_RX_SIZE - 1));
1144 }
1145 
1146 /**
1147  * stmmac_clear_tx_descriptors - clear tx descriptors
1148  * @priv: driver private structure
1149  * @queue: TX queue index.
1150  * Description: this function is called to clear the TX descriptors
1151  * whether basic or extended descriptors are in use.
1152  */
1153 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1154 {
1155 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1156 	int i;
1157 
1158 	/* Clear the TX descriptors */
1159 	for (i = 0; i < DMA_TX_SIZE; i++)
1160 		if (priv->extend_desc)
1161 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1162 					priv->mode, (i == DMA_TX_SIZE - 1));
1163 		else
1164 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1165 					priv->mode, (i == DMA_TX_SIZE - 1));
1166 }
1167 
1168 /**
1169  * stmmac_clear_descriptors - clear descriptors
1170  * @priv: driver private structure
1171  * Description: this function is called to clear the TX and RX descriptors
1172  * whether basic or extended descriptors are in use.
1173  */
1174 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1175 {
1176 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1177 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1178 	u32 queue;
1179 
1180 	/* Clear the RX descriptors */
1181 	for (queue = 0; queue < rx_queue_cnt; queue++)
1182 		stmmac_clear_rx_descriptors(priv, queue);
1183 
1184 	/* Clear the TX descriptors */
1185 	for (queue = 0; queue < tx_queue_cnt; queue++)
1186 		stmmac_clear_tx_descriptors(priv, queue);
1187 }
1188 
1189 /**
1190  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1191  * @priv: driver private structure
1192  * @p: descriptor pointer
1193  * @i: descriptor index
1194  * @flags: gfp flag
1195  * @queue: RX queue index
1196  * Description: this function is called to allocate a receive buffer, perform
1197  * the DMA mapping and init the descriptor.
1198  */
1199 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1200 				  int i, gfp_t flags, u32 queue)
1201 {
1202 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1203 	struct sk_buff *skb;
1204 
1205 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1206 	if (!skb) {
1207 		netdev_err(priv->dev,
1208 			   "%s: Rx init fails; skb is NULL\n", __func__);
1209 		return -ENOMEM;
1210 	}
1211 	rx_q->rx_skbuff[i] = skb;
1212 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1213 						priv->dma_buf_sz,
1214 						DMA_FROM_DEVICE);
1215 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1216 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1217 		dev_kfree_skb_any(skb);
1218 		return -EINVAL;
1219 	}
1220 
1221 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1222 
1223 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1224 		stmmac_init_desc3(priv, p);
1225 
1226 	return 0;
1227 }
1228 
1229 /**
1230  * stmmac_free_rx_buffer - free an RX DMA buffer
1231  * @priv: private structure
1232  * @queue: RX queue index
1233  * @i: buffer index.
1234  */
1235 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1236 {
1237 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1238 
1239 	if (rx_q->rx_skbuff[i]) {
1240 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1241 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1242 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1243 	}
1244 	rx_q->rx_skbuff[i] = NULL;
1245 }
1246 
1247 /**
1248  * stmmac_free_tx_buffer - free a TX DMA buffer
1249  * @priv: private structure
1250  * @queue: TX queue index
1251  * @i: buffer index.
1252  */
1253 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1254 {
1255 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1256 
1257 	if (tx_q->tx_skbuff_dma[i].buf) {
1258 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1259 			dma_unmap_page(priv->device,
1260 				       tx_q->tx_skbuff_dma[i].buf,
1261 				       tx_q->tx_skbuff_dma[i].len,
1262 				       DMA_TO_DEVICE);
1263 		else
1264 			dma_unmap_single(priv->device,
1265 					 tx_q->tx_skbuff_dma[i].buf,
1266 					 tx_q->tx_skbuff_dma[i].len,
1267 					 DMA_TO_DEVICE);
1268 	}
1269 
1270 	if (tx_q->tx_skbuff[i]) {
1271 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1272 		tx_q->tx_skbuff[i] = NULL;
1273 		tx_q->tx_skbuff_dma[i].buf = 0;
1274 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1275 	}
1276 }
1277 
1278 /**
1279  * init_dma_rx_desc_rings - init the RX descriptor rings
1280  * @dev: net device structure
1281  * @flags: gfp flag.
1282  * Description: this function initializes the DMA RX descriptors
1283  * and allocates the socket buffers. It supports the chained and ring
1284  * modes.
1285  */
1286 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1287 {
1288 	struct stmmac_priv *priv = netdev_priv(dev);
1289 	u32 rx_count = priv->plat->rx_queues_to_use;
1290 	int ret = -ENOMEM;
1291 	int bfsize = 0;
1292 	int queue;
1293 	int i;
1294 
1295 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1296 	if (bfsize < 0)
1297 		bfsize = 0;
1298 
1299 	if (bfsize < BUF_SIZE_16KiB)
1300 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1301 
1302 	priv->dma_buf_sz = bfsize;
1303 
1304 	/* RX INITIALIZATION */
1305 	netif_dbg(priv, probe, priv->dev,
1306 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1307 
1308 	for (queue = 0; queue < rx_count; queue++) {
1309 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1310 
1311 		netif_dbg(priv, probe, priv->dev,
1312 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1313 			  (u32)rx_q->dma_rx_phy);
1314 
1315 		for (i = 0; i < DMA_RX_SIZE; i++) {
1316 			struct dma_desc *p;
1317 
1318 			if (priv->extend_desc)
1319 				p = &((rx_q->dma_erx + i)->basic);
1320 			else
1321 				p = rx_q->dma_rx + i;
1322 
1323 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1324 						     queue);
1325 			if (ret)
1326 				goto err_init_rx_buffers;
1327 
1328 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1329 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1330 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1331 		}
1332 
1333 		rx_q->cur_rx = 0;
1334 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1335 
1336 		stmmac_clear_rx_descriptors(priv, queue);
1337 
1338 		/* Setup the chained descriptor addresses */
1339 		if (priv->mode == STMMAC_CHAIN_MODE) {
1340 			if (priv->extend_desc)
1341 				stmmac_mode_init(priv, rx_q->dma_erx,
1342 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1343 			else
1344 				stmmac_mode_init(priv, rx_q->dma_rx,
1345 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1346 		}
1347 	}
1348 
1349 	buf_sz = bfsize;
1350 
1351 	return 0;
1352 
1353 err_init_rx_buffers:
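	/* Unwind: free the buffers allocated so far, walking back through the queues */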
1354 	while (queue >= 0) {
1355 		while (--i >= 0)
1356 			stmmac_free_rx_buffer(priv, queue, i);
1357 
1358 		if (queue == 0)
1359 			break;
1360 
1361 		i = DMA_RX_SIZE;
1362 		queue--;
1363 	}
1364 
1365 	return ret;
1366 }
1367 
1368 /**
1369  * init_dma_tx_desc_rings - init the TX descriptor rings
1370  * @dev: net device structure.
1371  * Description: this function initializes the DMA TX descriptors
1372  * and allocates the socket buffers. It supports the chained and ring
1373  * modes.
1374  */
1375 static int init_dma_tx_desc_rings(struct net_device *dev)
1376 {
1377 	struct stmmac_priv *priv = netdev_priv(dev);
1378 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1379 	u32 queue;
1380 	int i;
1381 
1382 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1383 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1384 
1385 		netif_dbg(priv, probe, priv->dev,
1386 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1387 			 (u32)tx_q->dma_tx_phy);
1388 
1389 		/* Setup the chained descriptor addresses */
1390 		if (priv->mode == STMMAC_CHAIN_MODE) {
1391 			if (priv->extend_desc)
1392 				stmmac_mode_init(priv, tx_q->dma_etx,
1393 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1394 			else
1395 				stmmac_mode_init(priv, tx_q->dma_tx,
1396 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1397 		}
1398 
1399 		for (i = 0; i < DMA_TX_SIZE; i++) {
1400 			struct dma_desc *p;
1401 			if (priv->extend_desc)
1402 				p = &((tx_q->dma_etx + i)->basic);
1403 			else
1404 				p = tx_q->dma_tx + i;
1405 
1406 			stmmac_clear_desc(priv, p);
1407 
1408 			tx_q->tx_skbuff_dma[i].buf = 0;
1409 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1410 			tx_q->tx_skbuff_dma[i].len = 0;
1411 			tx_q->tx_skbuff_dma[i].last_segment = false;
1412 			tx_q->tx_skbuff[i] = NULL;
1413 		}
1414 
1415 		tx_q->dirty_tx = 0;
1416 		tx_q->cur_tx = 0;
1417 		tx_q->mss = 0;
1418 
1419 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1420 	}
1421 
1422 	return 0;
1423 }
1424 
1425 /**
1426  * init_dma_desc_rings - init the RX/TX descriptor rings
1427  * @dev: net device structure
1428  * @flags: gfp flag.
1429  * Description: this function initializes the DMA RX/TX descriptors
1430  * and allocates the socket buffers. It supports the chained and ring
1431  * modes.
1432  */
1433 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1434 {
1435 	struct stmmac_priv *priv = netdev_priv(dev);
1436 	int ret;
1437 
1438 	ret = init_dma_rx_desc_rings(dev, flags);
1439 	if (ret)
1440 		return ret;
1441 
1442 	ret = init_dma_tx_desc_rings(dev);
1443 
1444 	stmmac_clear_descriptors(priv);
1445 
1446 	if (netif_msg_hw(priv))
1447 		stmmac_display_rings(priv);
1448 
1449 	return ret;
1450 }
1451 
1452 /**
1453  * dma_free_rx_skbufs - free RX dma buffers
1454  * @priv: private structure
1455  * @queue: RX queue index
1456  */
1457 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1458 {
1459 	int i;
1460 
1461 	for (i = 0; i < DMA_RX_SIZE; i++)
1462 		stmmac_free_rx_buffer(priv, queue, i);
1463 }
1464 
1465 /**
1466  * dma_free_tx_skbufs - free TX dma buffers
1467  * @priv: private structure
1468  * @queue: TX queue index
1469  */
1470 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1471 {
1472 	int i;
1473 
1474 	for (i = 0; i < DMA_TX_SIZE; i++)
1475 		stmmac_free_tx_buffer(priv, queue, i);
1476 }
1477 
1478 /**
1479  * free_dma_rx_desc_resources - free RX dma desc resources
1480  * @priv: private structure
1481  */
1482 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1483 {
1484 	u32 rx_count = priv->plat->rx_queues_to_use;
1485 	u32 queue;
1486 
1487 	/* Free RX queue resources */
1488 	for (queue = 0; queue < rx_count; queue++) {
1489 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1490 
1491 		/* Release the DMA RX socket buffers */
1492 		dma_free_rx_skbufs(priv, queue);
1493 
1494 		/* Free DMA regions of consistent memory previously allocated */
1495 		if (!priv->extend_desc)
1496 			dma_free_coherent(priv->device,
1497 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1498 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1499 		else
1500 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1501 					  sizeof(struct dma_extended_desc),
1502 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1503 
1504 		kfree(rx_q->rx_skbuff_dma);
1505 		kfree(rx_q->rx_skbuff);
1506 	}
1507 }
1508 
1509 /**
1510  * free_dma_tx_desc_resources - free TX dma desc resources
1511  * @priv: private structure
1512  */
1513 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1514 {
1515 	u32 tx_count = priv->plat->tx_queues_to_use;
1516 	u32 queue;
1517 
1518 	/* Free TX queue resources */
1519 	for (queue = 0; queue < tx_count; queue++) {
1520 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1521 
1522 		/* Release the DMA TX socket buffers */
1523 		dma_free_tx_skbufs(priv, queue);
1524 
1525 		/* Free DMA regions of consistent memory previously allocated */
1526 		if (!priv->extend_desc)
1527 			dma_free_coherent(priv->device,
1528 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1529 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1530 		else
1531 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1532 					  sizeof(struct dma_extended_desc),
1533 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1534 
1535 		kfree(tx_q->tx_skbuff_dma);
1536 		kfree(tx_q->tx_skbuff);
1537 	}
1538 }
1539 
1540 /**
1541  * alloc_dma_rx_desc_resources - alloc RX resources.
1542  * @priv: private structure
1543  * Description: according to which descriptor can be used (extended or basic)
1544  * this function allocates the resources for the RX path. It pre-allocates the
1545  * RX socket buffers in order to allow a zero-copy mechanism.
1547  */
1548 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1549 {
1550 	u32 rx_count = priv->plat->rx_queues_to_use;
1551 	int ret = -ENOMEM;
1552 	u32 queue;
1553 
1554 	/* RX queues buffers and DMA */
1555 	for (queue = 0; queue < rx_count; queue++) {
1556 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1557 
1558 		rx_q->queue_index = queue;
1559 		rx_q->priv_data = priv;
1560 
1561 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1562 						    sizeof(dma_addr_t),
1563 						    GFP_KERNEL);
1564 		if (!rx_q->rx_skbuff_dma)
1565 			goto err_dma;
1566 
1567 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1568 						sizeof(struct sk_buff *),
1569 						GFP_KERNEL);
1570 		if (!rx_q->rx_skbuff)
1571 			goto err_dma;
1572 
1573 		if (priv->extend_desc) {
1574 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1575 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1576 							   &rx_q->dma_rx_phy,
1577 							   GFP_KERNEL);
1578 			if (!rx_q->dma_erx)
1579 				goto err_dma;
1580 
1581 		} else {
1582 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1583 							  DMA_RX_SIZE * sizeof(struct dma_desc),
1584 							  &rx_q->dma_rx_phy,
1585 							  GFP_KERNEL);
1586 			if (!rx_q->dma_rx)
1587 				goto err_dma;
1588 		}
1589 	}
1590 
1591 	return 0;
1592 
1593 err_dma:
1594 	free_dma_rx_desc_resources(priv);
1595 
1596 	return ret;
1597 }
1598 
1599 /**
1600  * alloc_dma_tx_desc_resources - alloc TX resources.
1601  * @priv: private structure
1602  * Description: according to which descriptor can be used (extended or basic)
1603  * this function allocates the resources for the TX path (descriptor rings and
1604  * the per-descriptor bookkeeping arrays).
1606  */
1607 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1608 {
1609 	u32 tx_count = priv->plat->tx_queues_to_use;
1610 	int ret = -ENOMEM;
1611 	u32 queue;
1612 
1613 	/* TX queues buffers and DMA */
1614 	for (queue = 0; queue < tx_count; queue++) {
1615 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1616 
1617 		tx_q->queue_index = queue;
1618 		tx_q->priv_data = priv;
1619 
1620 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1621 						    sizeof(*tx_q->tx_skbuff_dma),
1622 						    GFP_KERNEL);
1623 		if (!tx_q->tx_skbuff_dma)
1624 			goto err_dma;
1625 
1626 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1627 						sizeof(struct sk_buff *),
1628 						GFP_KERNEL);
1629 		if (!tx_q->tx_skbuff)
1630 			goto err_dma;
1631 
1632 		if (priv->extend_desc) {
1633 			tx_q->dma_etx = dma_alloc_coherent(priv->device,
1634 							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1635 							   &tx_q->dma_tx_phy,
1636 							   GFP_KERNEL);
1637 			if (!tx_q->dma_etx)
1638 				goto err_dma;
1639 		} else {
1640 			tx_q->dma_tx = dma_alloc_coherent(priv->device,
1641 							  DMA_TX_SIZE * sizeof(struct dma_desc),
1642 							  &tx_q->dma_tx_phy,
1643 							  GFP_KERNEL);
1644 			if (!tx_q->dma_tx)
1645 				goto err_dma;
1646 		}
1647 	}
1648 
1649 	return 0;
1650 
1651 err_dma:
1652 	free_dma_tx_desc_resources(priv);
1653 
1654 	return ret;
1655 }
1656 
1657 /**
1658  * alloc_dma_desc_resources - alloc TX/RX resources.
1659  * @priv: private structure
1660  * Description: according to which descriptor can be used (extended or basic)
1661  * this function allocates the resources for the TX and RX paths. In case of
1662  * reception, for example, it pre-allocates the RX socket buffers in order to
1663  * allow a zero-copy mechanism.
1664  */
1665 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1666 {
1667 	/* RX Allocation */
1668 	int ret = alloc_dma_rx_desc_resources(priv);
1669 
1670 	if (ret)
1671 		return ret;
1672 
1673 	ret = alloc_dma_tx_desc_resources(priv);
1674 
1675 	return ret;
1676 }
1677 
1678 /**
1679  * free_dma_desc_resources - free dma desc resources
1680  * @priv: private structure
1681  */
1682 static void free_dma_desc_resources(struct stmmac_priv *priv)
1683 {
1684 	/* Release the DMA RX socket buffers */
1685 	free_dma_rx_desc_resources(priv);
1686 
1687 	/* Release the DMA TX socket buffers */
1688 	free_dma_tx_desc_resources(priv);
1689 }
1690 
1691 /**
1692  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1693  *  @priv: driver private structure
1694  *  Description: It is used for enabling the rx queues in the MAC
1695  */
1696 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1697 {
1698 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1699 	int queue;
1700 	u8 mode;
1701 
1702 	for (queue = 0; queue < rx_queues_count; queue++) {
1703 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1704 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1705 	}
1706 }
1707 
1708 /**
1709  * stmmac_start_rx_dma - start RX DMA channel
1710  * @priv: driver private structure
1711  * @chan: RX channel index
1712  * Description:
1713  * This starts an RX DMA channel
1714  */
1715 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1716 {
1717 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1718 	stmmac_start_rx(priv, priv->ioaddr, chan);
1719 }
1720 
1721 /**
1722  * stmmac_start_tx_dma - start TX DMA channel
1723  * @priv: driver private structure
1724  * @chan: TX channel index
1725  * Description:
1726  * This starts a TX DMA channel
1727  */
1728 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1729 {
1730 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1731 	stmmac_start_tx(priv, priv->ioaddr, chan);
1732 }
1733 
1734 /**
1735  * stmmac_stop_rx_dma - stop RX DMA channel
1736  * @priv: driver private structure
1737  * @chan: RX channel index
1738  * Description:
1739  * This stops an RX DMA channel
1740  */
1741 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1742 {
1743 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1744 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1745 }
1746 
1747 /**
1748  * stmmac_stop_tx_dma - stop TX DMA channel
1749  * @priv: driver private structure
1750  * @chan: TX channel index
1751  * Description:
1752  * This stops a TX DMA channel
1753  */
1754 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1755 {
1756 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1757 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1758 }
1759 
1760 /**
1761  * stmmac_start_all_dma - start all RX and TX DMA channels
1762  * @priv: driver private structure
1763  * Description:
1764  * This starts all the RX and TX DMA channels
1765  */
1766 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1767 {
1768 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1769 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1770 	u32 chan = 0;
1771 
1772 	for (chan = 0; chan < rx_channels_count; chan++)
1773 		stmmac_start_rx_dma(priv, chan);
1774 
1775 	for (chan = 0; chan < tx_channels_count; chan++)
1776 		stmmac_start_tx_dma(priv, chan);
1777 }
1778 
1779 /**
1780  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1781  * @priv: driver private structure
1782  * Description:
1783  * This stops the RX and TX DMA channels
1784  */
1785 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1786 {
1787 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1788 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1789 	u32 chan = 0;
1790 
1791 	for (chan = 0; chan < rx_channels_count; chan++)
1792 		stmmac_stop_rx_dma(priv, chan);
1793 
1794 	for (chan = 0; chan < tx_channels_count; chan++)
1795 		stmmac_stop_tx_dma(priv, chan);
1796 }
1797 
1798 /**
1799  *  stmmac_dma_operation_mode - HW DMA operation mode
1800  *  @priv: driver private structure
1801  *  Description: it is used for configuring the DMA operation mode register in
1802  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1803  */
1804 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1805 {
1806 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1807 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1808 	int rxfifosz = priv->plat->rx_fifo_size;
1809 	int txfifosz = priv->plat->tx_fifo_size;
1810 	u32 txmode = 0;
1811 	u32 rxmode = 0;
1812 	u32 chan = 0;
1813 	u8 qmode = 0;
1814 
1815 	if (rxfifosz == 0)
1816 		rxfifosz = priv->dma_cap.rx_fifo_size;
1817 	if (txfifosz == 0)
1818 		txfifosz = priv->dma_cap.tx_fifo_size;
1819 
1820 	/* Adjust for real per queue fifo size */
1821 	rxfifosz /= rx_channels_count;
1822 	txfifosz /= tx_channels_count;
1823 
1824 	if (priv->plat->force_thresh_dma_mode) {
1825 		txmode = tc;
1826 		rxmode = tc;
1827 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1828 		/*
1829 		 * In case of GMAC, SF mode can be enabled
1830 		 * to perform the TX COE in HW. This depends on:
1831 		 * 1) TX COE being actually supported;
1832 		 * 2) there being no buggy Jumbo frame support
1833 		 *    that requires the csum not to be inserted in the TDES.
1834 		 */
1835 		txmode = SF_DMA_MODE;
1836 		rxmode = SF_DMA_MODE;
1837 		priv->xstats.threshold = SF_DMA_MODE;
1838 	} else {
1839 		txmode = tc;
1840 		rxmode = SF_DMA_MODE;
1841 	}
1842 
1843 	/* configure all channels */
1844 	for (chan = 0; chan < rx_channels_count; chan++) {
1845 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1846 
1847 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1848 				rxfifosz, qmode);
1849 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1850 				chan);
1851 	}
1852 
1853 	for (chan = 0; chan < tx_channels_count; chan++) {
1854 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1855 
1856 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1857 				txfifosz, qmode);
1858 	}
1859 }
1860 
1861 /**
1862  * stmmac_tx_clean - to manage the transmission completion
1863  * @priv: driver private structure
 * @budget: maximum number of descriptors that can be reclaimed in this call
1864  * @queue: TX queue index
1865  * Description: it reclaims the transmit resources after transmission completes.
1866  */
1867 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1868 {
1869 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1870 	unsigned int bytes_compl = 0, pkts_compl = 0;
1871 	unsigned int entry, count = 0;
1872 
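	/* Take the netdev TX queue lock to serialize against the xmit path for this queue */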
1873 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1874 
1875 	priv->xstats.tx_clean++;
1876 
1877 	entry = tx_q->dirty_tx;
1878 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1879 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1880 		struct dma_desc *p;
1881 		int status;
1882 
1883 		if (priv->extend_desc)
1884 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1885 		else
1886 			p = tx_q->dma_tx + entry;
1887 
1888 		status = stmmac_tx_status(priv, &priv->dev->stats,
1889 				&priv->xstats, p, priv->ioaddr);
1890 		/* Check if the descriptor is owned by the DMA */
1891 		if (unlikely(status & tx_dma_own))
1892 			break;
1893 
1894 		count++;
1895 
1896 		/* Make sure descriptor fields are read after reading
1897 		 * the own bit.
1898 		 */
1899 		dma_rmb();
1900 
1901 		/* Just consider the last segment and ... */
1902 		if (likely(!(status & tx_not_ls))) {
1903 			/* ... verify the status error condition */
1904 			if (unlikely(status & tx_err)) {
1905 				priv->dev->stats.tx_errors++;
1906 			} else {
1907 				priv->dev->stats.tx_packets++;
1908 				priv->xstats.tx_pkt_n++;
1909 			}
1910 			stmmac_get_tx_hwtstamp(priv, p, skb);
1911 		}
1912 
1913 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1914 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1915 				dma_unmap_page(priv->device,
1916 					       tx_q->tx_skbuff_dma[entry].buf,
1917 					       tx_q->tx_skbuff_dma[entry].len,
1918 					       DMA_TO_DEVICE);
1919 			else
1920 				dma_unmap_single(priv->device,
1921 						 tx_q->tx_skbuff_dma[entry].buf,
1922 						 tx_q->tx_skbuff_dma[entry].len,
1923 						 DMA_TO_DEVICE);
1924 			tx_q->tx_skbuff_dma[entry].buf = 0;
1925 			tx_q->tx_skbuff_dma[entry].len = 0;
1926 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1927 		}
1928 
1929 		stmmac_clean_desc3(priv, tx_q, p);
1930 
1931 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1932 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1933 
1934 		if (likely(skb != NULL)) {
1935 			pkts_compl++;
1936 			bytes_compl += skb->len;
1937 			dev_consume_skb_any(skb);
1938 			tx_q->tx_skbuff[entry] = NULL;
1939 		}
1940 
1941 		stmmac_release_tx_desc(priv, p, priv->mode);
1942 
1943 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1944 	}
1945 	tx_q->dirty_tx = entry;
1946 
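	/* Report the completed packets/bytes to BQL (Byte Queue Limits) for this queue */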
1947 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1948 				  pkts_compl, bytes_compl);
1949 
1950 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1951 								queue))) &&
1952 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1953 
1954 		netif_dbg(priv, tx_done, priv->dev,
1955 			  "%s: restart transmit\n", __func__);
1956 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1957 	}
1958 
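	/* With EEE enabled and the TX path not yet in LPI, try to enter the low power idle state */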
1959 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1960 		stmmac_enable_eee_mode(priv);
1961 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1962 	}
1963 
1964 	/* We still have pending packets, let's call for a new scheduling */
1965 	if (tx_q->dirty_tx != tx_q->cur_tx)
1966 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1967 
1968 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1969 
1970 	return count;
1971 }
1972 
1973 /**
1974  * stmmac_tx_err - to manage the tx error
1975  * @priv: driver private structure
1976  * @chan: channel index
1977  * Description: it cleans the descriptors and restarts the transmission
1978  * in case of transmission errors.
1979  */
1980 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1981 {
1982 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1983 	int i;
1984 
1985 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1986 
1987 	stmmac_stop_tx_dma(priv, chan);
1988 	dma_free_tx_skbufs(priv, chan);
1989 	for (i = 0; i < DMA_TX_SIZE; i++)
1990 		if (priv->extend_desc)
1991 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1992 					priv->mode, (i == DMA_TX_SIZE - 1));
1993 		else
1994 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1995 					priv->mode, (i == DMA_TX_SIZE - 1));
1996 	tx_q->dirty_tx = 0;
1997 	tx_q->cur_tx = 0;
1998 	tx_q->mss = 0;
1999 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2000 	stmmac_start_tx_dma(priv, chan);
2001 
2002 	priv->dev->stats.tx_errors++;
2003 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2004 }
2005 
2006 /**
2007  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2008  *  @priv: driver private structure
2009  *  @txmode: TX operating mode
2010  *  @rxmode: RX operating mode
2011  *  @chan: channel index
2012  *  Description: it is used for configuring the DMA operation mode at
2013  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2014  *  mode.
2015  */
2016 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2017 					  u32 rxmode, u32 chan)
2018 {
2019 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2020 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2021 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2022 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2023 	int rxfifosz = priv->plat->rx_fifo_size;
2024 	int txfifosz = priv->plat->tx_fifo_size;
2025 
2026 	if (rxfifosz == 0)
2027 		rxfifosz = priv->dma_cap.rx_fifo_size;
2028 	if (txfifosz == 0)
2029 		txfifosz = priv->dma_cap.tx_fifo_size;
2030 
2031 	/* Adjust for real per queue fifo size */
2032 	rxfifosz /= rx_channels_count;
2033 	txfifosz /= tx_channels_count;
2034 
2035 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2036 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2037 }
2038 
2039 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2040 {
2041 	int ret;
2042 
2043 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2044 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2045 	if (ret && (ret != -EINVAL)) {
2046 		stmmac_global_err(priv);
2047 		return true;
2048 	}
2049 
2050 	return false;
2051 }
2052 
2053 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2054 {
2055 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2056 						 &priv->xstats, chan);
2057 	struct stmmac_channel *ch = &priv->channel[chan];
2058 
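	/* Mask the per-channel DMA interrupt and defer the work to NAPI; the
	 * poll routines re-enable it once the pending work fits the budget.
	 */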
2059 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2060 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2061 		napi_schedule_irqoff(&ch->rx_napi);
2062 	}
2063 
2064 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2065 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2066 		napi_schedule_irqoff(&ch->tx_napi);
2067 	}
2068 
2069 	return status;
2070 }
2071 
2072 /**
2073  * stmmac_dma_interrupt - DMA ISR
2074  * @priv: driver private structure
2075  * Description: this is the DMA ISR. It is called by the main ISR.
2076  * It calls the dwmac dma routine and schedules the poll method when some
2077  * work can be done.
2078  */
2079 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2080 {
2081 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2082 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2083 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2084 				tx_channel_count : rx_channel_count;
2085 	u32 chan;
2086 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2087 
2088 	/* Make sure we never check beyond our status buffer. */
2089 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2090 		channels_to_check = ARRAY_SIZE(status);
2091 
2092 	for (chan = 0; chan < channels_to_check; chan++)
2093 		status[chan] = stmmac_napi_check(priv, chan);
2094 
2095 	for (chan = 0; chan < tx_channel_count; chan++) {
2096 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2097 			/* Try to bump up the dma threshold on this failure */
2098 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2099 			    (tc <= 256)) {
2100 				tc += 64;
2101 				if (priv->plat->force_thresh_dma_mode)
2102 					stmmac_set_dma_operation_mode(priv,
2103 								      tc,
2104 								      tc,
2105 								      chan);
2106 				else
2107 					stmmac_set_dma_operation_mode(priv,
2108 								    tc,
2109 								    SF_DMA_MODE,
2110 								    chan);
2111 				priv->xstats.threshold = tc;
2112 			}
2113 		} else if (unlikely(status[chan] == tx_hard_error)) {
2114 			stmmac_tx_err(priv, chan);
2115 		}
2116 	}
2117 }
2118 
2119 /**
2120  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2121  * @priv: driver private structure
2122  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2123  */
2124 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2125 {
2126 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2127 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2128 
2129 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2130 
2131 	if (priv->dma_cap.rmon) {
2132 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2133 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2134 	} else
2135 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2136 }
2137 
2138 /**
2139  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2140  * @priv: driver private structure
2141  * Description:
2142  *  newer GMAC chip generations have a register to indicate the
2143  *  presence of the optional features/functions.
2144  *  This can also be used to override the values passed through the
2145  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2146  */
2147 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2148 {
2149 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2150 }
2151 
2152 /**
2153  * stmmac_check_ether_addr - check if the MAC addr is valid
2154  * @priv: driver private structure
2155  * Description:
2156  * it verifies that the MAC address is valid and, in case of failure, it
2157  * generates a random MAC address
2158  */
2159 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2160 {
2161 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2162 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2163 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2164 			eth_hw_addr_random(priv->dev);
2165 		netdev_info(priv->dev, "device MAC address %pM\n",
2166 			    priv->dev->dev_addr);
2167 	}
2168 }
2169 
2170 /**
2171  * stmmac_init_dma_engine - DMA init.
2172  * @priv: driver private structure
2173  * Description:
2174  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2175  * Some DMA parameters can be passed from the platform;
2176  * if they are not passed, a default is kept for the MAC or GMAC.
2177  */
2178 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2179 {
2180 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2181 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2182 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2183 	struct stmmac_rx_queue *rx_q;
2184 	struct stmmac_tx_queue *tx_q;
2185 	u32 chan = 0;
2186 	int atds = 0;
2187 	int ret = 0;
2188 
2189 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2190 		dev_err(priv->device, "Invalid DMA configuration\n");
2191 		return -EINVAL;
2192 	}
2193 
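	/* Extended descriptors in ring mode require the Alternate Descriptor Size (ATDS) setting */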
2194 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2195 		atds = 1;
2196 
2197 	ret = stmmac_reset(priv, priv->ioaddr);
2198 	if (ret) {
2199 		dev_err(priv->device, "Failed to reset the dma\n");
2200 		return ret;
2201 	}
2202 
2203 	/* DMA Configuration */
2204 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2205 
2206 	if (priv->plat->axi)
2207 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2208 
2209 	/* DMA RX Channel Configuration */
2210 	for (chan = 0; chan < rx_channels_count; chan++) {
2211 		rx_q = &priv->rx_queue[chan];
2212 
2213 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2214 				    rx_q->dma_rx_phy, chan);
2215 
2216 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2217 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2218 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2219 				       rx_q->rx_tail_addr, chan);
2220 	}
2221 
2222 	/* DMA TX Channel Configuration */
2223 	for (chan = 0; chan < tx_channels_count; chan++) {
2224 		tx_q = &priv->tx_queue[chan];
2225 
2226 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2227 				    tx_q->dma_tx_phy, chan);
2228 
2229 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2230 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2231 				       tx_q->tx_tail_addr, chan);
2232 	}
2233 
2234 	/* DMA CSR Channel configuration */
2235 	for (chan = 0; chan < dma_csr_ch; chan++)
2236 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2237 
2238 	return ret;
2239 }
2240 
2241 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2242 {
2243 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2244 
2245 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2246 }
2247 
2248 /**
2249  * stmmac_tx_timer - mitigation sw timer for tx.
2250  * @t: pointer to the timer_list embedded in the TX queue
2251  * Description:
2252  * This is the timer handler that schedules the TX NAPI poll, which runs stmmac_tx_clean.
2253  */
2254 static void stmmac_tx_timer(struct timer_list *t)
2255 {
2256 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2257 	struct stmmac_priv *priv = tx_q->priv_data;
2258 	struct stmmac_channel *ch;
2259 
2260 	ch = &priv->channel[tx_q->queue_index];
2261 
2262 	/*
2263 	 * If NAPI is already running we can miss some events. Let's rearm
2264 	 * the timer and try again.
2265 	 */
2266 	if (likely(napi_schedule_prep(&ch->tx_napi)))
2267 		__napi_schedule(&ch->tx_napi);
2268 	else
2269 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
2270 }
2271 
2272 /**
2273  * stmmac_init_tx_coalesce - init tx mitigation options.
2274  * @priv: driver private structure
2275  * Description:
2276  * This inits the transmit coalesce parameters: i.e. timer rate,
2277  * timer handler and default threshold used for enabling the
2278  * interrupt on completion bit.
2279  */
2280 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2281 {
2282 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2283 	u32 chan;
2284 
2285 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2286 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2287 
2288 	for (chan = 0; chan < tx_channel_count; chan++) {
2289 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2290 
2291 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2292 	}
2293 }
2294 
2295 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2296 {
2297 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2298 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2299 	u32 chan;
2300 
2301 	/* set TX ring length */
2302 	for (chan = 0; chan < tx_channels_count; chan++)
2303 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2304 				(DMA_TX_SIZE - 1), chan);
2305 
2306 	/* set RX ring length */
2307 	for (chan = 0; chan < rx_channels_count; chan++)
2308 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2309 				(DMA_RX_SIZE - 1), chan);
2310 }
2311 
2312 /**
2313  *  stmmac_set_tx_queue_weight - Set TX queue weight
2314  *  @priv: driver private structure
2315  *  Description: It is used for setting the TX queue weights
2316  */
2317 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2318 {
2319 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2320 	u32 weight;
2321 	u32 queue;
2322 
2323 	for (queue = 0; queue < tx_queues_count; queue++) {
2324 		weight = priv->plat->tx_queues_cfg[queue].weight;
2325 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2326 	}
2327 }
2328 
2329 /**
2330  *  stmmac_configure_cbs - Configure CBS in TX queue
2331  *  @priv: driver private structure
2332  *  Description: It is used for configuring CBS in AVB TX queues
2333  */
2334 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2335 {
2336 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2337 	u32 mode_to_use;
2338 	u32 queue;
2339 
2340 	/* queue 0 is reserved for legacy traffic */
2341 	for (queue = 1; queue < tx_queues_count; queue++) {
2342 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2343 		if (mode_to_use == MTL_QUEUE_DCB)
2344 			continue;
2345 
2346 		stmmac_config_cbs(priv, priv->hw,
2347 				priv->plat->tx_queues_cfg[queue].send_slope,
2348 				priv->plat->tx_queues_cfg[queue].idle_slope,
2349 				priv->plat->tx_queues_cfg[queue].high_credit,
2350 				priv->plat->tx_queues_cfg[queue].low_credit,
2351 				queue);
2352 	}
2353 }
2354 
2355 /**
2356  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2357  *  @priv: driver private structure
2358  *  Description: It is used for mapping RX queues to RX dma channels
2359  */
2360 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2361 {
2362 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2363 	u32 queue;
2364 	u32 chan;
2365 
2366 	for (queue = 0; queue < rx_queues_count; queue++) {
2367 		chan = priv->plat->rx_queues_cfg[queue].chan;
2368 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2369 	}
2370 }
2371 
2372 /**
2373  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2374  *  @priv: driver private structure
2375  *  Description: It is used for configuring the RX Queue Priority
2376  */
2377 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2378 {
2379 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2380 	u32 queue;
2381 	u32 prio;
2382 
2383 	for (queue = 0; queue < rx_queues_count; queue++) {
2384 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2385 			continue;
2386 
2387 		prio = priv->plat->rx_queues_cfg[queue].prio;
2388 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2389 	}
2390 }
2391 
2392 /**
2393  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2394  *  @priv: driver private structure
2395  *  Description: It is used for configuring the TX Queue Priority
2396  */
2397 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2398 {
2399 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2400 	u32 queue;
2401 	u32 prio;
2402 
2403 	for (queue = 0; queue < tx_queues_count; queue++) {
2404 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2405 			continue;
2406 
2407 		prio = priv->plat->tx_queues_cfg[queue].prio;
2408 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2409 	}
2410 }
2411 
2412 /**
2413  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2414  *  @priv: driver private structure
2415  *  Description: It is used for configuring the RX queue routing
2416  */
2417 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2418 {
2419 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2420 	u32 queue;
2421 	u8 packet;
2422 
2423 	for (queue = 0; queue < rx_queues_count; queue++) {
2424 		/* no specific packet type routing specified for the queue */
2425 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2426 			continue;
2427 
2428 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2429 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2430 	}
2431 }
2432 
2433 /**
2434  *  stmmac_mtl_configuration - Configure MTL
2435  *  @priv: driver private structure
2436  *  Description: It is used for configuring MTL
2437  */
2438 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2439 {
2440 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2441 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2442 
2443 	if (tx_queues_count > 1)
2444 		stmmac_set_tx_queue_weight(priv);
2445 
2446 	/* Configure MTL RX algorithms */
2447 	if (rx_queues_count > 1)
2448 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2449 				priv->plat->rx_sched_algorithm);
2450 
2451 	/* Configure MTL TX algorithms */
2452 	if (tx_queues_count > 1)
2453 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2454 				priv->plat->tx_sched_algorithm);
2455 
2456 	/* Configure CBS in AVB TX queues */
2457 	if (tx_queues_count > 1)
2458 		stmmac_configure_cbs(priv);
2459 
2460 	/* Map RX MTL to DMA channels */
2461 	stmmac_rx_queue_dma_chan_map(priv);
2462 
2463 	/* Enable MAC RX Queues */
2464 	stmmac_mac_enable_rx_queues(priv);
2465 
2466 	/* Set RX priorities */
2467 	if (rx_queues_count > 1)
2468 		stmmac_mac_config_rx_queues_prio(priv);
2469 
2470 	/* Set TX priorities */
2471 	if (tx_queues_count > 1)
2472 		stmmac_mac_config_tx_queues_prio(priv);
2473 
2474 	/* Set RX routing */
2475 	if (rx_queues_count > 1)
2476 		stmmac_mac_config_rx_queues_routing(priv);
2477 }
2478 
2479 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2480 {
2481 	if (priv->dma_cap.asp) {
2482 		netdev_info(priv->dev, "Enabling Safety Features\n");
2483 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2484 	} else {
2485 		netdev_info(priv->dev, "No Safety Features support found\n");
2486 	}
2487 }
2488 
2489 /**
2490  * stmmac_hw_setup - setup mac in a usable state.
2491  *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
2492  *  Description:
2493  *  this is the main function to setup the HW in a usable state: the
2494  *  dma engine is reset, the core registers are configured (e.g. AXI,
2495  *  Checksum features, timers). The DMA is ready to start receiving and
2496  *  transmitting.
2497  *  Return value:
2498  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2499  *  file on failure.
2500  */
2501 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2502 {
2503 	struct stmmac_priv *priv = netdev_priv(dev);
2504 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2505 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2506 	u32 chan;
2507 	int ret;
2508 
2509 	/* DMA initialization and SW reset */
2510 	ret = stmmac_init_dma_engine(priv);
2511 	if (ret < 0) {
2512 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2513 			   __func__);
2514 		return ret;
2515 	}
2516 
2517 	/* Copy the MAC addr into the HW  */
2518 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2519 
2520 	/* PS and related bits will be programmed according to the speed */
2521 	if (priv->hw->pcs) {
2522 		int speed = priv->plat->mac_port_sel_speed;
2523 
2524 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2525 		    (speed == SPEED_1000)) {
2526 			priv->hw->ps = speed;
2527 		} else {
2528 			dev_warn(priv->device, "invalid port speed\n");
2529 			priv->hw->ps = 0;
2530 		}
2531 	}
2532 
2533 	/* Initialize the MAC Core */
2534 	stmmac_core_init(priv, priv->hw, dev);
2535 
2536 	/* Initialize MTL*/
2537 	stmmac_mtl_configuration(priv);
2538 	/* Initialize MTL */
2539 	/* Initialize Safety Features */
2540 	stmmac_safety_feat_configuration(priv);
2541 
2542 	ret = stmmac_rx_ipc(priv, priv->hw);
2543 	if (!ret) {
2544 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2545 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2546 		priv->hw->rx_csum = 0;
2547 	}
2548 
2549 	/* Enable the MAC Rx/Tx */
2550 	stmmac_mac_set(priv, priv->ioaddr, true);
2551 
2552 	/* Set the HW DMA mode and the COE */
2553 	stmmac_dma_operation_mode(priv);
2554 
2555 	stmmac_mmc_setup(priv);
2556 
2557 	if (init_ptp) {
2558 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2559 		if (ret < 0)
2560 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2561 
2562 		ret = stmmac_init_ptp(priv);
2563 		if (ret == -EOPNOTSUPP)
2564 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2565 		else if (ret)
2566 			netdev_warn(priv->dev, "PTP init failed\n");
2567 	}
2568 
2569 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2570 
2571 	if (priv->use_riwt) {
2572 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2573 		if (!ret)
2574 			priv->rx_riwt = MAX_DMA_RIWT;
2575 	}
2576 
2577 	if (priv->hw->pcs)
2578 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2579 
2580 	/* set TX and RX rings length */
2581 	stmmac_set_rings_length(priv);
2582 
2583 	/* Enable TSO */
2584 	if (priv->tso) {
2585 		for (chan = 0; chan < tx_cnt; chan++)
2586 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2587 	}
2588 
2589 	/* Start the ball rolling... */
2590 	stmmac_start_all_dma(priv);
2591 
2592 	return 0;
2593 }
2594 
2595 static void stmmac_hw_teardown(struct net_device *dev)
2596 {
2597 	struct stmmac_priv *priv = netdev_priv(dev);
2598 
2599 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2600 }
2601 
2602 /**
2603  *  stmmac_open - open entry point of the driver
2604  *  @dev : pointer to the device structure.
2605  *  Description:
2606  *  This function is the open entry point of the driver.
2607  *  Return value:
2608  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2609  *  file on failure.
2610  */
2611 static int stmmac_open(struct net_device *dev)
2612 {
2613 	struct stmmac_priv *priv = netdev_priv(dev);
2614 	u32 chan;
2615 	int ret;
2616 
2617 	stmmac_check_ether_addr(priv);
2618 
2619 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2620 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2621 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2622 		ret = stmmac_init_phy(dev);
2623 		if (ret) {
2624 			netdev_err(priv->dev,
2625 				   "%s: Cannot attach to PHY (error: %d)\n",
2626 				   __func__, ret);
2627 			return ret;
2628 		}
2629 	}
2630 
2631 	/* Extra statistics */
2632 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2633 	priv->xstats.threshold = tc;
2634 
2635 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2636 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2637 
2638 	ret = alloc_dma_desc_resources(priv);
2639 	if (ret < 0) {
2640 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2641 			   __func__);
2642 		goto dma_desc_error;
2643 	}
2644 
2645 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2646 	if (ret < 0) {
2647 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2648 			   __func__);
2649 		goto init_error;
2650 	}
2651 
2652 	ret = stmmac_hw_setup(dev, true);
2653 	if (ret < 0) {
2654 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2655 		goto init_error;
2656 	}
2657 
2658 	stmmac_init_tx_coalesce(priv);
2659 
2660 	if (dev->phydev)
2661 		phy_start(dev->phydev);
2662 
2663 	/* Request the IRQ lines */
2664 	ret = request_irq(dev->irq, stmmac_interrupt,
2665 			  IRQF_SHARED, dev->name, dev);
2666 	if (unlikely(ret < 0)) {
2667 		netdev_err(priv->dev,
2668 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2669 			   __func__, dev->irq, ret);
2670 		goto irq_error;
2671 	}
2672 
2673 	/* Request the Wake IRQ in case another line is used for WoL */
2674 	if (priv->wol_irq != dev->irq) {
2675 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2676 				  IRQF_SHARED, dev->name, dev);
2677 		if (unlikely(ret < 0)) {
2678 			netdev_err(priv->dev,
2679 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2680 				   __func__, priv->wol_irq, ret);
2681 			goto wolirq_error;
2682 		}
2683 	}
2684 
2685 	/* Request the LPI IRQ when a separate line is used for it */
2686 	if (priv->lpi_irq > 0) {
2687 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2688 				  dev->name, dev);
2689 		if (unlikely(ret < 0)) {
2690 			netdev_err(priv->dev,
2691 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2692 				   __func__, priv->lpi_irq, ret);
2693 			goto lpiirq_error;
2694 		}
2695 	}
2696 
2697 	stmmac_enable_all_queues(priv);
2698 	stmmac_start_all_queues(priv);
2699 
2700 	return 0;
2701 
2702 lpiirq_error:
2703 	if (priv->wol_irq != dev->irq)
2704 		free_irq(priv->wol_irq, dev);
2705 wolirq_error:
2706 	free_irq(dev->irq, dev);
2707 irq_error:
2708 	if (dev->phydev)
2709 		phy_stop(dev->phydev);
2710 
2711 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2712 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2713 
2714 	stmmac_hw_teardown(dev);
2715 init_error:
2716 	free_dma_desc_resources(priv);
2717 dma_desc_error:
2718 	if (dev->phydev)
2719 		phy_disconnect(dev->phydev);
2720 
2721 	return ret;
2722 }
2723 
2724 /**
2725  *  stmmac_release - close entry point of the driver
2726  *  @dev : device pointer.
2727  *  Description:
2728  *  This is the stop entry point of the driver.
2729  */
2730 static int stmmac_release(struct net_device *dev)
2731 {
2732 	struct stmmac_priv *priv = netdev_priv(dev);
2733 	u32 chan;
2734 
2735 	if (priv->eee_enabled)
2736 		del_timer_sync(&priv->eee_ctrl_timer);
2737 
2738 	/* Stop and disconnect the PHY */
2739 	if (dev->phydev) {
2740 		phy_stop(dev->phydev);
2741 		phy_disconnect(dev->phydev);
2742 	}
2743 
2744 	stmmac_stop_all_queues(priv);
2745 
2746 	stmmac_disable_all_queues(priv);
2747 
2748 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2749 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2750 
2751 	/* Free the IRQ lines */
2752 	free_irq(dev->irq, dev);
2753 	if (priv->wol_irq != dev->irq)
2754 		free_irq(priv->wol_irq, dev);
2755 	if (priv->lpi_irq > 0)
2756 		free_irq(priv->lpi_irq, dev);
2757 
2758 	/* Stop TX/RX DMA and clear the descriptors */
2759 	stmmac_stop_all_dma(priv);
2760 
2761 	/* Release and free the Rx/Tx resources */
2762 	free_dma_desc_resources(priv);
2763 
2764 	/* Disable the MAC Rx/Tx */
2765 	stmmac_mac_set(priv, priv->ioaddr, false);
2766 
2767 	netif_carrier_off(dev);
2768 
2769 	stmmac_release_ptp(priv);
2770 
2771 	return 0;
2772 }
2773 
2774 /**
2775  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
2776  *  @priv: driver private structure
2777  *  @des: buffer start address
2778  *  @total_len: total length to fill in descriptors
2779  *  @last_segment: condition for the last descriptor
2780  *  @queue: TX queue index
2781  *  Description:
2782  *  This function fills descriptors and requests new descriptors according to
2783  *  the buffer length to fill
2784  */
2785 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2786 				 int total_len, bool last_segment, u32 queue)
2787 {
2788 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2789 	struct dma_desc *desc;
2790 	u32 buff_size;
2791 	int tmp_len;
2792 
2793 	tmp_len = total_len;
2794 
2795 	while (tmp_len > 0) {
2796 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2797 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2798 		desc = tx_q->dma_tx + tx_q->cur_tx;
2799 
2800 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2801 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2802 			    TSO_MAX_BUFF_SIZE : tmp_len;
2803 
2804 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2805 				0, 1,
2806 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2807 				0, 0);
2808 
2809 		tmp_len -= TSO_MAX_BUFF_SIZE;
2810 	}
2811 }
2812 
2813 /**
2814  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2815  *  @skb : the socket buffer
2816  *  @dev : device pointer
2817  *  Description: this is the transmit function that is called on TSO frames
2818  *  (support available on GMAC4 and newer chips).
2819  *  Diagram below shows the ring programming in case of TSO frames:
2820  *
2821  *  First Descriptor
2822  *   --------
2823  *   | DES0 |---> buffer1 = L2/L3/L4 header
2824  *   | DES1 |---> TCP Payload (can continue on next descr...)
2825  *   | DES2 |---> buffer 1 and 2 len
2826  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2827  *   --------
2828  *	|
2829  *     ...
2830  *	|
2831  *   --------
2832  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2833  *   | DES1 | --|
2834  *   | DES2 | --> buffer 1 and 2 len
2835  *   | DES3 |
2836  *   --------
2837  *
2838  * The MSS is fixed while TSO is enabled, so the TDES3 context field only needs programming when the MSS changes.
2839  */
2840 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2841 {
2842 	struct dma_desc *desc, *first, *mss_desc = NULL;
2843 	struct stmmac_priv *priv = netdev_priv(dev);
2844 	int nfrags = skb_shinfo(skb)->nr_frags;
2845 	u32 queue = skb_get_queue_mapping(skb);
2846 	unsigned int first_entry, des;
2847 	struct stmmac_tx_queue *tx_q;
2848 	int tmp_pay_len = 0;
2849 	u32 pay_len, mss;
2850 	u8 proto_hdr_len;
2851 	int i;
2852 
2853 	tx_q = &priv->tx_queue[queue];
2854 
2855 	/* Compute header lengths */
2856 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2857 
2858 	/* Desc availability based on threshold should be enough safe */
2859 	if (unlikely(stmmac_tx_avail(priv, queue) <
2860 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2861 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2862 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2863 								queue));
2864 			/* This is a hard error, log it. */
2865 			netdev_err(priv->dev,
2866 				   "%s: Tx Ring full when queue awake\n",
2867 				   __func__);
2868 		}
2869 		return NETDEV_TX_BUSY;
2870 	}
2871 
2872 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2873 
2874 	mss = skb_shinfo(skb)->gso_size;
2875 
2876 	/* set new MSS value if needed */
2877 	if (mss != tx_q->mss) {
2878 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2879 		stmmac_set_mss(priv, mss_desc, mss);
2880 		tx_q->mss = mss;
2881 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2882 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2883 	}
2884 
2885 	if (netif_msg_tx_queued(priv)) {
2886 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2887 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2888 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2889 			skb->data_len);
2890 	}
2891 
2892 	first_entry = tx_q->cur_tx;
2893 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2894 
2895 	desc = tx_q->dma_tx + first_entry;
2896 	first = desc;
2897 
2898 	/* first descriptor: fill Headers on Buf1 */
2899 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2900 			     DMA_TO_DEVICE);
2901 	if (dma_mapping_error(priv->device, des))
2902 		goto dma_map_err;
2903 
2904 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2905 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2906 
2907 	first->des0 = cpu_to_le32(des);
2908 
2909 	/* Fill start of payload in buff2 of first descriptor */
2910 	if (pay_len)
2911 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2912 
2913 	/* If needed take extra descriptors to fill the remaining payload */
2914 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2915 
2916 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2917 
2918 	/* Prepare fragments */
2919 	for (i = 0; i < nfrags; i++) {
2920 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2921 
2922 		des = skb_frag_dma_map(priv->device, frag, 0,
2923 				       skb_frag_size(frag),
2924 				       DMA_TO_DEVICE);
2925 		if (dma_mapping_error(priv->device, des))
2926 			goto dma_map_err;
2927 
2928 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2929 				     (i == nfrags - 1), queue);
2930 
2931 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2932 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2933 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2934 	}
2935 
2936 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2937 
2938 	/* Only the last descriptor gets to point to the skb. */
2939 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2940 
2941 	/* We've used all descriptors we need for this skb, however,
2942 	 * advance cur_tx so that it references a fresh descriptor.
2943 	 * ndo_start_xmit will fill this descriptor the next time it's
2944 	 * called and stmmac_tx_clean may clean up to this descriptor.
2945 	 */
2946 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2947 
2948 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2949 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2950 			  __func__);
2951 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2952 	}
2953 
2954 	dev->stats.tx_bytes += skb->len;
2955 	priv->xstats.tx_tso_frames++;
2956 	priv->xstats.tx_tso_nfrags += nfrags;
2957 
2958 	/* Manage tx mitigation */
2959 	tx_q->tx_count_frames += nfrags + 1;
2960 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2961 		stmmac_set_tx_ic(priv, desc);
2962 		priv->xstats.tx_set_ic_bit++;
2963 		tx_q->tx_count_frames = 0;
2964 	} else {
2965 		stmmac_tx_timer_arm(priv, queue);
2966 	}
2967 
2968 	skb_tx_timestamp(skb);
2969 
2970 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2971 		     priv->hwts_tx_en)) {
2972 		/* declare that device is doing timestamping */
2973 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2974 		stmmac_enable_tx_timestamp(priv, first);
2975 	}
2976 
2977 	/* Complete the first descriptor before granting the DMA */
2978 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2979 			proto_hdr_len,
2980 			pay_len,
2981 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2982 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2983 
2984 	/* If context desc is used to change MSS */
2985 	if (mss_desc) {
2986 		/* Make sure that first descriptor has been completely
2987 		 * written, including its own bit. This is because MSS is
2988 		 * actually before first descriptor, so we need to make
2989 		 * sure that MSS's own bit is the last thing written.
2990 		 */
2991 		dma_wmb();
2992 		stmmac_set_tx_owner(priv, mss_desc);
2993 	}
2994 
2995 	/* The own bit must be the latest setting done when prepare the
2996 	 * descriptor and then barrier is needed to make sure that
2997 	 * all is coherent before granting the DMA engine.
2998 	 */
2999 	wmb();
3000 
3001 	if (netif_msg_pktdata(priv)) {
3002 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3003 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3004 			tx_q->cur_tx, first, nfrags);
3005 
3006 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3007 
3008 		pr_info(">>> frame to be transmitted: ");
3009 		print_pkt(skb->data, skb_headlen(skb));
3010 	}
3011 
3012 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3013 
3014 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3015 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3016 
3017 	return NETDEV_TX_OK;
3018 
3019 dma_map_err:
3020 	dev_err(priv->device, "Tx dma map failed\n");
3021 	dev_kfree_skb(skb);
3022 	priv->dev->stats.tx_dropped++;
3023 	return NETDEV_TX_OK;
3024 }
3025 
3026 /**
3027  *  stmmac_xmit - Tx entry point of the driver
3028  *  @skb : the socket buffer
3029  *  @dev : device pointer
3030  *  Description : this is the tx entry point of the driver.
3031  *  It programs the chain or the ring and supports oversized frames
3032  *  and SG feature.
3033  */
3034 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3035 {
3036 	struct stmmac_priv *priv = netdev_priv(dev);
3037 	unsigned int nopaged_len = skb_headlen(skb);
3038 	int i, csum_insertion = 0, is_jumbo = 0;
3039 	u32 queue = skb_get_queue_mapping(skb);
3040 	int nfrags = skb_shinfo(skb)->nr_frags;
3041 	int entry;
3042 	unsigned int first_entry;
3043 	struct dma_desc *desc, *first;
3044 	struct stmmac_tx_queue *tx_q;
3045 	unsigned int enh_desc;
3046 	unsigned int des;
3047 
3048 	tx_q = &priv->tx_queue[queue];
3049 
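	/* Leave the EEE Low Power Idle state before queuing new frames */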
3050 	if (priv->tx_path_in_lpi_mode)
3051 		stmmac_disable_eee_mode(priv);
3052 
3053 	/* Manage oversized TCP frames for GMAC4 device */
3054 	if (skb_is_gso(skb) && priv->tso) {
3055 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3056 			/*
3057 			 * There is no way to determine the number of TSO
3058 			 * capable Queues. Let's always use Queue 0
3059 			 * because if TSO is supported then at least this
3060 			 * one will be capable.
3061 			 */
3062 			skb_set_queue_mapping(skb, 0);
3063 
3064 			return stmmac_tso_xmit(skb, dev);
3065 		}
3066 	}
3067 
3068 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3069 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3070 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3071 								queue));
3072 			/* This is a hard error, log it. */
3073 			netdev_err(priv->dev,
3074 				   "%s: Tx Ring full when queue awake\n",
3075 				   __func__);
3076 		}
3077 		return NETDEV_TX_BUSY;
3078 	}
3079 
3080 	entry = tx_q->cur_tx;
3081 	first_entry = entry;
3082 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3083 
3084 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3085 
3086 	if (likely(priv->extend_desc))
3087 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3088 	else
3089 		desc = tx_q->dma_tx + entry;
3090 
3091 	first = desc;
3092 
3093 	enh_desc = priv->plat->enh_desc;
3094 	/* To program the descriptors according to the size of the frame */
3095 	if (enh_desc)
3096 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3097 
3098 	if (unlikely(is_jumbo)) {
3099 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3100 		if (unlikely(entry < 0) && (entry != -EINVAL))
3101 			goto dma_map_err;
3102 	}
3103 
3104 	for (i = 0; i < nfrags; i++) {
3105 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3106 		int len = skb_frag_size(frag);
3107 		bool last_segment = (i == (nfrags - 1));
3108 
3109 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3110 		WARN_ON(tx_q->tx_skbuff[entry]);
3111 
3112 		if (likely(priv->extend_desc))
3113 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3114 		else
3115 			desc = tx_q->dma_tx + entry;
3116 
3117 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3118 				       DMA_TO_DEVICE);
3119 		if (dma_mapping_error(priv->device, des))
3120 			goto dma_map_err; /* should reuse desc w/o issues */
3121 
3122 		tx_q->tx_skbuff_dma[entry].buf = des;
3123 
3124 		stmmac_set_desc_addr(priv, desc, des);
3125 
3126 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3127 		tx_q->tx_skbuff_dma[entry].len = len;
3128 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3129 
3130 		/* Prepare the descriptor and set the own bit too */
3131 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3132 				priv->mode, 1, last_segment, skb->len);
3133 	}
3134 
3135 	/* Only the last descriptor gets to point to the skb. */
3136 	tx_q->tx_skbuff[entry] = skb;
3137 
3138 	/* We've used all descriptors we need for this skb, however,
3139 	 * advance cur_tx so that it references a fresh descriptor.
3140 	 * ndo_start_xmit will fill this descriptor the next time it's
3141 	 * called and stmmac_tx_clean may clean up to this descriptor.
3142 	 */
3143 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3144 	tx_q->cur_tx = entry;
3145 
3146 	if (netif_msg_pktdata(priv)) {
3147 		void *tx_head;
3148 
3149 		netdev_dbg(priv->dev,
3150 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3151 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3152 			   entry, first, nfrags);
3153 
3154 		if (priv->extend_desc)
3155 			tx_head = (void *)tx_q->dma_etx;
3156 		else
3157 			tx_head = (void *)tx_q->dma_tx;
3158 
3159 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3160 
3161 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3162 		print_pkt(skb->data, skb->len);
3163 	}
3164 
3165 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3166 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3167 			  __func__);
3168 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3169 	}
3170 
3171 	dev->stats.tx_bytes += skb->len;
3172 
3173 	/* According to the coalesce parameter the IC bit for the latest
3174 	 * segment is reset and the timer re-started to clean the tx status.
3175 	 * This approach takes care of the fragments: desc is the first
3176 	 * element in case of no SG.
3177 	 */
3178 	tx_q->tx_count_frames += nfrags + 1;
3179 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3180 		stmmac_set_tx_ic(priv, desc);
3181 		priv->xstats.tx_set_ic_bit++;
3182 		tx_q->tx_count_frames = 0;
3183 	} else {
3184 		stmmac_tx_timer_arm(priv, queue);
3185 	}
3186 
3187 	skb_tx_timestamp(skb);
3188 
3189 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3190 	 * problems because all the descriptors are actually ready to be
3191 	 * passed to the DMA engine.
3192 	 */
3193 	if (likely(!is_jumbo)) {
3194 		bool last_segment = (nfrags == 0);
3195 
3196 		des = dma_map_single(priv->device, skb->data,
3197 				     nopaged_len, DMA_TO_DEVICE);
3198 		if (dma_mapping_error(priv->device, des))
3199 			goto dma_map_err;
3200 
3201 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3202 
3203 		stmmac_set_desc_addr(priv, first, des);
3204 
3205 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3206 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3207 
3208 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3209 			     priv->hwts_tx_en)) {
3210 			/* declare that device is doing timestamping */
3211 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3212 			stmmac_enable_tx_timestamp(priv, first);
3213 		}
3214 
3215 		/* Prepare the first descriptor setting the OWN bit too */
3216 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3217 				csum_insertion, priv->mode, 1, last_segment,
3218 				skb->len);
3219 	} else {
3220 		stmmac_set_tx_owner(priv, first);
3221 	}
3222 
3223 	/* The own bit must be the latest setting done when prepare the
3224 	 * descriptor and then barrier is needed to make sure that
3225 	 * all is coherent before granting the DMA engine.
3226 	 */
3227 	wmb();
3228 
3229 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3230 
3231 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3232 
3233 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3234 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3235 
3236 	return NETDEV_TX_OK;
3237 
3238 dma_map_err:
3239 	netdev_err(priv->dev, "Tx DMA map failed\n");
3240 	dev_kfree_skb(skb);
3241 	priv->dev->stats.tx_dropped++;
3242 	return NETDEV_TX_OK;
3243 }
3244 
3245 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3246 {
3247 	struct vlan_ethhdr *veth;
3248 	__be16 vlan_proto;
3249 	u16 vlanid;
3250 
3251 	veth = (struct vlan_ethhdr *)skb->data;
3252 	vlan_proto = veth->h_vlan_proto;
3253 
3254 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3255 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3256 	    (vlan_proto == htons(ETH_P_8021AD) &&
3257 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3258 		/* pop the vlan tag */
3259 		vlanid = ntohs(veth->h_vlan_TCI);
3260 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3261 		skb_pull(skb, VLAN_HLEN);
3262 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3263 	}
3264 }
3265 
3266 
3267 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3268 {
3269 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3270 		return 0;
3271 
3272 	return 1;
3273 }
3274 
3275 /**
3276  * stmmac_rx_refill - refill used skb preallocated buffers
3277  * @priv: driver private structure
3278  * @queue: RX queue index
3279  * Description : this reallocates the skbs for the reception process,
3280  * which is based on zero-copy.
3281  */
3282 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3283 {
3284 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3285 	int dirty = stmmac_rx_dirty(priv, queue);
3286 	unsigned int entry = rx_q->dirty_rx;
3287 
3288 	int bfsize = priv->dma_buf_sz;
3289 
3290 	while (dirty-- > 0) {
3291 		struct dma_desc *p;
3292 
3293 		if (priv->extend_desc)
3294 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3295 		else
3296 			p = rx_q->dma_rx + entry;
3297 
3298 		if (likely(!rx_q->rx_skbuff[entry])) {
3299 			struct sk_buff *skb;
3300 
3301 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3302 			if (unlikely(!skb)) {
3303 				/* so for a while no zero-copy! */
3304 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3305 				if (unlikely(net_ratelimit()))
3306 					dev_err(priv->device,
3307 						"fail to alloc skb entry %d\n",
3308 						entry);
3309 				break;
3310 			}
3311 
3312 			rx_q->rx_skbuff[entry] = skb;
3313 			rx_q->rx_skbuff_dma[entry] =
3314 			    dma_map_single(priv->device, skb->data, bfsize,
3315 					   DMA_FROM_DEVICE);
3316 			if (dma_mapping_error(priv->device,
3317 					      rx_q->rx_skbuff_dma[entry])) {
3318 				netdev_err(priv->dev, "Rx DMA map failed\n");
3319 				dev_kfree_skb(skb);
3320 				break;
3321 			}
3322 
3323 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3324 			stmmac_refill_desc3(priv, rx_q, p);
3325 
3326 			if (rx_q->rx_zeroc_thresh > 0)
3327 				rx_q->rx_zeroc_thresh--;
3328 
3329 			netif_dbg(priv, rx_status, priv->dev,
3330 				  "refill entry #%d\n", entry);
3331 		}
3332 		dma_wmb();
3333 
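		/* Descriptor fields are now visible to the device: hand ownership back to the DMA */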
3334 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3335 
3336 		dma_wmb();
3337 
3338 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3339 	}
3340 	rx_q->dirty_rx = entry;
3341 }
3342 
3343 /**
3344  * stmmac_rx - manage the receive process
3345  * @priv: driver private structure
3346  * @limit: napi budget
3347  * @queue: RX queue index.
3348  * Description :  this is the function called by the napi poll method.
3349  * It gets all the frames inside the ring.
3350  */
3351 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3352 {
3353 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3354 	struct stmmac_channel *ch = &priv->channel[queue];
3355 	unsigned int entry = rx_q->cur_rx;
3356 	int coe = priv->hw->rx_csum;
3357 	unsigned int next_entry;
3358 	unsigned int count = 0;
3359 	bool xmac;
3360 
3361 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3362 
3363 	if (netif_msg_rx_status(priv)) {
3364 		void *rx_head;
3365 
3366 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3367 		if (priv->extend_desc)
3368 			rx_head = (void *)rx_q->dma_erx;
3369 		else
3370 			rx_head = (void *)rx_q->dma_rx;
3371 
3372 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3373 	}
3374 	while (count < limit) {
3375 		int status;
3376 		struct dma_desc *p;
3377 		struct dma_desc *np;
3378 
3379 		if (priv->extend_desc)
3380 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3381 		else
3382 			p = rx_q->dma_rx + entry;
3383 
3384 		/* read the status of the incoming frame */
3385 		status = stmmac_rx_status(priv, &priv->dev->stats,
3386 				&priv->xstats, p);
3387 		/* check if managed by the DMA otherwise go ahead */
3388 		if (unlikely(status & dma_own))
3389 			break;
3390 
3391 		count++;
3392 
3393 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3394 		next_entry = rx_q->cur_rx;
3395 
3396 		if (priv->extend_desc)
3397 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3398 		else
3399 			np = rx_q->dma_rx + next_entry;
3400 
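		/* Warm the cache with the next descriptor before it is parsed on the next iteration */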
3401 		prefetch(np);
3402 
3403 		if (priv->extend_desc)
3404 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3405 					&priv->xstats, rx_q->dma_erx + entry);
3406 		if (unlikely(status == discard_frame)) {
3407 			priv->dev->stats.rx_errors++;
3408 			if (priv->hwts_rx_en && !priv->extend_desc) {
3409 				/* DESC2 & DESC3 will be overwritten by device
3410 				 * with timestamp value, hence reinitialize
3411 				 * them in stmmac_rx_refill() function so that
3412 				 * device can reuse it.
3413 				 */
3414 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3415 				rx_q->rx_skbuff[entry] = NULL;
3416 				dma_unmap_single(priv->device,
3417 						 rx_q->rx_skbuff_dma[entry],
3418 						 priv->dma_buf_sz,
3419 						 DMA_FROM_DEVICE);
3420 			}
3421 		} else {
3422 			struct sk_buff *skb;
3423 			int frame_len;
3424 			unsigned int des;
3425 
3426 			stmmac_get_desc_addr(priv, p, &des);
3427 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3428 
3429 			/*  If frame length is greater than skb buffer size
3430 			 *  (preallocated during init) then the packet is
3431 			 *  ignored
3432 			 */
3433 			if (frame_len > priv->dma_buf_sz) {
3434 				netdev_err(priv->dev,
3435 					   "len %d larger than size (%d)\n",
3436 					   frame_len, priv->dma_buf_sz);
3437 				priv->dev->stats.rx_length_errors++;
3438 				break;
3439 			}
3440 
3441 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3442 			 * Type frames (LLC/LLC-SNAP)
3443 			 *
3444 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3445 			 * feature is always disabled and packets need to be
3446 			 * stripped manually.
3447 			 */
3448 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3449 			    unlikely(status != llc_snap))
3450 				frame_len -= ETH_FCS_LEN;
3451 
3452 			if (netif_msg_rx_status(priv)) {
3453 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3454 					   p, entry, des);
3455 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3456 					   frame_len, status);
3457 			}
3458 			/* Zero-copy is always used for all sizes
3459 			 * in case of GMAC4 because the used
3460 			 * descriptors always need to be refilled.
3461 			 * to refill the used descriptors, always.
3462 			 */
3463 			if (unlikely(!xmac &&
3464 				     ((frame_len < priv->rx_copybreak) ||
3465 				     stmmac_rx_threshold_count(rx_q)))) {
3466 				skb = netdev_alloc_skb_ip_align(priv->dev,
3467 								frame_len);
3468 				if (unlikely(!skb)) {
3469 					if (net_ratelimit())
3470 						dev_warn(priv->device,
3471 							 "packet dropped\n");
3472 					priv->dev->stats.rx_dropped++;
3473 					break;
3474 				}
3475 
3476 				dma_sync_single_for_cpu(priv->device,
3477 							rx_q->rx_skbuff_dma
3478 							[entry], frame_len,
3479 							DMA_FROM_DEVICE);
3480 				skb_copy_to_linear_data(skb,
3481 							rx_q->
3482 							rx_skbuff[entry]->data,
3483 							frame_len);
3484 
3485 				skb_put(skb, frame_len);
3486 				dma_sync_single_for_device(priv->device,
3487 							   rx_q->rx_skbuff_dma
3488 							   [entry], frame_len,
3489 							   DMA_FROM_DEVICE);
3490 			} else {
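				/* Zero-copy path: hand the skb mapped at refill
				 * time straight to the stack and unmap its buffer;
				 * stmmac_rx_refill() will allocate a replacement.
				 */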
3491 				skb = rx_q->rx_skbuff[entry];
3492 				if (unlikely(!skb)) {
3493 					netdev_err(priv->dev,
3494 						   "%s: Inconsistent Rx chain\n",
3495 						   priv->dev->name);
3496 					priv->dev->stats.rx_dropped++;
3497 					break;
3498 				}
3499 				prefetch(skb->data - NET_IP_ALIGN);
3500 				rx_q->rx_skbuff[entry] = NULL;
3501 				rx_q->rx_zeroc_thresh++;
3502 
3503 				skb_put(skb, frame_len);
3504 				dma_unmap_single(priv->device,
3505 						 rx_q->rx_skbuff_dma[entry],
3506 						 priv->dma_buf_sz,
3507 						 DMA_FROM_DEVICE);
3508 			}
3509 
3510 			if (netif_msg_pktdata(priv)) {
3511 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3512 					   frame_len);
3513 				print_pkt(skb->data, frame_len);
3514 			}
3515 
3516 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3517 
3518 			stmmac_rx_vlan(priv->dev, skb);
3519 
3520 			skb->protocol = eth_type_trans(skb, priv->dev);
3521 
3522 			if (unlikely(!coe))
3523 				skb_checksum_none_assert(skb);
3524 			else
3525 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3526 
3527 			napi_gro_receive(&ch->rx_napi, skb);
3528 
3529 			priv->dev->stats.rx_packets++;
3530 			priv->dev->stats.rx_bytes += frame_len;
3531 		}
3532 		entry = next_entry;
3533 	}
3534 
3535 	stmmac_rx_refill(priv, queue);
3536 
3537 	priv->xstats.rx_pkt_n += count;
3538 
3539 	return count;
3540 }
3541 
3542 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3543 {
3544 	struct stmmac_channel *ch =
3545 		container_of(napi, struct stmmac_channel, rx_napi);
3546 	struct stmmac_priv *priv = ch->priv_data;
3547 	u32 chan = ch->index;
3548 	int work_done;
3549 
3550 	priv->xstats.napi_poll++;
3551 
3552 	work_done = stmmac_rx(priv, budget, chan);
3553 	if (work_done < budget && napi_complete_done(napi, work_done))
3554 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3555 	return work_done;
3556 }
3557 
3558 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3559 {
3560 	struct stmmac_channel *ch =
3561 		container_of(napi, struct stmmac_channel, tx_napi);
3562 	struct stmmac_priv *priv = ch->priv_data;
3563 	struct stmmac_tx_queue *tx_q;
3564 	u32 chan = ch->index;
3565 	int work_done;
3566 
3567 	priv->xstats.napi_poll++;
3568 
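	/* The TX ring is cleaned regardless of the budget; work_done is then
	 * clamped so the NAPI completion accounting below stays within budget.
	 */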
3569 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3570 	work_done = min(work_done, budget);
3571 
3572 	if (work_done < budget && napi_complete_done(napi, work_done))
3573 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3574 
3575 	/* Force transmission restart */
3576 	tx_q = &priv->tx_queue[chan];
3577 	if (tx_q->cur_tx != tx_q->dirty_tx) {
3578 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
3579 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3580 				       chan);
3581 	}
3582 
3583 	return work_done;
3584 }
3585 
3586 /**
3587  *  stmmac_tx_timeout
3588  *  @dev : Pointer to net device structure
3589  *  Description: this function is called when a packet transmission fails to
3590  *   complete within a reasonable time. The driver will mark the error in the
3591  *   netdev structure and arrange for the device to be reset to a sane state
3592  *   in order to transmit a new packet.
3593  */
3594 static void stmmac_tx_timeout(struct net_device *dev)
3595 {
3596 	struct stmmac_priv *priv = netdev_priv(dev);
3597 
3598 	stmmac_global_err(priv);
3599 }
3600 
3601 /**
3602  *  stmmac_set_rx_mode - entry point for multicast addressing
3603  *  @dev : pointer to the device structure
3604  *  Description:
3605  *  This function is a driver entry point which gets called by the kernel
3606  *  whenever multicast addresses must be enabled/disabled.
3607  *  Return value:
3608  *  void.
3609  */
3610 static void stmmac_set_rx_mode(struct net_device *dev)
3611 {
3612 	struct stmmac_priv *priv = netdev_priv(dev);
3613 
3614 	stmmac_set_filter(priv, priv->hw, dev);
3615 }
3616 
3617 /**
3618  *  stmmac_change_mtu - entry point to change MTU size for the device.
3619  *  @dev : device pointer.
3620  *  @new_mtu : the new MTU size for the device.
3621  *  Description: the Maximum Transmission Unit (MTU) is used by the network
3622  *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
3623  *  (ETH_DATA_LEN). This value can be changed with ifconfig or ip link.
3624  *  Return value:
3625  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3626  *  file on failure.
3627  */
3628 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3629 {
3630 	struct stmmac_priv *priv = netdev_priv(dev);
3631 
3632 	if (netif_running(dev)) {
3633 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3634 		return -EBUSY;
3635 	}
3636 
3637 	dev->mtu = new_mtu;
3638 
3639 	netdev_update_features(dev);
3640 
3641 	return 0;
3642 }
3643 
3644 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3645 					     netdev_features_t features)
3646 {
3647 	struct stmmac_priv *priv = netdev_priv(dev);
3648 
3649 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3650 		features &= ~NETIF_F_RXCSUM;
3651 
3652 	if (!priv->plat->tx_coe)
3653 		features &= ~NETIF_F_CSUM_MASK;
3654 
3655 	/* Some GMAC devices have bugged Jumbo frame support and need
3656 	 * the Tx COE disabled for oversized frames (due to limited
3657 	 * buffer sizes). In this case we disable the TX csum insertion
3658 	 * in the TDES and don't use SF.
3659 	 */
3660 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3661 		features &= ~NETIF_F_CSUM_MASK;
3662 
3663 	/* Enable or disable TSO as requested via ethtool */
3664 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3665 		if (features & NETIF_F_TSO)
3666 			priv->tso = true;
3667 		else
3668 			priv->tso = false;
3669 	}
3670 
3671 	return features;
3672 }
3673 
3674 static int stmmac_set_features(struct net_device *netdev,
3675 			       netdev_features_t features)
3676 {
3677 	struct stmmac_priv *priv = netdev_priv(netdev);
3678 
3679 	/* Keep the COE Type in case RX csum is supported */
3680 	if (features & NETIF_F_RXCSUM)
3681 		priv->hw->rx_csum = priv->plat->rx_coe;
3682 	else
3683 		priv->hw->rx_csum = 0;
3684 	/* No check needed because rx_coe has already been set and will be
3685 	 * fixed up in case of any issue.
3686 	 */
3687 	stmmac_rx_ipc(priv, priv->hw);
3688 
3689 	return 0;
3690 }
3691 
3692 /**
3693  *  stmmac_interrupt - main ISR
3694  *  @irq: interrupt number.
3695  *  @dev_id: to pass the net device pointer.
3696  *  Description: this is the main driver interrupt service routine.
3697  *  It can call:
3698  *  o DMA service routine (to manage incoming frame reception and transmission
3699  *    status)
3700  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3701  *    interrupts.
3702  */
3703 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3704 {
3705 	struct net_device *dev = (struct net_device *)dev_id;
3706 	struct stmmac_priv *priv = netdev_priv(dev);
3707 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3708 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3709 	u32 queues_count;
3710 	u32 queue;
3711 	bool xmac;
3712 
3713 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3714 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
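	/* MTL interrupts are reported per queue, so walk the larger of the
	 * RX and TX queue counts below.
	 */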
3715 
3716 	if (priv->irq_wake)
3717 		pm_wakeup_event(priv->device, 0);
3718 
3719 	if (unlikely(!dev)) {
3720 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3721 		return IRQ_NONE;
3722 	}
3723 
3724 	/* Check if adapter is up */
3725 	if (test_bit(STMMAC_DOWN, &priv->state))
3726 		return IRQ_HANDLED;
3727 	/* Check if a fatal error happened */
3728 	if (stmmac_safety_feat_interrupt(priv))
3729 		return IRQ_HANDLED;
3730 
3731 	/* To handle GMAC own interrupts */
3732 	if ((priv->plat->has_gmac) || xmac) {
3733 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3734 		int mtl_status;
3735 
3736 		if (unlikely(status)) {
3737 			/* For LPI we need to save the tx status */
3738 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3739 				priv->tx_path_in_lpi_mode = true;
3740 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3741 				priv->tx_path_in_lpi_mode = false;
3742 		}
3743 
3744 		for (queue = 0; queue < queues_count; queue++) {
3745 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3746 
3747 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3748 								queue);
3749 			if (mtl_status != -EINVAL)
3750 				status |= mtl_status;
3751 
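			/* On an RX FIFO overflow, rewriting the tail pointer
			 * nudges the DMA to resume fetching descriptors for
			 * this queue.
			 */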
3752 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3753 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3754 						       rx_q->rx_tail_addr,
3755 						       queue);
3756 		}
3757 
3758 		/* PCS link status */
3759 		if (priv->hw->pcs) {
3760 			if (priv->xstats.pcs_link)
3761 				netif_carrier_on(dev);
3762 			else
3763 				netif_carrier_off(dev);
3764 		}
3765 	}
3766 
3767 	/* To handle DMA interrupts */
3768 	stmmac_dma_interrupt(priv);
3769 
3770 	return IRQ_HANDLED;
3771 }
3772 
3773 #ifdef CONFIG_NET_POLL_CONTROLLER
3774 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3775  * to allow network I/O with interrupts disabled.
3776  */
3777 static void stmmac_poll_controller(struct net_device *dev)
3778 {
3779 	disable_irq(dev->irq);
3780 	stmmac_interrupt(dev->irq, dev);
3781 	enable_irq(dev->irq);
3782 }
3783 #endif
3784 
3785 /**
3786  *  stmmac_ioctl - Entry point for the Ioctl
3787  *  @dev: Device pointer.
3788  *  @rq: An IOCTL-specific structure that can contain a pointer to
3789  *  a proprietary structure used to pass information to the driver.
3790  *  @cmd: IOCTL command
3791  *  Description:
3792  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3793  */
3794 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3795 {
3796 	int ret = -EOPNOTSUPP;
3797 
3798 	if (!netif_running(dev))
3799 		return -EINVAL;
3800 
3801 	switch (cmd) {
3802 	case SIOCGMIIPHY:
3803 	case SIOCGMIIREG:
3804 	case SIOCSMIIREG:
3805 		if (!dev->phydev)
3806 			return -EINVAL;
3807 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3808 		break;
3809 	case SIOCSHWTSTAMP:
3810 		ret = stmmac_hwtstamp_set(dev, rq);
3811 		break;
3812 	case SIOCGHWTSTAMP:
3813 		ret = stmmac_hwtstamp_get(dev, rq);
3814 		break;
3815 	default:
3816 		break;
3817 	}
3818 
3819 	return ret;
3820 }
3821 
3822 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3823 				    void *cb_priv)
3824 {
3825 	struct stmmac_priv *priv = cb_priv;
3826 	int ret = -EOPNOTSUPP;
3827 
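	/* NAPI is disabled on all queues across the classifier update so the
	 * datapath does not run concurrently with the HW reconfiguration.
	 */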
3828 	stmmac_disable_all_queues(priv);
3829 
3830 	switch (type) {
3831 	case TC_SETUP_CLSU32:
3832 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3833 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3834 		break;
3835 	default:
3836 		break;
3837 	}
3838 
3839 	stmmac_enable_all_queues(priv);
3840 	return ret;
3841 }
3842 
3843 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3844 				 struct tc_block_offload *f)
3845 {
3846 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3847 		return -EOPNOTSUPP;
3848 
3849 	switch (f->command) {
3850 	case TC_BLOCK_BIND:
3851 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3852 				priv, priv, f->extack);
3853 	case TC_BLOCK_UNBIND:
3854 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3855 		return 0;
3856 	default:
3857 		return -EOPNOTSUPP;
3858 	}
3859 }
3860 
3861 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3862 			   void *type_data)
3863 {
3864 	struct stmmac_priv *priv = netdev_priv(ndev);
3865 
3866 	switch (type) {
3867 	case TC_SETUP_BLOCK:
3868 		return stmmac_setup_tc_block(priv, type_data);
3869 	case TC_SETUP_QDISC_CBS:
3870 		return stmmac_tc_setup_cbs(priv, priv, type_data);
3871 	default:
3872 		return -EOPNOTSUPP;
3873 	}
3874 }
3875 
3876 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3877 {
3878 	struct stmmac_priv *priv = netdev_priv(ndev);
3879 	int ret = 0;
3880 
3881 	ret = eth_mac_addr(ndev, addr);
3882 	if (ret)
3883 		return ret;
3884 
3885 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3886 
3887 	return ret;
3888 }
3889 
3890 #ifdef CONFIG_DEBUG_FS
3891 static struct dentry *stmmac_fs_dir;
3892 
3893 static void sysfs_display_ring(void *head, int size, int extend_desc,
3894 			       struct seq_file *seq)
3895 {
3896 	int i;
3897 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3898 	struct dma_desc *p = (struct dma_desc *)head;
3899 
3900 	for (i = 0; i < size; i++) {
3901 		if (extend_desc) {
3902 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3903 				   i, (unsigned int)virt_to_phys(ep),
3904 				   le32_to_cpu(ep->basic.des0),
3905 				   le32_to_cpu(ep->basic.des1),
3906 				   le32_to_cpu(ep->basic.des2),
3907 				   le32_to_cpu(ep->basic.des3));
3908 			ep++;
3909 		} else {
3910 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3911 				   i, (unsigned int)virt_to_phys(p),
3912 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3913 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3914 			p++;
3915 		}
3916 		seq_printf(seq, "\n");
3917 	}
3918 }
3919 
3920 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3921 {
3922 	struct net_device *dev = seq->private;
3923 	struct stmmac_priv *priv = netdev_priv(dev);
3924 	u32 rx_count = priv->plat->rx_queues_to_use;
3925 	u32 tx_count = priv->plat->tx_queues_to_use;
3926 	u32 queue;
3927 
3928 	if ((dev->flags & IFF_UP) == 0)
3929 		return 0;
3930 
3931 	for (queue = 0; queue < rx_count; queue++) {
3932 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3933 
3934 		seq_printf(seq, "RX Queue %d:\n", queue);
3935 
3936 		if (priv->extend_desc) {
3937 			seq_printf(seq, "Extended descriptor ring:\n");
3938 			sysfs_display_ring((void *)rx_q->dma_erx,
3939 					   DMA_RX_SIZE, 1, seq);
3940 		} else {
3941 			seq_printf(seq, "Descriptor ring:\n");
3942 			sysfs_display_ring((void *)rx_q->dma_rx,
3943 					   DMA_RX_SIZE, 0, seq);
3944 		}
3945 	}
3946 
3947 	for (queue = 0; queue < tx_count; queue++) {
3948 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3949 
3950 		seq_printf(seq, "TX Queue %d:\n", queue);
3951 
3952 		if (priv->extend_desc) {
3953 			seq_printf(seq, "Extended descriptor ring:\n");
3954 			sysfs_display_ring((void *)tx_q->dma_etx,
3955 					   DMA_TX_SIZE, 1, seq);
3956 		} else {
3957 			seq_printf(seq, "Descriptor ring:\n");
3958 			sysfs_display_ring((void *)tx_q->dma_tx,
3959 					   DMA_TX_SIZE, 0, seq);
3960 		}
3961 	}
3962 
3963 	return 0;
3964 }
3965 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
3966 
3967 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3968 {
3969 	struct net_device *dev = seq->private;
3970 	struct stmmac_priv *priv = netdev_priv(dev);
3971 
3972 	if (!priv->hw_cap_support) {
3973 		seq_printf(seq, "DMA HW features not supported\n");
3974 		return 0;
3975 	}
3976 
3977 	seq_printf(seq, "==============================\n");
3978 	seq_printf(seq, "\tDMA HW features\n");
3979 	seq_printf(seq, "==============================\n");
3980 
3981 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3982 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3983 	seq_printf(seq, "\t1000 Mbps: %s\n",
3984 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3985 	seq_printf(seq, "\tHalf duplex: %s\n",
3986 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3987 	seq_printf(seq, "\tHash Filter: %s\n",
3988 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3989 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3990 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3991 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3992 		   (priv->dma_cap.pcs) ? "Y" : "N");
3993 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3994 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3995 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3996 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3997 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3998 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3999 	seq_printf(seq, "\tRMON module: %s\n",
4000 		   (priv->dma_cap.rmon) ? "Y" : "N");
4001 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4002 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4003 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4004 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
4005 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4006 		   (priv->dma_cap.eee) ? "Y" : "N");
4007 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4008 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4009 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4010 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4011 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4012 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4013 	} else {
4014 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4015 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4016 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4017 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4018 	}
4019 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4020 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4021 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4022 		   priv->dma_cap.number_rx_channel);
4023 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4024 		   priv->dma_cap.number_tx_channel);
4025 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4026 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4027 
4028 	return 0;
4029 }
4030 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4031 
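/* stmmac_init_fs creates two read-only debugfs files per interface, typically
 * under /sys/kernel/debug/stmmaceth/<ifname>/: "descriptors_status" dumps the
 * DMA descriptor rings and "dma_cap" decodes the HW feature register.
 */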
4032 static int stmmac_init_fs(struct net_device *dev)
4033 {
4034 	struct stmmac_priv *priv = netdev_priv(dev);
4035 
4036 	/* Create per netdev entries */
4037 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4038 
4039 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4040 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4041 
4042 		return -ENOMEM;
4043 	}
4044 
4045 	/* Entry to report DMA RX/TX rings */
4046 	priv->dbgfs_rings_status =
4047 		debugfs_create_file("descriptors_status", 0444,
4048 				    priv->dbgfs_dir, dev,
4049 				    &stmmac_rings_status_fops);
4050 
4051 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4052 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4053 		debugfs_remove_recursive(priv->dbgfs_dir);
4054 
4055 		return -ENOMEM;
4056 	}
4057 
4058 	/* Entry to report the DMA HW features */
4059 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4060 						  priv->dbgfs_dir,
4061 						  dev, &stmmac_dma_cap_fops);
4062 
4063 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4064 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4065 		debugfs_remove_recursive(priv->dbgfs_dir);
4066 
4067 		return -ENOMEM;
4068 	}
4069 
4070 	return 0;
4071 }
4072 
4073 static void stmmac_exit_fs(struct net_device *dev)
4074 {
4075 	struct stmmac_priv *priv = netdev_priv(dev);
4076 
4077 	debugfs_remove_recursive(priv->dbgfs_dir);
4078 }
4079 #endif /* CONFIG_DEBUG_FS */
4080 
4081 static const struct net_device_ops stmmac_netdev_ops = {
4082 	.ndo_open = stmmac_open,
4083 	.ndo_start_xmit = stmmac_xmit,
4084 	.ndo_stop = stmmac_release,
4085 	.ndo_change_mtu = stmmac_change_mtu,
4086 	.ndo_fix_features = stmmac_fix_features,
4087 	.ndo_set_features = stmmac_set_features,
4088 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4089 	.ndo_tx_timeout = stmmac_tx_timeout,
4090 	.ndo_do_ioctl = stmmac_ioctl,
4091 	.ndo_setup_tc = stmmac_setup_tc,
4092 #ifdef CONFIG_NET_POLL_CONTROLLER
4093 	.ndo_poll_controller = stmmac_poll_controller,
4094 #endif
4095 	.ndo_set_mac_address = stmmac_set_mac_address,
4096 };
4097 
4098 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4099 {
4100 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4101 		return;
4102 	if (test_bit(STMMAC_DOWN, &priv->state))
4103 		return;
4104 
4105 	netdev_err(priv->dev, "Reset adapter.\n");
4106 
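	/* Serialize against any reset already in progress, then bounce the
	 * interface under the RTNL lock to bring the HW back to a sane state.
	 */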
4107 	rtnl_lock();
4108 	netif_trans_update(priv->dev);
4109 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4110 		usleep_range(1000, 2000);
4111 
4112 	set_bit(STMMAC_DOWN, &priv->state);
4113 	dev_close(priv->dev);
4114 	dev_open(priv->dev, NULL);
4115 	clear_bit(STMMAC_DOWN, &priv->state);
4116 	clear_bit(STMMAC_RESETING, &priv->state);
4117 	rtnl_unlock();
4118 }
4119 
4120 static void stmmac_service_task(struct work_struct *work)
4121 {
4122 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4123 			service_task);
4124 
4125 	stmmac_reset_subtask(priv);
4126 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4127 }
4128 
4129 /**
4130  *  stmmac_hw_init - Init the MAC device
4131  *  @priv: driver private structure
4132  *  Description: this function is to configure the MAC device according to
4133  *  some platform parameters or the HW capability register. It prepares the
4134  *  driver to use either ring or chain modes and to setup either enhanced or
4135  *  normal descriptors.
4136  */
4137 static int stmmac_hw_init(struct stmmac_priv *priv)
4138 {
4139 	int ret;
4140 
4141 	/* dwmac-sun8i only works in chain mode */
4142 	if (priv->plat->has_sun8i)
4143 		chain_mode = 1;
4144 	priv->chain_mode = chain_mode;
4145 
4146 	/* Initialize HW Interface */
4147 	ret = stmmac_hwif_init(priv);
4148 	if (ret)
4149 		return ret;
4150 
4151 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4152 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4153 	if (priv->hw_cap_support) {
4154 		dev_info(priv->device, "DMA HW capability register supported\n");
4155 
4156 		/* Some gmac/dma configuration fields passed through the
4157 		 * platform (e.g. enh_desc, tx_coe) can be overridden with
4158 		 * the values from the HW capability register (if
4159 		 * supported).
4160 		 */
4161 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4162 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4163 		priv->hw->pmt = priv->plat->pmt;
4164 
4165 		/* TXCOE doesn't work in thresh DMA mode */
4166 		if (priv->plat->force_thresh_dma_mode)
4167 			priv->plat->tx_coe = 0;
4168 		else
4169 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4170 
4171 		/* In case of GMAC4 rx_coe is from HW cap register. */
4172 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4173 
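		/* Prefer Type 2 (full payload checksum) over Type 1 (IP
		 * header only) when the capability register reports both.
		 */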
4174 		if (priv->dma_cap.rx_coe_type2)
4175 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4176 		else if (priv->dma_cap.rx_coe_type1)
4177 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4178 
4179 	} else {
4180 		dev_info(priv->device, "No HW DMA feature register supported\n");
4181 	}
4182 
4183 	if (priv->plat->rx_coe) {
4184 		priv->hw->rx_csum = priv->plat->rx_coe;
4185 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4186 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4187 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4188 	}
4189 	if (priv->plat->tx_coe)
4190 		dev_info(priv->device, "TX Checksum insertion supported\n");
4191 
4192 	if (priv->plat->pmt) {
4193 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4194 		device_set_wakeup_capable(priv->device, 1);
4195 	}
4196 
4197 	if (priv->dma_cap.tsoen)
4198 		dev_info(priv->device, "TSO supported\n");
4199 
4200 	/* Run HW quirks, if any */
4201 	if (priv->hwif_quirks) {
4202 		ret = priv->hwif_quirks(priv);
4203 		if (ret)
4204 			return ret;
4205 	}
4206 
4207 	/* Rx Watchdog is available in COREs newer than 3.40.
4208 	 * In some cases, for example on buggy HW, this feature
4209 	 * has to be disabled; this can be done by passing the
4210 	 * riwt_off field from the platform.
4211 	 */
4212 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4213 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4214 		priv->use_riwt = 1;
4215 		dev_info(priv->device,
4216 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4217 	}
4218 
4219 	return 0;
4220 }
4221 
4222 /**
4223  * stmmac_dvr_probe
4224  * @device: device pointer
4225  * @plat_dat: platform data pointer
4226  * @res: stmmac resource pointer
4227  * Description: this is the main probe function, used to
4228  * call alloc_etherdev and allocate the private structure.
4229  * Return:
4230  * returns 0 on success, otherwise errno.
4231  */
4232 int stmmac_dvr_probe(struct device *device,
4233 		     struct plat_stmmacenet_data *plat_dat,
4234 		     struct stmmac_resources *res)
4235 {
4236 	struct net_device *ndev = NULL;
4237 	struct stmmac_priv *priv;
4238 	u32 queue, maxq;
4239 	int ret = 0;
4240 
4241 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4242 				  MTL_MAX_TX_QUEUES,
4243 				  MTL_MAX_RX_QUEUES);
4244 	if (!ndev)
4245 		return -ENOMEM;
4246 
4247 	SET_NETDEV_DEV(ndev, device);
4248 
4249 	priv = netdev_priv(ndev);
4250 	priv->device = device;
4251 	priv->dev = ndev;
4252 
4253 	stmmac_set_ethtool_ops(ndev);
4254 	priv->pause = pause;
4255 	priv->plat = plat_dat;
4256 	priv->ioaddr = res->addr;
4257 	priv->dev->base_addr = (unsigned long)res->addr;
4258 
4259 	priv->dev->irq = res->irq;
4260 	priv->wol_irq = res->wol_irq;
4261 	priv->lpi_irq = res->lpi_irq;
4262 
4263 	if (res->mac)
4264 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4265 
4266 	dev_set_drvdata(device, priv->dev);
4267 
4268 	/* Verify driver arguments */
4269 	stmmac_verify_args();
4270 
4271 	/* Allocate workqueue */
4272 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4273 	if (!priv->wq) {
4274 		dev_err(priv->device, "failed to create workqueue\n");
4275 		ret = -ENOMEM;
4276 		goto error_wq;
4277 	}
4278 
4279 	INIT_WORK(&priv->service_task, stmmac_service_task);
4280 
4281 	/* Override with kernel parameters if supplied XXX CRS XXX
4282 	 * this needs to have multiple instances
4283 	 */
4284 	if ((phyaddr >= 0) && (phyaddr <= 31))
4285 		priv->plat->phy_addr = phyaddr;
4286 
4287 	if (priv->plat->stmmac_rst) {
4288 		ret = reset_control_assert(priv->plat->stmmac_rst);
4289 		reset_control_deassert(priv->plat->stmmac_rst);
4290 		/* Some reset controllers have only a reset callback instead
4291 		 * of an assert + deassert callback pair.
4292 		 */
4293 		if (ret == -ENOTSUPP)
4294 			reset_control_reset(priv->plat->stmmac_rst);
4295 	}
4296 
4297 	/* Init MAC and get the capabilities */
4298 	ret = stmmac_hw_init(priv);
4299 	if (ret)
4300 		goto error_hw_init;
4301 
4302 	/* Configure real RX and TX queues */
4303 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4304 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4305 
4306 	ndev->netdev_ops = &stmmac_netdev_ops;
4307 
4308 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4309 			    NETIF_F_RXCSUM;
4310 
4311 	ret = stmmac_tc_init(priv, priv);
4312 	if (!ret) {
4313 		ndev->hw_features |= NETIF_F_HW_TC;
4314 	}
4315 
4316 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4317 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4318 		priv->tso = true;
4319 		dev_info(priv->device, "TSO feature enabled\n");
4320 	}
4321 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4322 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4323 #ifdef STMMAC_VLAN_TAG_USED
4324 	/* Both mac100 and gmac support receive VLAN tag detection */
4325 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4326 #endif
4327 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4328 
4329 	/* MTU range: 46 - hw-specific max */
4330 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4331 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4332 		ndev->max_mtu = JUMBO_LEN;
4333 	else if (priv->plat->has_xgmac)
4334 		ndev->max_mtu = XGMAC_JUMBO_LEN;
4335 	else
4336 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4337 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4338 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4339 	 */
4340 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4341 	    (priv->plat->maxmtu >= ndev->min_mtu))
4342 		ndev->max_mtu = priv->plat->maxmtu;
4343 	else if (priv->plat->maxmtu < ndev->min_mtu)
4344 		dev_warn(priv->device,
4345 			 "%s: warning: maxmtu having invalid value (%d)\n",
4346 			 __func__, priv->plat->maxmtu);
4347 
4348 	if (flow_ctrl)
4349 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4350 
4351 	/* Setup channels NAPI */
4352 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
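	/* One channel is set up per DMA channel; RX and TX NAPI instances
	 * are only added for the queues actually in use.
	 */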
4353 
4354 	for (queue = 0; queue < maxq; queue++) {
4355 		struct stmmac_channel *ch = &priv->channel[queue];
4356 
4357 		ch->priv_data = priv;
4358 		ch->index = queue;
4359 
4360 		if (queue < priv->plat->rx_queues_to_use) {
4361 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4362 				       NAPI_POLL_WEIGHT);
4363 		}
4364 		if (queue < priv->plat->tx_queues_to_use) {
4365 			netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
4366 				       NAPI_POLL_WEIGHT);
4367 		}
4368 	}
4369 
4370 	mutex_init(&priv->lock);
4371 
4372 	/* If a specific clk_csr value is passed from the platform,
4373 	 * the CSR Clock Range selection cannot be changed at
4374 	 * run-time and is fixed. Otherwise the driver will try to
4375 	 * set the MDC clock dynamically according to the actual
4376 	 * csr clock input.
4377 	 */
4378 	if (!priv->plat->clk_csr)
4379 		stmmac_clk_csr_set(priv);
4380 	else
4381 		priv->clk_csr = priv->plat->clk_csr;
4382 
4383 	stmmac_check_pcs_mode(priv);
4384 
4385 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4386 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4387 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4388 		/* MDIO bus Registration */
4389 		ret = stmmac_mdio_register(ndev);
4390 		if (ret < 0) {
4391 			dev_err(priv->device,
4392 				"%s: MDIO bus (id: %d) registration failed",
4393 				__func__, priv->plat->bus_id);
4394 			goto error_mdio_register;
4395 		}
4396 	}
4397 
4398 	ret = register_netdev(ndev);
4399 	if (ret) {
4400 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4401 			__func__, ret);
4402 		goto error_netdev_register;
4403 	}
4404 
4405 #ifdef CONFIG_DEBUG_FS
4406 	ret = stmmac_init_fs(ndev);
4407 	if (ret < 0)
4408 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4409 			    __func__);
4410 #endif
4411 
4412 	return ret;
4413 
4414 error_netdev_register:
4415 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4416 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4417 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4418 		stmmac_mdio_unregister(ndev);
4419 error_mdio_register:
4420 	for (queue = 0; queue < maxq; queue++) {
4421 		struct stmmac_channel *ch = &priv->channel[queue];
4422 
4423 		if (queue < priv->plat->rx_queues_to_use)
4424 			netif_napi_del(&ch->rx_napi);
4425 		if (queue < priv->plat->tx_queues_to_use)
4426 			netif_napi_del(&ch->tx_napi);
4427 	}
4428 error_hw_init:
4429 	destroy_workqueue(priv->wq);
4430 error_wq:
4431 	free_netdev(ndev);
4432 
4433 	return ret;
4434 }
4435 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4436 
4437 /**
4438  * stmmac_dvr_remove
4439  * @dev: device pointer
4440  * Description: this function resets the TX/RX processes, disables the MAC
4441  * RX/TX, changes the link status and releases the DMA descriptor rings.
4442  */
4443 int stmmac_dvr_remove(struct device *dev)
4444 {
4445 	struct net_device *ndev = dev_get_drvdata(dev);
4446 	struct stmmac_priv *priv = netdev_priv(ndev);
4447 
4448 	netdev_info(priv->dev, "%s: removing driver", __func__);
4449 
4450 #ifdef CONFIG_DEBUG_FS
4451 	stmmac_exit_fs(ndev);
4452 #endif
4453 	stmmac_stop_all_dma(priv);
4454 
4455 	stmmac_mac_set(priv, priv->ioaddr, false);
4456 	netif_carrier_off(ndev);
4457 	unregister_netdev(ndev);
4458 	if (priv->plat->stmmac_rst)
4459 		reset_control_assert(priv->plat->stmmac_rst);
4460 	clk_disable_unprepare(priv->plat->pclk);
4461 	clk_disable_unprepare(priv->plat->stmmac_clk);
4462 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4463 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4464 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4465 		stmmac_mdio_unregister(ndev);
4466 	destroy_workqueue(priv->wq);
4467 	mutex_destroy(&priv->lock);
4468 	free_netdev(ndev);
4469 
4470 	return 0;
4471 }
4472 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4473 
4474 /**
4475  * stmmac_suspend - suspend callback
4476  * @dev: device pointer
4477  * Description: this is the function to suspend the device; it is called by
4478  * the platform driver to stop the network queues, program the PMT register
4479  * (for WoL) and clean up and release the driver resources.
4480  */
4481 int stmmac_suspend(struct device *dev)
4482 {
4483 	struct net_device *ndev = dev_get_drvdata(dev);
4484 	struct stmmac_priv *priv = netdev_priv(ndev);
4485 
4486 	if (!ndev || !netif_running(ndev))
4487 		return 0;
4488 
4489 	if (ndev->phydev)
4490 		phy_stop(ndev->phydev);
4491 
4492 	mutex_lock(&priv->lock);
4493 
4494 	netif_device_detach(ndev);
4495 	stmmac_stop_all_queues(priv);
4496 
4497 	stmmac_disable_all_queues(priv);
4498 
4499 	/* Stop TX/RX DMA */
4500 	stmmac_stop_all_dma(priv);
4501 
4502 	/* Enable Power down mode by programming the PMT regs */
4503 	if (device_may_wakeup(priv->device)) {
4504 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4505 		priv->irq_wake = 1;
4506 	} else {
4507 		stmmac_mac_set(priv, priv->ioaddr, false);
4508 		pinctrl_pm_select_sleep_state(priv->device);
4509 		/* Disable the clocks in case PWM is off */
4510 		clk_disable(priv->plat->pclk);
4511 		clk_disable(priv->plat->stmmac_clk);
4512 	}
4513 	mutex_unlock(&priv->lock);
4514 
4515 	priv->oldlink = false;
4516 	priv->speed = SPEED_UNKNOWN;
4517 	priv->oldduplex = DUPLEX_UNKNOWN;
4518 	return 0;
4519 }
4520 EXPORT_SYMBOL_GPL(stmmac_suspend);
4521 
4522 /**
4523  * stmmac_reset_queues_param - reset queue parameters
4524  * @priv: driver private structure
4525  */
4526 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4527 {
4528 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4529 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4530 	u32 queue;
4531 
4532 	for (queue = 0; queue < rx_cnt; queue++) {
4533 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4534 
4535 		rx_q->cur_rx = 0;
4536 		rx_q->dirty_rx = 0;
4537 	}
4538 
4539 	for (queue = 0; queue < tx_cnt; queue++) {
4540 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4541 
4542 		tx_q->cur_tx = 0;
4543 		tx_q->dirty_tx = 0;
4544 		tx_q->mss = 0;
4545 	}
4546 }
4547 
4548 /**
4549  * stmmac_resume - resume callback
4550  * @dev: device pointer
4551  * Description: on resume this function is invoked to set up the DMA and
4552  * CORE in a usable state.
4553  */
4554 int stmmac_resume(struct device *dev)
4555 {
4556 	struct net_device *ndev = dev_get_drvdata(dev);
4557 	struct stmmac_priv *priv = netdev_priv(ndev);
4558 
4559 	if (!netif_running(ndev))
4560 		return 0;
4561 
4562 	/* The Power Down bit, in the PM register, is cleared
4563 	 * automatically as soon as a magic packet or a Wake-up frame
4564 	 * is received. Anyway, it's better to manually clear
4565 	 * this bit because it can generate problems while resuming
4566 	 * from another device (e.g. serial console).
4567 	 */
4568 	if (device_may_wakeup(priv->device)) {
4569 		mutex_lock(&priv->lock);
4570 		stmmac_pmt(priv, priv->hw, 0);
4571 		mutex_unlock(&priv->lock);
4572 		priv->irq_wake = 0;
4573 	} else {
4574 		pinctrl_pm_select_default_state(priv->device);
4575 		/* enable the clk previously disabled */
4576 		clk_enable(priv->plat->stmmac_clk);
4577 		clk_enable(priv->plat->pclk);
4578 		/* reset the phy so that it's ready */
4579 		if (priv->mii)
4580 			stmmac_mdio_reset(priv->mii);
4581 	}
4582 
4583 	netif_device_attach(ndev);
4584 
4585 	mutex_lock(&priv->lock);
4586 
4587 	stmmac_reset_queues_param(priv);
4588 
4589 	stmmac_clear_descriptors(priv);
4590 
4591 	stmmac_hw_setup(ndev, false);
4592 	stmmac_init_tx_coalesce(priv);
4593 	stmmac_set_rx_mode(ndev);
4594 
4595 	stmmac_enable_all_queues(priv);
4596 
4597 	stmmac_start_all_queues(priv);
4598 
4599 	mutex_unlock(&priv->lock);
4600 
4601 	if (ndev->phydev)
4602 		phy_start(ndev->phydev);
4603 
4604 	return 0;
4605 }
4606 EXPORT_SYMBOL_GPL(stmmac_resume);
4607 
4608 #ifndef MODULE
4609 static int __init stmmac_cmdline_opt(char *str)
4610 {
4611 	char *opt;
4612 
4613 	if (!str || !*str)
4614 		return -EINVAL;
4615 	while ((opt = strsep(&str, ",")) != NULL) {
4616 		if (!strncmp(opt, "debug:", 6)) {
4617 			if (kstrtoint(opt + 6, 0, &debug))
4618 				goto err;
4619 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4620 			if (kstrtoint(opt + 8, 0, &phyaddr))
4621 				goto err;
4622 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4623 			if (kstrtoint(opt + 7, 0, &buf_sz))
4624 				goto err;
4625 		} else if (!strncmp(opt, "tc:", 3)) {
4626 			if (kstrtoint(opt + 3, 0, &tc))
4627 				goto err;
4628 		} else if (!strncmp(opt, "watchdog:", 9)) {
4629 			if (kstrtoint(opt + 9, 0, &watchdog))
4630 				goto err;
4631 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4632 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4633 				goto err;
4634 		} else if (!strncmp(opt, "pause:", 6)) {
4635 			if (kstrtoint(opt + 6, 0, &pause))
4636 				goto err;
4637 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4638 			if (kstrtoint(opt + 10, 0, &eee_timer))
4639 				goto err;
4640 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4641 			if (kstrtoint(opt + 11, 0, &chain_mode))
4642 				goto err;
4643 		}
4644 	}
4645 	return 0;
4646 
4647 err:
4648 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4649 	return -EINVAL;
4650 }
4651 
4652 __setup("stmmaceth=", stmmac_cmdline_opt);
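/* Illustrative boot-time usage (values are examples only):
 *   stmmaceth=debug:16,watchdog:4000,buf_sz:2048,flow_ctrl:1
 * Each "key:value" pair overrides the corresponding module parameter above.
 */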
4653 #endif /* MODULE */
4654 
4655 static int __init stmmac_init(void)
4656 {
4657 #ifdef CONFIG_DEBUG_FS
4658 	/* Create debugfs main directory if it doesn't exist yet */
4659 	if (!stmmac_fs_dir) {
4660 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4661 
4662 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4663 			pr_err("ERROR %s, debugfs create directory failed\n",
4664 			       STMMAC_RESOURCE_NAME);
4665 
4666 			return -ENOMEM;
4667 		}
4668 	}
4669 #endif
4670 
4671 	return 0;
4672 }
4673 
4674 static void __exit stmmac_exit(void)
4675 {
4676 #ifdef CONFIG_DEBUG_FS
4677 	debugfs_remove_recursive(stmmac_fs_dir);
4678 #endif
4679 }
4680 
4681 module_init(stmmac_init)
4682 module_exit(stmmac_exit)
4683 
4684 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4685 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4686 MODULE_LICENSE("GPL");
4687