1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56 
57 #define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
59 
60 /* Module parameters */
61 #define TX_TIMEO	5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65 
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69 
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73 
74 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
76 
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80 
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84 
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89 
90 #define	DEFAULT_BUFSIZE	1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94 
95 #define	STMMAC_RX_COPYBREAK	256
96 
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
99 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100 
101 #define STMMAC_DEFAULT_LPI_TIMER	1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106 
/* By default the driver uses ring mode to manage TX and RX descriptors,
 * but the user can force chain mode instead of ring mode.
 */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113 
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115 
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120 
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122 
123 /**
124  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case
 * of errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130 	if (unlikely(watchdog < 0))
131 		watchdog = TX_TIMEO;
132 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133 		buf_sz = DEFAULT_BUFSIZE;
134 	if (unlikely(flow_ctrl > 1))
135 		flow_ctrl = FLOW_AUTO;
136 	else if (likely(flow_ctrl < 0))
137 		flow_ctrl = FLOW_OFF;
138 	if (unlikely((pause < 0) || (pause > 0xffff)))
139 		pause = PAUSE_TIME;
140 	if (eee_timer < 0)
141 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143 
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153 	u32 queue;
154 
155 	for (queue = 0; queue < maxq; queue++) {
156 		struct stmmac_channel *ch = &priv->channel[queue];
157 
158 		if (queue < rx_queues_cnt)
159 			napi_disable(&ch->rx_napi);
160 		if (queue < tx_queues_cnt)
161 			napi_disable(&ch->tx_napi);
162 	}
163 }
164 
165 /**
166  * stmmac_enable_all_queues - Enable all queues
167  * @priv: driver private structure
168  */
169 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
170 {
171 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
172 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
173 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
174 	u32 queue;
175 
176 	for (queue = 0; queue < maxq; queue++) {
177 		struct stmmac_channel *ch = &priv->channel[queue];
178 
179 		if (queue < rx_queues_cnt)
180 			napi_enable(&ch->rx_napi);
181 		if (queue < tx_queues_cnt)
182 			napi_enable(&ch->tx_napi);
183 	}
184 }
185 
186 /**
187  * stmmac_stop_all_queues - Stop all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_start_all_queues - Start all queues
201  * @priv: driver private structure
202  */
203 static void stmmac_start_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206 	u32 queue;
207 
208 	for (queue = 0; queue < tx_queues_cnt; queue++)
209 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
210 }
211 
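/**
 * stmmac_service_event_schedule - schedule the service task
 * @priv: driver private structure
 * Description: queue the service work unless the interface is going down
 * or the task has already been scheduled.
 */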
212 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
213 {
214 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
215 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
216 		queue_work(priv->wq, &priv->service_task);
217 }
218 
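/**
 * stmmac_global_err - handle a fatal HW/driver error
 * @priv: driver private structure
 * Description: take the carrier down and ask the service task to perform
 * a full reset of the device.
 */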
219 static void stmmac_global_err(struct stmmac_priv *priv)
220 {
221 	netif_carrier_off(priv->dev);
222 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
223 	stmmac_service_event_schedule(priv);
224 }
225 
226 /**
227  * stmmac_clk_csr_set - dynamically set the MDC clock
228  * @priv: driver private structure
229  * Description: this is to dynamically set the MDC clock according to the csr
230  * clock input.
231  * Note:
 *	If a specific clk_csr value is passed from the platform,
 *	the CSR Clock Range selection cannot be changed at run-time
 *	and it is fixed (as reported in the driver documentation).
 *	Otherwise the driver will try to set the MDC clock dynamically
 *	according to the actual clock input.
237  */
238 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
239 {
240 	u32 clk_rate;
241 
242 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
243 
	/* The platform-provided default clk_csr is assumed valid for all
	 * cases other than the ones handled below.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider because the frequency
	 * of clk_csr_i is not known, so the default divider is left
	 * unchanged.
	 */
251 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
252 		if (clk_rate < CSR_F_35M)
253 			priv->clk_csr = STMMAC_CSR_20_35M;
254 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
255 			priv->clk_csr = STMMAC_CSR_35_60M;
256 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
257 			priv->clk_csr = STMMAC_CSR_60_100M;
258 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
259 			priv->clk_csr = STMMAC_CSR_100_150M;
260 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
261 			priv->clk_csr = STMMAC_CSR_150_250M;
262 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
263 			priv->clk_csr = STMMAC_CSR_250_300M;
264 	}
265 
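	/* The sun8i and XGMAC glue layers use their own CSR divider
	 * encodings; these are selected below straight from the clock rate.
	 */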
266 	if (priv->plat->has_sun8i) {
267 		if (clk_rate > 160000000)
268 			priv->clk_csr = 0x03;
269 		else if (clk_rate > 80000000)
270 			priv->clk_csr = 0x02;
271 		else if (clk_rate > 40000000)
272 			priv->clk_csr = 0x01;
273 		else
274 			priv->clk_csr = 0;
275 	}
276 
277 	if (priv->plat->has_xgmac) {
278 		if (clk_rate > 400000000)
279 			priv->clk_csr = 0x5;
280 		else if (clk_rate > 350000000)
281 			priv->clk_csr = 0x4;
282 		else if (clk_rate > 300000000)
283 			priv->clk_csr = 0x3;
284 		else if (clk_rate > 250000000)
285 			priv->clk_csr = 0x2;
286 		else if (clk_rate > 150000000)
287 			priv->clk_csr = 0x1;
288 		else
289 			priv->clk_csr = 0x0;
290 	}
291 }
292 
293 static void print_pkt(unsigned char *buf, int len)
294 {
295 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
296 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
297 }
298 
299 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
300 {
301 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
302 	u32 avail;
303 
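	/* Number of free descriptors left in the ring, handling wrap-around.
	 * One entry is always kept unused so that a full ring can be told
	 * apart from an empty one.
	 */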
304 	if (tx_q->dirty_tx > tx_q->cur_tx)
305 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
306 	else
307 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
308 
309 	return avail;
310 }
311 
312 /**
313  * stmmac_rx_dirty - Get RX queue dirty
314  * @priv: driver private structure
315  * @queue: RX queue index
316  */
317 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
318 {
319 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
320 	u32 dirty;
321 
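	/* Number of descriptors already used by the hardware and not yet
	 * refilled by the driver, handling wrap-around.
	 */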
322 	if (rx_q->dirty_rx <= rx_q->cur_rx)
323 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
324 	else
325 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
326 
327 	return dirty;
328 }
329 
330 /**
331  * stmmac_hw_fix_mac_speed - callback for speed selection
332  * @priv: driver private structure
333  * Description: on some platforms (e.g. ST), some HW system configuration
334  * registers have to be set according to the link speed negotiated.
335  */
336 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
337 {
338 	struct net_device *ndev = priv->dev;
339 	struct phy_device *phydev = ndev->phydev;
340 
341 	if (likely(priv->plat->fix_mac_speed))
342 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
343 }
344 
345 /**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function checks that all TX queues are idle and, if so,
 * enters LPI mode. It is used when EEE is enabled.
350  */
351 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
352 {
353 	u32 tx_cnt = priv->plat->tx_queues_to_use;
354 	u32 queue;
355 
356 	/* check if all TX queues have the work finished */
357 	for (queue = 0; queue < tx_cnt; queue++) {
358 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
359 
360 		if (tx_q->dirty_tx != tx_q->cur_tx)
361 			return; /* still unfinished work */
362 	}
363 
364 	/* Check and enter in LPI mode */
365 	if (!priv->tx_path_in_lpi_mode)
366 		stmmac_set_eee_mode(priv, priv->hw,
367 				priv->plat->en_tx_lpi_clockgating);
368 }
369 
370 /**
371  * stmmac_disable_eee_mode - disable and exit from LPI mode
372  * @priv: driver private structure
 * Description: exit LPI mode and disable EEE when the LPI state is active.
 * This is called from the xmit path.
375  */
376 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
377 {
378 	stmmac_reset_eee_mode(priv, priv->hw);
379 	del_timer_sync(&priv->eee_ctrl_timer);
380 	priv->tx_path_in_lpi_mode = false;
381 }
382 
383 /**
384  * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list pointer used to retrieve the private structure
 * Description:
 *  if there is no data transfer and we are not already in LPI state,
 *  then the MAC transmitter can be moved to LPI state.
389  */
390 static void stmmac_eee_ctrl_timer(struct timer_list *t)
391 {
392 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
393 
394 	stmmac_enable_eee_mode(priv);
395 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
396 }
397 
398 /**
399  * stmmac_eee_init - init EEE
400  * @priv: driver private structure
401  * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
405  */
406 bool stmmac_eee_init(struct stmmac_priv *priv)
407 {
408 	struct net_device *ndev = priv->dev;
409 	int interface = priv->plat->interface;
410 	bool ret = false;
411 
412 	if ((interface != PHY_INTERFACE_MODE_MII) &&
413 	    (interface != PHY_INTERFACE_MODE_GMII) &&
414 	    !phy_interface_mode_is_rgmii(interface))
415 		goto out;
416 
	/* When using PCS we cannot access the PHY registers at this stage,
	 * so we do not support extra features like EEE.
419 	 */
420 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
421 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
422 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
423 		goto out;
424 
425 	/* MAC core supports the EEE feature. */
426 	if (priv->dma_cap.eee) {
427 		int tx_lpi_timer = priv->tx_lpi_timer;
428 
429 		/* Check if the PHY supports EEE */
430 		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE can no longer be supported
			 * at run-time (for example because the link partner
			 * capabilities have changed).
			 * In that case the driver disables its own timers.
435 			 */
436 			mutex_lock(&priv->lock);
437 			if (priv->eee_active) {
438 				netdev_dbg(priv->dev, "disable EEE\n");
439 				del_timer_sync(&priv->eee_ctrl_timer);
440 				stmmac_set_eee_timer(priv, priv->hw, 0,
441 						tx_lpi_timer);
442 			}
443 			priv->eee_active = 0;
444 			mutex_unlock(&priv->lock);
445 			goto out;
446 		}
447 		/* Activate the EEE and start timers */
448 		mutex_lock(&priv->lock);
449 		if (!priv->eee_active) {
450 			priv->eee_active = 1;
451 			timer_setup(&priv->eee_ctrl_timer,
452 				    stmmac_eee_ctrl_timer, 0);
453 			mod_timer(&priv->eee_ctrl_timer,
454 				  STMMAC_LPI_T(eee_timer));
455 
456 			stmmac_set_eee_timer(priv, priv->hw,
457 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
458 		}
459 		/* Set HW EEE according to the speed */
460 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
461 
462 		ret = true;
463 		mutex_unlock(&priv->lock);
464 
465 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
466 	}
467 out:
468 	return ret;
469 }
470 
471 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
472  * @priv: driver private structure
473  * @p : descriptor pointer
474  * @skb : the socket buffer
475  * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
478  */
479 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
480 				   struct dma_desc *p, struct sk_buff *skb)
481 {
482 	struct skb_shared_hwtstamps shhwtstamp;
483 	u64 ns = 0;
484 
485 	if (!priv->hwts_tx_en)
486 		return;
487 
488 	/* exit if skb doesn't support hw tstamp */
489 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
490 		return;
491 
492 	/* check tx tstamp status */
493 	if (stmmac_get_tx_timestamp_status(priv, p)) {
494 		/* get the valid tstamp */
495 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
496 
497 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
498 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
499 
500 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
501 		/* pass tstamp to stack */
502 		skb_tstamp_tx(skb, &shhwtstamp);
	}
506 }
507 
508 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
509  * @priv: driver private structure
510  * @p : descriptor pointer
511  * @np : next descriptor pointer
512  * @skb : the socket buffer
513  * Description :
 * This function reads the received packet's timestamp from the descriptor,
 * passes it to the stack and also performs some sanity checks.
516  */
517 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
518 				   struct dma_desc *np, struct sk_buff *skb)
519 {
520 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
521 	struct dma_desc *desc = p;
522 	u64 ns = 0;
523 
524 	if (!priv->hwts_rx_en)
525 		return;
526 	/* For GMAC4, the valid timestamp is from CTX next desc. */
527 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
528 		desc = np;
529 
530 	/* Check if timestamp is available */
531 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
532 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
533 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
534 		shhwtstamp = skb_hwtstamps(skb);
535 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
536 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
537 	} else  {
538 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
539 	}
540 }
541 
542 /**
543  *  stmmac_hwtstamp_set - control hardware timestamping.
544  *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet time stamping based on user input.
550  *  Return Value:
551  *  0 on success and an appropriate -ve integer on failure.
552  */
553 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
554 {
555 	struct stmmac_priv *priv = netdev_priv(dev);
556 	struct hwtstamp_config config;
557 	struct timespec64 now;
558 	u64 temp = 0;
559 	u32 ptp_v2 = 0;
560 	u32 tstamp_all = 0;
561 	u32 ptp_over_ipv4_udp = 0;
562 	u32 ptp_over_ipv6_udp = 0;
563 	u32 ptp_over_ethernet = 0;
564 	u32 snap_type_sel = 0;
565 	u32 ts_master_en = 0;
566 	u32 ts_event_en = 0;
567 	u32 sec_inc = 0;
568 	u32 value = 0;
569 	bool xmac;
570 
571 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
572 
573 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
574 		netdev_alert(priv->dev, "No support for HW time stamping\n");
575 		priv->hwts_tx_en = 0;
576 		priv->hwts_rx_en = 0;
577 
578 		return -EOPNOTSUPP;
579 	}
580 
581 	if (copy_from_user(&config, ifr->ifr_data,
582 			   sizeof(config)))
583 		return -EFAULT;
584 
585 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
586 		   __func__, config.flags, config.tx_type, config.rx_filter);
587 
588 	/* reserved for future extensions */
589 	if (config.flags)
590 		return -EINVAL;
591 
592 	if (config.tx_type != HWTSTAMP_TX_OFF &&
593 	    config.tx_type != HWTSTAMP_TX_ON)
594 		return -ERANGE;
595 
596 	if (priv->adv_ts) {
597 		switch (config.rx_filter) {
598 		case HWTSTAMP_FILTER_NONE:
599 			/* time stamp no incoming packet at all */
600 			config.rx_filter = HWTSTAMP_FILTER_NONE;
601 			break;
602 
603 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
604 			/* PTP v1, UDP, any kind of event packet */
605 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
606 			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping.
611 			 */
612 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
613 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
614 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
615 			break;
616 
617 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
618 			/* PTP v1, UDP, Sync packet */
619 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
620 			/* take time stamp for SYNC messages only */
621 			ts_event_en = PTP_TCR_TSEVNTENA;
622 
623 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625 			break;
626 
627 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
628 			/* PTP v1, UDP, Delay_req packet */
629 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
630 			/* take time stamp for Delay_Req messages only */
631 			ts_master_en = PTP_TCR_TSMSTRENA;
632 			ts_event_en = PTP_TCR_TSEVNTENA;
633 
634 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636 			break;
637 
638 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
639 			/* PTP v2, UDP, any kind of event packet */
640 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
641 			ptp_v2 = PTP_TCR_TSVER2ENA;
642 			/* take time stamp for all event messages */
643 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
644 
645 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
646 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
647 			break;
648 
649 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
650 			/* PTP v2, UDP, Sync packet */
651 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
652 			ptp_v2 = PTP_TCR_TSVER2ENA;
653 			/* take time stamp for SYNC messages only */
654 			ts_event_en = PTP_TCR_TSEVNTENA;
655 
656 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
657 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
658 			break;
659 
660 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
661 			/* PTP v2, UDP, Delay_req packet */
662 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
663 			ptp_v2 = PTP_TCR_TSVER2ENA;
664 			/* take time stamp for Delay_Req messages only */
665 			ts_master_en = PTP_TCR_TSMSTRENA;
666 			ts_event_en = PTP_TCR_TSEVNTENA;
667 
668 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
669 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
670 			break;
671 
672 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
674 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
675 			ptp_v2 = PTP_TCR_TSVER2ENA;
676 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			ptp_over_ethernet = PTP_TCR_TSIPENA;
680 			break;
681 
682 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
684 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
685 			ptp_v2 = PTP_TCR_TSVER2ENA;
686 			/* take time stamp for SYNC messages only */
687 			ts_event_en = PTP_TCR_TSEVNTENA;
688 
689 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
690 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
691 			ptp_over_ethernet = PTP_TCR_TSIPENA;
692 			break;
693 
694 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
696 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
697 			ptp_v2 = PTP_TCR_TSVER2ENA;
698 			/* take time stamp for Delay_Req messages only */
699 			ts_master_en = PTP_TCR_TSMSTRENA;
700 			ts_event_en = PTP_TCR_TSEVNTENA;
701 
702 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
703 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
704 			ptp_over_ethernet = PTP_TCR_TSIPENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_NTP_ALL:
708 		case HWTSTAMP_FILTER_ALL:
709 			/* time stamp any incoming packet */
710 			config.rx_filter = HWTSTAMP_FILTER_ALL;
711 			tstamp_all = PTP_TCR_TSENALL;
712 			break;
713 
714 		default:
715 			return -ERANGE;
716 		}
717 	} else {
718 		switch (config.rx_filter) {
719 		case HWTSTAMP_FILTER_NONE:
720 			config.rx_filter = HWTSTAMP_FILTER_NONE;
721 			break;
722 		default:
723 			/* PTP v1, UDP, any kind of event packet */
724 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
725 			break;
726 		}
727 	}
728 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
729 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
730 
731 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
732 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
733 	else {
734 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
735 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
736 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
737 			 ts_master_en | snap_type_sel);
738 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
739 
740 		/* program Sub Second Increment reg */
741 		stmmac_config_sub_second_increment(priv,
742 				priv->ptpaddr, priv->plat->clk_ptp_rate,
743 				xmac, &sec_inc);
744 		temp = div_u64(1000000000ULL, sec_inc);
745 
746 		/* Store sub second increment and flags for later use */
747 		priv->sub_second_inc = sec_inc;
748 		priv->systime_flags = value;
749 
		/* Calculate the default addend value:
		 * addend = (2^32) / freq_div_ratio,
		 * where freq_div_ratio = 1e9ns / sec_inc
754 		 */
755 		temp = (u64)(temp << 32);
756 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
757 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
758 
759 		/* initialize system time */
760 		ktime_get_real_ts64(&now);
761 
762 		/* lower 32 bits of tv_sec are safe until y2106 */
763 		stmmac_init_systime(priv, priv->ptpaddr,
764 				(u32)now.tv_sec, now.tv_nsec);
765 	}
766 
767 	memcpy(&priv->tstamp_config, &config, sizeof(config));
768 
769 	return copy_to_user(ifr->ifr_data, &config,
770 			    sizeof(config)) ? -EFAULT : 0;
771 }
772 
773 /**
774  *  stmmac_hwtstamp_get - read hardware timestamping.
775  *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
781  */
782 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
783 {
784 	struct stmmac_priv *priv = netdev_priv(dev);
785 	struct hwtstamp_config *config = &priv->tstamp_config;
786 
787 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
788 		return -EOPNOTSUPP;
789 
790 	return copy_to_user(ifr->ifr_data, config,
791 			    sizeof(*config)) ? -EFAULT : 0;
792 }
793 
794 /**
795  * stmmac_init_ptp - init PTP
796  * @priv: driver private structure
 * Description: this is to verify if the HW supports PTPv1 or PTPv2.
798  * This is done by looking at the HW cap. register.
799  * This function also registers the ptp driver.
800  */
801 static int stmmac_init_ptp(struct stmmac_priv *priv)
802 {
803 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
804 
805 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
806 		return -EOPNOTSUPP;
807 
808 	priv->adv_ts = 0;
809 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
810 	if (xmac && priv->dma_cap.atime_stamp)
811 		priv->adv_ts = 1;
812 	/* Dwmac 3.x core with extend_desc can support adv_ts */
813 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
814 		priv->adv_ts = 1;
815 
816 	if (priv->dma_cap.time_stamp)
817 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
818 
819 	if (priv->adv_ts)
820 		netdev_info(priv->dev,
821 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
822 
823 	priv->hwts_tx_en = 0;
824 	priv->hwts_rx_en = 0;
825 
826 	stmmac_ptp_register(priv);
827 
828 	return 0;
829 }
830 
831 static void stmmac_release_ptp(struct stmmac_priv *priv)
832 {
833 	if (priv->plat->clk_ptp_ref)
834 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
835 	stmmac_ptp_unregister(priv);
836 }
837 
838 /**
839  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated on the link
 *  Description: It is used for configuring the flow control in all queues
842  */
843 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
844 {
845 	u32 tx_cnt = priv->plat->tx_queues_to_use;
846 
847 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
848 			priv->pause, tx_cnt);
849 }
850 
851 /**
852  * stmmac_adjust_link - adjusts the link parameters
853  * @dev: net device structure
854  * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex, this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because the link can switch
 * between different (EEE-capable) networks.
859  */
860 static void stmmac_adjust_link(struct net_device *dev)
861 {
862 	struct stmmac_priv *priv = netdev_priv(dev);
863 	struct phy_device *phydev = dev->phydev;
864 	bool new_state = false;
865 
866 	if (!phydev)
867 		return;
868 
869 	mutex_lock(&priv->lock);
870 
871 	if (phydev->link) {
872 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
873 
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
876 		if (phydev->duplex != priv->oldduplex) {
877 			new_state = true;
878 			if (!phydev->duplex)
879 				ctrl &= ~priv->hw->link.duplex;
880 			else
881 				ctrl |= priv->hw->link.duplex;
882 			priv->oldduplex = phydev->duplex;
883 		}
884 		/* Flow Control operation */
885 		if (phydev->pause)
886 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
887 
888 		if (phydev->speed != priv->speed) {
889 			new_state = true;
890 			ctrl &= ~priv->hw->link.speed_mask;
891 			switch (phydev->speed) {
892 			case SPEED_1000:
893 				ctrl |= priv->hw->link.speed1000;
894 				break;
895 			case SPEED_100:
896 				ctrl |= priv->hw->link.speed100;
897 				break;
898 			case SPEED_10:
899 				ctrl |= priv->hw->link.speed10;
900 				break;
901 			default:
902 				netif_warn(priv, link, priv->dev,
903 					   "broken speed: %d\n", phydev->speed);
904 				phydev->speed = SPEED_UNKNOWN;
905 				break;
906 			}
907 			if (phydev->speed != SPEED_UNKNOWN)
908 				stmmac_hw_fix_mac_speed(priv);
909 			priv->speed = phydev->speed;
910 		}
911 
912 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
913 
914 		if (!priv->oldlink) {
915 			new_state = true;
916 			priv->oldlink = true;
917 		}
918 	} else if (priv->oldlink) {
919 		new_state = true;
920 		priv->oldlink = false;
921 		priv->speed = SPEED_UNKNOWN;
922 		priv->oldduplex = DUPLEX_UNKNOWN;
923 	}
924 
925 	if (new_state && netif_msg_link(priv))
926 		phy_print_status(phydev);
927 
928 	mutex_unlock(&priv->lock);
929 
930 	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the adjust_link hook when
		 * a switch is attached to the stmmac driver.
		 */
934 		phydev->irq = PHY_IGNORE_INTERRUPT;
935 	else
936 		/* At this stage, init the EEE if supported.
937 		 * Never called in case of fixed_link.
938 		 */
939 		priv->eee_enabled = stmmac_eee_init(priv);
940 }
941 
942 /**
943  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
944  * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
948  */
949 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
950 {
951 	int interface = priv->plat->interface;
952 
953 	if (priv->dma_cap.pcs) {
954 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
955 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
956 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
957 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
958 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
959 			priv->hw->pcs = STMMAC_PCS_RGMII;
960 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
961 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
962 			priv->hw->pcs = STMMAC_PCS_SGMII;
963 		}
964 	}
965 }
966 
967 /**
968  * stmmac_init_phy - PHY initialization
969  * @dev: net device structure
970  * Description: it initializes the driver's PHY state, and attaches the PHY
971  * to the mac driver.
972  *  Return value:
973  *  0 on success
974  */
975 static int stmmac_init_phy(struct net_device *dev)
976 {
977 	struct stmmac_priv *priv = netdev_priv(dev);
978 	u32 tx_cnt = priv->plat->tx_queues_to_use;
979 	struct phy_device *phydev;
980 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
981 	char bus_id[MII_BUS_ID_SIZE];
982 	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
985 	priv->speed = SPEED_UNKNOWN;
986 	priv->oldduplex = DUPLEX_UNKNOWN;
987 
988 	if (priv->plat->phy_node) {
989 		phydev = of_phy_connect(dev, priv->plat->phy_node,
990 					&stmmac_adjust_link, 0, interface);
991 	} else {
992 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
993 			 priv->plat->bus_id);
994 
995 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
996 			 priv->plat->phy_addr);
997 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
998 			   phy_id_fmt);
999 
1000 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
1001 				     interface);
1002 	}
1003 
1004 	if (IS_ERR_OR_NULL(phydev)) {
1005 		netdev_err(priv->dev, "Could not attach to PHY\n");
1006 		if (!phydev)
1007 			return -ENODEV;
1008 
1009 		return PTR_ERR(phydev);
1010 	}
1011 
1012 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
1013 	if ((interface == PHY_INTERFACE_MODE_MII) ||
1014 	    (interface == PHY_INTERFACE_MODE_RMII) ||
1015 		(max_speed < 1000 && max_speed > 0))
1016 		phy_set_max_speed(phydev, SPEED_100);
1017 
1018 	/*
	 * Half-duplex mode is not supported with multiqueue;
	 * half-duplex can only work with a single queue.
1021 	 */
1022 	if (tx_cnt > 1) {
1023 		phy_remove_link_mode(phydev,
1024 				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1025 		phy_remove_link_mode(phydev,
1026 				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1027 		phy_remove_link_mode(phydev,
1028 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1029 	}
1030 
1031 	/*
1032 	 * Broken HW is sometimes missing the pull-up resistor on the
1033 	 * MDIO line, which results in reads to non-existent devices returning
1034 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1035 	 * device as well.
1036 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
1037 	 */
1038 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
1039 		phy_disconnect(phydev);
1040 		return -ENODEV;
1041 	}
1042 
1043 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling; make sure we force a link transition if
	 * we have an UP/DOWN/UP transition.
1046 	 */
1047 	if (phydev->is_pseudo_fixed_link)
1048 		phydev->irq = PHY_POLL;
1049 
1050 	phy_attached_info(phydev);
1051 	return 0;
1052 }
1053 
1054 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1055 {
1056 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1057 	void *head_rx;
1058 	u32 queue;
1059 
1060 	/* Display RX rings */
1061 	for (queue = 0; queue < rx_cnt; queue++) {
1062 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1063 
1064 		pr_info("\tRX Queue %u rings\n", queue);
1065 
1066 		if (priv->extend_desc)
1067 			head_rx = (void *)rx_q->dma_erx;
1068 		else
1069 			head_rx = (void *)rx_q->dma_rx;
1070 
1071 		/* Display RX ring */
1072 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1073 	}
1074 }
1075 
1076 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1077 {
1078 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1079 	void *head_tx;
1080 	u32 queue;
1081 
1082 	/* Display TX rings */
1083 	for (queue = 0; queue < tx_cnt; queue++) {
1084 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1085 
		pr_info("\tTX Queue %u rings\n", queue);
1087 
1088 		if (priv->extend_desc)
1089 			head_tx = (void *)tx_q->dma_etx;
1090 		else
1091 			head_tx = (void *)tx_q->dma_tx;
1092 
1093 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1094 	}
1095 }
1096 
1097 static void stmmac_display_rings(struct stmmac_priv *priv)
1098 {
1099 	/* Display RX ring */
1100 	stmmac_display_rx_rings(priv);
1101 
1102 	/* Display TX ring */
1103 	stmmac_display_tx_rings(priv);
1104 }
1105 
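/**
 * stmmac_set_bfsize - map the interface MTU to a DMA buffer size
 * @mtu: interface MTU
 * @bufsize: current buffer size
 * Description: pick the DMA buffer size to use for the given MTU
 * (1536, 2KiB, 4KiB or 8KiB).
 */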
1106 static int stmmac_set_bfsize(int mtu, int bufsize)
1107 {
1108 	int ret = bufsize;
1109 
1110 	if (mtu >= BUF_SIZE_4KiB)
1111 		ret = BUF_SIZE_8KiB;
1112 	else if (mtu >= BUF_SIZE_2KiB)
1113 		ret = BUF_SIZE_4KiB;
1114 	else if (mtu > DEFAULT_BUFSIZE)
1115 		ret = BUF_SIZE_2KiB;
1116 	else
1117 		ret = DEFAULT_BUFSIZE;
1118 
1119 	return ret;
1120 }
1121 
1122 /**
1123  * stmmac_clear_rx_descriptors - clear RX descriptors
1124  * @priv: driver private structure
1125  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
1128  */
1129 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1130 {
1131 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1132 	int i;
1133 
1134 	/* Clear the RX descriptors */
1135 	for (i = 0; i < DMA_RX_SIZE; i++)
1136 		if (priv->extend_desc)
1137 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1138 					priv->use_riwt, priv->mode,
1139 					(i == DMA_RX_SIZE - 1),
1140 					priv->dma_buf_sz);
1141 		else
1142 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1143 					priv->use_riwt, priv->mode,
1144 					(i == DMA_RX_SIZE - 1),
1145 					priv->dma_buf_sz);
1146 }
1147 
1148 /**
1149  * stmmac_clear_tx_descriptors - clear tx descriptors
1150  * @priv: driver private structure
1151  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
1154  */
1155 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1156 {
1157 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1158 	int i;
1159 
1160 	/* Clear the TX descriptors */
1161 	for (i = 0; i < DMA_TX_SIZE; i++)
1162 		if (priv->extend_desc)
1163 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1164 					priv->mode, (i == DMA_TX_SIZE - 1));
1165 		else
1166 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1167 					priv->mode, (i == DMA_TX_SIZE - 1));
1168 }
1169 
1170 /**
1171  * stmmac_clear_descriptors - clear descriptors
1172  * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
1175  */
1176 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1177 {
1178 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1179 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1180 	u32 queue;
1181 
1182 	/* Clear the RX descriptors */
1183 	for (queue = 0; queue < rx_queue_cnt; queue++)
1184 		stmmac_clear_rx_descriptors(priv, queue);
1185 
1186 	/* Clear the TX descriptors */
1187 	for (queue = 0; queue < tx_queue_cnt; queue++)
1188 		stmmac_clear_tx_descriptors(priv, queue);
1189 }
1190 
1191 /**
1192  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1193  * @priv: driver private structure
1194  * @p: descriptor pointer
1195  * @i: descriptor index
1196  * @flags: gfp flag
1197  * @queue: RX queue index
1198  * Description: this function is called to allocate a receive buffer, perform
1199  * the DMA mapping and init the descriptor.
1200  */
1201 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1202 				  int i, gfp_t flags, u32 queue)
1203 {
1204 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1205 	struct sk_buff *skb;
1206 
1207 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1208 	if (!skb) {
1209 		netdev_err(priv->dev,
1210 			   "%s: Rx init fails; skb is NULL\n", __func__);
1211 		return -ENOMEM;
1212 	}
1213 	rx_q->rx_skbuff[i] = skb;
1214 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1215 						priv->dma_buf_sz,
1216 						DMA_FROM_DEVICE);
1217 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1218 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1219 		dev_kfree_skb_any(skb);
1220 		return -EINVAL;
1221 	}
1222 
1223 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1224 
1225 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1226 		stmmac_init_desc3(priv, p);
1227 
1228 	return 0;
1229 }
1230 
1231 /**
1232  * stmmac_free_rx_buffer - free RX dma buffers
1233  * @priv: private structure
1234  * @queue: RX queue index
1235  * @i: buffer index.
1236  */
1237 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1238 {
1239 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1240 
1241 	if (rx_q->rx_skbuff[i]) {
1242 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1243 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1244 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1245 	}
1246 	rx_q->rx_skbuff[i] = NULL;
1247 }
1248 
1249 /**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
1253  * @i: buffer index.
1254  */
1255 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1256 {
1257 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1258 
1259 	if (tx_q->tx_skbuff_dma[i].buf) {
1260 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1261 			dma_unmap_page(priv->device,
1262 				       tx_q->tx_skbuff_dma[i].buf,
1263 				       tx_q->tx_skbuff_dma[i].len,
1264 				       DMA_TO_DEVICE);
1265 		else
1266 			dma_unmap_single(priv->device,
1267 					 tx_q->tx_skbuff_dma[i].buf,
1268 					 tx_q->tx_skbuff_dma[i].len,
1269 					 DMA_TO_DEVICE);
1270 	}
1271 
1272 	if (tx_q->tx_skbuff[i]) {
1273 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1274 		tx_q->tx_skbuff[i] = NULL;
1275 		tx_q->tx_skbuff_dma[i].buf = 0;
1276 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1277 	}
1278 }
1279 
1280 /**
1281  * init_dma_rx_desc_rings - init the RX descriptor rings
1282  * @dev: net device structure
1283  * @flags: gfp flag.
1284  * Description: this function initializes the DMA RX descriptors
1285  * and allocates the socket buffers. It supports the chained and ring
1286  * modes.
1287  */
1288 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1289 {
1290 	struct stmmac_priv *priv = netdev_priv(dev);
1291 	u32 rx_count = priv->plat->rx_queues_to_use;
1292 	int ret = -ENOMEM;
1293 	int bfsize = 0;
1294 	int queue;
1295 	int i;
1296 
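	/* Select the RX buffer size: use the 16KiB buffer when the MTU
	 * requires it and the mode supports it, otherwise derive the size
	 * from the MTU.
	 */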
1297 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1298 	if (bfsize < 0)
1299 		bfsize = 0;
1300 
1301 	if (bfsize < BUF_SIZE_16KiB)
1302 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1303 
1304 	priv->dma_buf_sz = bfsize;
1305 
1306 	/* RX INITIALIZATION */
1307 	netif_dbg(priv, probe, priv->dev,
1308 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1309 
1310 	for (queue = 0; queue < rx_count; queue++) {
1311 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1312 
1313 		netif_dbg(priv, probe, priv->dev,
1314 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1315 			  (u32)rx_q->dma_rx_phy);
1316 
1317 		for (i = 0; i < DMA_RX_SIZE; i++) {
1318 			struct dma_desc *p;
1319 
1320 			if (priv->extend_desc)
1321 				p = &((rx_q->dma_erx + i)->basic);
1322 			else
1323 				p = rx_q->dma_rx + i;
1324 
1325 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1326 						     queue);
1327 			if (ret)
1328 				goto err_init_rx_buffers;
1329 
1330 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1331 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1332 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1333 		}
1334 
1335 		rx_q->cur_rx = 0;
1336 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1337 
1338 		stmmac_clear_rx_descriptors(priv, queue);
1339 
1340 		/* Setup the chained descriptor addresses */
1341 		if (priv->mode == STMMAC_CHAIN_MODE) {
1342 			if (priv->extend_desc)
1343 				stmmac_mode_init(priv, rx_q->dma_erx,
1344 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1345 			else
1346 				stmmac_mode_init(priv, rx_q->dma_rx,
1347 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1348 		}
1349 	}
1350 
1351 	buf_sz = bfsize;
1352 
1353 	return 0;
1354 
1355 err_init_rx_buffers:
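	/* Unwind: free the buffers allocated so far, walking the queues
	 * backwards from the one that failed.
	 */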
1356 	while (queue >= 0) {
1357 		while (--i >= 0)
1358 			stmmac_free_rx_buffer(priv, queue, i);
1359 
1360 		if (queue == 0)
1361 			break;
1362 
1363 		i = DMA_RX_SIZE;
1364 		queue--;
1365 	}
1366 
1367 	return ret;
1368 }
1369 
1370 /**
1371  * init_dma_tx_desc_rings - init the TX descriptor rings
1372  * @dev: net device structure.
1373  * Description: this function initializes the DMA TX descriptors
1374  * and allocates the socket buffers. It supports the chained and ring
1375  * modes.
1376  */
1377 static int init_dma_tx_desc_rings(struct net_device *dev)
1378 {
1379 	struct stmmac_priv *priv = netdev_priv(dev);
1380 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1381 	u32 queue;
1382 	int i;
1383 
1384 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1385 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1386 
1387 		netif_dbg(priv, probe, priv->dev,
1388 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1389 			 (u32)tx_q->dma_tx_phy);
1390 
1391 		/* Setup the chained descriptor addresses */
1392 		if (priv->mode == STMMAC_CHAIN_MODE) {
1393 			if (priv->extend_desc)
1394 				stmmac_mode_init(priv, tx_q->dma_etx,
1395 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1396 			else
1397 				stmmac_mode_init(priv, tx_q->dma_tx,
1398 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1399 		}
1400 
1401 		for (i = 0; i < DMA_TX_SIZE; i++) {
1402 			struct dma_desc *p;
1403 			if (priv->extend_desc)
1404 				p = &((tx_q->dma_etx + i)->basic);
1405 			else
1406 				p = tx_q->dma_tx + i;
1407 
1408 			stmmac_clear_desc(priv, p);
1409 
1410 			tx_q->tx_skbuff_dma[i].buf = 0;
1411 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1412 			tx_q->tx_skbuff_dma[i].len = 0;
1413 			tx_q->tx_skbuff_dma[i].last_segment = false;
1414 			tx_q->tx_skbuff[i] = NULL;
1415 		}
1416 
1417 		tx_q->dirty_tx = 0;
1418 		tx_q->cur_tx = 0;
1419 		tx_q->mss = 0;
1420 
1421 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 /**
1428  * init_dma_desc_rings - init the RX/TX descriptor rings
1429  * @dev: net device structure
1430  * @flags: gfp flag.
1431  * Description: this function initializes the DMA RX/TX descriptors
1432  * and allocates the socket buffers. It supports the chained and ring
1433  * modes.
1434  */
1435 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1436 {
1437 	struct stmmac_priv *priv = netdev_priv(dev);
1438 	int ret;
1439 
1440 	ret = init_dma_rx_desc_rings(dev, flags);
1441 	if (ret)
1442 		return ret;
1443 
1444 	ret = init_dma_tx_desc_rings(dev);
1445 
1446 	stmmac_clear_descriptors(priv);
1447 
1448 	if (netif_msg_hw(priv))
1449 		stmmac_display_rings(priv);
1450 
1451 	return ret;
1452 }
1453 
1454 /**
1455  * dma_free_rx_skbufs - free RX dma buffers
1456  * @priv: private structure
1457  * @queue: RX queue index
1458  */
1459 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1460 {
1461 	int i;
1462 
1463 	for (i = 0; i < DMA_RX_SIZE; i++)
1464 		stmmac_free_rx_buffer(priv, queue, i);
1465 }
1466 
1467 /**
1468  * dma_free_tx_skbufs - free TX dma buffers
1469  * @priv: private structure
1470  * @queue: TX queue index
1471  */
1472 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1473 {
1474 	int i;
1475 
1476 	for (i = 0; i < DMA_TX_SIZE; i++)
1477 		stmmac_free_tx_buffer(priv, queue, i);
1478 }
1479 
1480 /**
1481  * free_dma_rx_desc_resources - free RX dma desc resources
1482  * @priv: private structure
1483  */
1484 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1485 {
1486 	u32 rx_count = priv->plat->rx_queues_to_use;
1487 	u32 queue;
1488 
1489 	/* Free RX queue resources */
1490 	for (queue = 0; queue < rx_count; queue++) {
1491 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1492 
1493 		/* Release the DMA RX socket buffers */
1494 		dma_free_rx_skbufs(priv, queue);
1495 
1496 		/* Free DMA regions of consistent memory previously allocated */
1497 		if (!priv->extend_desc)
1498 			dma_free_coherent(priv->device,
1499 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1500 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1501 		else
1502 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1503 					  sizeof(struct dma_extended_desc),
1504 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1505 
1506 		kfree(rx_q->rx_skbuff_dma);
1507 		kfree(rx_q->rx_skbuff);
1508 	}
1509 }
1510 
1511 /**
1512  * free_dma_tx_desc_resources - free TX dma desc resources
1513  * @priv: private structure
1514  */
1515 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1516 {
1517 	u32 tx_count = priv->plat->tx_queues_to_use;
1518 	u32 queue;
1519 
1520 	/* Free TX queue resources */
1521 	for (queue = 0; queue < tx_count; queue++) {
1522 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1523 
1524 		/* Release the DMA TX socket buffers */
1525 		dma_free_tx_skbufs(priv, queue);
1526 
1527 		/* Free DMA regions of consistent memory previously allocated */
1528 		if (!priv->extend_desc)
1529 			dma_free_coherent(priv->device,
1530 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1531 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1532 		else
1533 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1534 					  sizeof(struct dma_extended_desc),
1535 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1536 
1537 		kfree(tx_q->tx_skbuff_dma);
1538 		kfree(tx_q->tx_skbuff);
1539 	}
1540 }
1541 
1542 /**
1543  * alloc_dma_rx_desc_resources - alloc RX resources.
1544  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the RX path. It pre-allocates
 * the RX socket buffers in order to allow the zero-copy mechanism.
1549  */
1550 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1551 {
1552 	u32 rx_count = priv->plat->rx_queues_to_use;
1553 	int ret = -ENOMEM;
1554 	u32 queue;
1555 
1556 	/* RX queues buffers and DMA */
1557 	for (queue = 0; queue < rx_count; queue++) {
1558 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1559 
1560 		rx_q->queue_index = queue;
1561 		rx_q->priv_data = priv;
1562 
1563 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1564 						    sizeof(dma_addr_t),
1565 						    GFP_KERNEL);
1566 		if (!rx_q->rx_skbuff_dma)
1567 			goto err_dma;
1568 
1569 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1570 						sizeof(struct sk_buff *),
1571 						GFP_KERNEL);
1572 		if (!rx_q->rx_skbuff)
1573 			goto err_dma;
1574 
1575 		if (priv->extend_desc) {
1576 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1577 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1578 							   &rx_q->dma_rx_phy,
1579 							   GFP_KERNEL);
1580 			if (!rx_q->dma_erx)
1581 				goto err_dma;
1582 
1583 		} else {
1584 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1585 							  DMA_RX_SIZE * sizeof(struct dma_desc),
1586 							  &rx_q->dma_rx_phy,
1587 							  GFP_KERNEL);
1588 			if (!rx_q->dma_rx)
1589 				goto err_dma;
1590 		}
1591 	}
1592 
1593 	return 0;
1594 
1595 err_dma:
1596 	free_dma_rx_desc_resources(priv);
1597 
1598 	return ret;
1599 }
1600 
1601 /**
1602  * alloc_dma_tx_desc_resources - alloc TX resources.
1603  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX path.
1608  */
1609 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1610 {
1611 	u32 tx_count = priv->plat->tx_queues_to_use;
1612 	int ret = -ENOMEM;
1613 	u32 queue;
1614 
1615 	/* TX queues buffers and DMA */
1616 	for (queue = 0; queue < tx_count; queue++) {
1617 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1618 
1619 		tx_q->queue_index = queue;
1620 		tx_q->priv_data = priv;
1621 
1622 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1623 						    sizeof(*tx_q->tx_skbuff_dma),
1624 						    GFP_KERNEL);
1625 		if (!tx_q->tx_skbuff_dma)
1626 			goto err_dma;
1627 
1628 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1629 						sizeof(struct sk_buff *),
1630 						GFP_KERNEL);
1631 		if (!tx_q->tx_skbuff)
1632 			goto err_dma;
1633 
1634 		if (priv->extend_desc) {
1635 			tx_q->dma_etx = dma_alloc_coherent(priv->device,
1636 							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1637 							   &tx_q->dma_tx_phy,
1638 							   GFP_KERNEL);
1639 			if (!tx_q->dma_etx)
1640 				goto err_dma;
1641 		} else {
1642 			tx_q->dma_tx = dma_alloc_coherent(priv->device,
1643 							  DMA_TX_SIZE * sizeof(struct dma_desc),
1644 							  &tx_q->dma_tx_phy,
1645 							  GFP_KERNEL);
1646 			if (!tx_q->dma_tx)
1647 				goto err_dma;
1648 		}
1649 	}
1650 
1651 	return 0;
1652 
1653 err_dma:
1654 	free_dma_tx_desc_resources(priv);
1655 
1656 	return ret;
1657 }
1658 
1659 /**
1660  * alloc_dma_desc_resources - alloc TX/RX resources.
1661  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
1666  */
1667 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1668 {
1669 	/* RX Allocation */
1670 	int ret = alloc_dma_rx_desc_resources(priv);
1671 
1672 	if (ret)
1673 		return ret;
1674 
1675 	ret = alloc_dma_tx_desc_resources(priv);
1676 
1677 	return ret;
1678 }
1679 
1680 /**
1681  * free_dma_desc_resources - free dma desc resources
1682  * @priv: private structure
1683  */
1684 static void free_dma_desc_resources(struct stmmac_priv *priv)
1685 {
1686 	/* Release the DMA RX socket buffers */
1687 	free_dma_rx_desc_resources(priv);
1688 
1689 	/* Release the DMA TX socket buffers */
1690 	free_dma_tx_desc_resources(priv);
1691 }
1692 
1693 /**
1694  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1695  *  @priv: driver private structure
1696  *  Description: It is used for enabling the rx queues in the MAC
1697  */
1698 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1699 {
1700 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1701 	int queue;
1702 	u8 mode;
1703 
1704 	for (queue = 0; queue < rx_queues_count; queue++) {
1705 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1706 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1707 	}
1708 }
1709 
1710 /**
1711  * stmmac_start_rx_dma - start RX DMA channel
1712  * @priv: driver private structure
1713  * @chan: RX channel index
1714  * Description:
 * This starts an RX DMA channel
1716  */
1717 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1718 {
1719 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1720 	stmmac_start_rx(priv, priv->ioaddr, chan);
1721 }
1722 
1723 /**
1724  * stmmac_start_tx_dma - start TX DMA channel
1725  * @priv: driver private structure
1726  * @chan: TX channel index
1727  * Description:
1728  * This starts a TX DMA channel
1729  */
1730 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1731 {
1732 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1733 	stmmac_start_tx(priv, priv->ioaddr, chan);
1734 }
1735 
1736 /**
1737  * stmmac_stop_rx_dma - stop RX DMA channel
1738  * @priv: driver private structure
1739  * @chan: RX channel index
1740  * Description:
 * This stops an RX DMA channel
1742  */
1743 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1744 {
1745 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1746 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1747 }
1748 
1749 /**
1750  * stmmac_stop_tx_dma - stop TX DMA channel
1751  * @priv: driver private structure
1752  * @chan: TX channel index
1753  * Description:
1754  * This stops a TX DMA channel
1755  */
1756 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1757 {
1758 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1759 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1760 }
1761 
1762 /**
1763  * stmmac_start_all_dma - start all RX and TX DMA channels
1764  * @priv: driver private structure
1765  * Description:
1766  * This starts all the RX and TX DMA channels
1767  */
1768 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1769 {
1770 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1771 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1772 	u32 chan = 0;
1773 
1774 	for (chan = 0; chan < rx_channels_count; chan++)
1775 		stmmac_start_rx_dma(priv, chan);
1776 
1777 	for (chan = 0; chan < tx_channels_count; chan++)
1778 		stmmac_start_tx_dma(priv, chan);
1779 }
1780 
1781 /**
1782  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1783  * @priv: driver private structure
1784  * Description:
1785  * This stops the RX and TX DMA channels
1786  */
1787 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1788 {
1789 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1790 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1791 	u32 chan = 0;
1792 
1793 	for (chan = 0; chan < rx_channels_count; chan++)
1794 		stmmac_stop_rx_dma(priv, chan);
1795 
1796 	for (chan = 0; chan < tx_channels_count; chan++)
1797 		stmmac_stop_tx_dma(priv, chan);
1798 }
1799 
1800 /**
1801  *  stmmac_dma_operation_mode - HW DMA operation mode
1802  *  @priv: driver private structure
1803  *  Description: it is used for configuring the DMA operation mode register in
1804  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1805  */
1806 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1807 {
1808 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1809 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1810 	int rxfifosz = priv->plat->rx_fifo_size;
1811 	int txfifosz = priv->plat->tx_fifo_size;
1812 	u32 txmode = 0;
1813 	u32 rxmode = 0;
1814 	u32 chan = 0;
1815 	u8 qmode = 0;
1816 
1817 	if (rxfifosz == 0)
1818 		rxfifosz = priv->dma_cap.rx_fifo_size;
1819 	if (txfifosz == 0)
1820 		txfifosz = priv->dma_cap.tx_fifo_size;
1821 
1822 	/* Adjust for real per queue fifo size */
1823 	rxfifosz /= rx_channels_count;
1824 	txfifosz /= tx_channels_count;
1825 
1826 	if (priv->plat->force_thresh_dma_mode) {
1827 		txmode = tc;
1828 		rxmode = tc;
1829 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported
		 * 2) There being no buggy Jumbo frame support
		 *    that requires csum insertion in the TDES to be skipped.
		 */
1837 		txmode = SF_DMA_MODE;
1838 		rxmode = SF_DMA_MODE;
1839 		priv->xstats.threshold = SF_DMA_MODE;
1840 	} else {
1841 		txmode = tc;
1842 		rxmode = SF_DMA_MODE;
1843 	}
1844 
1845 	/* configure all channels */
1846 	for (chan = 0; chan < rx_channels_count; chan++) {
1847 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1848 
1849 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1850 				rxfifosz, qmode);
1851 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1852 				chan);
1853 	}
1854 
1855 	for (chan = 0; chan < tx_channels_count; chan++) {
1856 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1857 
1858 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1859 				txfifosz, qmode);
1860 	}
1861 }
1862 
1863 /**
1864  * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: NAPI budget limiting the number of packets handled
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
1868  */
1869 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1870 {
1871 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1872 	unsigned int bytes_compl = 0, pkts_compl = 0;
1873 	unsigned int entry, count = 0;
1874 
1875 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1876 
1877 	priv->xstats.tx_clean++;
1878 
1879 	entry = tx_q->dirty_tx;
1880 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1881 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1882 		struct dma_desc *p;
1883 		int status;
1884 
1885 		if (priv->extend_desc)
1886 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1887 		else
1888 			p = tx_q->dma_tx + entry;
1889 
1890 		status = stmmac_tx_status(priv, &priv->dev->stats,
1891 				&priv->xstats, p, priv->ioaddr);
1892 		/* Check if the descriptor is owned by the DMA */
1893 		if (unlikely(status & tx_dma_own))
1894 			break;
1895 
1896 		count++;
1897 
1898 		/* Make sure descriptor fields are read after reading
1899 		 * the own bit.
1900 		 */
1901 		dma_rmb();
1902 
1903 		/* Just consider the last segment and ...*/
1904 		if (likely(!(status & tx_not_ls))) {
1905 			/* ... verify the status error condition */
1906 			if (unlikely(status & tx_err)) {
1907 				priv->dev->stats.tx_errors++;
1908 			} else {
1909 				priv->dev->stats.tx_packets++;
1910 				priv->xstats.tx_pkt_n++;
1911 			}
1912 			stmmac_get_tx_hwtstamp(priv, p, skb);
1913 		}
1914 
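		/* Unmap the buffer that was DMA-mapped at xmit time; page
		 * fragments and the linear skb head use different unmap
		 * helpers.
		 */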
1915 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1916 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1917 				dma_unmap_page(priv->device,
1918 					       tx_q->tx_skbuff_dma[entry].buf,
1919 					       tx_q->tx_skbuff_dma[entry].len,
1920 					       DMA_TO_DEVICE);
1921 			else
1922 				dma_unmap_single(priv->device,
1923 						 tx_q->tx_skbuff_dma[entry].buf,
1924 						 tx_q->tx_skbuff_dma[entry].len,
1925 						 DMA_TO_DEVICE);
1926 			tx_q->tx_skbuff_dma[entry].buf = 0;
1927 			tx_q->tx_skbuff_dma[entry].len = 0;
1928 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1929 		}
1930 
1931 		stmmac_clean_desc3(priv, tx_q, p);
1932 
1933 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1934 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1935 
1936 		if (likely(skb != NULL)) {
1937 			pkts_compl++;
1938 			bytes_compl += skb->len;
1939 			dev_consume_skb_any(skb);
1940 			tx_q->tx_skbuff[entry] = NULL;
1941 		}
1942 
1943 		stmmac_release_tx_desc(priv, p, priv->mode);
1944 
1945 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1946 	}
1947 	tx_q->dirty_tx = entry;
1948 
1949 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1950 				  pkts_compl, bytes_compl);
1951 
1952 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1953 								queue))) &&
1954 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1955 
1956 		netif_dbg(priv, tx_done, priv->dev,
1957 			  "%s: restart transmit\n", __func__);
1958 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1959 	}
1960 
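	/* With EEE enabled, try to re-enter the low power idle state now
	 * that descriptors have been reclaimed.
	 */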
1961 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1962 		stmmac_enable_eee_mode(priv);
1963 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1964 	}
1965 
1966 	/* We still have pending packets, let's call for a new scheduling */
1967 	if (tx_q->dirty_tx != tx_q->cur_tx)
1968 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1969 
1970 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1971 
1972 	return count;
1973 }
1974 
1975 /**
1976  * stmmac_tx_err - to manage the tx error
1977  * @priv: driver private structure
1978  * @chan: channel index
1979  * Description: it cleans the descriptors and restarts the transmission
1980  * in case of transmission errors.
1981  */
1982 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1983 {
1984 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1985 	int i;
1986 
1987 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1988 
1989 	stmmac_stop_tx_dma(priv, chan);
1990 	dma_free_tx_skbufs(priv, chan);
1991 	for (i = 0; i < DMA_TX_SIZE; i++)
1992 		if (priv->extend_desc)
1993 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1994 					priv->mode, (i == DMA_TX_SIZE - 1));
1995 		else
1996 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1997 					priv->mode, (i == DMA_TX_SIZE - 1));
1998 	tx_q->dirty_tx = 0;
1999 	tx_q->cur_tx = 0;
2000 	tx_q->mss = 0;
2001 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2002 	stmmac_start_tx_dma(priv, chan);
2003 
2004 	priv->dev->stats.tx_errors++;
2005 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2006 }
2007 
2008 /**
2009  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2010  *  @priv: driver private structure
2011  *  @txmode: TX operating mode
2012  *  @rxmode: RX operating mode
2013  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2016  *  mode.
2017  */
2018 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2019 					  u32 rxmode, u32 chan)
2020 {
2021 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2022 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2023 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2024 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2025 	int rxfifosz = priv->plat->rx_fifo_size;
2026 	int txfifosz = priv->plat->tx_fifo_size;
2027 
2028 	if (rxfifosz == 0)
2029 		rxfifosz = priv->dma_cap.rx_fifo_size;
2030 	if (txfifosz == 0)
2031 		txfifosz = priv->dma_cap.tx_fifo_size;
2032 
2033 	/* Adjust for real per queue fifo size */
2034 	rxfifosz /= rx_channels_count;
2035 	txfifosz /= tx_channels_count;
2036 
2037 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2038 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2039 }
2040 
2041 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2042 {
2043 	int ret;
2044 
2045 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2046 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2047 	if (ret && (ret != -EINVAL)) {
2048 		stmmac_global_err(priv);
2049 		return true;
2050 	}
2051 
2052 	return false;
2053 }
2054 
2055 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2056 {
2057 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2058 						 &priv->xstats, chan);
2059 	struct stmmac_channel *ch = &priv->channel[chan];
2060 
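	/* Further DMA interrupts for this channel are masked before NAPI is
	 * scheduled; the poll routines re-enable them on completion.
	 */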
2061 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2062 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2063 		napi_schedule_irqoff(&ch->rx_napi);
2064 	}
2065 
2066 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2067 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2068 		napi_schedule_irqoff(&ch->tx_napi);
2069 	}
2070 
2071 	return status;
2072 }
2073 
2074 /**
2075  * stmmac_dma_interrupt - DMA ISR
2076  * @priv: driver private structure
2077  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
2080  */
2081 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2082 {
2083 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2084 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2085 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2086 				tx_channel_count : rx_channel_count;
2087 	u32 chan;
2088 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2089 
2090 	/* Make sure we never check beyond our status buffer. */
2091 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2092 		channels_to_check = ARRAY_SIZE(status);
2093 
2094 	for (chan = 0; chan < channels_to_check; chan++)
2095 		status[chan] = stmmac_napi_check(priv, chan);
2096 
2097 	for (chan = 0; chan < tx_channel_count; chan++) {
2098 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2099 			/* Try to bump up the dma threshold on this failure */
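			/* The threshold grows in steps of 64 and is no
			 * longer bumped once it exceeds 256.
			 */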
2100 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2101 			    (tc <= 256)) {
2102 				tc += 64;
2103 				if (priv->plat->force_thresh_dma_mode)
2104 					stmmac_set_dma_operation_mode(priv,
2105 								      tc,
2106 								      tc,
2107 								      chan);
2108 				else
2109 					stmmac_set_dma_operation_mode(priv,
2110 								    tc,
2111 								    SF_DMA_MODE,
2112 								    chan);
2113 				priv->xstats.threshold = tc;
2114 			}
2115 		} else if (unlikely(status[chan] == tx_hard_error)) {
2116 			stmmac_tx_err(priv, chan);
2117 		}
2118 	}
2119 }
2120 
2121 /**
 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq because the counters are managed in SW.
2125  */
2126 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2127 {
2128 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2129 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2130 
2131 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2132 
2133 	if (priv->dma_cap.rmon) {
2134 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2135 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else {
		netdev_info(priv->dev, "No MAC Management Counters available\n");
	}
2138 }
2139 
2140 /**
2141  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2142  * @priv: driver private structure
2143  * Description:
 *  newer GMAC chip generations have a dedicated register to indicate the
 *  presence of the optional features/functions.
 *  This can also be used to override the values passed through the
 *  platform, which are necessary for old MAC10/100 and GMAC chips.
2148  */
2149 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2150 {
2151 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2152 }
2153 
2154 /**
2155  * stmmac_check_ether_addr - check if the MAC addr is valid
2156  * @priv: driver private structure
2157  * Description:
 * it verifies that the MAC address is valid; if it is not, it
 * generates a random MAC address.
2160  */
2161 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2162 {
2163 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2164 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2165 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2166 			eth_hw_addr_random(priv->dev);
2167 		netdev_info(priv->dev, "device MAC address %pM\n",
2168 			    priv->dev->dev_addr);
2169 	}
2170 }
2171 
2172 /**
2173  * stmmac_init_dma_engine - DMA init.
2174  * @priv: driver private structure
2175  * Description:
2176  * It inits the DMA invoking the specific MAC/GMAC callback.
2177  * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is used for the MAC or GMAC.
2179  */
2180 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2181 {
2182 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2183 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2184 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2185 	struct stmmac_rx_queue *rx_q;
2186 	struct stmmac_tx_queue *tx_q;
2187 	u32 chan = 0;
2188 	int atds = 0;
2189 	int ret = 0;
2190 
2191 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2192 		dev_err(priv->device, "Invalid DMA configuration\n");
2193 		return -EINVAL;
2194 	}
2195 
2196 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2197 		atds = 1;
2198 
2199 	ret = stmmac_reset(priv, priv->ioaddr);
2200 	if (ret) {
2201 		dev_err(priv->device, "Failed to reset the dma\n");
2202 		return ret;
2203 	}
2204 
2205 	/* DMA Configuration */
2206 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2207 
2208 	if (priv->plat->axi)
2209 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2210 
2211 	/* DMA RX Channel Configuration */
2212 	for (chan = 0; chan < rx_channels_count; chan++) {
2213 		rx_q = &priv->rx_queue[chan];
2214 
2215 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2216 				    rx_q->dma_rx_phy, chan);
2217 
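		/* The RX tail pointer is set past the last descriptor so the
		 * DMA can process the whole pre-allocated ring.
		 */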
2218 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2219 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2220 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2221 				       rx_q->rx_tail_addr, chan);
2222 	}
2223 
2224 	/* DMA TX Channel Configuration */
2225 	for (chan = 0; chan < tx_channels_count; chan++) {
2226 		tx_q = &priv->tx_queue[chan];
2227 
2228 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2229 				    tx_q->dma_tx_phy, chan);
2230 
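		/* The TX tail pointer starts at the ring base and is advanced
		 * by the xmit paths as descriptors are queued.
		 */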
2231 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2232 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2233 				       tx_q->tx_tail_addr, chan);
2234 	}
2235 
2236 	/* DMA CSR Channel configuration */
2237 	for (chan = 0; chan < dma_csr_ch; chan++)
2238 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2239 
2240 	return ret;
2241 }
2242 
2243 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2244 {
2245 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2246 
2247 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2248 }
2249 
2250 /**
2251  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: timer_list pointer
 * Description:
 * This is the timer handler that schedules the TX NAPI context to run
 * stmmac_tx_clean.
2255  */
2256 static void stmmac_tx_timer(struct timer_list *t)
2257 {
2258 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2259 	struct stmmac_priv *priv = tx_q->priv_data;
2260 	struct stmmac_channel *ch;
2261 
2262 	ch = &priv->channel[tx_q->queue_index];
2263 
2264 	/*
2265 	 * If NAPI is already running we can miss some events. Let's rearm
2266 	 * the timer and try again.
2267 	 */
2268 	if (likely(napi_schedule_prep(&ch->tx_napi)))
2269 		__napi_schedule(&ch->tx_napi);
2270 	else
2271 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
2272 }
2273 
2274 /**
2275  * stmmac_init_tx_coalesce - init tx mitigation options.
2276  * @priv: driver private structure
2277  * Description:
2278  * This inits the transmit coalesce parameters: i.e. timer rate,
2279  * timer handler and default threshold used for enabling the
2280  * interrupt on completion bit.
2281  */
2282 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2283 {
2284 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2285 	u32 chan;
2286 
2287 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2288 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2289 
2290 	for (chan = 0; chan < tx_channel_count; chan++) {
2291 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2292 
2293 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2294 	}
2295 }
2296 
2297 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2298 {
2299 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2300 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2301 	u32 chan;
2302 
2303 	/* set TX ring length */
2304 	for (chan = 0; chan < tx_channels_count; chan++)
2305 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2306 				(DMA_TX_SIZE - 1), chan);
2307 
2308 	/* set RX ring length */
2309 	for (chan = 0; chan < rx_channels_count; chan++)
2310 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2311 				(DMA_RX_SIZE - 1), chan);
2312 }
2313 
2314 /**
2315  *  stmmac_set_tx_queue_weight - Set TX queue weight
2316  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
2318  */
2319 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2320 {
2321 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2322 	u32 weight;
2323 	u32 queue;
2324 
2325 	for (queue = 0; queue < tx_queues_count; queue++) {
2326 		weight = priv->plat->tx_queues_cfg[queue].weight;
2327 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2328 	}
2329 }
2330 
2331 /**
2332  *  stmmac_configure_cbs - Configure CBS in TX queue
2333  *  @priv: driver private structure
2334  *  Description: It is used for configuring CBS in AVB TX queues
2335  */
2336 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2337 {
2338 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2339 	u32 mode_to_use;
2340 	u32 queue;
2341 
2342 	/* queue 0 is reserved for legacy traffic */
2343 	for (queue = 1; queue < tx_queues_count; queue++) {
2344 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2345 		if (mode_to_use == MTL_QUEUE_DCB)
2346 			continue;
2347 
2348 		stmmac_config_cbs(priv, priv->hw,
2349 				priv->plat->tx_queues_cfg[queue].send_slope,
2350 				priv->plat->tx_queues_cfg[queue].idle_slope,
2351 				priv->plat->tx_queues_cfg[queue].high_credit,
2352 				priv->plat->tx_queues_cfg[queue].low_credit,
2353 				queue);
2354 	}
2355 }
2356 
2357 /**
2358  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2359  *  @priv: driver private structure
2360  *  Description: It is used for mapping RX queues to RX dma channels
2361  */
2362 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2363 {
2364 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2365 	u32 queue;
2366 	u32 chan;
2367 
2368 	for (queue = 0; queue < rx_queues_count; queue++) {
2369 		chan = priv->plat->rx_queues_cfg[queue].chan;
2370 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2371 	}
2372 }
2373 
2374 /**
2375  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2376  *  @priv: driver private structure
2377  *  Description: It is used for configuring the RX Queue Priority
2378  */
2379 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2380 {
2381 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2382 	u32 queue;
2383 	u32 prio;
2384 
2385 	for (queue = 0; queue < rx_queues_count; queue++) {
2386 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2387 			continue;
2388 
2389 		prio = priv->plat->rx_queues_cfg[queue].prio;
2390 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2391 	}
2392 }
2393 
2394 /**
2395  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2396  *  @priv: driver private structure
2397  *  Description: It is used for configuring the TX Queue Priority
2398  */
2399 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2400 {
2401 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2402 	u32 queue;
2403 	u32 prio;
2404 
2405 	for (queue = 0; queue < tx_queues_count; queue++) {
2406 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2407 			continue;
2408 
2409 		prio = priv->plat->tx_queues_cfg[queue].prio;
2410 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2411 	}
2412 }
2413 
2414 /**
2415  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2416  *  @priv: driver private structure
2417  *  Description: It is used for configuring the RX queue routing
2418  */
2419 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2420 {
2421 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2422 	u32 queue;
2423 	u8 packet;
2424 
2425 	for (queue = 0; queue < rx_queues_count; queue++) {
2426 		/* no specific packet type routing specified for the queue */
2427 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2428 			continue;
2429 
2430 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2431 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2432 	}
2433 }
2434 
2435 /**
2436  *  stmmac_mtl_configuration - Configure MTL
2437  *  @priv: driver private structure
 *  Description: It is used for configuring MTL
2439  */
2440 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2441 {
2442 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2443 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2444 
2445 	if (tx_queues_count > 1)
2446 		stmmac_set_tx_queue_weight(priv);
2447 
2448 	/* Configure MTL RX algorithms */
2449 	if (rx_queues_count > 1)
2450 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2451 				priv->plat->rx_sched_algorithm);
2452 
2453 	/* Configure MTL TX algorithms */
2454 	if (tx_queues_count > 1)
2455 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2456 				priv->plat->tx_sched_algorithm);
2457 
2458 	/* Configure CBS in AVB TX queues */
2459 	if (tx_queues_count > 1)
2460 		stmmac_configure_cbs(priv);
2461 
2462 	/* Map RX MTL to DMA channels */
2463 	stmmac_rx_queue_dma_chan_map(priv);
2464 
2465 	/* Enable MAC RX Queues */
2466 	stmmac_mac_enable_rx_queues(priv);
2467 
2468 	/* Set RX priorities */
2469 	if (rx_queues_count > 1)
2470 		stmmac_mac_config_rx_queues_prio(priv);
2471 
2472 	/* Set TX priorities */
2473 	if (tx_queues_count > 1)
2474 		stmmac_mac_config_tx_queues_prio(priv);
2475 
2476 	/* Set RX routing */
2477 	if (rx_queues_count > 1)
2478 		stmmac_mac_config_rx_queues_routing(priv);
2479 }
2480 
2481 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2482 {
2483 	if (priv->dma_cap.asp) {
2484 		netdev_info(priv->dev, "Enabling Safety Features\n");
2485 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2486 	} else {
2487 		netdev_info(priv->dev, "No Safety Features support found\n");
2488 	}
2489 }
2490 
2491 /**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state: the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers) and the DMA is ready to start receiving and
 *  transmitting.
2499  *  Return value:
2500  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2501  *  file on failure.
2502  */
2503 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2504 {
2505 	struct stmmac_priv *priv = netdev_priv(dev);
2506 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2507 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2508 	u32 chan;
2509 	int ret;
2510 
2511 	/* DMA initialization and SW reset */
2512 	ret = stmmac_init_dma_engine(priv);
2513 	if (ret < 0) {
2514 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2515 			   __func__);
2516 		return ret;
2517 	}
2518 
2519 	/* Copy the MAC addr into the HW  */
2520 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2521 
2522 	/* PS and related bits will be programmed according to the speed */
2523 	if (priv->hw->pcs) {
2524 		int speed = priv->plat->mac_port_sel_speed;
2525 
2526 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2527 		    (speed == SPEED_1000)) {
2528 			priv->hw->ps = speed;
2529 		} else {
2530 			dev_warn(priv->device, "invalid port speed\n");
2531 			priv->hw->ps = 0;
2532 		}
2533 	}
2534 
2535 	/* Initialize the MAC Core */
2536 	stmmac_core_init(priv, priv->hw, dev);
2537 
	/* Initialize MTL */
2539 	stmmac_mtl_configuration(priv);
2540 
2541 	/* Initialize Safety Features */
2542 	stmmac_safety_feat_configuration(priv);
2543 
2544 	ret = stmmac_rx_ipc(priv, priv->hw);
2545 	if (!ret) {
2546 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2547 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2548 		priv->hw->rx_csum = 0;
2549 	}
2550 
2551 	/* Enable the MAC Rx/Tx */
2552 	stmmac_mac_set(priv, priv->ioaddr, true);
2553 
2554 	/* Set the HW DMA mode and the COE */
2555 	stmmac_dma_operation_mode(priv);
2556 
2557 	stmmac_mmc_setup(priv);
2558 
2559 	if (init_ptp) {
2560 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2561 		if (ret < 0)
2562 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2563 
2564 		ret = stmmac_init_ptp(priv);
2565 		if (ret == -EOPNOTSUPP)
2566 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2567 		else if (ret)
2568 			netdev_warn(priv->dev, "PTP init failed\n");
2569 	}
2570 
2571 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2572 
2573 	if (priv->use_riwt) {
2574 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2575 		if (!ret)
2576 			priv->rx_riwt = MAX_DMA_RIWT;
2577 	}
2578 
2579 	if (priv->hw->pcs)
2580 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2581 
2582 	/* set TX and RX rings length */
2583 	stmmac_set_rings_length(priv);
2584 
2585 	/* Enable TSO */
2586 	if (priv->tso) {
2587 		for (chan = 0; chan < tx_cnt; chan++)
2588 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2589 	}
2590 
2591 	/* Start the ball rolling... */
2592 	stmmac_start_all_dma(priv);
2593 
2594 	return 0;
2595 }
2596 
2597 static void stmmac_hw_teardown(struct net_device *dev)
2598 {
2599 	struct stmmac_priv *priv = netdev_priv(dev);
2600 
2601 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2602 }
2603 
2604 /**
2605  *  stmmac_open - open entry point of the driver
2606  *  @dev : pointer to the device structure.
2607  *  Description:
2608  *  This function is the open entry point of the driver.
2609  *  Return value:
2610  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2611  *  file on failure.
2612  */
2613 static int stmmac_open(struct net_device *dev)
2614 {
2615 	struct stmmac_priv *priv = netdev_priv(dev);
2616 	u32 chan;
2617 	int ret;
2618 
2619 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2620 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2621 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2622 		ret = stmmac_init_phy(dev);
2623 		if (ret) {
2624 			netdev_err(priv->dev,
2625 				   "%s: Cannot attach to PHY (error: %d)\n",
2626 				   __func__, ret);
2627 			return ret;
2628 		}
2629 	}
2630 
2631 	/* Extra statistics */
2632 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2633 	priv->xstats.threshold = tc;
2634 
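	/* Set the RX buffer size and the copybreak threshold: frames shorter
	 * than rx_copybreak are copied into a fresh skb on receive so the
	 * preallocated DMA buffer can be reused directly.
	 */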
2635 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2636 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2637 
2638 	ret = alloc_dma_desc_resources(priv);
2639 	if (ret < 0) {
2640 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2641 			   __func__);
2642 		goto dma_desc_error;
2643 	}
2644 
2645 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2646 	if (ret < 0) {
2647 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2648 			   __func__);
2649 		goto init_error;
2650 	}
2651 
2652 	ret = stmmac_hw_setup(dev, true);
2653 	if (ret < 0) {
2654 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2655 		goto init_error;
2656 	}
2657 
2658 	stmmac_init_tx_coalesce(priv);
2659 
2660 	if (dev->phydev)
2661 		phy_start(dev->phydev);
2662 
2663 	/* Request the IRQ lines */
2664 	ret = request_irq(dev->irq, stmmac_interrupt,
2665 			  IRQF_SHARED, dev->name, dev);
2666 	if (unlikely(ret < 0)) {
2667 		netdev_err(priv->dev,
2668 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2669 			   __func__, dev->irq, ret);
2670 		goto irq_error;
2671 	}
2672 
	/* Request the Wake IRQ in case another line is used for WoL */
2674 	if (priv->wol_irq != dev->irq) {
2675 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2676 				  IRQF_SHARED, dev->name, dev);
2677 		if (unlikely(ret < 0)) {
2678 			netdev_err(priv->dev,
2679 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2680 				   __func__, priv->wol_irq, ret);
2681 			goto wolirq_error;
2682 		}
2683 	}
2684 
	/* Request the LPI IRQ in case a separate line is used for it */
2686 	if (priv->lpi_irq > 0) {
2687 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2688 				  dev->name, dev);
2689 		if (unlikely(ret < 0)) {
2690 			netdev_err(priv->dev,
2691 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2692 				   __func__, priv->lpi_irq, ret);
2693 			goto lpiirq_error;
2694 		}
2695 	}
2696 
2697 	stmmac_enable_all_queues(priv);
2698 	stmmac_start_all_queues(priv);
2699 
2700 	return 0;
2701 
2702 lpiirq_error:
2703 	if (priv->wol_irq != dev->irq)
2704 		free_irq(priv->wol_irq, dev);
2705 wolirq_error:
2706 	free_irq(dev->irq, dev);
2707 irq_error:
2708 	if (dev->phydev)
2709 		phy_stop(dev->phydev);
2710 
2711 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2712 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2713 
2714 	stmmac_hw_teardown(dev);
2715 init_error:
2716 	free_dma_desc_resources(priv);
2717 dma_desc_error:
2718 	if (dev->phydev)
2719 		phy_disconnect(dev->phydev);
2720 
2721 	return ret;
2722 }
2723 
2724 /**
2725  *  stmmac_release - close entry point of the driver
2726  *  @dev : device pointer.
2727  *  Description:
2728  *  This is the stop entry point of the driver.
2729  */
2730 static int stmmac_release(struct net_device *dev)
2731 {
2732 	struct stmmac_priv *priv = netdev_priv(dev);
2733 	u32 chan;
2734 
2735 	if (priv->eee_enabled)
2736 		del_timer_sync(&priv->eee_ctrl_timer);
2737 
2738 	/* Stop and disconnect the PHY */
2739 	if (dev->phydev) {
2740 		phy_stop(dev->phydev);
2741 		phy_disconnect(dev->phydev);
2742 	}
2743 
2744 	stmmac_stop_all_queues(priv);
2745 
2746 	stmmac_disable_all_queues(priv);
2747 
2748 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2749 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2750 
2751 	/* Free the IRQ lines */
2752 	free_irq(dev->irq, dev);
2753 	if (priv->wol_irq != dev->irq)
2754 		free_irq(priv->wol_irq, dev);
2755 	if (priv->lpi_irq > 0)
2756 		free_irq(priv->lpi_irq, dev);
2757 
2758 	/* Stop TX/RX DMA and clear the descriptors */
2759 	stmmac_stop_all_dma(priv);
2760 
2761 	/* Release and free the Rx/Tx resources */
2762 	free_dma_desc_resources(priv);
2763 
2764 	/* Disable the MAC Rx/Tx */
2765 	stmmac_mac_set(priv, priv->ioaddr, false);
2766 
2767 	netif_carrier_off(dev);
2768 
2769 	stmmac_release_ptp(priv);
2770 
2771 	return 0;
2772 }
2773 
2774 /**
 *  stmmac_tso_allocator - allocate and fill TSO descriptors
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills a descriptor and requests new descriptors according to
 *  the buffer length to fill.
2784  */
2785 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2786 				 int total_len, bool last_segment, u32 queue)
2787 {
2788 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2789 	struct dma_desc *desc;
2790 	u32 buff_size;
2791 	int tmp_len;
2792 
2793 	tmp_len = total_len;
2794 
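	/* Split the payload into TSO_MAX_BUFF_SIZE chunks, consuming one
	 * descriptor per chunk and flagging only the final one as the last
	 * segment.
	 */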
2795 	while (tmp_len > 0) {
2796 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2797 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2798 		desc = tx_q->dma_tx + tx_q->cur_tx;
2799 
2800 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2801 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2802 			    TSO_MAX_BUFF_SIZE : tmp_len;
2803 
2804 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2805 				0, 1,
2806 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2807 				0, 0);
2808 
2809 		tmp_len -= TSO_MAX_BUFF_SIZE;
2810 	}
2811 }
2812 
2813 /**
2814  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2815  *  @skb : the socket buffer
2816  *  @dev : device pointer
2817  *  Description: this is the transmit function that is called on TSO frames
2818  *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming in case of TSO frames:
2820  *
2821  *  First Descriptor
2822  *   --------
2823  *   | DES0 |---> buffer1 = L2/L3/L4 header
2824  *   | DES1 |---> TCP Payload (can continue on next descr...)
2825  *   | DES2 |---> buffer 1 and 2 len
2826  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2827  *   --------
2828  *	|
2829  *     ...
2830  *	|
2831  *   --------
2832  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2833  *   | DES1 | --|
2834  *   | DES2 | --> buffer 1 and 2 len
2835  *   | DES3 |
2836  *   --------
2837  *
 * The MSS is fixed while TSO is enabled, so the TDES3 context field only
 * needs to be programmed when the MSS value changes.
2839  */
2840 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2841 {
2842 	struct dma_desc *desc, *first, *mss_desc = NULL;
2843 	struct stmmac_priv *priv = netdev_priv(dev);
2844 	int nfrags = skb_shinfo(skb)->nr_frags;
2845 	u32 queue = skb_get_queue_mapping(skb);
2846 	unsigned int first_entry, des;
2847 	struct stmmac_tx_queue *tx_q;
2848 	int tmp_pay_len = 0;
2849 	u32 pay_len, mss;
2850 	u8 proto_hdr_len;
2851 	int i;
2852 
2853 	tx_q = &priv->tx_queue[queue];
2854 
2855 	/* Compute header lengths */
2856 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2857 
	/* Desc availability based on threshold should be safe enough */
2859 	if (unlikely(stmmac_tx_avail(priv, queue) <
2860 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2861 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2862 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2863 								queue));
2864 			/* This is a hard error, log it. */
2865 			netdev_err(priv->dev,
2866 				   "%s: Tx Ring full when queue awake\n",
2867 				   __func__);
2868 		}
2869 		return NETDEV_TX_BUSY;
2870 	}
2871 
2872 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2873 
2874 	mss = skb_shinfo(skb)->gso_size;
2875 
2876 	/* set new MSS value if needed */
2877 	if (mss != tx_q->mss) {
2878 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2879 		stmmac_set_mss(priv, mss_desc, mss);
2880 		tx_q->mss = mss;
2881 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2882 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2883 	}
2884 
2885 	if (netif_msg_tx_queued(priv)) {
2886 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2887 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2888 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2889 			skb->data_len);
2890 	}
2891 
2892 	first_entry = tx_q->cur_tx;
2893 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2894 
2895 	desc = tx_q->dma_tx + first_entry;
2896 	first = desc;
2897 
2898 	/* first descriptor: fill Headers on Buf1 */
2899 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2900 			     DMA_TO_DEVICE);
2901 	if (dma_mapping_error(priv->device, des))
2902 		goto dma_map_err;
2903 
2904 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2905 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2906 
2907 	first->des0 = cpu_to_le32(des);
2908 
2909 	/* Fill start of payload in buff2 of first descriptor */
2910 	if (pay_len)
2911 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2912 
2913 	/* If needed take extra descriptors to fill the remaining payload */
2914 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2915 
2916 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2917 
2918 	/* Prepare fragments */
2919 	for (i = 0; i < nfrags; i++) {
2920 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2921 
2922 		des = skb_frag_dma_map(priv->device, frag, 0,
2923 				       skb_frag_size(frag),
2924 				       DMA_TO_DEVICE);
2925 		if (dma_mapping_error(priv->device, des))
2926 			goto dma_map_err;
2927 
2928 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2929 				     (i == nfrags - 1), queue);
2930 
2931 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2932 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2933 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2934 	}
2935 
2936 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2937 
2938 	/* Only the last descriptor gets to point to the skb. */
2939 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2940 
2941 	/* We've used all descriptors we need for this skb, however,
2942 	 * advance cur_tx so that it references a fresh descriptor.
2943 	 * ndo_start_xmit will fill this descriptor the next time it's
2944 	 * called and stmmac_tx_clean may clean up to this descriptor.
2945 	 */
2946 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2947 
2948 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2949 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2950 			  __func__);
2951 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2952 	}
2953 
2954 	dev->stats.tx_bytes += skb->len;
2955 	priv->xstats.tx_tso_frames++;
2956 	priv->xstats.tx_tso_nfrags += nfrags;
2957 
2958 	/* Manage tx mitigation */
2959 	tx_q->tx_count_frames += nfrags + 1;
2960 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2961 		stmmac_set_tx_ic(priv, desc);
2962 		priv->xstats.tx_set_ic_bit++;
2963 		tx_q->tx_count_frames = 0;
2964 	} else {
2965 		stmmac_tx_timer_arm(priv, queue);
2966 	}
2967 
2968 	skb_tx_timestamp(skb);
2969 
2970 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2971 		     priv->hwts_tx_en)) {
2972 		/* declare that device is doing timestamping */
2973 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2974 		stmmac_enable_tx_timestamp(priv, first);
2975 	}
2976 
2977 	/* Complete the first descriptor before granting the DMA */
2978 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2979 			proto_hdr_len,
2980 			pay_len,
2981 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2982 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2983 
2984 	/* If context desc is used to change MSS */
2985 	if (mss_desc) {
2986 		/* Make sure that first descriptor has been completely
2987 		 * written, including its own bit. This is because MSS is
2988 		 * actually before first descriptor, so we need to make
2989 		 * sure that MSS's own bit is the last thing written.
2990 		 */
2991 		dma_wmb();
2992 		stmmac_set_tx_owner(priv, mss_desc);
2993 	}
2994 
2995 	/* The own bit must be the latest setting done when prepare the
2996 	 * descriptor and then barrier is needed to make sure that
2997 	 * all is coherent before granting the DMA engine.
2998 	 */
2999 	wmb();
3000 
3001 	if (netif_msg_pktdata(priv)) {
3002 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3003 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3004 			tx_q->cur_tx, first, nfrags);
3005 
3006 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3007 
3008 		pr_info(">>> frame to be transmitted: ");
3009 		print_pkt(skb->data, skb_headlen(skb));
3010 	}
3011 
3012 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3013 
3014 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3015 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3016 
3017 	return NETDEV_TX_OK;
3018 
3019 dma_map_err:
3020 	dev_err(priv->device, "Tx dma map failed\n");
3021 	dev_kfree_skb(skb);
3022 	priv->dev->stats.tx_dropped++;
3023 	return NETDEV_TX_OK;
3024 }
3025 
3026 /**
3027  *  stmmac_xmit - Tx entry point of the driver
3028  *  @skb : the socket buffer
3029  *  @dev : device pointer
3030  *  Description : this is the tx entry point of the driver.
3031  *  It programs the chain or the ring and supports oversized frames
 *  and the SG feature.
3033  */
3034 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3035 {
3036 	struct stmmac_priv *priv = netdev_priv(dev);
3037 	unsigned int nopaged_len = skb_headlen(skb);
3038 	int i, csum_insertion = 0, is_jumbo = 0;
3039 	u32 queue = skb_get_queue_mapping(skb);
3040 	int nfrags = skb_shinfo(skb)->nr_frags;
3041 	int entry;
3042 	unsigned int first_entry;
3043 	struct dma_desc *desc, *first;
3044 	struct stmmac_tx_queue *tx_q;
3045 	unsigned int enh_desc;
3046 	unsigned int des;
3047 
3048 	tx_q = &priv->tx_queue[queue];
3049 
3050 	if (priv->tx_path_in_lpi_mode)
3051 		stmmac_disable_eee_mode(priv);
3052 
3053 	/* Manage oversized TCP frames for GMAC4 device */
3054 	if (skb_is_gso(skb) && priv->tso) {
3055 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3056 			/*
3057 			 * There is no way to determine the number of TSO
			 * capable Queues. Let's always use Queue 0
3059 			 * because if TSO is supported then at least this
3060 			 * one will be capable.
3061 			 */
3062 			skb_set_queue_mapping(skb, 0);
3063 
3064 			return stmmac_tso_xmit(skb, dev);
3065 		}
3066 	}
3067 
3068 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3069 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3070 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3071 								queue));
3072 			/* This is a hard error, log it. */
3073 			netdev_err(priv->dev,
3074 				   "%s: Tx Ring full when queue awake\n",
3075 				   __func__);
3076 		}
3077 		return NETDEV_TX_BUSY;
3078 	}
3079 
3080 	entry = tx_q->cur_tx;
3081 	first_entry = entry;
3082 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3083 
3084 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3085 
3086 	if (likely(priv->extend_desc))
3087 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3088 	else
3089 		desc = tx_q->dma_tx + entry;
3090 
3091 	first = desc;
3092 
3093 	enh_desc = priv->plat->enh_desc;
3094 	/* To program the descriptors according to the size of the frame */
3095 	if (enh_desc)
3096 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3097 
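	/* Jumbo frames are handed to the mode specific (ring/chain) helper,
	 * which maps the buffers and returns the last used entry.
	 */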
3098 	if (unlikely(is_jumbo)) {
3099 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3100 		if (unlikely(entry < 0) && (entry != -EINVAL))
3101 			goto dma_map_err;
3102 	}
3103 
3104 	for (i = 0; i < nfrags; i++) {
3105 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3106 		int len = skb_frag_size(frag);
3107 		bool last_segment = (i == (nfrags - 1));
3108 
3109 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3110 		WARN_ON(tx_q->tx_skbuff[entry]);
3111 
3112 		if (likely(priv->extend_desc))
3113 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3114 		else
3115 			desc = tx_q->dma_tx + entry;
3116 
3117 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3118 				       DMA_TO_DEVICE);
3119 		if (dma_mapping_error(priv->device, des))
3120 			goto dma_map_err; /* should reuse desc w/o issues */
3121 
3122 		tx_q->tx_skbuff_dma[entry].buf = des;
3123 
3124 		stmmac_set_desc_addr(priv, desc, des);
3125 
3126 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3127 		tx_q->tx_skbuff_dma[entry].len = len;
3128 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3129 
3130 		/* Prepare the descriptor and set the own bit too */
3131 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3132 				priv->mode, 1, last_segment, skb->len);
3133 	}
3134 
3135 	/* Only the last descriptor gets to point to the skb. */
3136 	tx_q->tx_skbuff[entry] = skb;
3137 
3138 	/* We've used all descriptors we need for this skb, however,
3139 	 * advance cur_tx so that it references a fresh descriptor.
3140 	 * ndo_start_xmit will fill this descriptor the next time it's
3141 	 * called and stmmac_tx_clean may clean up to this descriptor.
3142 	 */
3143 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3144 	tx_q->cur_tx = entry;
3145 
3146 	if (netif_msg_pktdata(priv)) {
3147 		void *tx_head;
3148 
3149 		netdev_dbg(priv->dev,
3150 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3151 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3152 			   entry, first, nfrags);
3153 
3154 		if (priv->extend_desc)
3155 			tx_head = (void *)tx_q->dma_etx;
3156 		else
3157 			tx_head = (void *)tx_q->dma_tx;
3158 
3159 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3160 
3161 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3162 		print_pkt(skb->data, skb->len);
3163 	}
3164 
3165 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3166 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3167 			  __func__);
3168 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3169 	}
3170 
3171 	dev->stats.tx_bytes += skb->len;
3172 
3173 	/* According to the coalesce parameter the IC bit for the latest
3174 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
3176 	 * element in case of no SG.
3177 	 */
3178 	tx_q->tx_count_frames += nfrags + 1;
3179 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3180 		stmmac_set_tx_ic(priv, desc);
3181 		priv->xstats.tx_set_ic_bit++;
3182 		tx_q->tx_count_frames = 0;
3183 	} else {
3184 		stmmac_tx_timer_arm(priv, queue);
3185 	}
3186 
3187 	skb_tx_timestamp(skb);
3188 
3189 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3190 	 * problems because all the descriptors are actually ready to be
3191 	 * passed to the DMA engine.
3192 	 */
3193 	if (likely(!is_jumbo)) {
3194 		bool last_segment = (nfrags == 0);
3195 
3196 		des = dma_map_single(priv->device, skb->data,
3197 				     nopaged_len, DMA_TO_DEVICE);
3198 		if (dma_mapping_error(priv->device, des))
3199 			goto dma_map_err;
3200 
3201 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3202 
3203 		stmmac_set_desc_addr(priv, first, des);
3204 
3205 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3206 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3207 
3208 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3209 			     priv->hwts_tx_en)) {
3210 			/* declare that device is doing timestamping */
3211 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3212 			stmmac_enable_tx_timestamp(priv, first);
3213 		}
3214 
3215 		/* Prepare the first descriptor setting the OWN bit too */
3216 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3217 				csum_insertion, priv->mode, 1, last_segment,
3218 				skb->len);
3219 	} else {
3220 		stmmac_set_tx_owner(priv, first);
3221 	}
3222 
3223 	/* The own bit must be the latest setting done when prepare the
3224 	 * descriptor and then barrier is needed to make sure that
3225 	 * all is coherent before granting the DMA engine.
3226 	 */
3227 	wmb();
3228 
3229 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3230 
3231 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3232 
3233 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3234 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3235 
3236 	return NETDEV_TX_OK;
3237 
3238 dma_map_err:
3239 	netdev_err(priv->dev, "Tx DMA map failed\n");
3240 	dev_kfree_skb(skb);
3241 	priv->dev->stats.tx_dropped++;
3242 	return NETDEV_TX_OK;
3243 }
3244 
3245 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3246 {
3247 	struct vlan_ethhdr *veth;
3248 	__be16 vlan_proto;
3249 	u16 vlanid;
3250 
3251 	veth = (struct vlan_ethhdr *)skb->data;
3252 	vlan_proto = veth->h_vlan_proto;
3253 
3254 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3255 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3256 	    (vlan_proto == htons(ETH_P_8021AD) &&
3257 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3258 		/* pop the vlan tag */
3259 		vlanid = ntohs(veth->h_vlan_TCI);
3260 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3261 		skb_pull(skb, VLAN_HLEN);
3262 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3263 	}
3264 }
3265 
3266 
3267 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3268 {
3269 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3270 		return 0;
3271 
3272 	return 1;
3273 }
3274 
3275 /**
3276  * stmmac_rx_refill - refill used skb preallocated buffers
3277  * @priv: driver private structure
3278  * @queue: RX queue index
 * Description : this reallocates the skbs for the reception process,
 * which is based on zero-copy.
3281  */
3282 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3283 {
3284 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3285 	int dirty = stmmac_rx_dirty(priv, queue);
3286 	unsigned int entry = rx_q->dirty_rx;
3287 
3288 	int bfsize = priv->dma_buf_sz;
3289 
3290 	while (dirty-- > 0) {
3291 		struct dma_desc *p;
3292 
3293 		if (priv->extend_desc)
3294 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3295 		else
3296 			p = rx_q->dma_rx + entry;
3297 
3298 		if (likely(!rx_q->rx_skbuff[entry])) {
3299 			struct sk_buff *skb;
3300 
3301 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3302 			if (unlikely(!skb)) {
3303 				/* so for a while no zero-copy! */
3304 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3305 				if (unlikely(net_ratelimit()))
3306 					dev_err(priv->device,
3307 						"fail to alloc skb entry %d\n",
3308 						entry);
3309 				break;
3310 			}
3311 
3312 			rx_q->rx_skbuff[entry] = skb;
3313 			rx_q->rx_skbuff_dma[entry] =
3314 			    dma_map_single(priv->device, skb->data, bfsize,
3315 					   DMA_FROM_DEVICE);
3316 			if (dma_mapping_error(priv->device,
3317 					      rx_q->rx_skbuff_dma[entry])) {
3318 				netdev_err(priv->dev, "Rx DMA map failed\n");
3319 				dev_kfree_skb(skb);
3320 				break;
3321 			}
3322 
3323 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3324 			stmmac_refill_desc3(priv, rx_q, p);
3325 
3326 			if (rx_q->rx_zeroc_thresh > 0)
3327 				rx_q->rx_zeroc_thresh--;
3328 
3329 			netif_dbg(priv, rx_status, priv->dev,
3330 				  "refill entry #%d\n", entry);
3331 		}
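		/* Make sure the descriptor is fully written before the own
		 * bit hands it back to the DMA.
		 */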
3332 		dma_wmb();
3333 
3334 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3335 
3336 		dma_wmb();
3337 
3338 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3339 	}
3340 	rx_q->dirty_rx = entry;
3341 }
3342 
3343 /**
3344  * stmmac_rx - manage the receive process
3345  * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
3349  * It gets all the frames inside the ring.
3350  */
3351 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3352 {
3353 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3354 	struct stmmac_channel *ch = &priv->channel[queue];
3355 	unsigned int next_entry = rx_q->cur_rx;
3356 	int coe = priv->hw->rx_csum;
3357 	unsigned int count = 0;
3358 	bool xmac;
3359 
3360 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3361 
3362 	if (netif_msg_rx_status(priv)) {
3363 		void *rx_head;
3364 
3365 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3366 		if (priv->extend_desc)
3367 			rx_head = (void *)rx_q->dma_erx;
3368 		else
3369 			rx_head = (void *)rx_q->dma_rx;
3370 
3371 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3372 	}
3373 	while (count < limit) {
3374 		int entry, status;
3375 		struct dma_desc *p;
3376 		struct dma_desc *np;
3377 
3378 		entry = next_entry;
3379 
3380 		if (priv->extend_desc)
3381 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3382 		else
3383 			p = rx_q->dma_rx + entry;
3384 
3385 		/* read the status of the incoming frame */
3386 		status = stmmac_rx_status(priv, &priv->dev->stats,
3387 				&priv->xstats, p);
3388 		/* check if managed by the DMA otherwise go ahead */
3389 		if (unlikely(status & dma_own))
3390 			break;
3391 
3392 		count++;
3393 
3394 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3395 		next_entry = rx_q->cur_rx;
3396 
3397 		if (priv->extend_desc)
3398 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3399 		else
3400 			np = rx_q->dma_rx + next_entry;
3401 
3402 		prefetch(np);
3403 
3404 		if (priv->extend_desc)
3405 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3406 					&priv->xstats, rx_q->dma_erx + entry);
3407 		if (unlikely(status == discard_frame)) {
3408 			priv->dev->stats.rx_errors++;
3409 			if (priv->hwts_rx_en && !priv->extend_desc) {
3410 				/* DESC2 & DESC3 will be overwritten by device
3411 				 * with timestamp value, hence reinitialize
3412 				 * them in stmmac_rx_refill() function so that
3413 				 * device can reuse it.
3414 				 */
3415 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3416 				rx_q->rx_skbuff[entry] = NULL;
3417 				dma_unmap_single(priv->device,
3418 						 rx_q->rx_skbuff_dma[entry],
3419 						 priv->dma_buf_sz,
3420 						 DMA_FROM_DEVICE);
3421 			}
3422 		} else {
3423 			struct sk_buff *skb;
3424 			int frame_len;
3425 			unsigned int des;
3426 
3427 			stmmac_get_desc_addr(priv, p, &des);
3428 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3429 
3430 			/*  If frame length is greater than skb buffer size
3431 			 *  (preallocated during init) then the packet is
3432 			 *  ignored
3433 			 */
3434 			if (frame_len > priv->dma_buf_sz) {
3435 				if (net_ratelimit())
3436 					netdev_err(priv->dev,
3437 						   "len %d larger than size (%d)\n",
3438 						   frame_len, priv->dma_buf_sz);
3439 				priv->dev->stats.rx_length_errors++;
3440 				continue;
3441 			}
3442 
3443 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3444 			 * Type frames (LLC/LLC-SNAP)
3445 			 *
3446 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3447 			 * feature is always disabled and packets need to be
3448 			 * stripped manually.
3449 			 */
3450 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3451 			    unlikely(status != llc_snap))
3452 				frame_len -= ETH_FCS_LEN;
3453 
3454 			if (netif_msg_rx_status(priv)) {
3455 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3456 					   p, entry, des);
3457 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3458 					   frame_len, status);
3459 			}
3460 
			/* Zero-copy is always used for all sizes
			 * in case of GMAC4 because the used descriptors
			 * always need to be refilled.
			 */
3465 			if (unlikely(!xmac &&
3466 				     ((frame_len < priv->rx_copybreak) ||
3467 				     stmmac_rx_threshold_count(rx_q)))) {
3468 				skb = netdev_alloc_skb_ip_align(priv->dev,
3469 								frame_len);
3470 				if (unlikely(!skb)) {
3471 					if (net_ratelimit())
3472 						dev_warn(priv->device,
3473 							 "packet dropped\n");
3474 					priv->dev->stats.rx_dropped++;
3475 					continue;
3476 				}
3477 
3478 				dma_sync_single_for_cpu(priv->device,
3479 							rx_q->rx_skbuff_dma
3480 							[entry], frame_len,
3481 							DMA_FROM_DEVICE);
3482 				skb_copy_to_linear_data(skb,
3483 							rx_q->
3484 							rx_skbuff[entry]->data,
3485 							frame_len);
3486 
3487 				skb_put(skb, frame_len);
3488 				dma_sync_single_for_device(priv->device,
3489 							   rx_q->rx_skbuff_dma
3490 							   [entry], frame_len,
3491 							   DMA_FROM_DEVICE);
3492 			} else {
3493 				skb = rx_q->rx_skbuff[entry];
3494 				if (unlikely(!skb)) {
3495 					if (net_ratelimit())
3496 						netdev_err(priv->dev,
3497 							   "%s: Inconsistent Rx chain\n",
3498 							   priv->dev->name);
3499 					priv->dev->stats.rx_dropped++;
3500 					continue;
3501 				}
3502 				prefetch(skb->data - NET_IP_ALIGN);
3503 				rx_q->rx_skbuff[entry] = NULL;
3504 				rx_q->rx_zeroc_thresh++;
3505 
3506 				skb_put(skb, frame_len);
3507 				dma_unmap_single(priv->device,
3508 						 rx_q->rx_skbuff_dma[entry],
3509 						 priv->dma_buf_sz,
3510 						 DMA_FROM_DEVICE);
3511 			}
3512 
3513 			if (netif_msg_pktdata(priv)) {
				netdev_dbg(priv->dev, "frame received (%d bytes)\n",
					   frame_len);
3516 				print_pkt(skb->data, frame_len);
3517 			}
3518 
3519 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3520 
3521 			stmmac_rx_vlan(priv->dev, skb);
3522 
3523 			skb->protocol = eth_type_trans(skb, priv->dev);
3524 
3525 			if (unlikely(!coe))
3526 				skb_checksum_none_assert(skb);
3527 			else
3528 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3529 
3530 			napi_gro_receive(&ch->rx_napi, skb);
3531 
3532 			priv->dev->stats.rx_packets++;
3533 			priv->dev->stats.rx_bytes += frame_len;
3534 		}
3535 	}
3536 
3537 	stmmac_rx_refill(priv, queue);
3538 
3539 	priv->xstats.rx_pkt_n += count;
3540 
3541 	return count;
3542 }
3543 
3544 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3545 {
3546 	struct stmmac_channel *ch =
3547 		container_of(napi, struct stmmac_channel, rx_napi);
3548 	struct stmmac_priv *priv = ch->priv_data;
3549 	u32 chan = ch->index;
3550 	int work_done;
3551 
3552 	priv->xstats.napi_poll++;
3553 
3554 	work_done = stmmac_rx(priv, budget, chan);
3555 	if (work_done < budget && napi_complete_done(napi, work_done))
3556 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3557 	return work_done;
3558 }
3559 
3560 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3561 {
3562 	struct stmmac_channel *ch =
3563 		container_of(napi, struct stmmac_channel, tx_napi);
3564 	struct stmmac_priv *priv = ch->priv_data;
3565 	struct stmmac_tx_queue *tx_q;
3566 	u32 chan = ch->index;
3567 	int work_done;
3568 
3569 	priv->xstats.napi_poll++;
3570 
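	/* stmmac_tx_clean() may reclaim up to DMA_TX_SIZE descriptors in one
	 * pass; clamp the reported work to the NAPI budget so that
	 * napi_complete_done() accounting stays consistent.
	 */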
3571 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3572 	work_done = min(work_done, budget);
3573 
3574 	if (work_done < budget && napi_complete_done(napi, work_done))
3575 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3576 
3577 	/* Force transmission restart */
3578 	tx_q = &priv->tx_queue[chan];
3579 	if (tx_q->cur_tx != tx_q->dirty_tx) {
3580 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
3581 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3582 				       chan);
3583 	}
3584 
3585 	return work_done;
3586 }
3587 
3588 /**
3589  *  stmmac_tx_timeout
3590  *  @dev : Pointer to net device structure
3591  *  Description: this function is called when a packet transmission fails to
3592  *   complete within a reasonable time. The driver will mark the error in the
3593  *   netdev structure and arrange for the device to be reset to a sane state
3594  *   in order to transmit a new packet.
3595  */
3596 static void stmmac_tx_timeout(struct net_device *dev)
3597 {
3598 	struct stmmac_priv *priv = netdev_priv(dev);
3599 
3600 	stmmac_global_err(priv);
3601 }
3602 
3603 /**
3604  *  stmmac_set_rx_mode - entry point for multicast addressing
3605  *  @dev : pointer to the device structure
3606  *  Description:
3607  *  This function is a driver entry point which gets called by the kernel
3608  *  whenever multicast addresses must be enabled/disabled.
3609  *  Return value:
3610  *  void.
3611  */
3612 static void stmmac_set_rx_mode(struct net_device *dev)
3613 {
3614 	struct stmmac_priv *priv = netdev_priv(dev);
3615 
3616 	stmmac_set_filter(priv, priv->hw, dev);
3617 }
3618 
3619 /**
3620  *  stmmac_change_mtu - entry point to change MTU size for the device.
3621  *  @dev : device pointer.
3622  *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transmission Unit (MTU) is used by the network
 *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3626  *  Return value:
3627  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3628  *  file on failure.
3629  */
3630 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3631 {
3632 	struct stmmac_priv *priv = netdev_priv(dev);
3633 
3634 	if (netif_running(dev)) {
3635 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3636 		return -EBUSY;
3637 	}
3638 
3639 	dev->mtu = new_mtu;
3640 
3641 	netdev_update_features(dev);
3642 
3643 	return 0;
3644 }
3645 
3646 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3647 					     netdev_features_t features)
3648 {
3649 	struct stmmac_priv *priv = netdev_priv(dev);
3650 
3651 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3652 		features &= ~NETIF_F_RXCSUM;
3653 
3654 	if (!priv->plat->tx_coe)
3655 		features &= ~NETIF_F_CSUM_MASK;
3656 
	/* Some GMAC devices have buggy Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and don't use SF.
	 */
3662 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3663 		features &= ~NETIF_F_CSUM_MASK;
3664 
	/* Enable or disable TSO according to the ethtool request */
3666 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3667 		if (features & NETIF_F_TSO)
3668 			priv->tso = true;
3669 		else
3670 			priv->tso = false;
3671 	}
3672 
3673 	return features;
3674 }
3675 
3676 static int stmmac_set_features(struct net_device *netdev,
3677 			       netdev_features_t features)
3678 {
3679 	struct stmmac_priv *priv = netdev_priv(netdev);
3680 
	/* Keep the COE Type if RX checksum offload is supported */
3682 	if (features & NETIF_F_RXCSUM)
3683 		priv->hw->rx_csum = priv->plat->rx_coe;
3684 	else
3685 		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has already been set and it will be
	 * fixed up in case of an issue.
	 */
3689 	stmmac_rx_ipc(priv, priv->hw);
3690 
3691 	return 0;
3692 }
3693 
3694 /**
3695  *  stmmac_interrupt - main ISR
3696  *  @irq: interrupt number.
3697  *  @dev_id: to pass the net device pointer.
3698  *  Description: this is the main driver interrupt service routine.
3699  *  It can call:
3700  *  o DMA service routine (to manage incoming frame reception and transmission
3701  *    status)
3702  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3703  *    interrupts.
3704  */
3705 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3706 {
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv;
	u32 rx_cnt, tx_cnt;
	u32 queues_count;
	u32 queue;
	bool xmac;

	/* Validate the cookie before dereferencing it */
	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	priv = netdev_priv(dev);
	rx_cnt = priv->plat->rx_queues_to_use;
	tx_cnt = priv->plat->tx_queues_to_use;
	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);
3725 
3726 	/* Check if adapter is up */
3727 	if (test_bit(STMMAC_DOWN, &priv->state))
3728 		return IRQ_HANDLED;
3729 	/* Check if a fatal error happened */
3730 	if (stmmac_safety_feat_interrupt(priv))
3731 		return IRQ_HANDLED;
3732 
3733 	/* To handle GMAC own interrupts */
3734 	if ((priv->plat->has_gmac) || xmac) {
3735 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3736 		int mtl_status;
3737 
3738 		if (unlikely(status)) {
3739 			/* For LPI we need to save the tx status */
3740 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3741 				priv->tx_path_in_lpi_mode = true;
3742 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3743 				priv->tx_path_in_lpi_mode = false;
3744 		}
3745 
3746 		for (queue = 0; queue < queues_count; queue++) {
3747 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3748 
3749 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3750 								queue);
3751 			if (mtl_status != -EINVAL)
3752 				status |= mtl_status;
3753 
3754 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3755 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3756 						       rx_q->rx_tail_addr,
3757 						       queue);
3758 		}
3759 
3760 		/* PCS link status */
3761 		if (priv->hw->pcs) {
3762 			if (priv->xstats.pcs_link)
3763 				netif_carrier_on(dev);
3764 			else
3765 				netif_carrier_off(dev);
3766 		}
3767 	}
3768 
3769 	/* To handle DMA interrupts */
3770 	stmmac_dma_interrupt(priv);
3771 
3772 	return IRQ_HANDLED;
3773 }
3774 
3775 #ifdef CONFIG_NET_POLL_CONTROLLER
3776 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3777  * to allow network I/O with interrupts disabled.
3778  */
3779 static void stmmac_poll_controller(struct net_device *dev)
3780 {
3781 	disable_irq(dev->irq);
3782 	stmmac_interrupt(dev->irq, dev);
3783 	enable_irq(dev->irq);
3784 }
3785 #endif
3786 
3787 /**
3788  *  stmmac_ioctl - Entry point for the Ioctl
3789  *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
3791  *  a proprietary structure used to pass information to the driver.
3792  *  @cmd: IOCTL command
3793  *  Description:
3794  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3795  */
3796 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3797 {
3798 	int ret = -EOPNOTSUPP;
3799 
3800 	if (!netif_running(dev))
3801 		return -EINVAL;
3802 
3803 	switch (cmd) {
3804 	case SIOCGMIIPHY:
3805 	case SIOCGMIIREG:
3806 	case SIOCSMIIREG:
3807 		if (!dev->phydev)
3808 			return -EINVAL;
3809 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3810 		break;
3811 	case SIOCSHWTSTAMP:
3812 		ret = stmmac_hwtstamp_set(dev, rq);
3813 		break;
3814 	case SIOCGHWTSTAMP:
3815 		ret = stmmac_hwtstamp_get(dev, rq);
3816 		break;
3817 	default:
3818 		break;
3819 	}
3820 
3821 	return ret;
3822 }
3823 
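/* tc block callback: quiesce all queues while the HW is reprogrammed and
 * offload the supported classifiers (currently cls_u32 on chain 0 only).
 */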
3824 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3825 				    void *cb_priv)
3826 {
3827 	struct stmmac_priv *priv = cb_priv;
3828 	int ret = -EOPNOTSUPP;
3829 
3830 	stmmac_disable_all_queues(priv);
3831 
3832 	switch (type) {
3833 	case TC_SETUP_CLSU32:
3834 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3835 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3836 		break;
3837 	default:
3838 		break;
3839 	}
3840 
3841 	stmmac_enable_all_queues(priv);
3842 	return ret;
3843 }
3844 
3845 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3846 				 struct tc_block_offload *f)
3847 {
3848 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3849 		return -EOPNOTSUPP;
3850 
3851 	switch (f->command) {
3852 	case TC_BLOCK_BIND:
3853 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3854 				priv, priv, f->extack);
3855 	case TC_BLOCK_UNBIND:
3856 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3857 		return 0;
3858 	default:
3859 		return -EOPNOTSUPP;
3860 	}
3861 }
3862 
3863 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3864 			   void *type_data)
3865 {
3866 	struct stmmac_priv *priv = netdev_priv(ndev);
3867 
3868 	switch (type) {
3869 	case TC_SETUP_BLOCK:
3870 		return stmmac_setup_tc_block(priv, type_data);
3871 	case TC_SETUP_QDISC_CBS:
3872 		return stmmac_tc_setup_cbs(priv, priv, type_data);
3873 	default:
3874 		return -EOPNOTSUPP;
3875 	}
3876 }
3877 
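/**
 *  stmmac_set_mac_address - set the device MAC address
 *  @ndev: network device pointer
 *  @addr: pointer to a sockaddr structure holding the new address
 *  Description: validate and store the new address in the netdev, then
 *  program it into the first unicast MAC address register of the HW.
 */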
3878 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3879 {
3880 	struct stmmac_priv *priv = netdev_priv(ndev);
3881 	int ret = 0;
3882 
3883 	ret = eth_mac_addr(ndev, addr);
3884 	if (ret)
3885 		return ret;
3886 
3887 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3888 
3889 	return ret;
3890 }
3891 
3892 #ifdef CONFIG_DEBUG_FS
3893 static struct dentry *stmmac_fs_dir;
3894 
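/* Dump a descriptor ring (basic or extended) to a seq_file: one line per
 * descriptor with its index, physical address and the four "des" words.
 */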
3895 static void sysfs_display_ring(void *head, int size, int extend_desc,
3896 			       struct seq_file *seq)
3897 {
3898 	int i;
3899 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3900 	struct dma_desc *p = (struct dma_desc *)head;
3901 
3902 	for (i = 0; i < size; i++) {
3903 		if (extend_desc) {
3904 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3905 				   i, (unsigned int)virt_to_phys(ep),
3906 				   le32_to_cpu(ep->basic.des0),
3907 				   le32_to_cpu(ep->basic.des1),
3908 				   le32_to_cpu(ep->basic.des2),
3909 				   le32_to_cpu(ep->basic.des3));
3910 			ep++;
3911 		} else {
3912 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3913 				   i, (unsigned int)virt_to_phys(p),
3914 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3915 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3916 			p++;
3917 		}
3918 		seq_printf(seq, "\n");
3919 	}
3920 }
3921 
3922 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3923 {
3924 	struct net_device *dev = seq->private;
3925 	struct stmmac_priv *priv = netdev_priv(dev);
3926 	u32 rx_count = priv->plat->rx_queues_to_use;
3927 	u32 tx_count = priv->plat->tx_queues_to_use;
3928 	u32 queue;
3929 
3930 	if ((dev->flags & IFF_UP) == 0)
3931 		return 0;
3932 
3933 	for (queue = 0; queue < rx_count; queue++) {
3934 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3935 
3936 		seq_printf(seq, "RX Queue %d:\n", queue);
3937 
3938 		if (priv->extend_desc) {
3939 			seq_printf(seq, "Extended descriptor ring:\n");
3940 			sysfs_display_ring((void *)rx_q->dma_erx,
3941 					   DMA_RX_SIZE, 1, seq);
3942 		} else {
3943 			seq_printf(seq, "Descriptor ring:\n");
3944 			sysfs_display_ring((void *)rx_q->dma_rx,
3945 					   DMA_RX_SIZE, 0, seq);
3946 		}
3947 	}
3948 
3949 	for (queue = 0; queue < tx_count; queue++) {
3950 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3951 
3952 		seq_printf(seq, "TX Queue %d:\n", queue);
3953 
3954 		if (priv->extend_desc) {
3955 			seq_printf(seq, "Extended descriptor ring:\n");
3956 			sysfs_display_ring((void *)tx_q->dma_etx,
3957 					   DMA_TX_SIZE, 1, seq);
3958 		} else {
3959 			seq_printf(seq, "Descriptor ring:\n");
3960 			sysfs_display_ring((void *)tx_q->dma_tx,
3961 					   DMA_TX_SIZE, 0, seq);
3962 		}
3963 	}
3964 
3965 	return 0;
3966 }
3967 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
3968 
3969 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3970 {
3971 	struct net_device *dev = seq->private;
3972 	struct stmmac_priv *priv = netdev_priv(dev);
3973 
3974 	if (!priv->hw_cap_support) {
3975 		seq_printf(seq, "DMA HW features not supported\n");
3976 		return 0;
3977 	}
3978 
3979 	seq_printf(seq, "==============================\n");
3980 	seq_printf(seq, "\tDMA HW features\n");
3981 	seq_printf(seq, "==============================\n");
3982 
3983 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3984 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3985 	seq_printf(seq, "\t1000 Mbps: %s\n",
3986 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3987 	seq_printf(seq, "\tHalf duplex: %s\n",
3988 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3989 	seq_printf(seq, "\tHash Filter: %s\n",
3990 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3991 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3992 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3993 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3994 		   (priv->dma_cap.pcs) ? "Y" : "N");
3995 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3996 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3997 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3998 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3999 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4000 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4001 	seq_printf(seq, "\tRMON module: %s\n",
4002 		   (priv->dma_cap.rmon) ? "Y" : "N");
4003 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4004 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4005 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4006 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
4007 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4008 		   (priv->dma_cap.eee) ? "Y" : "N");
4009 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4010 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4011 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4012 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4013 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4014 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4015 	} else {
4016 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4017 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4018 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4019 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4020 	}
4021 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4022 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4023 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4024 		   priv->dma_cap.number_rx_channel);
4025 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4026 		   priv->dma_cap.number_tx_channel);
4027 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4028 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4029 
4030 	return 0;
4031 }
4032 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4033 
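/* Per-device debugfs layout, rooted in the directory created by
 * stmmac_init() (typically /sys/kernel/debug/<STMMAC_RESOURCE_NAME>/):
 *
 *   <ifname>/descriptors_status - dump of the RX/TX descriptor rings
 *   <ifname>/dma_cap            - decoded DMA HW capability register
 */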
4034 static int stmmac_init_fs(struct net_device *dev)
4035 {
4036 	struct stmmac_priv *priv = netdev_priv(dev);
4037 
4038 	/* Create per netdev entries */
4039 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4040 
4041 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4042 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4043 
4044 		return -ENOMEM;
4045 	}
4046 
4047 	/* Entry to report DMA RX/TX rings */
4048 	priv->dbgfs_rings_status =
4049 		debugfs_create_file("descriptors_status", 0444,
4050 				    priv->dbgfs_dir, dev,
4051 				    &stmmac_rings_status_fops);
4052 
4053 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4054 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4055 		debugfs_remove_recursive(priv->dbgfs_dir);
4056 
4057 		return -ENOMEM;
4058 	}
4059 
4060 	/* Entry to report the DMA HW features */
4061 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4062 						  priv->dbgfs_dir,
4063 						  dev, &stmmac_dma_cap_fops);
4064 
4065 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4066 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4067 		debugfs_remove_recursive(priv->dbgfs_dir);
4068 
4069 		return -ENOMEM;
4070 	}
4071 
4072 	return 0;
4073 }
4074 
4075 static void stmmac_exit_fs(struct net_device *dev)
4076 {
4077 	struct stmmac_priv *priv = netdev_priv(dev);
4078 
4079 	debugfs_remove_recursive(priv->dbgfs_dir);
4080 }
4081 #endif /* CONFIG_DEBUG_FS */
4082 
4083 static const struct net_device_ops stmmac_netdev_ops = {
4084 	.ndo_open = stmmac_open,
4085 	.ndo_start_xmit = stmmac_xmit,
4086 	.ndo_stop = stmmac_release,
4087 	.ndo_change_mtu = stmmac_change_mtu,
4088 	.ndo_fix_features = stmmac_fix_features,
4089 	.ndo_set_features = stmmac_set_features,
4090 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4091 	.ndo_tx_timeout = stmmac_tx_timeout,
4092 	.ndo_do_ioctl = stmmac_ioctl,
4093 	.ndo_setup_tc = stmmac_setup_tc,
4094 #ifdef CONFIG_NET_POLL_CONTROLLER
4095 	.ndo_poll_controller = stmmac_poll_controller,
4096 #endif
4097 	.ndo_set_mac_address = stmmac_set_mac_address,
4098 };
4099 
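/**
 * stmmac_reset_subtask - restart the device from the service task
 * @priv: driver private structure
 * Description: if a reset was requested and the interface is not already
 * going down, close and re-open the netdev under the RTNL lock to bring
 * the HW back to a sane state.
 */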
4100 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4101 {
4102 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4103 		return;
4104 	if (test_bit(STMMAC_DOWN, &priv->state))
4105 		return;
4106 
4107 	netdev_err(priv->dev, "Reset adapter.\n");
4108 
4109 	rtnl_lock();
4110 	netif_trans_update(priv->dev);
4111 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4112 		usleep_range(1000, 2000);
4113 
4114 	set_bit(STMMAC_DOWN, &priv->state);
4115 	dev_close(priv->dev);
4116 	dev_open(priv->dev, NULL);
4117 	clear_bit(STMMAC_DOWN, &priv->state);
4118 	clear_bit(STMMAC_RESETING, &priv->state);
4119 	rtnl_unlock();
4120 }
4121 
4122 static void stmmac_service_task(struct work_struct *work)
4123 {
4124 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4125 			service_task);
4126 
4127 	stmmac_reset_subtask(priv);
4128 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4129 }
4130 
4131 /**
4132  *  stmmac_hw_init - Init the MAC device
4133  *  @priv: driver private structure
 *  Description: this function configures the MAC device according to
 *  platform parameters and the HW capability register. It prepares the
 *  driver to use either ring or chain mode and to set up either enhanced or
 *  normal descriptors.
4138  */
4139 static int stmmac_hw_init(struct stmmac_priv *priv)
4140 {
4141 	int ret;
4142 
	/* dwmac-sun8i only works in chain mode */
4144 	if (priv->plat->has_sun8i)
4145 		chain_mode = 1;
4146 	priv->chain_mode = chain_mode;
4147 
4148 	/* Initialize HW Interface */
4149 	ret = stmmac_hwif_init(priv);
4150 	if (ret)
4151 		return ret;
4152 
	/* Get the HW capability (on GMAC cores newer than 3.50a) */
4154 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4155 	if (priv->hw_cap_support) {
4156 		dev_info(priv->device, "DMA HW capability register supported\n");
4157 
		/* We can override some gmac/dma configuration fields
		 * (e.g. enh_desc, tx_coe) that are passed through the
		 * platform with the values from the HW capability
		 * register (if supported).
		 */
4163 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4164 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4165 		priv->hw->pmt = priv->plat->pmt;
4166 
4167 		/* TXCOE doesn't work in thresh DMA mode */
4168 		if (priv->plat->force_thresh_dma_mode)
4169 			priv->plat->tx_coe = 0;
4170 		else
4171 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4172 
4173 		/* In case of GMAC4 rx_coe is from HW cap register. */
4174 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4175 
4176 		if (priv->dma_cap.rx_coe_type2)
4177 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4178 		else if (priv->dma_cap.rx_coe_type1)
4179 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4180 
4181 	} else {
4182 		dev_info(priv->device, "No HW DMA feature register supported\n");
4183 	}
4184 
4185 	if (priv->plat->rx_coe) {
4186 		priv->hw->rx_csum = priv->plat->rx_coe;
4187 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4188 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4189 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4190 	}
4191 	if (priv->plat->tx_coe)
4192 		dev_info(priv->device, "TX Checksum insertion supported\n");
4193 
4194 	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On LAN supported\n");
4196 		device_set_wakeup_capable(priv->device, 1);
4197 	}
4198 
4199 	if (priv->dma_cap.tsoen)
4200 		dev_info(priv->device, "TSO supported\n");
4201 
4202 	/* Run HW quirks, if any */
4203 	if (priv->hwif_quirks) {
4204 		ret = priv->hwif_quirks(priv);
4205 		if (ret)
4206 			return ret;
4207 	}
4208 
	/* Rx Watchdog is available in the COREs newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by passing the
	 * riwt_off field from the platform.
	 */
4214 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4215 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4216 		priv->use_riwt = 1;
4217 		dev_info(priv->device,
4218 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4219 	}
4220 
4221 	return 0;
4222 }
4223 
4224 /**
4225  * stmmac_dvr_probe
4226  * @device: device pointer
4227  * @plat_dat: platform data pointer
4228  * @res: stmmac resource pointer
 * Description: this is the main probe function; it calls alloc_etherdev
 * and allocates the private structure.
 * Return:
 * 0 on success, a negative errno otherwise.
4233  */
4234 int stmmac_dvr_probe(struct device *device,
4235 		     struct plat_stmmacenet_data *plat_dat,
4236 		     struct stmmac_resources *res)
4237 {
4238 	struct net_device *ndev = NULL;
4239 	struct stmmac_priv *priv;
4240 	u32 queue, maxq;
4241 	int ret = 0;
4242 
4243 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4244 				  MTL_MAX_TX_QUEUES,
4245 				  MTL_MAX_RX_QUEUES);
4246 	if (!ndev)
4247 		return -ENOMEM;
4248 
4249 	SET_NETDEV_DEV(ndev, device);
4250 
4251 	priv = netdev_priv(ndev);
4252 	priv->device = device;
4253 	priv->dev = ndev;
4254 
4255 	stmmac_set_ethtool_ops(ndev);
4256 	priv->pause = pause;
4257 	priv->plat = plat_dat;
4258 	priv->ioaddr = res->addr;
4259 	priv->dev->base_addr = (unsigned long)res->addr;
4260 
4261 	priv->dev->irq = res->irq;
4262 	priv->wol_irq = res->wol_irq;
4263 	priv->lpi_irq = res->lpi_irq;
4264 
4265 	if (res->mac)
4266 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4267 
4268 	dev_set_drvdata(device, priv->dev);
4269 
4270 	/* Verify driver arguments */
4271 	stmmac_verify_args();
4272 
4273 	/* Allocate workqueue */
4274 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4275 	if (!priv->wq) {
4276 		dev_err(priv->device, "failed to create workqueue\n");
4277 		ret = -ENOMEM;
4278 		goto error_wq;
4279 	}
4280 
4281 	INIT_WORK(&priv->service_task, stmmac_service_task);
4282 
4283 	/* Override with kernel parameters if supplied XXX CRS XXX
4284 	 * this needs to have multiple instances
4285 	 */
4286 	if ((phyaddr >= 0) && (phyaddr <= 31))
4287 		priv->plat->phy_addr = phyaddr;
4288 
4289 	if (priv->plat->stmmac_rst) {
4290 		ret = reset_control_assert(priv->plat->stmmac_rst);
4291 		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only a reset callback instead
		 * of the assert + deassert callback pair.
		 */
4295 		if (ret == -ENOTSUPP)
4296 			reset_control_reset(priv->plat->stmmac_rst);
4297 	}
4298 
4299 	/* Init MAC and get the capabilities */
4300 	ret = stmmac_hw_init(priv);
4301 	if (ret)
4302 		goto error_hw_init;
4303 
4304 	stmmac_check_ether_addr(priv);
4305 
4306 	/* Configure real RX and TX queues */
4307 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4308 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4309 
4310 	ndev->netdev_ops = &stmmac_netdev_ops;
4311 
4312 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4313 			    NETIF_F_RXCSUM;
4314 
4315 	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;
4319 
4320 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4321 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4322 		priv->tso = true;
4323 		dev_info(priv->device, "TSO feature enabled\n");
4324 	}
4325 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4326 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4327 #ifdef STMMAC_VLAN_TAG_USED
4328 	/* Both mac100 and gmac support receive VLAN tag detection */
4329 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4330 #endif
4331 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4332 
4333 	/* MTU range: 46 - hw-specific max */
4334 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4335 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4336 		ndev->max_mtu = JUMBO_LEN;
4337 	else if (priv->plat->has_xgmac)
4338 		ndev->max_mtu = XGMAC_JUMBO_LEN;
4339 	else
4340 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
	 */
4344 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4345 	    (priv->plat->maxmtu >= ndev->min_mtu))
4346 		ndev->max_mtu = priv->plat->maxmtu;
4347 	else if (priv->plat->maxmtu < ndev->min_mtu)
4348 		dev_warn(priv->device,
4349 			 "%s: warning: maxmtu having invalid value (%d)\n",
4350 			 __func__, priv->plat->maxmtu);
4351 
4352 	if (flow_ctrl)
4353 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4354 
4355 	/* Setup channels NAPI */
4356 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4357 
4358 	for (queue = 0; queue < maxq; queue++) {
4359 		struct stmmac_channel *ch = &priv->channel[queue];
4360 
4361 		ch->priv_data = priv;
4362 		ch->index = queue;
4363 
4364 		if (queue < priv->plat->rx_queues_to_use) {
4365 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4366 				       NAPI_POLL_WEIGHT);
4367 		}
4368 		if (queue < priv->plat->tx_queues_to_use) {
4369 			netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
4370 				       NAPI_POLL_WEIGHT);
4371 		}
4372 	}
4373 
4374 	mutex_init(&priv->lock);
4375 
	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual csr clock input.
	 */
4382 	if (!priv->plat->clk_csr)
4383 		stmmac_clk_csr_set(priv);
4384 	else
4385 		priv->clk_csr = priv->plat->clk_csr;
4386 
4387 	stmmac_check_pcs_mode(priv);
4388 
4389 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4390 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4391 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4392 		/* MDIO bus Registration */
4393 		ret = stmmac_mdio_register(ndev);
4394 		if (ret < 0) {
4395 			dev_err(priv->device,
4396 				"%s: MDIO bus (id: %d) registration failed",
4397 				__func__, priv->plat->bus_id);
4398 			goto error_mdio_register;
4399 		}
4400 	}
4401 
4402 	ret = register_netdev(ndev);
4403 	if (ret) {
4404 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4405 			__func__, ret);
4406 		goto error_netdev_register;
4407 	}
4408 
4409 #ifdef CONFIG_DEBUG_FS
4410 	ret = stmmac_init_fs(ndev);
4411 	if (ret < 0)
4412 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4413 			    __func__);
4414 #endif
4415 
4416 	return ret;
4417 
4418 error_netdev_register:
4419 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4420 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4421 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4422 		stmmac_mdio_unregister(ndev);
4423 error_mdio_register:
4424 	for (queue = 0; queue < maxq; queue++) {
4425 		struct stmmac_channel *ch = &priv->channel[queue];
4426 
4427 		if (queue < priv->plat->rx_queues_to_use)
4428 			netif_napi_del(&ch->rx_napi);
4429 		if (queue < priv->plat->tx_queues_to_use)
4430 			netif_napi_del(&ch->tx_napi);
4431 	}
4432 error_hw_init:
4433 	destroy_workqueue(priv->wq);
4434 error_wq:
4435 	free_netdev(ndev);
4436 
4437 	return ret;
4438 }
4439 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4440 
4441 /**
4442  * stmmac_dvr_remove
4443  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
4446  */
4447 int stmmac_dvr_remove(struct device *dev)
4448 {
4449 	struct net_device *ndev = dev_get_drvdata(dev);
4450 	struct stmmac_priv *priv = netdev_priv(ndev);
4451 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4453 
4454 #ifdef CONFIG_DEBUG_FS
4455 	stmmac_exit_fs(ndev);
4456 #endif
4457 	stmmac_stop_all_dma(priv);
4458 
4459 	stmmac_mac_set(priv, priv->ioaddr, false);
4460 	netif_carrier_off(ndev);
4461 	unregister_netdev(ndev);
4462 	if (priv->plat->stmmac_rst)
4463 		reset_control_assert(priv->plat->stmmac_rst);
4464 	clk_disable_unprepare(priv->plat->pclk);
4465 	clk_disable_unprepare(priv->plat->stmmac_clk);
4466 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4467 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4468 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4469 		stmmac_mdio_unregister(ndev);
4470 	destroy_workqueue(priv->wq);
4471 	mutex_destroy(&priv->lock);
4472 	free_netdev(ndev);
4473 
4474 	return 0;
4475 }
4476 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4477 
4478 /**
4479  * stmmac_suspend - suspend callback
4480  * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queues, program the PMT register
 * (for WoL) and release the driver resources.
4484  */
4485 int stmmac_suspend(struct device *dev)
4486 {
4487 	struct net_device *ndev = dev_get_drvdata(dev);
4488 	struct stmmac_priv *priv = netdev_priv(ndev);
4489 
4490 	if (!ndev || !netif_running(ndev))
4491 		return 0;
4492 
4493 	if (ndev->phydev)
4494 		phy_stop(ndev->phydev);
4495 
4496 	mutex_lock(&priv->lock);
4497 
4498 	netif_device_detach(ndev);
4499 	stmmac_stop_all_queues(priv);
4500 
4501 	stmmac_disable_all_queues(priv);
4502 
4503 	/* Stop TX/RX DMA */
4504 	stmmac_stop_all_dma(priv);
4505 
4506 	/* Enable Power down mode by programming the PMT regs */
4507 	if (device_may_wakeup(priv->device)) {
4508 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4509 		priv->irq_wake = 1;
4510 	} else {
4511 		stmmac_mac_set(priv, priv->ioaddr, false);
4512 		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clocks since PMT wake-up is not used */
4514 		clk_disable(priv->plat->pclk);
4515 		clk_disable(priv->plat->stmmac_clk);
4516 	}
4517 	mutex_unlock(&priv->lock);
4518 
4519 	priv->oldlink = false;
4520 	priv->speed = SPEED_UNKNOWN;
4521 	priv->oldduplex = DUPLEX_UNKNOWN;
4522 	return 0;
4523 }
4524 EXPORT_SYMBOL_GPL(stmmac_suspend);
4525 
4526 /**
4527  * stmmac_reset_queues_param - reset queue parameters
4528  * @dev: device pointer
4529  */
4530 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4531 {
4532 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4533 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4534 	u32 queue;
4535 
4536 	for (queue = 0; queue < rx_cnt; queue++) {
4537 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4538 
4539 		rx_q->cur_rx = 0;
4540 		rx_q->dirty_rx = 0;
4541 	}
4542 
4543 	for (queue = 0; queue < tx_cnt; queue++) {
4544 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4545 
4546 		tx_q->cur_tx = 0;
4547 		tx_q->dirty_tx = 0;
4548 		tx_q->mss = 0;
4549 	}
4550 }
4551 
4552 /**
4553  * stmmac_resume - resume callback
4554  * @dev: device pointer
 * Description: when resuming, this function is invoked to set up the DMA
 * and CORE in a usable state.
4557  */
4558 int stmmac_resume(struct device *dev)
4559 {
4560 	struct net_device *ndev = dev_get_drvdata(dev);
4561 	struct stmmac_priv *priv = netdev_priv(ndev);
4562 
4563 	if (!netif_running(ndev))
4564 		return 0;
4565 
	/* The Power Down bit, in the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
	 */
4572 	if (device_may_wakeup(priv->device)) {
4573 		mutex_lock(&priv->lock);
4574 		stmmac_pmt(priv, priv->hw, 0);
4575 		mutex_unlock(&priv->lock);
4576 		priv->irq_wake = 0;
4577 	} else {
4578 		pinctrl_pm_select_default_state(priv->device);
4579 		/* enable the clk previously disabled */
4580 		clk_enable(priv->plat->stmmac_clk);
4581 		clk_enable(priv->plat->pclk);
4582 		/* reset the phy so that it's ready */
4583 		if (priv->mii)
4584 			stmmac_mdio_reset(priv->mii);
4585 	}
4586 
4587 	netif_device_attach(ndev);
4588 
4589 	mutex_lock(&priv->lock);
4590 
4591 	stmmac_reset_queues_param(priv);
4592 
4593 	stmmac_clear_descriptors(priv);
4594 
4595 	stmmac_hw_setup(ndev, false);
4596 	stmmac_init_tx_coalesce(priv);
4597 	stmmac_set_rx_mode(ndev);
4598 
4599 	stmmac_enable_all_queues(priv);
4600 
4601 	stmmac_start_all_queues(priv);
4602 
4603 	mutex_unlock(&priv->lock);
4604 
4605 	if (ndev->phydev)
4606 		phy_start(ndev->phydev);
4607 
4608 	return 0;
4609 }
4610 EXPORT_SYMBOL_GPL(stmmac_resume);
4611 
4612 #ifndef MODULE
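/* Built-in only: parse the "stmmaceth=" boot command line, a comma
 * separated list of "option:value" pairs, e.g.:
 *
 *	stmmaceth=debug:16,buf_sz:4096,watchdog:10000
 *
 * Each recognized option overrides the corresponding module parameter
 * before the driver is probed.
 */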
4613 static int __init stmmac_cmdline_opt(char *str)
4614 {
4615 	char *opt;
4616 
4617 	if (!str || !*str)
4618 		return -EINVAL;
4619 	while ((opt = strsep(&str, ",")) != NULL) {
4620 		if (!strncmp(opt, "debug:", 6)) {
4621 			if (kstrtoint(opt + 6, 0, &debug))
4622 				goto err;
4623 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4624 			if (kstrtoint(opt + 8, 0, &phyaddr))
4625 				goto err;
4626 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4627 			if (kstrtoint(opt + 7, 0, &buf_sz))
4628 				goto err;
4629 		} else if (!strncmp(opt, "tc:", 3)) {
4630 			if (kstrtoint(opt + 3, 0, &tc))
4631 				goto err;
4632 		} else if (!strncmp(opt, "watchdog:", 9)) {
4633 			if (kstrtoint(opt + 9, 0, &watchdog))
4634 				goto err;
4635 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4636 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4637 				goto err;
4638 		} else if (!strncmp(opt, "pause:", 6)) {
4639 			if (kstrtoint(opt + 6, 0, &pause))
4640 				goto err;
4641 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4642 			if (kstrtoint(opt + 10, 0, &eee_timer))
4643 				goto err;
4644 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4645 			if (kstrtoint(opt + 11, 0, &chain_mode))
4646 				goto err;
4647 		}
4648 	}
4649 	return 0;
4650 
4651 err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4653 	return -EINVAL;
4654 }
4655 
4656 __setup("stmmaceth=", stmmac_cmdline_opt);
4657 #endif /* MODULE */
4658 
4659 static int __init stmmac_init(void)
4660 {
4661 #ifdef CONFIG_DEBUG_FS
4662 	/* Create debugfs main directory if it doesn't exist yet */
4663 	if (!stmmac_fs_dir) {
4664 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4665 
4666 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4667 			pr_err("ERROR %s, debugfs create directory failed\n",
4668 			       STMMAC_RESOURCE_NAME);
4669 
4670 			return -ENOMEM;
4671 		}
4672 	}
4673 #endif
4674 
4675 	return 0;
4676 }
4677 
4678 static void __exit stmmac_exit(void)
4679 {
4680 #ifdef CONFIG_DEBUG_FS
4681 	debugfs_remove_recursive(stmmac_fs_dir);
4682 #endif
4683 }
4684 
4685 module_init(stmmac_init)
4686 module_exit(stmmac_exit)
4687 
4688 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4689 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4690 MODULE_LICENSE("GPL");
4691