1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56 
57 #define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
59 
60 /* Module parameters */
61 #define TX_TIMEO	5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65 
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69 
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73 
74 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
76 
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80 
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84 
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89 
90 #define	DEFAULT_BUFSIZE	1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94 
95 #define	STMMAC_RX_COPYBREAK	256
96 
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
99 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100 
101 #define STMMAC_DEFAULT_LPI_TIMER	1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106 
/* By default the driver uses ring mode to manage tx and rx descriptors,
 * but the user can force use of chain mode instead of ring mode.
 */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113 
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115 
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120 
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122 
123 /**
124  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case
 * of errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130 	if (unlikely(watchdog < 0))
131 		watchdog = TX_TIMEO;
132 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133 		buf_sz = DEFAULT_BUFSIZE;
134 	if (unlikely(flow_ctrl > 1))
135 		flow_ctrl = FLOW_AUTO;
136 	else if (likely(flow_ctrl < 0))
137 		flow_ctrl = FLOW_OFF;
138 	if (unlikely((pause < 0) || (pause > 0xffff)))
139 		pause = PAUSE_TIME;
140 	if (eee_timer < 0)
141 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143 
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153 	u32 queue;
154 
155 	for (queue = 0; queue < maxq; queue++) {
156 		struct stmmac_channel *ch = &priv->channel[queue];
157 
158 		if (queue < rx_queues_cnt)
159 			napi_disable(&ch->rx_napi);
160 		if (queue < tx_queues_cnt)
161 			napi_disable(&ch->tx_napi);
162 	}
163 }
164 
165 /**
166  * stmmac_enable_all_queues - Enable all queues
167  * @priv: driver private structure
168  */
169 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
170 {
171 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
172 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
173 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
174 	u32 queue;
175 
176 	for (queue = 0; queue < maxq; queue++) {
177 		struct stmmac_channel *ch = &priv->channel[queue];
178 
179 		if (queue < rx_queues_cnt)
180 			napi_enable(&ch->rx_napi);
181 		if (queue < tx_queues_cnt)
182 			napi_enable(&ch->tx_napi);
183 	}
184 }
185 
186 /**
187  * stmmac_stop_all_queues - Stop all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_start_all_queues - Start all queues
201  * @priv: driver private structure
202  */
203 static void stmmac_start_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206 	u32 queue;
207 
208 	for (queue = 0; queue < tx_queues_cnt; queue++)
209 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
210 }
211 
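/* Schedule the service task, unless the interface is going down or a run is
 * already pending (STMMAC_SERVICE_SCHED prevents double-queueing).
 */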
212 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
213 {
214 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
215 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
216 		queue_work(priv->wq, &priv->service_task);
217 }
218 
219 static void stmmac_global_err(struct stmmac_priv *priv)
220 {
221 	netif_carrier_off(priv->dev);
222 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
223 	stmmac_service_event_schedule(priv);
224 }
225 
226 /**
227  * stmmac_clk_csr_set - dynamically set the MDC clock
228  * @priv: driver private structure
229  * Description: this is to dynamically set the MDC clock according to the csr
230  * clock input.
231  * Note:
232  *	If a specific clk_csr value is passed from the platform
233  *	this means that the CSR Clock Range selection cannot be
234  *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise, the driver will try to set the MDC
236  *	clock dynamically according to the actual clock input.
237  */
238 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
239 {
240 	u32 clk_rate;
241 
242 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
243 
	/* The platform-provided default clk_csr is assumed valid in all
	 * cases except the ones mentioned below.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider, because the frequency
	 * of clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
251 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
252 		if (clk_rate < CSR_F_35M)
253 			priv->clk_csr = STMMAC_CSR_20_35M;
254 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
255 			priv->clk_csr = STMMAC_CSR_35_60M;
256 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
257 			priv->clk_csr = STMMAC_CSR_60_100M;
258 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
259 			priv->clk_csr = STMMAC_CSR_100_150M;
260 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
261 			priv->clk_csr = STMMAC_CSR_150_250M;
262 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
263 			priv->clk_csr = STMMAC_CSR_250_300M;
264 	}
265 
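	/* The Allwinner sun8i and XGMAC cores use their own CSR encodings,
	 * selected directly from the clock rate below.
	 */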
266 	if (priv->plat->has_sun8i) {
267 		if (clk_rate > 160000000)
268 			priv->clk_csr = 0x03;
269 		else if (clk_rate > 80000000)
270 			priv->clk_csr = 0x02;
271 		else if (clk_rate > 40000000)
272 			priv->clk_csr = 0x01;
273 		else
274 			priv->clk_csr = 0;
275 	}
276 
277 	if (priv->plat->has_xgmac) {
278 		if (clk_rate > 400000000)
279 			priv->clk_csr = 0x5;
280 		else if (clk_rate > 350000000)
281 			priv->clk_csr = 0x4;
282 		else if (clk_rate > 300000000)
283 			priv->clk_csr = 0x3;
284 		else if (clk_rate > 250000000)
285 			priv->clk_csr = 0x2;
286 		else if (clk_rate > 150000000)
287 			priv->clk_csr = 0x1;
288 		else
289 			priv->clk_csr = 0x0;
290 	}
291 }
292 
293 static void print_pkt(unsigned char *buf, int len)
294 {
295 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
296 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
297 }
298 
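/**
 * stmmac_tx_avail - number of free descriptors in a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: dirty_tx chases cur_tx around the ring; one slot is kept
 * unused so that a completely full ring is never mistaken for an empty one.
 */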
299 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
300 {
301 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
302 	u32 avail;
303 
304 	if (tx_q->dirty_tx > tx_q->cur_tx)
305 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
306 	else
307 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
308 
309 	return avail;
310 }
311 
312 /**
313  * stmmac_rx_dirty - Get RX queue dirty
314  * @priv: driver private structure
315  * @queue: RX queue index
316  */
317 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
318 {
319 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
320 	u32 dirty;
321 
322 	if (rx_q->dirty_rx <= rx_q->cur_rx)
323 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
324 	else
325 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
326 
327 	return dirty;
328 }
329 
330 /**
331  * stmmac_hw_fix_mac_speed - callback for speed selection
332  * @priv: driver private structure
333  * Description: on some platforms (e.g. ST), some HW system configuration
334  * registers have to be set according to the link speed negotiated.
335  */
336 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
337 {
338 	struct net_device *ndev = priv->dev;
339 	struct phy_device *phydev = ndev->phydev;
340 
341 	if (likely(priv->plat->fix_mac_speed))
342 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
343 }
344 
345 /**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function checks that all TX queues are idle and, if so,
 * enters LPI mode when EEE is in use.
350  */
351 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
352 {
353 	u32 tx_cnt = priv->plat->tx_queues_to_use;
354 	u32 queue;
355 
356 	/* check if all TX queues have the work finished */
357 	for (queue = 0; queue < tx_cnt; queue++) {
358 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
359 
360 		if (tx_q->dirty_tx != tx_q->cur_tx)
361 			return; /* still unfinished work */
362 	}
363 
364 	/* Check and enter in LPI mode */
365 	if (!priv->tx_path_in_lpi_mode)
366 		stmmac_set_eee_mode(priv, priv->hw,
367 				priv->plat->en_tx_lpi_clockgating);
368 }
369 
370 /**
371  * stmmac_disable_eee_mode - disable and exit from LPI mode
372  * @priv: driver private structure
 * Description: this function exits and disables EEE when the TX path is in
 * the LPI state. It is called from the xmit path.
375  */
376 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
377 {
378 	stmmac_reset_eee_mode(priv, priv->hw);
379 	del_timer_sync(&priv->eee_ctrl_timer);
380 	priv->tx_path_in_lpi_mode = false;
381 }
382 
383 /**
384  * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: pointer to the timer_list embedded in the driver private structure
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC transmitter can be moved to the LPI state.
389  */
390 static void stmmac_eee_ctrl_timer(struct timer_list *t)
391 {
392 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
393 
394 	stmmac_enable_eee_mode(priv);
395 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
396 }
397 
398 /**
399  * stmmac_eee_init - init EEE
400  * @priv: driver private structure
401  * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
405  */
406 bool stmmac_eee_init(struct stmmac_priv *priv)
407 {
408 	struct net_device *ndev = priv->dev;
409 	int interface = priv->plat->interface;
410 	bool ret = false;
411 
412 	if ((interface != PHY_INTERFACE_MODE_MII) &&
413 	    (interface != PHY_INTERFACE_MODE_GMII) &&
414 	    !phy_interface_mode_is_rgmii(interface))
415 		goto out;
416 
	/* When using the PCS we cannot access the PHY registers at this
	 * stage, so we do not support extra features like EEE.
	 */
420 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
421 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
422 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
423 		goto out;
424 
425 	/* MAC core supports the EEE feature. */
426 	if (priv->dma_cap.eee) {
427 		int tx_lpi_timer = priv->tx_lpi_timer;
428 
429 		/* Check if the PHY supports EEE */
430 		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE can no longer be
			 * supported at run-time (for example because the
			 * link partner capabilities have changed).
			 * In that case the driver disables its own timers.
			 */
436 			mutex_lock(&priv->lock);
437 			if (priv->eee_active) {
438 				netdev_dbg(priv->dev, "disable EEE\n");
439 				del_timer_sync(&priv->eee_ctrl_timer);
440 				stmmac_set_eee_timer(priv, priv->hw, 0,
441 						tx_lpi_timer);
442 			}
443 			priv->eee_active = 0;
444 			mutex_unlock(&priv->lock);
445 			goto out;
446 		}
447 		/* Activate the EEE and start timers */
448 		mutex_lock(&priv->lock);
449 		if (!priv->eee_active) {
450 			priv->eee_active = 1;
451 			timer_setup(&priv->eee_ctrl_timer,
452 				    stmmac_eee_ctrl_timer, 0);
453 			mod_timer(&priv->eee_ctrl_timer,
454 				  STMMAC_LPI_T(eee_timer));
455 
456 			stmmac_set_eee_timer(priv, priv->hw,
457 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
458 		}
459 		/* Set HW EEE according to the speed */
460 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
461 
462 		ret = true;
463 		mutex_unlock(&priv->lock);
464 
465 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
466 	}
467 out:
468 	return ret;
469 }
470 
471 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
472  * @priv: driver private structure
473  * @p : descriptor pointer
474  * @skb : the socket buffer
475  * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
478  */
479 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
480 				   struct dma_desc *p, struct sk_buff *skb)
481 {
482 	struct skb_shared_hwtstamps shhwtstamp;
483 	u64 ns = 0;
484 
485 	if (!priv->hwts_tx_en)
486 		return;
487 
488 	/* exit if skb doesn't support hw tstamp */
489 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
490 		return;
491 
492 	/* check tx tstamp status */
493 	if (stmmac_get_tx_timestamp_status(priv, p)) {
494 		/* get the valid tstamp */
495 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
496 
497 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
498 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
499 
500 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
501 		/* pass tstamp to stack */
502 		skb_tstamp_tx(skb, &shhwtstamp);
503 	}
506 }
507 
508 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
509  * @priv: driver private structure
510  * @p : descriptor pointer
511  * @np : next descriptor pointer
512  * @skb : the socket buffer
513  * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
516  */
517 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
518 				   struct dma_desc *np, struct sk_buff *skb)
519 {
520 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
521 	struct dma_desc *desc = p;
522 	u64 ns = 0;
523 
524 	if (!priv->hwts_rx_en)
525 		return;
526 	/* For GMAC4, the valid timestamp is from CTX next desc. */
527 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
528 		desc = np;
529 
530 	/* Check if timestamp is available */
531 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
532 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
533 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
534 		shhwtstamp = skb_hwtstamps(skb);
535 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
536 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
537 	} else  {
538 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
539 	}
540 }
541 
542 /**
543  *  stmmac_hwtstamp_set - control hardware timestamping.
544  *  @dev: device pointer.
 *  @ifr: An IOCTL-specific structure that can contain a pointer to
546  *  a proprietary structure used to pass information to the driver.
547  *  Description:
548  *  This function configures the MAC to enable/disable both outgoing(TX)
549  *  and incoming(RX) packets time stamping based on user input.
550  *  Return Value:
551  *  0 on success and an appropriate -ve integer on failure.
552  */
553 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
554 {
555 	struct stmmac_priv *priv = netdev_priv(dev);
556 	struct hwtstamp_config config;
557 	struct timespec64 now;
558 	u64 temp = 0;
559 	u32 ptp_v2 = 0;
560 	u32 tstamp_all = 0;
561 	u32 ptp_over_ipv4_udp = 0;
562 	u32 ptp_over_ipv6_udp = 0;
563 	u32 ptp_over_ethernet = 0;
564 	u32 snap_type_sel = 0;
565 	u32 ts_master_en = 0;
566 	u32 ts_event_en = 0;
567 	u32 sec_inc = 0;
568 	u32 value = 0;
569 	bool xmac;
570 
571 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
572 
573 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
574 		netdev_alert(priv->dev, "No support for HW time stamping\n");
575 		priv->hwts_tx_en = 0;
576 		priv->hwts_rx_en = 0;
577 
578 		return -EOPNOTSUPP;
579 	}
580 
581 	if (copy_from_user(&config, ifr->ifr_data,
582 			   sizeof(config)))
583 		return -EFAULT;
584 
585 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
586 		   __func__, config.flags, config.tx_type, config.rx_filter);
587 
588 	/* reserved for future extensions */
589 	if (config.flags)
590 		return -EINVAL;
591 
592 	if (config.tx_type != HWTSTAMP_TX_OFF &&
593 	    config.tx_type != HWTSTAMP_TX_ON)
594 		return -ERANGE;
595 
596 	if (priv->adv_ts) {
597 		switch (config.rx_filter) {
598 		case HWTSTAMP_FILTER_NONE:
			/* do not time stamp any incoming packet */
600 			config.rx_filter = HWTSTAMP_FILTER_NONE;
601 			break;
602 
603 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
604 			/* PTP v1, UDP, any kind of event packet */
605 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
606 			/* 'xmac' hardware can support Sync, Pdelay_Req and
607 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
608 			 * This leaves Delay_Req timestamps out.
609 			 * Enable all events *and* general purpose message
610 			 * timestamping
611 			 */
612 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
613 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
614 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
615 			break;
616 
617 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
618 			/* PTP v1, UDP, Sync packet */
619 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
620 			/* take time stamp for SYNC messages only */
621 			ts_event_en = PTP_TCR_TSEVNTENA;
622 
623 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625 			break;
626 
627 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
628 			/* PTP v1, UDP, Delay_req packet */
629 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
630 			/* take time stamp for Delay_Req messages only */
631 			ts_master_en = PTP_TCR_TSMSTRENA;
632 			ts_event_en = PTP_TCR_TSEVNTENA;
633 
634 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636 			break;
637 
638 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
639 			/* PTP v2, UDP, any kind of event packet */
640 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
641 			ptp_v2 = PTP_TCR_TSVER2ENA;
642 			/* take time stamp for all event messages */
643 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
644 
645 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
646 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
647 			break;
648 
649 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
650 			/* PTP v2, UDP, Sync packet */
651 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
652 			ptp_v2 = PTP_TCR_TSVER2ENA;
653 			/* take time stamp for SYNC messages only */
654 			ts_event_en = PTP_TCR_TSEVNTENA;
655 
656 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
657 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
658 			break;
659 
660 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
661 			/* PTP v2, UDP, Delay_req packet */
662 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
663 			ptp_v2 = PTP_TCR_TSVER2ENA;
664 			/* take time stamp for Delay_Req messages only */
665 			ts_master_en = PTP_TCR_TSMSTRENA;
666 			ts_event_en = PTP_TCR_TSEVNTENA;
667 
668 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
669 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
670 			break;
671 
672 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
673 			/* PTP v2/802.AS1 any layer, any kind of event packet */
674 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
675 			ptp_v2 = PTP_TCR_TSVER2ENA;
676 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			ptp_over_ethernet = PTP_TCR_TSIPENA;
680 			break;
681 
682 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
683 			/* PTP v2/802.AS1, any layer, Sync packet */
684 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
685 			ptp_v2 = PTP_TCR_TSVER2ENA;
686 			/* take time stamp for SYNC messages only */
687 			ts_event_en = PTP_TCR_TSEVNTENA;
688 
689 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
690 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
691 			ptp_over_ethernet = PTP_TCR_TSIPENA;
692 			break;
693 
694 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
695 			/* PTP v2/802.AS1, any layer, Delay_req packet */
696 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
697 			ptp_v2 = PTP_TCR_TSVER2ENA;
698 			/* take time stamp for Delay_Req messages only */
699 			ts_master_en = PTP_TCR_TSMSTRENA;
700 			ts_event_en = PTP_TCR_TSEVNTENA;
701 
702 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
703 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
704 			ptp_over_ethernet = PTP_TCR_TSIPENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_NTP_ALL:
708 		case HWTSTAMP_FILTER_ALL:
709 			/* time stamp any incoming packet */
710 			config.rx_filter = HWTSTAMP_FILTER_ALL;
711 			tstamp_all = PTP_TCR_TSENALL;
712 			break;
713 
714 		default:
715 			return -ERANGE;
716 		}
717 	} else {
718 		switch (config.rx_filter) {
719 		case HWTSTAMP_FILTER_NONE:
720 			config.rx_filter = HWTSTAMP_FILTER_NONE;
721 			break;
722 		default:
723 			/* PTP v1, UDP, any kind of event packet */
724 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
725 			break;
726 		}
727 	}
728 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
729 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
730 
	if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	} else {
734 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
735 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
736 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
737 			 ts_master_en | snap_type_sel);
738 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
739 
740 		/* program Sub Second Increment reg */
741 		stmmac_config_sub_second_increment(priv,
742 				priv->ptpaddr, priv->plat->clk_ptp_rate,
743 				xmac, &sec_inc);
744 		temp = div_u64(1000000000ULL, sec_inc);
745 
746 		/* Store sub second increment and flags for later use */
747 		priv->sub_second_inc = sec_inc;
748 		priv->systime_flags = value;
749 
750 		/* calculate default added value:
751 		 * formula is :
752 		 * addend = (2^32)/freq_div_ratio;
753 		 * where, freq_div_ratio = 1e9ns/sec_inc
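		 * The addend is accumulated on every clk_ptp cycle; each
		 * 32-bit overflow advances the sub-second counter by sec_inc,
		 * so system time progresses at 1e9 ns per second.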
754 		 */
755 		temp = (u64)(temp << 32);
756 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
757 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
758 
759 		/* initialize system time */
760 		ktime_get_real_ts64(&now);
761 
762 		/* lower 32 bits of tv_sec are safe until y2106 */
763 		stmmac_init_systime(priv, priv->ptpaddr,
764 				(u32)now.tv_sec, now.tv_nsec);
765 	}
766 
767 	memcpy(&priv->tstamp_config, &config, sizeof(config));
768 
769 	return copy_to_user(ifr->ifr_data, &config,
770 			    sizeof(config)) ? -EFAULT : 0;
771 }
772 
773 /**
774  *  stmmac_hwtstamp_get - read hardware timestamping.
775  *  @dev: device pointer.
 *  @ifr: An IOCTL-specific structure that can contain a pointer to
777  *  a proprietary structure used to pass information to the driver.
778  *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
781  */
782 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
783 {
784 	struct stmmac_priv *priv = netdev_priv(dev);
785 	struct hwtstamp_config *config = &priv->tstamp_config;
786 
787 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
788 		return -EOPNOTSUPP;
789 
790 	return copy_to_user(ifr->ifr_data, config,
791 			    sizeof(*config)) ? -EFAULT : 0;
792 }
793 
794 /**
795  * stmmac_init_ptp - init PTP
796  * @priv: driver private structure
797  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
798  * This is done by looking at the HW cap. register.
799  * This function also registers the ptp driver.
800  */
801 static int stmmac_init_ptp(struct stmmac_priv *priv)
802 {
803 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
804 
805 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
806 		return -EOPNOTSUPP;
807 
808 	priv->adv_ts = 0;
809 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
810 	if (xmac && priv->dma_cap.atime_stamp)
811 		priv->adv_ts = 1;
812 	/* Dwmac 3.x core with extend_desc can support adv_ts */
813 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
814 		priv->adv_ts = 1;
815 
816 	if (priv->dma_cap.time_stamp)
817 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
818 
819 	if (priv->adv_ts)
820 		netdev_info(priv->dev,
821 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
822 
823 	priv->hwts_tx_en = 0;
824 	priv->hwts_rx_en = 0;
825 
826 	stmmac_ptp_register(priv);
827 
828 	return 0;
829 }
830 
831 static void stmmac_release_ptp(struct stmmac_priv *priv)
832 {
833 	if (priv->plat->clk_ptp_ref)
834 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
835 	stmmac_ptp_unregister(priv);
836 }
837 
838 /**
839  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex value reported by the PHY (phydev->duplex)
 *  Description: It is used for configuring the flow control in all queues
842  */
843 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
844 {
845 	u32 tx_cnt = priv->plat->tx_queues_to_use;
846 
847 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
848 			priv->pause, tx_cnt);
849 }
850 
851 /**
852  * stmmac_adjust_link - adjusts the link parameters
853  * @dev: net device structure
854  * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because the link may switch
 * between different networks (that are EEE capable).
859  */
860 static void stmmac_adjust_link(struct net_device *dev)
861 {
862 	struct stmmac_priv *priv = netdev_priv(dev);
863 	struct phy_device *phydev = dev->phydev;
864 	bool new_state = false;
865 
866 	if (!phydev)
867 		return;
868 
869 	mutex_lock(&priv->lock);
870 
871 	if (phydev->link) {
872 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
873 
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
876 		if (phydev->duplex != priv->oldduplex) {
877 			new_state = true;
878 			if (!phydev->duplex)
879 				ctrl &= ~priv->hw->link.duplex;
880 			else
881 				ctrl |= priv->hw->link.duplex;
882 			priv->oldduplex = phydev->duplex;
883 		}
884 		/* Flow Control operation */
885 		if (phydev->pause)
886 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
887 
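		/* Reprogram the MAC control register for the negotiated speed */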
888 		if (phydev->speed != priv->speed) {
889 			new_state = true;
890 			ctrl &= ~priv->hw->link.speed_mask;
891 			switch (phydev->speed) {
892 			case SPEED_1000:
893 				ctrl |= priv->hw->link.speed1000;
894 				break;
895 			case SPEED_100:
896 				ctrl |= priv->hw->link.speed100;
897 				break;
898 			case SPEED_10:
899 				ctrl |= priv->hw->link.speed10;
900 				break;
901 			default:
902 				netif_warn(priv, link, priv->dev,
903 					   "broken speed: %d\n", phydev->speed);
904 				phydev->speed = SPEED_UNKNOWN;
905 				break;
906 			}
907 			if (phydev->speed != SPEED_UNKNOWN)
908 				stmmac_hw_fix_mac_speed(priv);
909 			priv->speed = phydev->speed;
910 		}
911 
912 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
913 
914 		if (!priv->oldlink) {
915 			new_state = true;
916 			priv->oldlink = true;
917 		}
918 	} else if (priv->oldlink) {
919 		new_state = true;
920 		priv->oldlink = false;
921 		priv->speed = SPEED_UNKNOWN;
922 		priv->oldduplex = DUPLEX_UNKNOWN;
923 	}
924 
925 	if (new_state && netif_msg_link(priv))
926 		phy_print_status(phydev);
927 
928 	mutex_unlock(&priv->lock);
929 
930 	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the adjust_link hook in
		 * case a switch is attached to the stmmac driver.
		 */
934 		phydev->irq = PHY_IGNORE_INTERRUPT;
935 	else
936 		/* At this stage, init the EEE if supported.
937 		 * Never called in case of fixed_link.
938 		 */
939 		priv->eee_enabled = stmmac_eee_init(priv);
940 }
941 
942 /**
943  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
944  * @priv: driver private structure
945  * Description: this is to verify if the HW supports the PCS.
946  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
947  * configured for the TBI, RTBI, or SGMII PHY interface.
948  */
949 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
950 {
951 	int interface = priv->plat->interface;
952 
953 	if (priv->dma_cap.pcs) {
954 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
955 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
956 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
957 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
958 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
959 			priv->hw->pcs = STMMAC_PCS_RGMII;
960 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
961 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
962 			priv->hw->pcs = STMMAC_PCS_SGMII;
963 		}
964 	}
965 }
966 
967 /**
968  * stmmac_init_phy - PHY initialization
969  * @dev: net device structure
970  * Description: it initializes the driver's PHY state, and attaches the PHY
971  * to the mac driver.
972  *  Return value:
973  *  0 on success
974  */
975 static int stmmac_init_phy(struct net_device *dev)
976 {
977 	struct stmmac_priv *priv = netdev_priv(dev);
978 	u32 tx_cnt = priv->plat->tx_queues_to_use;
979 	struct phy_device *phydev;
980 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
981 	char bus_id[MII_BUS_ID_SIZE];
982 	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
985 	priv->speed = SPEED_UNKNOWN;
986 	priv->oldduplex = DUPLEX_UNKNOWN;
987 
988 	if (priv->plat->phy_node) {
989 		phydev = of_phy_connect(dev, priv->plat->phy_node,
990 					&stmmac_adjust_link, 0, interface);
991 	} else {
992 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
993 			 priv->plat->bus_id);
994 
995 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
996 			 priv->plat->phy_addr);
997 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
998 			   phy_id_fmt);
999 
1000 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
1001 				     interface);
1002 	}
1003 
1004 	if (IS_ERR_OR_NULL(phydev)) {
1005 		netdev_err(priv->dev, "Could not attach to PHY\n");
1006 		if (!phydev)
1007 			return -ENODEV;
1008 
1009 		return PTR_ERR(phydev);
1010 	}
1011 
	/* Stop advertising 1000BASE-T capability if the interface is not GMII */
1013 	if ((interface == PHY_INTERFACE_MODE_MII) ||
1014 	    (interface == PHY_INTERFACE_MODE_RMII) ||
1015 		(max_speed < 1000 && max_speed > 0))
1016 		phy_set_max_speed(phydev, SPEED_100);
1017 
1018 	/*
	 * Half-duplex mode is not supported with multiqueue:
	 * half-duplex can only work with a single queue.
1021 	 */
1022 	if (tx_cnt > 1) {
1023 		phy_remove_link_mode(phydev,
1024 				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1025 		phy_remove_link_mode(phydev,
1026 				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1027 		phy_remove_link_mode(phydev,
1028 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1029 	}
1030 
1031 	/*
1032 	 * Broken HW is sometimes missing the pull-up resistor on the
1033 	 * MDIO line, which results in reads to non-existent devices returning
1034 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1035 	 * device as well.
1036 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
1037 	 */
1038 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
1039 		phy_disconnect(phydev);
1040 		return -ENODEV;
1041 	}
1042 
	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling; make sure we force a link transition if we
	 * have an UP/DOWN/UP transition
1046 	 */
1047 	if (phydev->is_pseudo_fixed_link)
1048 		phydev->irq = PHY_POLL;
1049 
1050 	phy_attached_info(phydev);
1051 	return 0;
1052 }
1053 
1054 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1055 {
1056 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1057 	void *head_rx;
1058 	u32 queue;
1059 
1060 	/* Display RX rings */
1061 	for (queue = 0; queue < rx_cnt; queue++) {
1062 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1063 
1064 		pr_info("\tRX Queue %u rings\n", queue);
1065 
1066 		if (priv->extend_desc)
1067 			head_rx = (void *)rx_q->dma_erx;
1068 		else
1069 			head_rx = (void *)rx_q->dma_rx;
1070 
1071 		/* Display RX ring */
1072 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1073 	}
1074 }
1075 
1076 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1077 {
1078 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1079 	void *head_tx;
1080 	u32 queue;
1081 
1082 	/* Display TX rings */
1083 	for (queue = 0; queue < tx_cnt; queue++) {
1084 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1085 
1086 		pr_info("\tTX Queue %d rings\n", queue);
1087 
1088 		if (priv->extend_desc)
1089 			head_tx = (void *)tx_q->dma_etx;
1090 		else
1091 			head_tx = (void *)tx_q->dma_tx;
1092 
1093 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1094 	}
1095 }
1096 
1097 static void stmmac_display_rings(struct stmmac_priv *priv)
1098 {
1099 	/* Display RX ring */
1100 	stmmac_display_rx_rings(priv);
1101 
1102 	/* Display TX ring */
1103 	stmmac_display_tx_rings(priv);
1104 }
1105 
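/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: the device MTU
 * @bufsize: current buffer size
 * Description: map the MTU onto one of the supported DMA buffer sizes
 * (DEFAULT_BUFSIZE, 2KiB, 4KiB or 8KiB).
 */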
1106 static int stmmac_set_bfsize(int mtu, int bufsize)
1107 {
1108 	int ret = bufsize;
1109 
1110 	if (mtu >= BUF_SIZE_4KiB)
1111 		ret = BUF_SIZE_8KiB;
1112 	else if (mtu >= BUF_SIZE_2KiB)
1113 		ret = BUF_SIZE_4KiB;
1114 	else if (mtu > DEFAULT_BUFSIZE)
1115 		ret = BUF_SIZE_2KiB;
1116 	else
1117 		ret = DEFAULT_BUFSIZE;
1118 
1119 	return ret;
1120 }
1121 
1122 /**
1123  * stmmac_clear_rx_descriptors - clear RX descriptors
1124  * @priv: driver private structure
1125  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
1128  */
1129 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1130 {
1131 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1132 	int i;
1133 
1134 	/* Clear the RX descriptors */
1135 	for (i = 0; i < DMA_RX_SIZE; i++)
1136 		if (priv->extend_desc)
1137 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1138 					priv->use_riwt, priv->mode,
1139 					(i == DMA_RX_SIZE - 1),
1140 					priv->dma_buf_sz);
1141 		else
1142 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1143 					priv->use_riwt, priv->mode,
1144 					(i == DMA_RX_SIZE - 1),
1145 					priv->dma_buf_sz);
1146 }
1147 
1148 /**
1149  * stmmac_clear_tx_descriptors - clear tx descriptors
1150  * @priv: driver private structure
1151  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
1154  */
1155 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1156 {
1157 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1158 	int i;
1159 
1160 	/* Clear the TX descriptors */
1161 	for (i = 0; i < DMA_TX_SIZE; i++)
1162 		if (priv->extend_desc)
1163 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1164 					priv->mode, (i == DMA_TX_SIZE - 1));
1165 		else
1166 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1167 					priv->mode, (i == DMA_TX_SIZE - 1));
1168 }
1169 
1170 /**
1171  * stmmac_clear_descriptors - clear descriptors
1172  * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
1175  */
1176 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1177 {
1178 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1179 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1180 	u32 queue;
1181 
1182 	/* Clear the RX descriptors */
1183 	for (queue = 0; queue < rx_queue_cnt; queue++)
1184 		stmmac_clear_rx_descriptors(priv, queue);
1185 
1186 	/* Clear the TX descriptors */
1187 	for (queue = 0; queue < tx_queue_cnt; queue++)
1188 		stmmac_clear_tx_descriptors(priv, queue);
1189 }
1190 
1191 /**
1192  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1193  * @priv: driver private structure
1194  * @p: descriptor pointer
1195  * @i: descriptor index
1196  * @flags: gfp flag
1197  * @queue: RX queue index
1198  * Description: this function is called to allocate a receive buffer, perform
1199  * the DMA mapping and init the descriptor.
1200  */
1201 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1202 				  int i, gfp_t flags, u32 queue)
1203 {
1204 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1205 	struct sk_buff *skb;
1206 
1207 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1208 	if (!skb) {
1209 		netdev_err(priv->dev,
1210 			   "%s: Rx init fails; skb is NULL\n", __func__);
1211 		return -ENOMEM;
1212 	}
1213 	rx_q->rx_skbuff[i] = skb;
1214 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1215 						priv->dma_buf_sz,
1216 						DMA_FROM_DEVICE);
1217 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1218 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1219 		dev_kfree_skb_any(skb);
1220 		return -EINVAL;
1221 	}
1222 
1223 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1224 
1225 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1226 		stmmac_init_desc3(priv, p);
1227 
1228 	return 0;
1229 }
1230 
1231 /**
1232  * stmmac_free_rx_buffer - free RX dma buffers
1233  * @priv: private structure
1234  * @queue: RX queue index
1235  * @i: buffer index.
1236  */
1237 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1238 {
1239 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1240 
1241 	if (rx_q->rx_skbuff[i]) {
1242 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1243 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1244 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1245 	}
1246 	rx_q->rx_skbuff[i] = NULL;
1247 }
1248 
1249 /**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
1253  * @i: buffer index.
1254  */
1255 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1256 {
1257 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1258 
1259 	if (tx_q->tx_skbuff_dma[i].buf) {
1260 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1261 			dma_unmap_page(priv->device,
1262 				       tx_q->tx_skbuff_dma[i].buf,
1263 				       tx_q->tx_skbuff_dma[i].len,
1264 				       DMA_TO_DEVICE);
1265 		else
1266 			dma_unmap_single(priv->device,
1267 					 tx_q->tx_skbuff_dma[i].buf,
1268 					 tx_q->tx_skbuff_dma[i].len,
1269 					 DMA_TO_DEVICE);
1270 	}
1271 
1272 	if (tx_q->tx_skbuff[i]) {
1273 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1274 		tx_q->tx_skbuff[i] = NULL;
1275 		tx_q->tx_skbuff_dma[i].buf = 0;
1276 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1277 	}
1278 }
1279 
1280 /**
1281  * init_dma_rx_desc_rings - init the RX descriptor rings
1282  * @dev: net device structure
1283  * @flags: gfp flag.
1284  * Description: this function initializes the DMA RX descriptors
1285  * and allocates the socket buffers. It supports the chained and ring
1286  * modes.
1287  */
1288 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1289 {
1290 	struct stmmac_priv *priv = netdev_priv(dev);
1291 	u32 rx_count = priv->plat->rx_queues_to_use;
1292 	int ret = -ENOMEM;
1293 	int bfsize = 0;
1294 	int queue;
1295 	int i;
1296 
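	/* Pick the DMA buffer size: try the 16KiB jumbo path first and, if it
	 * does not apply, derive the size from the MTU.
	 */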
1297 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1298 	if (bfsize < 0)
1299 		bfsize = 0;
1300 
1301 	if (bfsize < BUF_SIZE_16KiB)
1302 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1303 
1304 	priv->dma_buf_sz = bfsize;
1305 
1306 	/* RX INITIALIZATION */
1307 	netif_dbg(priv, probe, priv->dev,
1308 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1309 
1310 	for (queue = 0; queue < rx_count; queue++) {
1311 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1312 
1313 		netif_dbg(priv, probe, priv->dev,
1314 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1315 			  (u32)rx_q->dma_rx_phy);
1316 
1317 		for (i = 0; i < DMA_RX_SIZE; i++) {
1318 			struct dma_desc *p;
1319 
1320 			if (priv->extend_desc)
1321 				p = &((rx_q->dma_erx + i)->basic);
1322 			else
1323 				p = rx_q->dma_rx + i;
1324 
1325 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1326 						     queue);
1327 			if (ret)
1328 				goto err_init_rx_buffers;
1329 
1330 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1331 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1332 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1333 		}
1334 
1335 		rx_q->cur_rx = 0;
1336 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1337 
1338 		stmmac_clear_rx_descriptors(priv, queue);
1339 
1340 		/* Setup the chained descriptor addresses */
1341 		if (priv->mode == STMMAC_CHAIN_MODE) {
1342 			if (priv->extend_desc)
1343 				stmmac_mode_init(priv, rx_q->dma_erx,
1344 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1345 			else
1346 				stmmac_mode_init(priv, rx_q->dma_rx,
1347 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1348 		}
1349 	}
1350 
1351 	buf_sz = bfsize;
1352 
1353 	return 0;
1354 
1355 err_init_rx_buffers:
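	/* Unwind: free the buffers allocated so far in the current queue,
	 * then all buffers of the previously initialized queues.
	 */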
1356 	while (queue >= 0) {
1357 		while (--i >= 0)
1358 			stmmac_free_rx_buffer(priv, queue, i);
1359 
1360 		if (queue == 0)
1361 			break;
1362 
1363 		i = DMA_RX_SIZE;
1364 		queue--;
1365 	}
1366 
1367 	return ret;
1368 }
1369 
1370 /**
1371  * init_dma_tx_desc_rings - init the TX descriptor rings
1372  * @dev: net device structure.
1373  * Description: this function initializes the DMA TX descriptors
1374  * and allocates the socket buffers. It supports the chained and ring
1375  * modes.
1376  */
1377 static int init_dma_tx_desc_rings(struct net_device *dev)
1378 {
1379 	struct stmmac_priv *priv = netdev_priv(dev);
1380 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1381 	u32 queue;
1382 	int i;
1383 
1384 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1385 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1386 
1387 		netif_dbg(priv, probe, priv->dev,
1388 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1389 			 (u32)tx_q->dma_tx_phy);
1390 
1391 		/* Setup the chained descriptor addresses */
1392 		if (priv->mode == STMMAC_CHAIN_MODE) {
1393 			if (priv->extend_desc)
1394 				stmmac_mode_init(priv, tx_q->dma_etx,
1395 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1396 			else
1397 				stmmac_mode_init(priv, tx_q->dma_tx,
1398 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1399 		}
1400 
1401 		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
1404 				p = &((tx_q->dma_etx + i)->basic);
1405 			else
1406 				p = tx_q->dma_tx + i;
1407 
1408 			stmmac_clear_desc(priv, p);
1409 
1410 			tx_q->tx_skbuff_dma[i].buf = 0;
1411 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1412 			tx_q->tx_skbuff_dma[i].len = 0;
1413 			tx_q->tx_skbuff_dma[i].last_segment = false;
1414 			tx_q->tx_skbuff[i] = NULL;
1415 		}
1416 
1417 		tx_q->dirty_tx = 0;
1418 		tx_q->cur_tx = 0;
1419 		tx_q->mss = 0;
1420 
1421 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 /**
1428  * init_dma_desc_rings - init the RX/TX descriptor rings
1429  * @dev: net device structure
1430  * @flags: gfp flag.
1431  * Description: this function initializes the DMA RX/TX descriptors
1432  * and allocates the socket buffers. It supports the chained and ring
1433  * modes.
1434  */
1435 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1436 {
1437 	struct stmmac_priv *priv = netdev_priv(dev);
1438 	int ret;
1439 
1440 	ret = init_dma_rx_desc_rings(dev, flags);
1441 	if (ret)
1442 		return ret;
1443 
1444 	ret = init_dma_tx_desc_rings(dev);
1445 
1446 	stmmac_clear_descriptors(priv);
1447 
1448 	if (netif_msg_hw(priv))
1449 		stmmac_display_rings(priv);
1450 
1451 	return ret;
1452 }
1453 
1454 /**
1455  * dma_free_rx_skbufs - free RX dma buffers
1456  * @priv: private structure
1457  * @queue: RX queue index
1458  */
1459 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1460 {
1461 	int i;
1462 
1463 	for (i = 0; i < DMA_RX_SIZE; i++)
1464 		stmmac_free_rx_buffer(priv, queue, i);
1465 }
1466 
1467 /**
1468  * dma_free_tx_skbufs - free TX dma buffers
1469  * @priv: private structure
1470  * @queue: TX queue index
1471  */
1472 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1473 {
1474 	int i;
1475 
1476 	for (i = 0; i < DMA_TX_SIZE; i++)
1477 		stmmac_free_tx_buffer(priv, queue, i);
1478 }
1479 
1480 /**
1481  * free_dma_rx_desc_resources - free RX dma desc resources
1482  * @priv: private structure
1483  */
1484 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1485 {
1486 	u32 rx_count = priv->plat->rx_queues_to_use;
1487 	u32 queue;
1488 
1489 	/* Free RX queue resources */
1490 	for (queue = 0; queue < rx_count; queue++) {
1491 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1492 
1493 		/* Release the DMA RX socket buffers */
1494 		dma_free_rx_skbufs(priv, queue);
1495 
1496 		/* Free DMA regions of consistent memory previously allocated */
1497 		if (!priv->extend_desc)
1498 			dma_free_coherent(priv->device,
1499 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1500 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1501 		else
1502 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1503 					  sizeof(struct dma_extended_desc),
1504 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1505 
1506 		kfree(rx_q->rx_skbuff_dma);
1507 		kfree(rx_q->rx_skbuff);
1508 	}
1509 }
1510 
1511 /**
1512  * free_dma_tx_desc_resources - free TX dma desc resources
1513  * @priv: private structure
1514  */
1515 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1516 {
1517 	u32 tx_count = priv->plat->tx_queues_to_use;
1518 	u32 queue;
1519 
1520 	/* Free TX queue resources */
1521 	for (queue = 0; queue < tx_count; queue++) {
1522 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1523 
1524 		/* Release the DMA TX socket buffers */
1525 		dma_free_tx_skbufs(priv, queue);
1526 
1527 		/* Free DMA regions of consistent memory previously allocated */
1528 		if (!priv->extend_desc)
1529 			dma_free_coherent(priv->device,
1530 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1531 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1532 		else
1533 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1534 					  sizeof(struct dma_extended_desc),
1535 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1536 
1537 		kfree(tx_q->tx_skbuff_dma);
1538 		kfree(tx_q->tx_skbuff);
1539 	}
1540 }
1541 
1542 /**
1543  * alloc_dma_rx_desc_resources - alloc RX resources.
1544  * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX path. It pre-allocates
 * the RX socket buffers in order to allow the zero-copy mechanism.
1549  */
1550 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1551 {
1552 	u32 rx_count = priv->plat->rx_queues_to_use;
1553 	int ret = -ENOMEM;
1554 	u32 queue;
1555 
1556 	/* RX queues buffers and DMA */
1557 	for (queue = 0; queue < rx_count; queue++) {
1558 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1559 
1560 		rx_q->queue_index = queue;
1561 		rx_q->priv_data = priv;
1562 
1563 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1564 						    sizeof(dma_addr_t),
1565 						    GFP_KERNEL);
1566 		if (!rx_q->rx_skbuff_dma)
1567 			goto err_dma;
1568 
1569 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1570 						sizeof(struct sk_buff *),
1571 						GFP_KERNEL);
1572 		if (!rx_q->rx_skbuff)
1573 			goto err_dma;
1574 
1575 		if (priv->extend_desc) {
1576 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1577 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1578 							   &rx_q->dma_rx_phy,
1579 							   GFP_KERNEL);
1580 			if (!rx_q->dma_erx)
1581 				goto err_dma;
1582 
1583 		} else {
1584 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1585 							  DMA_RX_SIZE * sizeof(struct dma_desc),
1586 							  &rx_q->dma_rx_phy,
1587 							  GFP_KERNEL);
1588 			if (!rx_q->dma_rx)
1589 				goto err_dma;
1590 		}
1591 	}
1592 
1593 	return 0;
1594 
1595 err_dma:
1596 	free_dma_rx_desc_resources(priv);
1597 
1598 	return ret;
1599 }
1600 
1601 /**
1602  * alloc_dma_tx_desc_resources - alloc TX resources.
1603  * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX path: the descriptor
 * rings and the per-descriptor bookkeeping arrays.
1608  */
1609 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1610 {
1611 	u32 tx_count = priv->plat->tx_queues_to_use;
1612 	int ret = -ENOMEM;
1613 	u32 queue;
1614 
1615 	/* TX queues buffers and DMA */
1616 	for (queue = 0; queue < tx_count; queue++) {
1617 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1618 
1619 		tx_q->queue_index = queue;
1620 		tx_q->priv_data = priv;
1621 
1622 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1623 						    sizeof(*tx_q->tx_skbuff_dma),
1624 						    GFP_KERNEL);
1625 		if (!tx_q->tx_skbuff_dma)
1626 			goto err_dma;
1627 
1628 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1629 						sizeof(struct sk_buff *),
1630 						GFP_KERNEL);
1631 		if (!tx_q->tx_skbuff)
1632 			goto err_dma;
1633 
1634 		if (priv->extend_desc) {
1635 			tx_q->dma_etx = dma_alloc_coherent(priv->device,
1636 							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1637 							   &tx_q->dma_tx_phy,
1638 							   GFP_KERNEL);
1639 			if (!tx_q->dma_etx)
1640 				goto err_dma;
1641 		} else {
1642 			tx_q->dma_tx = dma_alloc_coherent(priv->device,
1643 							  DMA_TX_SIZE * sizeof(struct dma_desc),
1644 							  &tx_q->dma_tx_phy,
1645 							  GFP_KERNEL);
1646 			if (!tx_q->dma_tx)
1647 				goto err_dma;
1648 		}
1649 	}
1650 
1651 	return 0;
1652 
1653 err_dma:
1654 	free_dma_tx_desc_resources(priv);
1655 
1656 	return ret;
1657 }
1658 
1659 /**
1660  * alloc_dma_desc_resources - alloc TX/RX resources.
1661  * @priv: private structure
1662  * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order
 * to allow the zero-copy mechanism.
1666  */
1667 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1668 {
1669 	/* RX Allocation */
1670 	int ret = alloc_dma_rx_desc_resources(priv);
1671 
1672 	if (ret)
1673 		return ret;
1674 
1675 	ret = alloc_dma_tx_desc_resources(priv);
1676 
1677 	return ret;
1678 }
1679 
1680 /**
1681  * free_dma_desc_resources - free dma desc resources
1682  * @priv: private structure
1683  */
1684 static void free_dma_desc_resources(struct stmmac_priv *priv)
1685 {
1686 	/* Release the DMA RX socket buffers */
1687 	free_dma_rx_desc_resources(priv);
1688 
1689 	/* Release the DMA TX socket buffers */
1690 	free_dma_tx_desc_resources(priv);
1691 }
1692 
1693 /**
1694  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1695  *  @priv: driver private structure
1696  *  Description: It is used for enabling the rx queues in the MAC
1697  */
1698 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1699 {
1700 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1701 	int queue;
1702 	u8 mode;
1703 
1704 	for (queue = 0; queue < rx_queues_count; queue++) {
1705 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1706 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1707 	}
1708 }
1709 
1710 /**
1711  * stmmac_start_rx_dma - start RX DMA channel
1712  * @priv: driver private structure
1713  * @chan: RX channel index
1714  * Description:
1715  * This starts a RX DMA channel
1716  */
1717 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1718 {
1719 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1720 	stmmac_start_rx(priv, priv->ioaddr, chan);
1721 }
1722 
1723 /**
1724  * stmmac_start_tx_dma - start TX DMA channel
1725  * @priv: driver private structure
1726  * @chan: TX channel index
1727  * Description:
1728  * This starts a TX DMA channel
1729  */
1730 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1731 {
1732 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1733 	stmmac_start_tx(priv, priv->ioaddr, chan);
1734 }
1735 
1736 /**
1737  * stmmac_stop_rx_dma - stop RX DMA channel
1738  * @priv: driver private structure
1739  * @chan: RX channel index
1740  * Description:
1741  * This stops a RX DMA channel
1742  */
1743 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1744 {
1745 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1746 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1747 }
1748 
1749 /**
1750  * stmmac_stop_tx_dma - stop TX DMA channel
1751  * @priv: driver private structure
1752  * @chan: TX channel index
1753  * Description:
1754  * This stops a TX DMA channel
1755  */
1756 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1757 {
1758 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1759 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1760 }
1761 
1762 /**
1763  * stmmac_start_all_dma - start all RX and TX DMA channels
1764  * @priv: driver private structure
1765  * Description:
1766  * This starts all the RX and TX DMA channels
1767  */
1768 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1769 {
1770 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1771 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1772 	u32 chan = 0;
1773 
1774 	for (chan = 0; chan < rx_channels_count; chan++)
1775 		stmmac_start_rx_dma(priv, chan);
1776 
1777 	for (chan = 0; chan < tx_channels_count; chan++)
1778 		stmmac_start_tx_dma(priv, chan);
1779 }
1780 
1781 /**
1782  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1783  * @priv: driver private structure
1784  * Description:
1785  * This stops the RX and TX DMA channels
1786  */
1787 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1788 {
1789 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1790 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1791 	u32 chan = 0;
1792 
1793 	for (chan = 0; chan < rx_channels_count; chan++)
1794 		stmmac_stop_rx_dma(priv, chan);
1795 
1796 	for (chan = 0; chan < tx_channels_count; chan++)
1797 		stmmac_stop_tx_dma(priv, chan);
1798 }
1799 
1800 /**
1801  *  stmmac_dma_operation_mode - HW DMA operation mode
1802  *  @priv: driver private structure
1803  *  Description: it is used for configuring the DMA operation mode register in
1804  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1805  */
1806 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1807 {
1808 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1809 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1810 	int rxfifosz = priv->plat->rx_fifo_size;
1811 	int txfifosz = priv->plat->tx_fifo_size;
1812 	u32 txmode = 0;
1813 	u32 rxmode = 0;
1814 	u32 chan = 0;
1815 	u8 qmode = 0;
1816 
1817 	if (rxfifosz == 0)
1818 		rxfifosz = priv->dma_cap.rx_fifo_size;
1819 	if (txfifosz == 0)
1820 		txfifosz = priv->dma_cap.tx_fifo_size;
1821 
1822 	/* Adjust for real per queue fifo size */
1823 	rxfifosz /= rx_channels_count;
1824 	txfifosz /= tx_channels_count;
1825 
1826 	if (priv->plat->force_thresh_dma_mode) {
1827 		txmode = tc;
1828 		rxmode = tc;
1829 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1830 		/*
1831 		 * In case of GMAC, SF mode can be enabled
1832 		 * to perform the TX COE in HW. This depends on:
1833 		 * 1) TX COE if actually supported
1834 		 * 2) There is no bugged Jumbo frame support
1835 		 *    that needs to not insert csum in the TDES.
1836 		 */
1837 		txmode = SF_DMA_MODE;
1838 		rxmode = SF_DMA_MODE;
1839 		priv->xstats.threshold = SF_DMA_MODE;
1840 	} else {
1841 		txmode = tc;
1842 		rxmode = SF_DMA_MODE;
1843 	}
1844 
1845 	/* configure all channels */
1846 	for (chan = 0; chan < rx_channels_count; chan++) {
1847 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1848 
1849 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1850 				rxfifosz, qmode);
1851 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1852 				chan);
1853 	}
1854 
1855 	for (chan = 0; chan < tx_channels_count; chan++) {
1856 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1857 
1858 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1859 				txfifosz, qmode);
1860 	}
1861 }
1862 
1863 /**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: maximum number of descriptors to reclaim
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
1868  */
1869 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1870 {
1871 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1872 	unsigned int bytes_compl = 0, pkts_compl = 0;
1873 	unsigned int entry, count = 0;
1874 
1875 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1876 
1877 	priv->xstats.tx_clean++;
1878 
1879 	entry = tx_q->dirty_tx;
1880 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1881 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1882 		struct dma_desc *p;
1883 		int status;
1884 
1885 		if (priv->extend_desc)
1886 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1887 		else
1888 			p = tx_q->dma_tx + entry;
1889 
1890 		status = stmmac_tx_status(priv, &priv->dev->stats,
1891 				&priv->xstats, p, priv->ioaddr);
1892 		/* Check if the descriptor is owned by the DMA */
1893 		if (unlikely(status & tx_dma_own))
1894 			break;
1895 
1896 		count++;
1897 
1898 		/* Make sure descriptor fields are read after reading
1899 		 * the own bit.
1900 		 */
1901 		dma_rmb();
1902 
		/* Just consider the last segment and ... */
1904 		if (likely(!(status & tx_not_ls))) {
1905 			/* ... verify the status error condition */
1906 			if (unlikely(status & tx_err)) {
1907 				priv->dev->stats.tx_errors++;
1908 			} else {
1909 				priv->dev->stats.tx_packets++;
1910 				priv->xstats.tx_pkt_n++;
1911 			}
1912 			stmmac_get_tx_hwtstamp(priv, p, skb);
1913 		}
1914 
1915 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1916 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1917 				dma_unmap_page(priv->device,
1918 					       tx_q->tx_skbuff_dma[entry].buf,
1919 					       tx_q->tx_skbuff_dma[entry].len,
1920 					       DMA_TO_DEVICE);
1921 			else
1922 				dma_unmap_single(priv->device,
1923 						 tx_q->tx_skbuff_dma[entry].buf,
1924 						 tx_q->tx_skbuff_dma[entry].len,
1925 						 DMA_TO_DEVICE);
1926 			tx_q->tx_skbuff_dma[entry].buf = 0;
1927 			tx_q->tx_skbuff_dma[entry].len = 0;
1928 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1929 		}
1930 
1931 		stmmac_clean_desc3(priv, tx_q, p);
1932 
1933 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1934 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1935 
1936 		if (likely(skb != NULL)) {
1937 			pkts_compl++;
1938 			bytes_compl += skb->len;
1939 			dev_consume_skb_any(skb);
1940 			tx_q->tx_skbuff[entry] = NULL;
1941 		}
1942 
1943 		stmmac_release_tx_desc(priv, p, priv->mode);
1944 
1945 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1946 	}
1947 	tx_q->dirty_tx = entry;
1948 
1949 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1950 				  pkts_compl, bytes_compl);
1951 
1952 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1953 								queue))) &&
1954 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1955 
1956 		netif_dbg(priv, tx_done, priv->dev,
1957 			  "%s: restart transmit\n", __func__);
1958 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1959 	}
1960 
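	/* If EEE is enabled and the TX path is not in LPI yet, try to enter
	 * LPI and re-arm the EEE control timer.
	 */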
1961 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1962 		stmmac_enable_eee_mode(priv);
1963 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1964 	}
1965 
1966 	/* We still have pending packets, let's call for a new scheduling */
1967 	if (tx_q->dirty_tx != tx_q->cur_tx)
1968 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1969 
1970 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1971 
1972 	return count;
1973 }
1974 
1975 /**
1976  * stmmac_tx_err - to manage the tx error
1977  * @priv: driver private structure
1978  * @chan: channel index
1979  * Description: it cleans the descriptors and restarts the transmission
1980  * in case of transmission errors.
1981  */
1982 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1983 {
1984 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1985 	int i;
1986 
1987 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1988 
1989 	stmmac_stop_tx_dma(priv, chan);
1990 	dma_free_tx_skbufs(priv, chan);
1991 	for (i = 0; i < DMA_TX_SIZE; i++)
1992 		if (priv->extend_desc)
1993 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1994 					priv->mode, (i == DMA_TX_SIZE - 1));
1995 		else
1996 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1997 					priv->mode, (i == DMA_TX_SIZE - 1));
1998 	tx_q->dirty_tx = 0;
1999 	tx_q->cur_tx = 0;
2000 	tx_q->mss = 0;
2001 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2002 	stmmac_start_tx_dma(priv, chan);
2003 
2004 	priv->dev->stats.tx_errors++;
2005 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2006 }
2007 
2008 /**
2009  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2010  *  @priv: driver private structure
2011  *  @txmode: TX operating mode
2012  *  @rxmode: RX operating mode
2013  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2016  *  mode.
2017  */
2018 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2019 					  u32 rxmode, u32 chan)
2020 {
2021 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2022 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2023 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2024 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2025 	int rxfifosz = priv->plat->rx_fifo_size;
2026 	int txfifosz = priv->plat->tx_fifo_size;
2027 
2028 	if (rxfifosz == 0)
2029 		rxfifosz = priv->dma_cap.rx_fifo_size;
2030 	if (txfifosz == 0)
2031 		txfifosz = priv->dma_cap.tx_fifo_size;
2032 
2033 	/* Adjust for real per queue fifo size */
2034 	rxfifosz /= rx_channels_count;
2035 	txfifosz /= tx_channels_count;
2036 
2037 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2038 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2039 }
2040 
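/**
 * stmmac_safety_feat_interrupt - handle the safety feature interrupt
 * @priv: driver private structure
 * Description: it reads the safety feature interrupt status and, when an
 * error is reported, it triggers the global error recovery. It returns true
 * if the global error path was taken.
 */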
2041 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2042 {
2043 	int ret;
2044 
2045 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2046 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2047 	if (ret && (ret != -EINVAL)) {
2048 		stmmac_global_err(priv);
2049 		return true;
2050 	}
2051 
2052 	return false;
2053 }
2054 
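/**
 * stmmac_napi_check - check the DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * Description: it reads the per-channel DMA interrupt status and, when RX or
 * TX work is pending, it masks the channel interrupt and schedules the
 * corresponding NAPI context. It returns the raw DMA status.
 */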
2055 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2056 {
2057 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2058 						 &priv->xstats, chan);
2059 	struct stmmac_channel *ch = &priv->channel[chan];
2060 
2061 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2062 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2063 		napi_schedule_irqoff(&ch->rx_napi);
2064 	}
2065 
2066 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2067 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2068 		napi_schedule_irqoff(&ch->tx_napi);
2069 	}
2070 
2071 	return status;
2072 }
2073 
2074 /**
2075  * stmmac_dma_interrupt - DMA ISR
2076  * @priv: driver private structure
2077  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method when some
 * work can be done.
2080  */
2081 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2082 {
2083 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2084 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2085 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2086 				tx_channel_count : rx_channel_count;
2087 	u32 chan;
2088 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2089 
2090 	/* Make sure we never check beyond our status buffer. */
2091 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2092 		channels_to_check = ARRAY_SIZE(status);
2093 
2094 	for (chan = 0; chan < channels_to_check; chan++)
2095 		status[chan] = stmmac_napi_check(priv, chan);
2096 
2097 	for (chan = 0; chan < tx_channel_count; chan++) {
2098 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2099 			/* Try to bump up the dma threshold on this failure */
2100 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2101 			    (tc <= 256)) {
2102 				tc += 64;
2103 				if (priv->plat->force_thresh_dma_mode)
2104 					stmmac_set_dma_operation_mode(priv,
2105 								      tc,
2106 								      tc,
2107 								      chan);
2108 				else
2109 					stmmac_set_dma_operation_mode(priv,
2110 								    tc,
2111 								    SF_DMA_MODE,
2112 								    chan);
2113 				priv->xstats.threshold = tc;
2114 			}
2115 		} else if (unlikely(status[chan] == tx_hard_error)) {
2116 			stmmac_tx_err(priv, chan);
2117 		}
2118 	}
2119 }
2120 
2121 /**
 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq since the counters are managed in SW.
2125  */
2126 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2127 {
2128 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2129 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2130 
2131 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2132 
2133 	if (priv->dma_cap.rmon) {
2134 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2135 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2136 	} else
2137 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2138 }
2139 
2140 /**
2141  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2142  * @priv: driver private structure
2143  * Description:
 *  newer GMAC chip generations have a register to indicate the
 *  presence of the optional features/functions.
 *  This can also be used to override the value passed through the
 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2148  */
2149 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2150 {
2151 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2152 }
2153 
2154 /**
2155  * stmmac_check_ether_addr - check if the MAC addr is valid
2156  * @priv: driver private structure
2157  * Description:
 * it verifies that the MAC address is valid; in case of failure it
 * generates a random MAC address.
2160  */
2161 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2162 {
2163 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2164 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2165 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2166 			eth_hw_addr_random(priv->dev);
2167 		netdev_info(priv->dev, "device MAC address %pM\n",
2168 			    priv->dev->dev_addr);
2169 	}
2170 }
2171 
2172 /**
2173  * stmmac_init_dma_engine - DMA init.
2174  * @priv: driver private structure
2175  * Description:
 * It inits the DMA by invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case they are not passed, a default is kept for the MAC or GMAC.
2179  */
2180 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2181 {
2182 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2183 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2184 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2185 	struct stmmac_rx_queue *rx_q;
2186 	struct stmmac_tx_queue *tx_q;
2187 	u32 chan = 0;
2188 	int atds = 0;
2189 	int ret = 0;
2190 
2191 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2192 		dev_err(priv->device, "Invalid DMA configuration\n");
2193 		return -EINVAL;
2194 	}
2195 
2196 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2197 		atds = 1;
2198 
2199 	ret = stmmac_reset(priv, priv->ioaddr);
2200 	if (ret) {
2201 		dev_err(priv->device, "Failed to reset the dma\n");
2202 		return ret;
2203 	}
2204 
2205 	/* DMA Configuration */
2206 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2207 
2208 	if (priv->plat->axi)
2209 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2210 
2211 	/* DMA RX Channel Configuration */
2212 	for (chan = 0; chan < rx_channels_count; chan++) {
2213 		rx_q = &priv->rx_queue[chan];
2214 
2215 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2216 				    rx_q->dma_rx_phy, chan);
2217 
2218 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2219 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2220 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2221 				       rx_q->rx_tail_addr, chan);
2222 	}
2223 
2224 	/* DMA TX Channel Configuration */
2225 	for (chan = 0; chan < tx_channels_count; chan++) {
2226 		tx_q = &priv->tx_queue[chan];
2227 
2228 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2229 				    tx_q->dma_tx_phy, chan);
2230 
2231 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2232 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2233 				       tx_q->tx_tail_addr, chan);
2234 	}
2235 
2236 	/* DMA CSR Channel configuration */
2237 	for (chan = 0; chan < dma_csr_ch; chan++)
2238 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2239 
2240 	return ret;
2241 }
2242 
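/* (Re)arm the SW TX coalescing timer for the given queue */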
2243 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2244 {
2245 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2246 
2247 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2248 }
2249 
2250 /**
2251  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: timer_list pointer
 * Description:
 * This is the timer handler that schedules the TX NAPI to directly invoke
 * stmmac_tx_clean.
2255  */
2256 static void stmmac_tx_timer(struct timer_list *t)
2257 {
2258 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2259 	struct stmmac_priv *priv = tx_q->priv_data;
2260 	struct stmmac_channel *ch;
2261 
2262 	ch = &priv->channel[tx_q->queue_index];
2263 
2264 	/*
2265 	 * If NAPI is already running we can miss some events. Let's rearm
2266 	 * the timer and try again.
2267 	 */
2268 	if (likely(napi_schedule_prep(&ch->tx_napi)))
2269 		__napi_schedule(&ch->tx_napi);
2270 	else
2271 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
2272 }
2273 
2274 /**
2275  * stmmac_init_tx_coalesce - init tx mitigation options.
2276  * @priv: driver private structure
2277  * Description:
2278  * This inits the transmit coalesce parameters: i.e. timer rate,
2279  * timer handler and default threshold used for enabling the
2280  * interrupt on completion bit.
2281  */
2282 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2283 {
2284 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2285 	u32 chan;
2286 
2287 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2288 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2289 
2290 	for (chan = 0; chan < tx_channel_count; chan++) {
2291 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2292 
2293 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2294 	}
2295 }
2296 
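/**
 * stmmac_set_rings_length - set the TX/RX DMA ring lengths
 * @priv: driver private structure
 * Description: it programs the descriptor ring length for each TX and RX
 * DMA channel.
 */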
2297 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2298 {
2299 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2300 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2301 	u32 chan;
2302 
2303 	/* set TX ring length */
2304 	for (chan = 0; chan < tx_channels_count; chan++)
2305 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2306 				(DMA_TX_SIZE - 1), chan);
2307 
2308 	/* set RX ring length */
2309 	for (chan = 0; chan < rx_channels_count; chan++)
2310 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2311 				(DMA_RX_SIZE - 1), chan);
2312 }
2313 
2314 /**
2315  *  stmmac_set_tx_queue_weight - Set TX queue weight
2316  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
2318  */
2319 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2320 {
2321 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2322 	u32 weight;
2323 	u32 queue;
2324 
2325 	for (queue = 0; queue < tx_queues_count; queue++) {
2326 		weight = priv->plat->tx_queues_cfg[queue].weight;
2327 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2328 	}
2329 }
2330 
2331 /**
2332  *  stmmac_configure_cbs - Configure CBS in TX queue
2333  *  @priv: driver private structure
2334  *  Description: It is used for configuring CBS in AVB TX queues
2335  */
2336 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2337 {
2338 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2339 	u32 mode_to_use;
2340 	u32 queue;
2341 
2342 	/* queue 0 is reserved for legacy traffic */
2343 	for (queue = 1; queue < tx_queues_count; queue++) {
2344 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2345 		if (mode_to_use == MTL_QUEUE_DCB)
2346 			continue;
2347 
2348 		stmmac_config_cbs(priv, priv->hw,
2349 				priv->plat->tx_queues_cfg[queue].send_slope,
2350 				priv->plat->tx_queues_cfg[queue].idle_slope,
2351 				priv->plat->tx_queues_cfg[queue].high_credit,
2352 				priv->plat->tx_queues_cfg[queue].low_credit,
2353 				queue);
2354 	}
2355 }
2356 
2357 /**
2358  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2359  *  @priv: driver private structure
2360  *  Description: It is used for mapping RX queues to RX dma channels
2361  */
2362 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2363 {
2364 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2365 	u32 queue;
2366 	u32 chan;
2367 
2368 	for (queue = 0; queue < rx_queues_count; queue++) {
2369 		chan = priv->plat->rx_queues_cfg[queue].chan;
2370 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2371 	}
2372 }
2373 
2374 /**
2375  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2376  *  @priv: driver private structure
2377  *  Description: It is used for configuring the RX Queue Priority
2378  */
2379 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2380 {
2381 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2382 	u32 queue;
2383 	u32 prio;
2384 
2385 	for (queue = 0; queue < rx_queues_count; queue++) {
2386 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2387 			continue;
2388 
2389 		prio = priv->plat->rx_queues_cfg[queue].prio;
2390 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2391 	}
2392 }
2393 
2394 /**
2395  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2396  *  @priv: driver private structure
2397  *  Description: It is used for configuring the TX Queue Priority
2398  */
2399 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2400 {
2401 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2402 	u32 queue;
2403 	u32 prio;
2404 
2405 	for (queue = 0; queue < tx_queues_count; queue++) {
2406 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2407 			continue;
2408 
2409 		prio = priv->plat->tx_queues_cfg[queue].prio;
2410 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2411 	}
2412 }
2413 
2414 /**
2415  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2416  *  @priv: driver private structure
2417  *  Description: It is used for configuring the RX queue routing
2418  */
2419 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2420 {
2421 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2422 	u32 queue;
2423 	u8 packet;
2424 
2425 	for (queue = 0; queue < rx_queues_count; queue++) {
2426 		/* no specific packet type routing specified for the queue */
2427 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2428 			continue;
2429 
2430 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2431 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2432 	}
2433 }
2434 
2435 /**
2436  *  stmmac_mtl_configuration - Configure MTL
2437  *  @priv: driver private structure
 *  Description: It is used for configuring the MTL
2439  */
2440 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2441 {
2442 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2443 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2444 
2445 	if (tx_queues_count > 1)
2446 		stmmac_set_tx_queue_weight(priv);
2447 
2448 	/* Configure MTL RX algorithms */
2449 	if (rx_queues_count > 1)
2450 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2451 				priv->plat->rx_sched_algorithm);
2452 
2453 	/* Configure MTL TX algorithms */
2454 	if (tx_queues_count > 1)
2455 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2456 				priv->plat->tx_sched_algorithm);
2457 
2458 	/* Configure CBS in AVB TX queues */
2459 	if (tx_queues_count > 1)
2460 		stmmac_configure_cbs(priv);
2461 
2462 	/* Map RX MTL to DMA channels */
2463 	stmmac_rx_queue_dma_chan_map(priv);
2464 
2465 	/* Enable MAC RX Queues */
2466 	stmmac_mac_enable_rx_queues(priv);
2467 
2468 	/* Set RX priorities */
2469 	if (rx_queues_count > 1)
2470 		stmmac_mac_config_rx_queues_prio(priv);
2471 
2472 	/* Set TX priorities */
2473 	if (tx_queues_count > 1)
2474 		stmmac_mac_config_tx_queues_prio(priv);
2475 
2476 	/* Set RX routing */
2477 	if (rx_queues_count > 1)
2478 		stmmac_mac_config_rx_queues_routing(priv);
2479 }
2480 
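/**
 * stmmac_safety_feat_configuration - configure the HW safety features
 * @priv: driver private structure
 * Description: it enables the Automotive Safety Package (ASP) when the HW
 * capabilities report support for it.
 */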
2481 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2482 {
2483 	if (priv->dma_cap.asp) {
2484 		netdev_info(priv->dev, "Enabling Safety Features\n");
2485 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2486 	} else {
2487 		netdev_info(priv->dev, "No Safety Features support found\n");
2488 	}
2489 }
2490 
2491 /**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state: the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
2499  *  Return value:
2500  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2501  *  file on failure.
2502  */
2503 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2504 {
2505 	struct stmmac_priv *priv = netdev_priv(dev);
2506 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2507 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2508 	u32 chan;
2509 	int ret;
2510 
2511 	/* DMA initialization and SW reset */
2512 	ret = stmmac_init_dma_engine(priv);
2513 	if (ret < 0) {
2514 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2515 			   __func__);
2516 		return ret;
2517 	}
2518 
2519 	/* Copy the MAC addr into the HW  */
2520 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2521 
2522 	/* PS and related bits will be programmed according to the speed */
2523 	if (priv->hw->pcs) {
2524 		int speed = priv->plat->mac_port_sel_speed;
2525 
2526 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2527 		    (speed == SPEED_1000)) {
2528 			priv->hw->ps = speed;
2529 		} else {
2530 			dev_warn(priv->device, "invalid port speed\n");
2531 			priv->hw->ps = 0;
2532 		}
2533 	}
2534 
2535 	/* Initialize the MAC Core */
2536 	stmmac_core_init(priv, priv->hw, dev);
2537 
2538 	/* Initialize MTL*/
2539 	stmmac_mtl_configuration(priv);
2540 
2541 	/* Initialize Safety Features */
2542 	stmmac_safety_feat_configuration(priv);
2543 
2544 	ret = stmmac_rx_ipc(priv, priv->hw);
2545 	if (!ret) {
2546 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2547 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2548 		priv->hw->rx_csum = 0;
2549 	}
2550 
2551 	/* Enable the MAC Rx/Tx */
2552 	stmmac_mac_set(priv, priv->ioaddr, true);
2553 
2554 	/* Set the HW DMA mode and the COE */
2555 	stmmac_dma_operation_mode(priv);
2556 
2557 	stmmac_mmc_setup(priv);
2558 
2559 	if (init_ptp) {
2560 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2561 		if (ret < 0)
2562 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2563 
2564 		ret = stmmac_init_ptp(priv);
2565 		if (ret == -EOPNOTSUPP)
2566 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2567 		else if (ret)
2568 			netdev_warn(priv->dev, "PTP init failed\n");
2569 	}
2570 
2571 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2572 
2573 	if (priv->use_riwt) {
2574 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2575 		if (!ret)
2576 			priv->rx_riwt = MAX_DMA_RIWT;
2577 	}
2578 
2579 	if (priv->hw->pcs)
2580 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2581 
2582 	/* set TX and RX rings length */
2583 	stmmac_set_rings_length(priv);
2584 
2585 	/* Enable TSO */
2586 	if (priv->tso) {
2587 		for (chan = 0; chan < tx_cnt; chan++)
2588 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2589 	}
2590 
2591 	/* Start the ball rolling... */
2592 	stmmac_start_all_dma(priv);
2593 
2594 	return 0;
2595 }
2596 
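/**
 * stmmac_hw_teardown - undo the HW setup
 * @dev: net device structure
 * Description: it disables the PTP reference clock enabled by
 * stmmac_hw_setup(); used on the stmmac_open() error path.
 */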
2597 static void stmmac_hw_teardown(struct net_device *dev)
2598 {
2599 	struct stmmac_priv *priv = netdev_priv(dev);
2600 
2601 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2602 }
2603 
2604 /**
2605  *  stmmac_open - open entry point of the driver
2606  *  @dev : pointer to the device structure.
2607  *  Description:
2608  *  This function is the open entry point of the driver.
2609  *  Return value:
2610  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2611  *  file on failure.
2612  */
2613 static int stmmac_open(struct net_device *dev)
2614 {
2615 	struct stmmac_priv *priv = netdev_priv(dev);
2616 	u32 chan;
2617 	int ret;
2618 
2619 	stmmac_check_ether_addr(priv);
2620 
2621 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2622 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2623 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2624 		ret = stmmac_init_phy(dev);
2625 		if (ret) {
2626 			netdev_err(priv->dev,
2627 				   "%s: Cannot attach to PHY (error: %d)\n",
2628 				   __func__, ret);
2629 			return ret;
2630 		}
2631 	}
2632 
2633 	/* Extra statistics */
2634 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2635 	priv->xstats.threshold = tc;
2636 
2637 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2638 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2639 
2640 	ret = alloc_dma_desc_resources(priv);
2641 	if (ret < 0) {
2642 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2643 			   __func__);
2644 		goto dma_desc_error;
2645 	}
2646 
2647 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2648 	if (ret < 0) {
2649 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2650 			   __func__);
2651 		goto init_error;
2652 	}
2653 
2654 	ret = stmmac_hw_setup(dev, true);
2655 	if (ret < 0) {
2656 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2657 		goto init_error;
2658 	}
2659 
2660 	stmmac_init_tx_coalesce(priv);
2661 
2662 	if (dev->phydev)
2663 		phy_start(dev->phydev);
2664 
2665 	/* Request the IRQ lines */
2666 	ret = request_irq(dev->irq, stmmac_interrupt,
2667 			  IRQF_SHARED, dev->name, dev);
2668 	if (unlikely(ret < 0)) {
2669 		netdev_err(priv->dev,
2670 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2671 			   __func__, dev->irq, ret);
2672 		goto irq_error;
2673 	}
2674 
	/* Request the Wake IRQ in case another line is used for WoL */
2676 	if (priv->wol_irq != dev->irq) {
2677 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2678 				  IRQF_SHARED, dev->name, dev);
2679 		if (unlikely(ret < 0)) {
2680 			netdev_err(priv->dev,
2681 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2682 				   __func__, priv->wol_irq, ret);
2683 			goto wolirq_error;
2684 		}
2685 	}
2686 
	/* Request the LPI IRQ in case a dedicated line is used for it */
2688 	if (priv->lpi_irq > 0) {
2689 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2690 				  dev->name, dev);
2691 		if (unlikely(ret < 0)) {
2692 			netdev_err(priv->dev,
2693 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2694 				   __func__, priv->lpi_irq, ret);
2695 			goto lpiirq_error;
2696 		}
2697 	}
2698 
2699 	stmmac_enable_all_queues(priv);
2700 	stmmac_start_all_queues(priv);
2701 
2702 	return 0;
2703 
2704 lpiirq_error:
2705 	if (priv->wol_irq != dev->irq)
2706 		free_irq(priv->wol_irq, dev);
2707 wolirq_error:
2708 	free_irq(dev->irq, dev);
2709 irq_error:
2710 	if (dev->phydev)
2711 		phy_stop(dev->phydev);
2712 
2713 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2714 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2715 
2716 	stmmac_hw_teardown(dev);
2717 init_error:
2718 	free_dma_desc_resources(priv);
2719 dma_desc_error:
2720 	if (dev->phydev)
2721 		phy_disconnect(dev->phydev);
2722 
2723 	return ret;
2724 }
2725 
2726 /**
2727  *  stmmac_release - close entry point of the driver
2728  *  @dev : device pointer.
2729  *  Description:
2730  *  This is the stop entry point of the driver.
2731  */
2732 static int stmmac_release(struct net_device *dev)
2733 {
2734 	struct stmmac_priv *priv = netdev_priv(dev);
2735 	u32 chan;
2736 
2737 	if (priv->eee_enabled)
2738 		del_timer_sync(&priv->eee_ctrl_timer);
2739 
2740 	/* Stop and disconnect the PHY */
2741 	if (dev->phydev) {
2742 		phy_stop(dev->phydev);
2743 		phy_disconnect(dev->phydev);
2744 	}
2745 
2746 	stmmac_stop_all_queues(priv);
2747 
2748 	stmmac_disable_all_queues(priv);
2749 
2750 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2751 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2752 
2753 	/* Free the IRQ lines */
2754 	free_irq(dev->irq, dev);
2755 	if (priv->wol_irq != dev->irq)
2756 		free_irq(priv->wol_irq, dev);
2757 	if (priv->lpi_irq > 0)
2758 		free_irq(priv->lpi_irq, dev);
2759 
2760 	/* Stop TX/RX DMA and clear the descriptors */
2761 	stmmac_stop_all_dma(priv);
2762 
2763 	/* Release and free the Rx/Tx resources */
2764 	free_dma_desc_resources(priv);
2765 
2766 	/* Disable the MAC Rx/Tx */
2767 	stmmac_mac_set(priv, priv->ioaddr, false);
2768 
2769 	netif_carrier_off(dev);
2770 
2771 	stmmac_release_ptp(priv);
2772 
2773 	return 0;
2774 }
2775 
2776 /**
 *  stmmac_tso_allocator - allocate the TSO payload descriptors
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills descriptors and requests new descriptors according
 *  to the buffer length to fill.
2786  */
2787 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2788 				 int total_len, bool last_segment, u32 queue)
2789 {
2790 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2791 	struct dma_desc *desc;
2792 	u32 buff_size;
2793 	int tmp_len;
2794 
2795 	tmp_len = total_len;
2796 
2797 	while (tmp_len > 0) {
2798 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2799 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2800 		desc = tx_q->dma_tx + tx_q->cur_tx;
2801 
2802 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2803 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2804 			    TSO_MAX_BUFF_SIZE : tmp_len;
2805 
2806 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2807 				0, 1,
2808 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2809 				0, 0);
2810 
2811 		tmp_len -= TSO_MAX_BUFF_SIZE;
2812 	}
2813 }
2814 
2815 /**
2816  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2817  *  @skb : the socket buffer
2818  *  @dev : device pointer
2819  *  Description: this is the transmit function that is called on TSO frames
2820  *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming in case of TSO frames:
2822  *
2823  *  First Descriptor
2824  *   --------
2825  *   | DES0 |---> buffer1 = L2/L3/L4 header
2826  *   | DES1 |---> TCP Payload (can continue on next descr...)
2827  *   | DES2 |---> buffer 1 and 2 len
2828  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2829  *   --------
2830  *	|
2831  *     ...
2832  *	|
2833  *   --------
2834  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2835  *   | DES1 | --|
2836  *   | DES2 | --> buffer 1 and 2 len
2837  *   | DES3 |
2838  *   --------
2839  *
 * mss is fixed when tso is enabled, so the TDES3 ctx field only needs to be
 * programmed when the mss value changes.
2841  */
2842 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2843 {
2844 	struct dma_desc *desc, *first, *mss_desc = NULL;
2845 	struct stmmac_priv *priv = netdev_priv(dev);
2846 	int nfrags = skb_shinfo(skb)->nr_frags;
2847 	u32 queue = skb_get_queue_mapping(skb);
2848 	unsigned int first_entry, des;
2849 	struct stmmac_tx_queue *tx_q;
2850 	int tmp_pay_len = 0;
2851 	u32 pay_len, mss;
2852 	u8 proto_hdr_len;
2853 	int i;
2854 
2855 	tx_q = &priv->tx_queue[queue];
2856 
2857 	/* Compute header lengths */
2858 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2859 
	/* Desc availability based on threshold should be safe enough */
2861 	if (unlikely(stmmac_tx_avail(priv, queue) <
2862 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2863 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2864 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2865 								queue));
2866 			/* This is a hard error, log it. */
2867 			netdev_err(priv->dev,
2868 				   "%s: Tx Ring full when queue awake\n",
2869 				   __func__);
2870 		}
2871 		return NETDEV_TX_BUSY;
2872 	}
2873 
2874 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2875 
2876 	mss = skb_shinfo(skb)->gso_size;
2877 
2878 	/* set new MSS value if needed */
2879 	if (mss != tx_q->mss) {
2880 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2881 		stmmac_set_mss(priv, mss_desc, mss);
2882 		tx_q->mss = mss;
2883 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2884 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2885 	}
2886 
2887 	if (netif_msg_tx_queued(priv)) {
2888 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2889 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2890 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2891 			skb->data_len);
2892 	}
2893 
2894 	first_entry = tx_q->cur_tx;
2895 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2896 
2897 	desc = tx_q->dma_tx + first_entry;
2898 	first = desc;
2899 
2900 	/* first descriptor: fill Headers on Buf1 */
2901 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2902 			     DMA_TO_DEVICE);
2903 	if (dma_mapping_error(priv->device, des))
2904 		goto dma_map_err;
2905 
2906 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2907 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2908 
2909 	first->des0 = cpu_to_le32(des);
2910 
2911 	/* Fill start of payload in buff2 of first descriptor */
2912 	if (pay_len)
2913 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2914 
2915 	/* If needed take extra descriptors to fill the remaining payload */
2916 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2917 
2918 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2919 
2920 	/* Prepare fragments */
2921 	for (i = 0; i < nfrags; i++) {
2922 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2923 
2924 		des = skb_frag_dma_map(priv->device, frag, 0,
2925 				       skb_frag_size(frag),
2926 				       DMA_TO_DEVICE);
2927 		if (dma_mapping_error(priv->device, des))
2928 			goto dma_map_err;
2929 
2930 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2931 				     (i == nfrags - 1), queue);
2932 
2933 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2934 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2935 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2936 	}
2937 
2938 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2939 
2940 	/* Only the last descriptor gets to point to the skb. */
2941 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2942 
2943 	/* We've used all descriptors we need for this skb, however,
2944 	 * advance cur_tx so that it references a fresh descriptor.
2945 	 * ndo_start_xmit will fill this descriptor the next time it's
2946 	 * called and stmmac_tx_clean may clean up to this descriptor.
2947 	 */
2948 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2949 
2950 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2951 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2952 			  __func__);
2953 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2954 	}
2955 
2956 	dev->stats.tx_bytes += skb->len;
2957 	priv->xstats.tx_tso_frames++;
2958 	priv->xstats.tx_tso_nfrags += nfrags;
2959 
2960 	/* Manage tx mitigation */
2961 	tx_q->tx_count_frames += nfrags + 1;
2962 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2963 		stmmac_set_tx_ic(priv, desc);
2964 		priv->xstats.tx_set_ic_bit++;
2965 		tx_q->tx_count_frames = 0;
2966 	} else {
2967 		stmmac_tx_timer_arm(priv, queue);
2968 	}
2969 
2970 	skb_tx_timestamp(skb);
2971 
2972 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2973 		     priv->hwts_tx_en)) {
2974 		/* declare that device is doing timestamping */
2975 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2976 		stmmac_enable_tx_timestamp(priv, first);
2977 	}
2978 
2979 	/* Complete the first descriptor before granting the DMA */
2980 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2981 			proto_hdr_len,
2982 			pay_len,
2983 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2984 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2985 
2986 	/* If context desc is used to change MSS */
2987 	if (mss_desc) {
		/* Make sure that the first descriptor has been completely
		 * written, including its own bit. This is because the MSS
		 * descriptor actually precedes the first descriptor, so its
		 * own bit must be the last thing written.
2992 		 */
2993 		dma_wmb();
2994 		stmmac_set_tx_owner(priv, mss_desc);
2995 	}
2996 
	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and then a barrier is needed to make sure that
2999 	 * all is coherent before granting the DMA engine.
3000 	 */
3001 	wmb();
3002 
3003 	if (netif_msg_pktdata(priv)) {
3004 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3005 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3006 			tx_q->cur_tx, first, nfrags);
3007 
3008 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3009 
3010 		pr_info(">>> frame to be transmitted: ");
3011 		print_pkt(skb->data, skb_headlen(skb));
3012 	}
3013 
3014 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3015 
3016 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3017 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3018 
3019 	return NETDEV_TX_OK;
3020 
3021 dma_map_err:
3022 	dev_err(priv->device, "Tx dma map failed\n");
3023 	dev_kfree_skb(skb);
3024 	priv->dev->stats.tx_dropped++;
3025 	return NETDEV_TX_OK;
3026 }
3027 
3028 /**
3029  *  stmmac_xmit - Tx entry point of the driver
3030  *  @skb : the socket buffer
3031  *  @dev : device pointer
3032  *  Description : this is the tx entry point of the driver.
3033  *  It programs the chain or the ring and supports oversized frames
3034  *  and SG feature.
3035  */
3036 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3037 {
3038 	struct stmmac_priv *priv = netdev_priv(dev);
3039 	unsigned int nopaged_len = skb_headlen(skb);
3040 	int i, csum_insertion = 0, is_jumbo = 0;
3041 	u32 queue = skb_get_queue_mapping(skb);
3042 	int nfrags = skb_shinfo(skb)->nr_frags;
3043 	int entry;
3044 	unsigned int first_entry;
3045 	struct dma_desc *desc, *first;
3046 	struct stmmac_tx_queue *tx_q;
3047 	unsigned int enh_desc;
3048 	unsigned int des;
3049 
3050 	tx_q = &priv->tx_queue[queue];
3051 
3052 	if (priv->tx_path_in_lpi_mode)
3053 		stmmac_disable_eee_mode(priv);
3054 
3055 	/* Manage oversized TCP frames for GMAC4 device */
3056 	if (skb_is_gso(skb) && priv->tso) {
3057 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3058 			/*
3059 			 * There is no way to determine the number of TSO
			 * capable Queues. Let's always use Queue 0
3061 			 * because if TSO is supported then at least this
3062 			 * one will be capable.
3063 			 */
3064 			skb_set_queue_mapping(skb, 0);
3065 
3066 			return stmmac_tso_xmit(skb, dev);
3067 		}
3068 	}
3069 
3070 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3071 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3072 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3073 								queue));
3074 			/* This is a hard error, log it. */
3075 			netdev_err(priv->dev,
3076 				   "%s: Tx Ring full when queue awake\n",
3077 				   __func__);
3078 		}
3079 		return NETDEV_TX_BUSY;
3080 	}
3081 
3082 	entry = tx_q->cur_tx;
3083 	first_entry = entry;
3084 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3085 
3086 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3087 
3088 	if (likely(priv->extend_desc))
3089 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3090 	else
3091 		desc = tx_q->dma_tx + entry;
3092 
3093 	first = desc;
3094 
3095 	enh_desc = priv->plat->enh_desc;
3096 	/* To program the descriptors according to the size of the frame */
3097 	if (enh_desc)
3098 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3099 
3100 	if (unlikely(is_jumbo)) {
3101 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3102 		if (unlikely(entry < 0) && (entry != -EINVAL))
3103 			goto dma_map_err;
3104 	}
3105 
3106 	for (i = 0; i < nfrags; i++) {
3107 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3108 		int len = skb_frag_size(frag);
3109 		bool last_segment = (i == (nfrags - 1));
3110 
3111 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3112 		WARN_ON(tx_q->tx_skbuff[entry]);
3113 
3114 		if (likely(priv->extend_desc))
3115 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3116 		else
3117 			desc = tx_q->dma_tx + entry;
3118 
3119 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3120 				       DMA_TO_DEVICE);
3121 		if (dma_mapping_error(priv->device, des))
3122 			goto dma_map_err; /* should reuse desc w/o issues */
3123 
3124 		tx_q->tx_skbuff_dma[entry].buf = des;
3125 
3126 		stmmac_set_desc_addr(priv, desc, des);
3127 
3128 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3129 		tx_q->tx_skbuff_dma[entry].len = len;
3130 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3131 
3132 		/* Prepare the descriptor and set the own bit too */
3133 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3134 				priv->mode, 1, last_segment, skb->len);
3135 	}
3136 
3137 	/* Only the last descriptor gets to point to the skb. */
3138 	tx_q->tx_skbuff[entry] = skb;
3139 
3140 	/* We've used all descriptors we need for this skb, however,
3141 	 * advance cur_tx so that it references a fresh descriptor.
3142 	 * ndo_start_xmit will fill this descriptor the next time it's
3143 	 * called and stmmac_tx_clean may clean up to this descriptor.
3144 	 */
3145 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3146 	tx_q->cur_tx = entry;
3147 
3148 	if (netif_msg_pktdata(priv)) {
3149 		void *tx_head;
3150 
3151 		netdev_dbg(priv->dev,
3152 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3153 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3154 			   entry, first, nfrags);
3155 
3156 		if (priv->extend_desc)
3157 			tx_head = (void *)tx_q->dma_etx;
3158 		else
3159 			tx_head = (void *)tx_q->dma_tx;
3160 
3161 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3162 
3163 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3164 		print_pkt(skb->data, skb->len);
3165 	}
3166 
3167 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3168 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3169 			  __func__);
3170 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3171 	}
3172 
3173 	dev->stats.tx_bytes += skb->len;
3174 
3175 	/* According to the coalesce parameter the IC bit for the latest
3176 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
3178 	 * element in case of no SG.
3179 	 */
3180 	tx_q->tx_count_frames += nfrags + 1;
3181 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3182 		stmmac_set_tx_ic(priv, desc);
3183 		priv->xstats.tx_set_ic_bit++;
3184 		tx_q->tx_count_frames = 0;
3185 	} else {
3186 		stmmac_tx_timer_arm(priv, queue);
3187 	}
3188 
3189 	skb_tx_timestamp(skb);
3190 
3191 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3192 	 * problems because all the descriptors are actually ready to be
3193 	 * passed to the DMA engine.
3194 	 */
3195 	if (likely(!is_jumbo)) {
3196 		bool last_segment = (nfrags == 0);
3197 
3198 		des = dma_map_single(priv->device, skb->data,
3199 				     nopaged_len, DMA_TO_DEVICE);
3200 		if (dma_mapping_error(priv->device, des))
3201 			goto dma_map_err;
3202 
3203 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3204 
3205 		stmmac_set_desc_addr(priv, first, des);
3206 
3207 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3208 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3209 
3210 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3211 			     priv->hwts_tx_en)) {
3212 			/* declare that device is doing timestamping */
3213 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3214 			stmmac_enable_tx_timestamp(priv, first);
3215 		}
3216 
3217 		/* Prepare the first descriptor setting the OWN bit too */
3218 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3219 				csum_insertion, priv->mode, 1, last_segment,
3220 				skb->len);
3221 	} else {
3222 		stmmac_set_tx_owner(priv, first);
3223 	}
3224 
	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and then a barrier is needed to make sure that
3227 	 * all is coherent before granting the DMA engine.
3228 	 */
3229 	wmb();
3230 
3231 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3232 
3233 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3234 
3235 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3236 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3237 
3238 	return NETDEV_TX_OK;
3239 
3240 dma_map_err:
3241 	netdev_err(priv->dev, "Tx DMA map failed\n");
3242 	dev_kfree_skb(skb);
3243 	priv->dev->stats.tx_dropped++;
3244 	return NETDEV_TX_OK;
3245 }
3246 
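/**
 * stmmac_rx_vlan - strip the VLAN tag from a received frame
 * @dev: net device structure
 * @skb: the received socket buffer
 * Description: when the corresponding VLAN RX offload feature is enabled,
 * pop the 802.1Q/802.1AD tag from the frame data and store it in the skb.
 */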
3247 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3248 {
3249 	struct vlan_ethhdr *veth;
3250 	__be16 vlan_proto;
3251 	u16 vlanid;
3252 
3253 	veth = (struct vlan_ethhdr *)skb->data;
3254 	vlan_proto = veth->h_vlan_proto;
3255 
3256 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3257 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3258 	    (vlan_proto == htons(ETH_P_8021AD) &&
3259 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3260 		/* pop the vlan tag */
3261 		vlanid = ntohs(veth->h_vlan_TCI);
3262 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3263 		skb_pull(skb, VLAN_HLEN);
3264 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3265 	}
3266 }
3267 
3268 
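/* Return 1 when the zero-copy threshold has been reached and the copy-based
 * receive path should be used instead.
 */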
3269 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3270 {
3271 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3272 		return 0;
3273 
3274 	return 1;
3275 }
3276 
3277 /**
3278  * stmmac_rx_refill - refill used skb preallocated buffers
3279  * @priv: driver private structure
3280  * @queue: RX queue index
3281  * Description : this is to reallocate the skb for the reception process
3282  * that is based on zero-copy.
3283  */
3284 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3285 {
3286 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3287 	int dirty = stmmac_rx_dirty(priv, queue);
3288 	unsigned int entry = rx_q->dirty_rx;
3289 
3290 	int bfsize = priv->dma_buf_sz;
3291 
3292 	while (dirty-- > 0) {
3293 		struct dma_desc *p;
3294 
3295 		if (priv->extend_desc)
3296 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3297 		else
3298 			p = rx_q->dma_rx + entry;
3299 
3300 		if (likely(!rx_q->rx_skbuff[entry])) {
3301 			struct sk_buff *skb;
3302 
3303 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3304 			if (unlikely(!skb)) {
3305 				/* so for a while no zero-copy! */
3306 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3307 				if (unlikely(net_ratelimit()))
3308 					dev_err(priv->device,
3309 						"fail to alloc skb entry %d\n",
3310 						entry);
3311 				break;
3312 			}
3313 
3314 			rx_q->rx_skbuff[entry] = skb;
3315 			rx_q->rx_skbuff_dma[entry] =
3316 			    dma_map_single(priv->device, skb->data, bfsize,
3317 					   DMA_FROM_DEVICE);
3318 			if (dma_mapping_error(priv->device,
3319 					      rx_q->rx_skbuff_dma[entry])) {
3320 				netdev_err(priv->dev, "Rx DMA map failed\n");
3321 				dev_kfree_skb(skb);
3322 				break;
3323 			}
3324 
3325 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3326 			stmmac_refill_desc3(priv, rx_q, p);
3327 
3328 			if (rx_q->rx_zeroc_thresh > 0)
3329 				rx_q->rx_zeroc_thresh--;
3330 
3331 			netif_dbg(priv, rx_status, priv->dev,
3332 				  "refill entry #%d\n", entry);
3333 		}
3334 		dma_wmb();
3335 
3336 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3337 
3338 		dma_wmb();
3339 
3340 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3341 	}
3342 	rx_q->dirty_rx = entry;
3343 }
3344 
3345 /**
3346  * stmmac_rx - manage the receive process
3347  * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
3351  * It gets all the frames inside the ring.
3352  */
3353 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3354 {
3355 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3356 	struct stmmac_channel *ch = &priv->channel[queue];
3357 	unsigned int next_entry = rx_q->cur_rx;
3358 	int coe = priv->hw->rx_csum;
3359 	unsigned int count = 0;
3360 	bool xmac;
3361 
3362 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3363 
3364 	if (netif_msg_rx_status(priv)) {
3365 		void *rx_head;
3366 
3367 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3368 		if (priv->extend_desc)
3369 			rx_head = (void *)rx_q->dma_erx;
3370 		else
3371 			rx_head = (void *)rx_q->dma_rx;
3372 
3373 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3374 	}
3375 	while (count < limit) {
3376 		int entry, status;
3377 		struct dma_desc *p;
3378 		struct dma_desc *np;
3379 
3380 		entry = next_entry;
3381 
3382 		if (priv->extend_desc)
3383 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3384 		else
3385 			p = rx_q->dma_rx + entry;
3386 
3387 		/* read the status of the incoming frame */
3388 		status = stmmac_rx_status(priv, &priv->dev->stats,
3389 				&priv->xstats, p);
3390 		/* check if managed by the DMA otherwise go ahead */
3391 		if (unlikely(status & dma_own))
3392 			break;
3393 
3394 		count++;
3395 
3396 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3397 		next_entry = rx_q->cur_rx;
3398 
3399 		if (priv->extend_desc)
3400 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3401 		else
3402 			np = rx_q->dma_rx + next_entry;
3403 
3404 		prefetch(np);
3405 
3406 		if (priv->extend_desc)
3407 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3408 					&priv->xstats, rx_q->dma_erx + entry);
3409 		if (unlikely(status == discard_frame)) {
3410 			priv->dev->stats.rx_errors++;
3411 			if (priv->hwts_rx_en && !priv->extend_desc) {
3412 				/* DESC2 & DESC3 will be overwritten by device
3413 				 * with timestamp value, hence reinitialize
3414 				 * them in stmmac_rx_refill() function so that
3415 				 * device can reuse it.
3416 				 */
3417 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3418 				rx_q->rx_skbuff[entry] = NULL;
3419 				dma_unmap_single(priv->device,
3420 						 rx_q->rx_skbuff_dma[entry],
3421 						 priv->dma_buf_sz,
3422 						 DMA_FROM_DEVICE);
3423 			}
3424 		} else {
3425 			struct sk_buff *skb;
3426 			int frame_len;
3427 			unsigned int des;
3428 
3429 			stmmac_get_desc_addr(priv, p, &des);
3430 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3431 
3432 			/*  If frame length is greater than skb buffer size
3433 			 *  (preallocated during init) then the packet is
3434 			 *  ignored
3435 			 */
3436 			if (frame_len > priv->dma_buf_sz) {
3437 				if (net_ratelimit())
3438 					netdev_err(priv->dev,
3439 						   "len %d larger than size (%d)\n",
3440 						   frame_len, priv->dma_buf_sz);
3441 				priv->dev->stats.rx_length_errors++;
3442 				continue;
3443 			}
3444 
3445 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3446 			 * Type frames (LLC/LLC-SNAP)
3447 			 *
3448 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3449 			 * feature is always disabled and packets need to be
3450 			 * stripped manually.
3451 			 */
3452 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3453 			    unlikely(status != llc_snap))
3454 				frame_len -= ETH_FCS_LEN;
3455 
3456 			if (netif_msg_rx_status(priv)) {
3457 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3458 					   p, entry, des);
3459 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3460 					   frame_len, status);
3461 			}
3462 
			/* The zero-copy path is always used for all sizes
			 * in case of GMAC4 because the used descriptors
			 * always need to be refilled.
			 */
3467 			if (unlikely(!xmac &&
3468 				     ((frame_len < priv->rx_copybreak) ||
3469 				     stmmac_rx_threshold_count(rx_q)))) {
3470 				skb = netdev_alloc_skb_ip_align(priv->dev,
3471 								frame_len);
3472 				if (unlikely(!skb)) {
3473 					if (net_ratelimit())
3474 						dev_warn(priv->device,
3475 							 "packet dropped\n");
3476 					priv->dev->stats.rx_dropped++;
3477 					continue;
3478 				}
3479 
3480 				dma_sync_single_for_cpu(priv->device,
3481 							rx_q->rx_skbuff_dma
3482 							[entry], frame_len,
3483 							DMA_FROM_DEVICE);
3484 				skb_copy_to_linear_data(skb,
3485 							rx_q->
3486 							rx_skbuff[entry]->data,
3487 							frame_len);
3488 
3489 				skb_put(skb, frame_len);
3490 				dma_sync_single_for_device(priv->device,
3491 							   rx_q->rx_skbuff_dma
3492 							   [entry], frame_len,
3493 							   DMA_FROM_DEVICE);
3494 			} else {
3495 				skb = rx_q->rx_skbuff[entry];
3496 				if (unlikely(!skb)) {
3497 					if (net_ratelimit())
3498 						netdev_err(priv->dev,
3499 							   "%s: Inconsistent Rx chain\n",
3500 							   priv->dev->name);
3501 					priv->dev->stats.rx_dropped++;
3502 					continue;
3503 				}
3504 				prefetch(skb->data - NET_IP_ALIGN);
3505 				rx_q->rx_skbuff[entry] = NULL;
3506 				rx_q->rx_zeroc_thresh++;
3507 
3508 				skb_put(skb, frame_len);
3509 				dma_unmap_single(priv->device,
3510 						 rx_q->rx_skbuff_dma[entry],
3511 						 priv->dma_buf_sz,
3512 						 DMA_FROM_DEVICE);
3513 			}
3514 
3515 			if (netif_msg_pktdata(priv)) {
3516 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3517 					   frame_len);
3518 				print_pkt(skb->data, frame_len);
3519 			}
3520 
3521 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3522 
3523 			stmmac_rx_vlan(priv->dev, skb);
3524 
3525 			skb->protocol = eth_type_trans(skb, priv->dev);
3526 
3527 			if (unlikely(!coe))
3528 				skb_checksum_none_assert(skb);
3529 			else
3530 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3531 
3532 			napi_gro_receive(&ch->rx_napi, skb);
3533 
3534 			priv->dev->stats.rx_packets++;
3535 			priv->dev->stats.rx_bytes += frame_len;
3536 		}
3537 	}
3538 
3539 	stmmac_rx_refill(priv, queue);
3540 
3541 	priv->xstats.rx_pkt_n += count;
3542 
3543 	return count;
3544 }
3545 
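/**
 * stmmac_napi_poll_rx - RX NAPI poll callback
 * @napi: napi structure embedded in the RX channel
 * @budget: maximum number of packets that may be processed
 * Description: processes received frames for this channel and, when fewer
 * than @budget packets were handled, completes NAPI and re-enables the
 * DMA interrupts for the channel.
 */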
3546 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3547 {
3548 	struct stmmac_channel *ch =
3549 		container_of(napi, struct stmmac_channel, rx_napi);
3550 	struct stmmac_priv *priv = ch->priv_data;
3551 	u32 chan = ch->index;
3552 	int work_done;
3553 
3554 	priv->xstats.napi_poll++;
3555 
3556 	work_done = stmmac_rx(priv, budget, chan);
3557 	if (work_done < budget && napi_complete_done(napi, work_done))
3558 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3559 	return work_done;
3560 }
3561 
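/**
 * stmmac_napi_poll_tx - TX NAPI poll callback
 * @napi: napi structure embedded in the TX channel
 * @budget: NAPI budget
 * Description: reclaims completed TX descriptors, completes NAPI and
 * re-enables the DMA interrupts when the work done is below @budget, and
 * restarts transmission if descriptors are still pending.
 */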
3562 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3563 {
3564 	struct stmmac_channel *ch =
3565 		container_of(napi, struct stmmac_channel, tx_napi);
3566 	struct stmmac_priv *priv = ch->priv_data;
3567 	struct stmmac_tx_queue *tx_q;
3568 	u32 chan = ch->index;
3569 	int work_done;
3570 
3571 	priv->xstats.napi_poll++;
3572 
3573 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3574 	work_done = min(work_done, budget);
3575 
3576 	if (work_done < budget && napi_complete_done(napi, work_done))
3577 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3578 
3579 	/* Force transmission restart */
3580 	tx_q = &priv->tx_queue[chan];
3581 	if (tx_q->cur_tx != tx_q->dirty_tx) {
3582 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
3583 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3584 				       chan);
3585 	}
3586 
3587 	return work_done;
3588 }
3589 
3590 /**
3591  *  stmmac_tx_timeout
3592  *  @dev : Pointer to net device structure
3593  *  Description: this function is called when a packet transmission fails to
3594  *   complete within a reasonable time. The driver will mark the error in the
3595  *   netdev structure and arrange for the device to be reset to a sane state
3596  *   in order to transmit a new packet.
3597  */
3598 static void stmmac_tx_timeout(struct net_device *dev)
3599 {
3600 	struct stmmac_priv *priv = netdev_priv(dev);
3601 
3602 	stmmac_global_err(priv);
3603 }
3604 
3605 /**
3606  *  stmmac_set_rx_mode - entry point for multicast addressing
3607  *  @dev : pointer to the device structure
3608  *  Description:
3609  *  This function is a driver entry point which gets called by the kernel
3610  *  whenever multicast addresses must be enabled/disabled.
3611  *  Return value:
3612  *  void.
3613  */
3614 static void stmmac_set_rx_mode(struct net_device *dev)
3615 {
3616 	struct stmmac_priv *priv = netdev_priv(dev);
3617 
3618 	stmmac_set_filter(priv, priv->hw, dev);
3619 }
3620 
3621 /**
3622  *  stmmac_change_mtu - entry point to change MTU size for the device.
3623  *  @dev : device pointer.
3624  *  @new_mtu : the new MTU size for the device.
3625  *  Description: the Maximum Transmission Unit (MTU) is used by the network
3626  *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
3627  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3628  *  Return value:
3629  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3630  *  file on failure.
3631  */
3632 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3633 {
3634 	struct stmmac_priv *priv = netdev_priv(dev);
3635 
3636 	if (netif_running(dev)) {
3637 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3638 		return -EBUSY;
3639 	}
3640 
3641 	dev->mtu = new_mtu;
3642 
3643 	netdev_update_features(dev);
3644 
3645 	return 0;
3646 }
3647 
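/* ndo_fix_features callback: drop checksum offload flags that the
 * platform/HW configuration cannot support and track the TSO setting.
 */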
3648 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3649 					     netdev_features_t features)
3650 {
3651 	struct stmmac_priv *priv = netdev_priv(dev);
3652 
3653 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3654 		features &= ~NETIF_F_RXCSUM;
3655 
3656 	if (!priv->plat->tx_coe)
3657 		features &= ~NETIF_F_CSUM_MASK;
3658 
3659 	/* Some GMAC devices have buggy Jumbo frame support that
3660 	 * requires the Tx COE to be disabled for oversized frames
3661 	 * (due to limited buffer sizes). In this case we disable
3662 	 * the TX csum insertion in the TDES and do not use Store-and-Forward.
3663 	 */
3664 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3665 		features &= ~NETIF_F_CSUM_MASK;
3666 
3667 	/* Enable or disable TSO according to what is requested via ethtool */
3668 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3669 		if (features & NETIF_F_TSO)
3670 			priv->tso = true;
3671 		else
3672 			priv->tso = false;
3673 	}
3674 
3675 	return features;
3676 }
3677 
3678 static int stmmac_set_features(struct net_device *netdev,
3679 			       netdev_features_t features)
3680 {
3681 	struct stmmac_priv *priv = netdev_priv(netdev);
3682 
3683 	/* Keep the COE Type if RX checksum offload is supported */
3684 	if (features & NETIF_F_RXCSUM)
3685 		priv->hw->rx_csum = priv->plat->rx_coe;
3686 	else
3687 		priv->hw->rx_csum = 0;
3688 	/* No check needed because rx_coe has already been set and will be
3689 	 * fixed up in case of an issue.
3690 	 */
3691 	stmmac_rx_ipc(priv, priv->hw);
3692 
3693 	return 0;
3694 }
3695 
3696 /**
3697  *  stmmac_interrupt - main ISR
3698  *  @irq: interrupt number.
3699  *  @dev_id: to pass the net device pointer.
3700  *  Description: this is the main driver interrupt service routine.
3701  *  It can call:
3702  *  o DMA service routine (to manage incoming frame reception and transmission
3703  *    status)
3704  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3705  *    interrupts.
3706  */
3707 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3708 {
3709 	struct net_device *dev = (struct net_device *)dev_id;
3710 	struct stmmac_priv *priv;
3711 	u32 rx_cnt, tx_cnt, queues_count, queue;
3712 	bool xmac;
3713 
3714 	if (unlikely(!dev)) {
3715 		pr_err("%s: invalid dev pointer\n", __func__);
3716 		return IRQ_NONE;
3717 	}
3718 
3719 	priv = netdev_priv(dev);
3720 	rx_cnt = priv->plat->rx_queues_to_use;
3721 	tx_cnt = priv->plat->tx_queues_to_use;
3722 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3723 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3724 
3725 	if (priv->irq_wake)
3726 		pm_wakeup_event(priv->device, 0);
3727 
3728 	/* Check if adapter is up */
3729 	if (test_bit(STMMAC_DOWN, &priv->state))
3730 		return IRQ_HANDLED;
3731 	/* Check if a fatal error happened */
3732 	if (stmmac_safety_feat_interrupt(priv))
3733 		return IRQ_HANDLED;
3734 
3735 	/* To handle GMAC own interrupts */
3736 	if ((priv->plat->has_gmac) || xmac) {
3737 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3738 		int mtl_status;
3739 
3740 		if (unlikely(status)) {
3741 			/* For LPI we need to save the tx status */
3742 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3743 				priv->tx_path_in_lpi_mode = true;
3744 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3745 				priv->tx_path_in_lpi_mode = false;
3746 		}
3747 
3748 		for (queue = 0; queue < queues_count; queue++) {
3749 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3750 
3751 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3752 								queue);
3753 			if (mtl_status != -EINVAL)
3754 				status |= mtl_status;
3755 
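			/* On an MTL RX FIFO overflow, rewrite the RX tail
			 * pointer so that the DMA resumes fetching buffers
			 * for this queue.
			 */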
3756 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3757 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3758 						       rx_q->rx_tail_addr,
3759 						       queue);
3760 		}
3761 
3762 		/* PCS link status */
3763 		if (priv->hw->pcs) {
3764 			if (priv->xstats.pcs_link)
3765 				netif_carrier_on(dev);
3766 			else
3767 				netif_carrier_off(dev);
3768 		}
3769 	}
3770 
3771 	/* To handle DMA interrupts */
3772 	stmmac_dma_interrupt(priv);
3773 
3774 	return IRQ_HANDLED;
3775 }
3776 
3777 #ifdef CONFIG_NET_POLL_CONTROLLER
3778 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3779  * to allow network I/O with interrupts disabled.
3780  */
3781 static void stmmac_poll_controller(struct net_device *dev)
3782 {
3783 	disable_irq(dev->irq);
3784 	stmmac_interrupt(dev->irq, dev);
3785 	enable_irq(dev->irq);
3786 }
3787 #endif
3788 
3789 /**
3790  *  stmmac_ioctl - Entry point for the Ioctl
3791  *  @dev: Device pointer.
3792  *  @rq: An IOCTL specific structure that can contain a pointer to
3793  *  a proprietary structure used to pass information to the driver.
3794  *  @cmd: IOCTL command
3795  *  Description:
3796  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3797  */
3798 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3799 {
3800 	int ret = -EOPNOTSUPP;
3801 
3802 	if (!netif_running(dev))
3803 		return -EINVAL;
3804 
3805 	switch (cmd) {
3806 	case SIOCGMIIPHY:
3807 	case SIOCGMIIREG:
3808 	case SIOCSMIIREG:
3809 		if (!dev->phydev)
3810 			return -EINVAL;
3811 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3812 		break;
3813 	case SIOCSHWTSTAMP:
3814 		ret = stmmac_hwtstamp_set(dev, rq);
3815 		break;
3816 	case SIOCGHWTSTAMP:
3817 		ret = stmmac_hwtstamp_get(dev, rq);
3818 		break;
3819 	default:
3820 		break;
3821 	}
3822 
3823 	return ret;
3824 }
3825 
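/* tc block callback: all queues are temporarily disabled while the
 * classifier offload is programmed into the hardware.
 */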
3826 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3827 				    void *cb_priv)
3828 {
3829 	struct stmmac_priv *priv = cb_priv;
3830 	int ret = -EOPNOTSUPP;
3831 
3832 	stmmac_disable_all_queues(priv);
3833 
3834 	switch (type) {
3835 	case TC_SETUP_CLSU32:
3836 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3837 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3838 		break;
3839 	default:
3840 		break;
3841 	}
3842 
3843 	stmmac_enable_all_queues(priv);
3844 	return ret;
3845 }
3846 
3847 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3848 				 struct tc_block_offload *f)
3849 {
3850 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3851 		return -EOPNOTSUPP;
3852 
3853 	switch (f->command) {
3854 	case TC_BLOCK_BIND:
3855 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3856 				priv, priv, f->extack);
3857 	case TC_BLOCK_UNBIND:
3858 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3859 		return 0;
3860 	default:
3861 		return -EOPNOTSUPP;
3862 	}
3863 }
3864 
3865 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3866 			   void *type_data)
3867 {
3868 	struct stmmac_priv *priv = netdev_priv(ndev);
3869 
3870 	switch (type) {
3871 	case TC_SETUP_BLOCK:
3872 		return stmmac_setup_tc_block(priv, type_data);
3873 	case TC_SETUP_QDISC_CBS:
3874 		return stmmac_tc_setup_cbs(priv, priv, type_data);
3875 	default:
3876 		return -EOPNOTSUPP;
3877 	}
3878 }
3879 
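/* Set a new MAC address: update the netdev and program the address into
 * the MAC unicast filter (register index 0).
 */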
3880 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3881 {
3882 	struct stmmac_priv *priv = netdev_priv(ndev);
3883 	int ret = 0;
3884 
3885 	ret = eth_mac_addr(ndev, addr);
3886 	if (ret)
3887 		return ret;
3888 
3889 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3890 
3891 	return ret;
3892 }
3893 
3894 #ifdef CONFIG_DEBUG_FS
3895 static struct dentry *stmmac_fs_dir;
3896 
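/* Dump a descriptor ring (basic or extended format) to a debugfs
 * seq_file, one descriptor per line.
 */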
3897 static void sysfs_display_ring(void *head, int size, int extend_desc,
3898 			       struct seq_file *seq)
3899 {
3900 	int i;
3901 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3902 	struct dma_desc *p = (struct dma_desc *)head;
3903 
3904 	for (i = 0; i < size; i++) {
3905 		if (extend_desc) {
3906 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3907 				   i, (unsigned int)virt_to_phys(ep),
3908 				   le32_to_cpu(ep->basic.des0),
3909 				   le32_to_cpu(ep->basic.des1),
3910 				   le32_to_cpu(ep->basic.des2),
3911 				   le32_to_cpu(ep->basic.des3));
3912 			ep++;
3913 		} else {
3914 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3915 				   i, (unsigned int)virt_to_phys(p),
3916 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3917 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3918 			p++;
3919 		}
3920 		seq_printf(seq, "\n");
3921 	}
3922 }
3923 
3924 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3925 {
3926 	struct net_device *dev = seq->private;
3927 	struct stmmac_priv *priv = netdev_priv(dev);
3928 	u32 rx_count = priv->plat->rx_queues_to_use;
3929 	u32 tx_count = priv->plat->tx_queues_to_use;
3930 	u32 queue;
3931 
3932 	if ((dev->flags & IFF_UP) == 0)
3933 		return 0;
3934 
3935 	for (queue = 0; queue < rx_count; queue++) {
3936 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3937 
3938 		seq_printf(seq, "RX Queue %d:\n", queue);
3939 
3940 		if (priv->extend_desc) {
3941 			seq_printf(seq, "Extended descriptor ring:\n");
3942 			sysfs_display_ring((void *)rx_q->dma_erx,
3943 					   DMA_RX_SIZE, 1, seq);
3944 		} else {
3945 			seq_printf(seq, "Descriptor ring:\n");
3946 			sysfs_display_ring((void *)rx_q->dma_rx,
3947 					   DMA_RX_SIZE, 0, seq);
3948 		}
3949 	}
3950 
3951 	for (queue = 0; queue < tx_count; queue++) {
3952 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3953 
3954 		seq_printf(seq, "TX Queue %d:\n", queue);
3955 
3956 		if (priv->extend_desc) {
3957 			seq_printf(seq, "Extended descriptor ring:\n");
3958 			sysfs_display_ring((void *)tx_q->dma_etx,
3959 					   DMA_TX_SIZE, 1, seq);
3960 		} else {
3961 			seq_printf(seq, "Descriptor ring:\n");
3962 			sysfs_display_ring((void *)tx_q->dma_tx,
3963 					   DMA_TX_SIZE, 0, seq);
3964 		}
3965 	}
3966 
3967 	return 0;
3968 }
3969 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
3970 
3971 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3972 {
3973 	struct net_device *dev = seq->private;
3974 	struct stmmac_priv *priv = netdev_priv(dev);
3975 
3976 	if (!priv->hw_cap_support) {
3977 		seq_printf(seq, "DMA HW features not supported\n");
3978 		return 0;
3979 	}
3980 
3981 	seq_printf(seq, "==============================\n");
3982 	seq_printf(seq, "\tDMA HW features\n");
3983 	seq_printf(seq, "==============================\n");
3984 
3985 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3986 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3987 	seq_printf(seq, "\t1000 Mbps: %s\n",
3988 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3989 	seq_printf(seq, "\tHalf duplex: %s\n",
3990 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3991 	seq_printf(seq, "\tHash Filter: %s\n",
3992 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3993 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3994 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3995 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3996 		   (priv->dma_cap.pcs) ? "Y" : "N");
3997 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3998 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3999 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4000 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4001 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4002 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4003 	seq_printf(seq, "\tRMON module: %s\n",
4004 		   (priv->dma_cap.rmon) ? "Y" : "N");
4005 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4006 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4007 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4008 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
4009 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4010 		   (priv->dma_cap.eee) ? "Y" : "N");
4011 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4012 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4013 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4014 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4015 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4016 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4017 	} else {
4018 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4019 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4020 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4021 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4022 	}
4023 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4024 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4025 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4026 		   priv->dma_cap.number_rx_channel);
4027 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4028 		   priv->dma_cap.number_tx_channel);
4029 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4030 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4031 
4032 	return 0;
4033 }
4034 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4035 
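/* Create the per-device debugfs entries "descriptors_status" and
 * "dma_cap"; assuming STMMAC_RESOURCE_NAME is "stmmaceth", these
 * typically end up under /sys/kernel/debug/stmmaceth/<ifname>/.
 */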
4036 static int stmmac_init_fs(struct net_device *dev)
4037 {
4038 	struct stmmac_priv *priv = netdev_priv(dev);
4039 
4040 	/* Create per netdev entries */
4041 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4042 
4043 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4044 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4045 
4046 		return -ENOMEM;
4047 	}
4048 
4049 	/* Entry to report DMA RX/TX rings */
4050 	priv->dbgfs_rings_status =
4051 		debugfs_create_file("descriptors_status", 0444,
4052 				    priv->dbgfs_dir, dev,
4053 				    &stmmac_rings_status_fops);
4054 
4055 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4056 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4057 		debugfs_remove_recursive(priv->dbgfs_dir);
4058 
4059 		return -ENOMEM;
4060 	}
4061 
4062 	/* Entry to report the DMA HW features */
4063 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4064 						  priv->dbgfs_dir,
4065 						  dev, &stmmac_dma_cap_fops);
4066 
4067 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4068 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4069 		debugfs_remove_recursive(priv->dbgfs_dir);
4070 
4071 		return -ENOMEM;
4072 	}
4073 
4074 	return 0;
4075 }
4076 
4077 static void stmmac_exit_fs(struct net_device *dev)
4078 {
4079 	struct stmmac_priv *priv = netdev_priv(dev);
4080 
4081 	debugfs_remove_recursive(priv->dbgfs_dir);
4082 }
4083 #endif /* CONFIG_DEBUG_FS */
4084 
4085 static const struct net_device_ops stmmac_netdev_ops = {
4086 	.ndo_open = stmmac_open,
4087 	.ndo_start_xmit = stmmac_xmit,
4088 	.ndo_stop = stmmac_release,
4089 	.ndo_change_mtu = stmmac_change_mtu,
4090 	.ndo_fix_features = stmmac_fix_features,
4091 	.ndo_set_features = stmmac_set_features,
4092 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4093 	.ndo_tx_timeout = stmmac_tx_timeout,
4094 	.ndo_do_ioctl = stmmac_ioctl,
4095 	.ndo_setup_tc = stmmac_setup_tc,
4096 #ifdef CONFIG_NET_POLL_CONTROLLER
4097 	.ndo_poll_controller = stmmac_poll_controller,
4098 #endif
4099 	.ndo_set_mac_address = stmmac_set_mac_address,
4100 };
4101 
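/* Reset the device from process context: when a reset has been requested
 * (e.g. after a TX timeout or a fatal error), close and re-open the
 * netdev under the rtnl lock.
 */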
4102 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4103 {
4104 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4105 		return;
4106 	if (test_bit(STMMAC_DOWN, &priv->state))
4107 		return;
4108 
4109 	netdev_err(priv->dev, "Reset adapter.\n");
4110 
4111 	rtnl_lock();
4112 	netif_trans_update(priv->dev);
4113 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4114 		usleep_range(1000, 2000);
4115 
4116 	set_bit(STMMAC_DOWN, &priv->state);
4117 	dev_close(priv->dev);
4118 	dev_open(priv->dev, NULL);
4119 	clear_bit(STMMAC_DOWN, &priv->state);
4120 	clear_bit(STMMAC_RESETING, &priv->state);
4121 	rtnl_unlock();
4122 }
4123 
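/* Deferred work handler: runs the reset subtask and clears the
 * service-scheduled flag.
 */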
4124 static void stmmac_service_task(struct work_struct *work)
4125 {
4126 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4127 			service_task);
4128 
4129 	stmmac_reset_subtask(priv);
4130 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4131 }
4132 
4133 /**
4134  *  stmmac_hw_init - Init the MAC device
4135  *  @priv: driver private structure
4136  *  Description: this function is to configure the MAC device according to
4137  *  some platform parameters or the HW capability register. It prepares the
4138  *  driver to use either ring or chain modes and to setup either enhanced or
4139  *  normal descriptors.
4140  */
4141 static int stmmac_hw_init(struct stmmac_priv *priv)
4142 {
4143 	int ret;
4144 
4145 	/* dwmac-sun8i only works in chain mode */
4146 	if (priv->plat->has_sun8i)
4147 		chain_mode = 1;
4148 	priv->chain_mode = chain_mode;
4149 
4150 	/* Initialize HW Interface */
4151 	ret = stmmac_hwif_init(priv);
4152 	if (ret)
4153 		return ret;
4154 
4155 	/* Get the HW capability (on GMAC cores newer than 3.50a) */
4156 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4157 	if (priv->hw_cap_support) {
4158 		dev_info(priv->device, "DMA HW capability register supported\n");
4159 
4160 		/* Some gmac/dma configuration fields passed through the
4161 		 * platform data (e.g. enh_desc, tx_coe) can be overridden
4162 		 * with the values from the HW capability register
4163 		 * (if supported).
4164 		 */
4165 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4166 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4167 		priv->hw->pmt = priv->plat->pmt;
4168 
4169 		/* TXCOE doesn't work in thresh DMA mode */
4170 		if (priv->plat->force_thresh_dma_mode)
4171 			priv->plat->tx_coe = 0;
4172 		else
4173 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4174 
4175 		/* In case of GMAC4 rx_coe is from HW cap register. */
4176 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4177 
4178 		if (priv->dma_cap.rx_coe_type2)
4179 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4180 		else if (priv->dma_cap.rx_coe_type1)
4181 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4182 
4183 	} else {
4184 		dev_info(priv->device, "No HW DMA feature register supported\n");
4185 	}
4186 
4187 	if (priv->plat->rx_coe) {
4188 		priv->hw->rx_csum = priv->plat->rx_coe;
4189 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4190 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4191 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4192 	}
4193 	if (priv->plat->tx_coe)
4194 		dev_info(priv->device, "TX Checksum insertion supported\n");
4195 
4196 	if (priv->plat->pmt) {
4197 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4198 		device_set_wakeup_capable(priv->device, 1);
4199 	}
4200 
4201 	if (priv->dma_cap.tsoen)
4202 		dev_info(priv->device, "TSO supported\n");
4203 
4204 	/* Run HW quirks, if any */
4205 	if (priv->hwif_quirks) {
4206 		ret = priv->hwif_quirks(priv);
4207 		if (ret)
4208 			return ret;
4209 	}
4210 
4211 	/* The Rx Watchdog is available in cores newer than 3.40.
4212 	 * In some cases, for example on buggy HW, this feature
4213 	 * has to be disabled; this can be done by setting the
4214 	 * riwt_off field in the platform data.
4215 	 */
4216 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4217 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4218 		priv->use_riwt = 1;
4219 		dev_info(priv->device,
4220 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4221 	}
4222 
4223 	return 0;
4224 }
4225 
4226 /**
4227  * stmmac_dvr_probe
4228  * @device: device pointer
4229  * @plat_dat: platform data pointer
4230  * @res: stmmac resource pointer
4231  * Description: this is the main probe function used to
4232  * call alloc_etherdev and allocate the private structure.
4233  * Return:
4234  * returns 0 on success, otherwise errno.
4235  */
4236 int stmmac_dvr_probe(struct device *device,
4237 		     struct plat_stmmacenet_data *plat_dat,
4238 		     struct stmmac_resources *res)
4239 {
4240 	struct net_device *ndev = NULL;
4241 	struct stmmac_priv *priv;
4242 	u32 queue, maxq;
4243 	int ret = 0;
4244 
4245 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4246 				  MTL_MAX_TX_QUEUES,
4247 				  MTL_MAX_RX_QUEUES);
4248 	if (!ndev)
4249 		return -ENOMEM;
4250 
4251 	SET_NETDEV_DEV(ndev, device);
4252 
4253 	priv = netdev_priv(ndev);
4254 	priv->device = device;
4255 	priv->dev = ndev;
4256 
4257 	stmmac_set_ethtool_ops(ndev);
4258 	priv->pause = pause;
4259 	priv->plat = plat_dat;
4260 	priv->ioaddr = res->addr;
4261 	priv->dev->base_addr = (unsigned long)res->addr;
4262 
4263 	priv->dev->irq = res->irq;
4264 	priv->wol_irq = res->wol_irq;
4265 	priv->lpi_irq = res->lpi_irq;
4266 
4267 	if (res->mac)
4268 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4269 
4270 	dev_set_drvdata(device, priv->dev);
4271 
4272 	/* Verify driver arguments */
4273 	stmmac_verify_args();
4274 
4275 	/* Allocate workqueue */
4276 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4277 	if (!priv->wq) {
4278 		dev_err(priv->device, "failed to create workqueue\n");
4279 		ret = -ENOMEM;
4280 		goto error_wq;
4281 	}
4282 
4283 	INIT_WORK(&priv->service_task, stmmac_service_task);
4284 
4285 	/* Override with kernel parameters if supplied. XXX CRS XXX
4286 	 * this needs to support multiple instances.
4287 	 */
4288 	if ((phyaddr >= 0) && (phyaddr <= 31))
4289 		priv->plat->phy_addr = phyaddr;
4290 
4291 	if (priv->plat->stmmac_rst) {
4292 		ret = reset_control_assert(priv->plat->stmmac_rst);
4293 		reset_control_deassert(priv->plat->stmmac_rst);
4294 		/* Some reset controllers have only reset callback instead of
4295 		 * assert + deassert callbacks pair.
4296 		 */
4297 		if (ret == -ENOTSUPP)
4298 			reset_control_reset(priv->plat->stmmac_rst);
4299 	}
4300 
4301 	/* Init MAC and get the capabilities */
4302 	ret = stmmac_hw_init(priv);
4303 	if (ret)
4304 		goto error_hw_init;
4305 
4306 	/* Configure real RX and TX queues */
4307 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4308 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4309 
4310 	ndev->netdev_ops = &stmmac_netdev_ops;
4311 
4312 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4313 			    NETIF_F_RXCSUM;
4314 
4315 	ret = stmmac_tc_init(priv, priv);
4316 	if (!ret) {
4317 		ndev->hw_features |= NETIF_F_HW_TC;
4318 	}
4319 
4320 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4321 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4322 		priv->tso = true;
4323 		dev_info(priv->device, "TSO feature enabled\n");
4324 	}
4325 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4326 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4327 #ifdef STMMAC_VLAN_TAG_USED
4328 	/* Both mac100 and gmac support receive VLAN tag detection */
4329 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4330 #endif
4331 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4332 
4333 	/* MTU range: 46 - hw-specific max */
4334 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4335 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4336 		ndev->max_mtu = JUMBO_LEN;
4337 	else if (priv->plat->has_xgmac)
4338 		ndev->max_mtu = XGMAC_JUMBO_LEN;
4339 	else
4340 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4341 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4342 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4343 	 */
4344 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4345 	    (priv->plat->maxmtu >= ndev->min_mtu))
4346 		ndev->max_mtu = priv->plat->maxmtu;
4347 	else if (priv->plat->maxmtu < ndev->min_mtu)
4348 		dev_warn(priv->device,
4349 			 "%s: warning: maxmtu having invalid value (%d)\n",
4350 			 __func__, priv->plat->maxmtu);
4351 
4352 	if (flow_ctrl)
4353 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4354 
4355 	/* Setup channels NAPI */
4356 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4357 
4358 	for (queue = 0; queue < maxq; queue++) {
4359 		struct stmmac_channel *ch = &priv->channel[queue];
4360 
4361 		ch->priv_data = priv;
4362 		ch->index = queue;
4363 
4364 		if (queue < priv->plat->rx_queues_to_use) {
4365 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4366 				       NAPI_POLL_WEIGHT);
4367 		}
4368 		if (queue < priv->plat->tx_queues_to_use) {
4369 			netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
4370 				       NAPI_POLL_WEIGHT);
4371 		}
4372 	}
4373 
4374 	mutex_init(&priv->lock);
4375 
4376 	/* If a specific clk_csr value is passed from the platform
4377 	 * this means that the CSR Clock Range selection cannot be
4378 	 * changed at run-time and it is fixed. Otherwise, the driver
4379 	 * tries to set the MDC clock dynamically according to the
4380 	 * actual csr clock input.
4381 	 */
4382 	if (!priv->plat->clk_csr)
4383 		stmmac_clk_csr_set(priv);
4384 	else
4385 		priv->clk_csr = priv->plat->clk_csr;
4386 
4387 	stmmac_check_pcs_mode(priv);
4388 
4389 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4390 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4391 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4392 		/* MDIO bus Registration */
4393 		ret = stmmac_mdio_register(ndev);
4394 		if (ret < 0) {
4395 			dev_err(priv->device,
4396 				"%s: MDIO bus (id: %d) registration failed",
4397 				__func__, priv->plat->bus_id);
4398 			goto error_mdio_register;
4399 		}
4400 	}
4401 
4402 	ret = register_netdev(ndev);
4403 	if (ret) {
4404 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4405 			__func__, ret);
4406 		goto error_netdev_register;
4407 	}
4408 
4409 #ifdef CONFIG_DEBUG_FS
4410 	ret = stmmac_init_fs(ndev);
4411 	if (ret < 0)
4412 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4413 			    __func__);
4414 #endif
4415 
4416 	return ret;
4417 
4418 error_netdev_register:
4419 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4420 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4421 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4422 		stmmac_mdio_unregister(ndev);
4423 error_mdio_register:
4424 	for (queue = 0; queue < maxq; queue++) {
4425 		struct stmmac_channel *ch = &priv->channel[queue];
4426 
4427 		if (queue < priv->plat->rx_queues_to_use)
4428 			netif_napi_del(&ch->rx_napi);
4429 		if (queue < priv->plat->tx_queues_to_use)
4430 			netif_napi_del(&ch->tx_napi);
4431 	}
4432 error_hw_init:
4433 	destroy_workqueue(priv->wq);
4434 error_wq:
4435 	free_netdev(ndev);
4436 
4437 	return ret;
4438 }
4439 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4440 
4441 /**
4442  * stmmac_dvr_remove
4443  * @dev: device pointer
4444  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4445  * changes the link status and releases the DMA descriptor rings.
4446  */
4447 int stmmac_dvr_remove(struct device *dev)
4448 {
4449 	struct net_device *ndev = dev_get_drvdata(dev);
4450 	struct stmmac_priv *priv = netdev_priv(ndev);
4451 
4452 	netdev_info(priv->dev, "%s: removing driver", __func__);
4453 
4454 #ifdef CONFIG_DEBUG_FS
4455 	stmmac_exit_fs(ndev);
4456 #endif
4457 	stmmac_stop_all_dma(priv);
4458 
4459 	stmmac_mac_set(priv, priv->ioaddr, false);
4460 	netif_carrier_off(ndev);
4461 	unregister_netdev(ndev);
4462 	if (priv->plat->stmmac_rst)
4463 		reset_control_assert(priv->plat->stmmac_rst);
4464 	clk_disable_unprepare(priv->plat->pclk);
4465 	clk_disable_unprepare(priv->plat->stmmac_clk);
4466 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4467 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4468 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4469 		stmmac_mdio_unregister(ndev);
4470 	destroy_workqueue(priv->wq);
4471 	mutex_destroy(&priv->lock);
4472 	free_netdev(ndev);
4473 
4474 	return 0;
4475 }
4476 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4477 
4478 /**
4479  * stmmac_suspend - suspend callback
4480  * @dev: device pointer
4481  * Description: this is the function to suspend the device and it is called
4482  * by the platform driver to stop the network queues, release the resources,
4483  * program the PMT register (for WoL) and clean up the driver resources.
4484  */
4485 int stmmac_suspend(struct device *dev)
4486 {
4487 	struct net_device *ndev = dev_get_drvdata(dev);
4488 	struct stmmac_priv *priv = netdev_priv(ndev);
4489 
4490 	if (!ndev || !netif_running(ndev))
4491 		return 0;
4492 
4493 	if (ndev->phydev)
4494 		phy_stop(ndev->phydev);
4495 
4496 	mutex_lock(&priv->lock);
4497 
4498 	netif_device_detach(ndev);
4499 	stmmac_stop_all_queues(priv);
4500 
4501 	stmmac_disable_all_queues(priv);
4502 
4503 	/* Stop TX/RX DMA */
4504 	stmmac_stop_all_dma(priv);
4505 
4506 	/* Enable Power down mode by programming the PMT regs */
4507 	if (device_may_wakeup(priv->device)) {
4508 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4509 		priv->irq_wake = 1;
4510 	} else {
4511 		stmmac_mac_set(priv, priv->ioaddr, false);
4512 		pinctrl_pm_select_sleep_state(priv->device);
4513 		/* Disable the clocks; they are re-enabled on resume */
4514 		clk_disable(priv->plat->pclk);
4515 		clk_disable(priv->plat->stmmac_clk);
4516 	}
4517 	mutex_unlock(&priv->lock);
4518 
4519 	priv->oldlink = false;
4520 	priv->speed = SPEED_UNKNOWN;
4521 	priv->oldduplex = DUPLEX_UNKNOWN;
4522 	return 0;
4523 }
4524 EXPORT_SYMBOL_GPL(stmmac_suspend);
4525 
4526 /**
4527  * stmmac_reset_queues_param - reset queue parameters
4528  * @priv: driver private structure
4529  */
4530 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4531 {
4532 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4533 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4534 	u32 queue;
4535 
4536 	for (queue = 0; queue < rx_cnt; queue++) {
4537 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4538 
4539 		rx_q->cur_rx = 0;
4540 		rx_q->dirty_rx = 0;
4541 	}
4542 
4543 	for (queue = 0; queue < tx_cnt; queue++) {
4544 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4545 
4546 		tx_q->cur_tx = 0;
4547 		tx_q->dirty_tx = 0;
4548 		tx_q->mss = 0;
4549 	}
4550 }
4551 
4552 /**
4553  * stmmac_resume - resume callback
4554  * @dev: device pointer
4555  * Description: on resume this function is invoked to set up the DMA and CORE
4556  * in a usable state.
4557  */
4558 int stmmac_resume(struct device *dev)
4559 {
4560 	struct net_device *ndev = dev_get_drvdata(dev);
4561 	struct stmmac_priv *priv = netdev_priv(ndev);
4562 
4563 	if (!netif_running(ndev))
4564 		return 0;
4565 
4566 	/* The Power Down bit in the PMT register is cleared
4567 	 * automatically as soon as a magic packet or a Wake-up frame
4568 	 * is received. Anyway, it's better to clear this bit manually
4569 	 * because it can cause problems while resuming
4570 	 * from other devices (e.g. serial console).
4571 	 */
4572 	if (device_may_wakeup(priv->device)) {
4573 		mutex_lock(&priv->lock);
4574 		stmmac_pmt(priv, priv->hw, 0);
4575 		mutex_unlock(&priv->lock);
4576 		priv->irq_wake = 0;
4577 	} else {
4578 		pinctrl_pm_select_default_state(priv->device);
4579 		/* enable the clk previously disabled */
4580 		clk_enable(priv->plat->stmmac_clk);
4581 		clk_enable(priv->plat->pclk);
4582 		/* reset the phy so that it's ready */
4583 		if (priv->mii)
4584 			stmmac_mdio_reset(priv->mii);
4585 	}
4586 
4587 	netif_device_attach(ndev);
4588 
4589 	mutex_lock(&priv->lock);
4590 
4591 	stmmac_reset_queues_param(priv);
4592 
4593 	stmmac_clear_descriptors(priv);
4594 
4595 	stmmac_hw_setup(ndev, false);
4596 	stmmac_init_tx_coalesce(priv);
4597 	stmmac_set_rx_mode(ndev);
4598 
4599 	stmmac_enable_all_queues(priv);
4600 
4601 	stmmac_start_all_queues(priv);
4602 
4603 	mutex_unlock(&priv->lock);
4604 
4605 	if (ndev->phydev)
4606 		phy_start(ndev->phydev);
4607 
4608 	return 0;
4609 }
4610 EXPORT_SYMBOL_GPL(stmmac_resume);
4611 
4612 #ifndef MODULE
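/* Parse the "stmmaceth=" kernel command line option when the driver is
 * built in; e.g. stmmaceth=debug:16,phyaddr:1,buf_sz:4096 sets the
 * corresponding module parameters at boot.
 */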
4613 static int __init stmmac_cmdline_opt(char *str)
4614 {
4615 	char *opt;
4616 
4617 	if (!str || !*str)
4618 		return -EINVAL;
4619 	while ((opt = strsep(&str, ",")) != NULL) {
4620 		if (!strncmp(opt, "debug:", 6)) {
4621 			if (kstrtoint(opt + 6, 0, &debug))
4622 				goto err;
4623 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4624 			if (kstrtoint(opt + 8, 0, &phyaddr))
4625 				goto err;
4626 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4627 			if (kstrtoint(opt + 7, 0, &buf_sz))
4628 				goto err;
4629 		} else if (!strncmp(opt, "tc:", 3)) {
4630 			if (kstrtoint(opt + 3, 0, &tc))
4631 				goto err;
4632 		} else if (!strncmp(opt, "watchdog:", 9)) {
4633 			if (kstrtoint(opt + 9, 0, &watchdog))
4634 				goto err;
4635 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4636 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4637 				goto err;
4638 		} else if (!strncmp(opt, "pause:", 6)) {
4639 			if (kstrtoint(opt + 6, 0, &pause))
4640 				goto err;
4641 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4642 			if (kstrtoint(opt + 10, 0, &eee_timer))
4643 				goto err;
4644 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4645 			if (kstrtoint(opt + 11, 0, &chain_mode))
4646 				goto err;
4647 		}
4648 	}
4649 	return 0;
4650 
4651 err:
4652 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4653 	return -EINVAL;
4654 }
4655 
4656 __setup("stmmaceth=", stmmac_cmdline_opt);
4657 #endif /* MODULE */
4658 
4659 static int __init stmmac_init(void)
4660 {
4661 #ifdef CONFIG_DEBUG_FS
4662 	/* Create debugfs main directory if it doesn't exist yet */
4663 	if (!stmmac_fs_dir) {
4664 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4665 
4666 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4667 			pr_err("ERROR %s, debugfs create directory failed\n",
4668 			       STMMAC_RESOURCE_NAME);
4669 
4670 			return -ENOMEM;
4671 		}
4672 	}
4673 #endif
4674 
4675 	return 0;
4676 }
4677 
4678 static void __exit stmmac_exit(void)
4679 {
4680 #ifdef CONFIG_DEBUG_FS
4681 	debugfs_remove_recursive(stmmac_fs_dir);
4682 #endif
4683 }
4684 
4685 module_init(stmmac_init)
4686 module_exit(stmmac_exit)
4687 
4688 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4689 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4690 MODULE_LICENSE("GPL");
4691