xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 28efb0046512e8a13ed9f9bdf0d68d10bbfbe9cf)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 
54 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
55 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
56 
57 /* Module parameters */
58 #define TX_TIMEO	5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62 
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66 
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70 
71 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
73 
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77 
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81 
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86 
87 #define	DEFAULT_BUFSIZE	1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91 
92 #define	STMMAC_RX_COPYBREAK	256
93 
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97 
98 #define STMMAC_DEFAULT_LPI_TIMER	1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103 
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but allow user to force to use the chain instead of the ring
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110 
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112 
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117 
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119 
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and set a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127 	if (unlikely(watchdog < 0))
128 		watchdog = TX_TIMEO;
129 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 		buf_sz = DEFAULT_BUFSIZE;
131 	if (unlikely(flow_ctrl > 1))
132 		flow_ctrl = FLOW_AUTO;
133 	else if (likely(flow_ctrl < 0))
134 		flow_ctrl = FLOW_OFF;
135 	if (unlikely((pause < 0) || (pause > 0xffff)))
136 		pause = PAUSE_TIME;
137 	if (eee_timer < 0)
138 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140 
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148 	u32 queue;
149 
150 	for (queue = 0; queue < rx_queues_cnt; queue++) {
151 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152 
153 		napi_disable(&rx_q->napi);
154 	}
155 }
156 
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 	u32 queue;
165 
166 	for (queue = 0; queue < rx_queues_cnt; queue++) {
167 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168 
169 		napi_enable(&rx_q->napi);
170 	}
171 }
172 
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180 	u32 queue;
181 
182 	for (queue = 0; queue < tx_queues_cnt; queue++)
183 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185 
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *	If a specific clk_csr value is passed from the platform
206  *	this means that the CSR Clock Range selection cannot be
207  *	changed at run-time and it is fixed (as reported in the driver
208  *	documentation). Viceversa the driver will try to set the MDC
209  *	clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213 	u32 clk_rate;
214 
215 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216 
217 	/* Platform provided default clk_csr would be assumed valid
218 	 * for all other cases except for the below mentioned ones.
219 	 * For values higher than the IEEE 802.3 specified frequency
220 	 * we can not estimate the proper divider as it is not known
221 	 * the frequency of clk_csr_i. So we do not change the default
222 	 * divider.
223 	 */
224 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225 		if (clk_rate < CSR_F_35M)
226 			priv->clk_csr = STMMAC_CSR_20_35M;
227 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228 			priv->clk_csr = STMMAC_CSR_35_60M;
229 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230 			priv->clk_csr = STMMAC_CSR_60_100M;
231 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232 			priv->clk_csr = STMMAC_CSR_100_150M;
233 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234 			priv->clk_csr = STMMAC_CSR_150_250M;
235 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236 			priv->clk_csr = STMMAC_CSR_250_300M;
237 	}
238 
239 	if (priv->plat->has_sun8i) {
240 		if (clk_rate > 160000000)
241 			priv->clk_csr = 0x03;
242 		else if (clk_rate > 80000000)
243 			priv->clk_csr = 0x02;
244 		else if (clk_rate > 40000000)
245 			priv->clk_csr = 0x01;
246 		else
247 			priv->clk_csr = 0;
248 	}
249 }
250 
251 static void print_pkt(unsigned char *buf, int len)
252 {
253 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
255 }
256 
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
258 {
259 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260 	u32 avail;
261 
262 	if (tx_q->dirty_tx > tx_q->cur_tx)
263 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264 	else
265 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
266 
267 	return avail;
268 }
269 
270 /**
271  * stmmac_rx_dirty - Get RX queue dirty
272  * @priv: driver private structure
273  * @queue: RX queue index
274  */
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
276 {
277 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278 	u32 dirty;
279 
280 	if (rx_q->dirty_rx <= rx_q->cur_rx)
281 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
282 	else
283 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
284 
285 	return dirty;
286 }
287 
288 /**
289  * stmmac_hw_fix_mac_speed - callback for speed selection
290  * @priv: driver private structure
291  * Description: on some platforms (e.g. ST), some HW system configuration
292  * registers have to be set according to the link speed negotiated.
293  */
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
295 {
296 	struct net_device *ndev = priv->dev;
297 	struct phy_device *phydev = ndev->phydev;
298 
299 	if (likely(priv->plat->fix_mac_speed))
300 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
301 }
302 
303 /**
304  * stmmac_enable_eee_mode - check and enter in LPI mode
305  * @priv: driver private structure
306  * Description: this function is to verify and enter in LPI mode in case of
307  * EEE.
308  */
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
310 {
311 	u32 tx_cnt = priv->plat->tx_queues_to_use;
312 	u32 queue;
313 
314 	/* check if all TX queues have the work finished */
315 	for (queue = 0; queue < tx_cnt; queue++) {
316 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
317 
318 		if (tx_q->dirty_tx != tx_q->cur_tx)
319 			return; /* still unfinished work */
320 	}
321 
322 	/* Check and enter in LPI mode */
323 	if (!priv->tx_path_in_lpi_mode)
324 		priv->hw->mac->set_eee_mode(priv->hw,
325 					    priv->plat->en_tx_lpi_clockgating);
326 }
327 
328 /**
329  * stmmac_disable_eee_mode - disable and exit from LPI mode
330  * @priv: driver private structure
331  * Description: this function is to exit and disable EEE in case of
332  * LPI state is true. This is called by the xmit.
333  */
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
335 {
336 	priv->hw->mac->reset_eee_mode(priv->hw);
337 	del_timer_sync(&priv->eee_ctrl_timer);
338 	priv->tx_path_in_lpi_mode = false;
339 }
340 
341 /**
342  * stmmac_eee_ctrl_timer - EEE TX SW timer.
343  * @arg : data hook
344  * Description:
345  *  if there is no data transfer and if we are not in LPI state,
346  *  then MAC Transmitter can be moved to LPI state.
347  */
348 static void stmmac_eee_ctrl_timer(unsigned long arg)
349 {
350 	struct stmmac_priv *priv = (struct stmmac_priv *)arg;
351 
352 	stmmac_enable_eee_mode(priv);
353 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
354 }
355 
356 /**
357  * stmmac_eee_init - init EEE
358  * @priv: driver private structure
359  * Description:
360  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
361  *  can also manage EEE, this function enable the LPI state and start related
362  *  timer.
363  */
364 bool stmmac_eee_init(struct stmmac_priv *priv)
365 {
366 	struct net_device *ndev = priv->dev;
367 	unsigned long flags;
368 	bool ret = false;
369 
370 	/* Using PCS we cannot dial with the phy registers at this stage
371 	 * so we do not support extra feature like EEE.
372 	 */
373 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
374 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
375 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
376 		goto out;
377 
378 	/* MAC core supports the EEE feature. */
379 	if (priv->dma_cap.eee) {
380 		int tx_lpi_timer = priv->tx_lpi_timer;
381 
382 		/* Check if the PHY supports EEE */
383 		if (phy_init_eee(ndev->phydev, 1)) {
384 			/* To manage at run-time if the EEE cannot be supported
385 			 * anymore (for example because the lp caps have been
386 			 * changed).
387 			 * In that case the driver disable own timers.
388 			 */
389 			spin_lock_irqsave(&priv->lock, flags);
390 			if (priv->eee_active) {
391 				netdev_dbg(priv->dev, "disable EEE\n");
392 				del_timer_sync(&priv->eee_ctrl_timer);
393 				priv->hw->mac->set_eee_timer(priv->hw, 0,
394 							     tx_lpi_timer);
395 			}
396 			priv->eee_active = 0;
397 			spin_unlock_irqrestore(&priv->lock, flags);
398 			goto out;
399 		}
400 		/* Activate the EEE and start timers */
401 		spin_lock_irqsave(&priv->lock, flags);
402 		if (!priv->eee_active) {
403 			priv->eee_active = 1;
404 			setup_timer(&priv->eee_ctrl_timer,
405 				    stmmac_eee_ctrl_timer,
406 				    (unsigned long)priv);
407 			mod_timer(&priv->eee_ctrl_timer,
408 				  STMMAC_LPI_T(eee_timer));
409 
410 			priv->hw->mac->set_eee_timer(priv->hw,
411 						     STMMAC_DEFAULT_LIT_LS,
412 						     tx_lpi_timer);
413 		}
414 		/* Set HW EEE according to the speed */
415 		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
416 
417 		ret = true;
418 		spin_unlock_irqrestore(&priv->lock, flags);
419 
420 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
421 	}
422 out:
423 	return ret;
424 }
425 
426 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
427  * @priv: driver private structure
428  * @p : descriptor pointer
429  * @skb : the socket buffer
430  * Description :
431  * This function will read timestamp from the descriptor & pass it to stack.
432  * and also perform some sanity checks.
433  */
434 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
435 				   struct dma_desc *p, struct sk_buff *skb)
436 {
437 	struct skb_shared_hwtstamps shhwtstamp;
438 	u64 ns;
439 
440 	if (!priv->hwts_tx_en)
441 		return;
442 
443 	/* exit if skb doesn't support hw tstamp */
444 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
445 		return;
446 
447 	/* check tx tstamp status */
448 	if (priv->hw->desc->get_tx_timestamp_status(p)) {
449 		/* get the valid tstamp */
450 		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
451 
452 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
453 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
454 
455 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
456 		/* pass tstamp to stack */
457 		skb_tstamp_tx(skb, &shhwtstamp);
458 	}
459 
460 	return;
461 }
462 
463 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
464  * @priv: driver private structure
465  * @p : descriptor pointer
466  * @np : next descriptor pointer
467  * @skb : the socket buffer
468  * Description :
469  * This function will read received packet's timestamp from the descriptor
470  * and pass it to stack. It also perform some sanity checks.
471  */
472 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473 				   struct dma_desc *np, struct sk_buff *skb)
474 {
475 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
476 	u64 ns;
477 
478 	if (!priv->hwts_rx_en)
479 		return;
480 
481 	/* Check if timestamp is available */
482 	if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
483 		/* For GMAC4, the valid timestamp is from CTX next desc. */
484 		if (priv->plat->has_gmac4)
485 			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
486 		else
487 			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
488 
489 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490 		shhwtstamp = skb_hwtstamps(skb);
491 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
493 	} else  {
494 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
495 	}
496 }
497 
498 /**
499  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
500  *  @dev: device pointer.
501  *  @ifr: An IOCTL specific structure, that can contain a pointer to
502  *  a proprietary structure used to pass information to the driver.
503  *  Description:
504  *  This function configures the MAC to enable/disable both outgoing(TX)
505  *  and incoming(RX) packets time stamping based on user input.
506  *  Return Value:
507  *  0 on success and an appropriate -ve integer on failure.
508  */
509 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
510 {
511 	struct stmmac_priv *priv = netdev_priv(dev);
512 	struct hwtstamp_config config;
513 	struct timespec64 now;
514 	u64 temp = 0;
515 	u32 ptp_v2 = 0;
516 	u32 tstamp_all = 0;
517 	u32 ptp_over_ipv4_udp = 0;
518 	u32 ptp_over_ipv6_udp = 0;
519 	u32 ptp_over_ethernet = 0;
520 	u32 snap_type_sel = 0;
521 	u32 ts_master_en = 0;
522 	u32 ts_event_en = 0;
523 	u32 value = 0;
524 	u32 sec_inc;
525 
526 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
527 		netdev_alert(priv->dev, "No support for HW time stamping\n");
528 		priv->hwts_tx_en = 0;
529 		priv->hwts_rx_en = 0;
530 
531 		return -EOPNOTSUPP;
532 	}
533 
534 	if (copy_from_user(&config, ifr->ifr_data,
535 			   sizeof(struct hwtstamp_config)))
536 		return -EFAULT;
537 
538 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
539 		   __func__, config.flags, config.tx_type, config.rx_filter);
540 
541 	/* reserved for future extensions */
542 	if (config.flags)
543 		return -EINVAL;
544 
545 	if (config.tx_type != HWTSTAMP_TX_OFF &&
546 	    config.tx_type != HWTSTAMP_TX_ON)
547 		return -ERANGE;
548 
549 	if (priv->adv_ts) {
550 		switch (config.rx_filter) {
551 		case HWTSTAMP_FILTER_NONE:
552 			/* time stamp no incoming packet at all */
553 			config.rx_filter = HWTSTAMP_FILTER_NONE;
554 			break;
555 
556 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
557 			/* PTP v1, UDP, any kind of event packet */
558 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
559 			/* take time stamp for all event messages */
560 			if (priv->plat->has_gmac4)
561 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
562 			else
563 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
564 
565 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
566 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
567 			break;
568 
569 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
570 			/* PTP v1, UDP, Sync packet */
571 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
572 			/* take time stamp for SYNC messages only */
573 			ts_event_en = PTP_TCR_TSEVNTENA;
574 
575 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
576 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
577 			break;
578 
579 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
580 			/* PTP v1, UDP, Delay_req packet */
581 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
582 			/* take time stamp for Delay_Req messages only */
583 			ts_master_en = PTP_TCR_TSMSTRENA;
584 			ts_event_en = PTP_TCR_TSEVNTENA;
585 
586 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
587 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
588 			break;
589 
590 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
591 			/* PTP v2, UDP, any kind of event packet */
592 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
593 			ptp_v2 = PTP_TCR_TSVER2ENA;
594 			/* take time stamp for all event messages */
595 			if (priv->plat->has_gmac4)
596 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
597 			else
598 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
599 
600 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
601 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
602 			break;
603 
604 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
605 			/* PTP v2, UDP, Sync packet */
606 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
607 			ptp_v2 = PTP_TCR_TSVER2ENA;
608 			/* take time stamp for SYNC messages only */
609 			ts_event_en = PTP_TCR_TSEVNTENA;
610 
611 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
612 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
613 			break;
614 
615 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
616 			/* PTP v2, UDP, Delay_req packet */
617 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
618 			ptp_v2 = PTP_TCR_TSVER2ENA;
619 			/* take time stamp for Delay_Req messages only */
620 			ts_master_en = PTP_TCR_TSMSTRENA;
621 			ts_event_en = PTP_TCR_TSEVNTENA;
622 
623 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625 			break;
626 
627 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
628 			/* PTP v2/802.AS1 any layer, any kind of event packet */
629 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
630 			ptp_v2 = PTP_TCR_TSVER2ENA;
631 			/* take time stamp for all event messages */
632 			if (priv->plat->has_gmac4)
633 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
634 			else
635 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
636 
637 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
638 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
639 			ptp_over_ethernet = PTP_TCR_TSIPENA;
640 			break;
641 
642 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
643 			/* PTP v2/802.AS1, any layer, Sync packet */
644 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
645 			ptp_v2 = PTP_TCR_TSVER2ENA;
646 			/* take time stamp for SYNC messages only */
647 			ts_event_en = PTP_TCR_TSEVNTENA;
648 
649 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
650 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
651 			ptp_over_ethernet = PTP_TCR_TSIPENA;
652 			break;
653 
654 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
655 			/* PTP v2/802.AS1, any layer, Delay_req packet */
656 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
657 			ptp_v2 = PTP_TCR_TSVER2ENA;
658 			/* take time stamp for Delay_Req messages only */
659 			ts_master_en = PTP_TCR_TSMSTRENA;
660 			ts_event_en = PTP_TCR_TSEVNTENA;
661 
662 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
663 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
664 			ptp_over_ethernet = PTP_TCR_TSIPENA;
665 			break;
666 
667 		case HWTSTAMP_FILTER_NTP_ALL:
668 		case HWTSTAMP_FILTER_ALL:
669 			/* time stamp any incoming packet */
670 			config.rx_filter = HWTSTAMP_FILTER_ALL;
671 			tstamp_all = PTP_TCR_TSENALL;
672 			break;
673 
674 		default:
675 			return -ERANGE;
676 		}
677 	} else {
678 		switch (config.rx_filter) {
679 		case HWTSTAMP_FILTER_NONE:
680 			config.rx_filter = HWTSTAMP_FILTER_NONE;
681 			break;
682 		default:
683 			/* PTP v1, UDP, any kind of event packet */
684 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
685 			break;
686 		}
687 	}
688 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
689 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
690 
691 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
692 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
693 	else {
694 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
695 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
696 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
697 			 ts_master_en | snap_type_sel);
698 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
699 
700 		/* program Sub Second Increment reg */
701 		sec_inc = priv->hw->ptp->config_sub_second_increment(
702 			priv->ptpaddr, priv->plat->clk_ptp_rate,
703 			priv->plat->has_gmac4);
704 		temp = div_u64(1000000000ULL, sec_inc);
705 
706 		/* calculate default added value:
707 		 * formula is :
708 		 * addend = (2^32)/freq_div_ratio;
709 		 * where, freq_div_ratio = 1e9ns/sec_inc
710 		 */
711 		temp = (u64)(temp << 32);
712 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
713 		priv->hw->ptp->config_addend(priv->ptpaddr,
714 					     priv->default_addend);
715 
716 		/* initialize system time */
717 		ktime_get_real_ts64(&now);
718 
719 		/* lower 32 bits of tv_sec are safe until y2106 */
720 		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
721 					    now.tv_nsec);
722 	}
723 
724 	return copy_to_user(ifr->ifr_data, &config,
725 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
726 }
727 
728 /**
729  * stmmac_init_ptp - init PTP
730  * @priv: driver private structure
731  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
732  * This is done by looking at the HW cap. register.
733  * This function also registers the ptp driver.
734  */
735 static int stmmac_init_ptp(struct stmmac_priv *priv)
736 {
737 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
738 		return -EOPNOTSUPP;
739 
740 	priv->adv_ts = 0;
741 	/* Check if adv_ts can be enabled for dwmac 4.x core */
742 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
743 		priv->adv_ts = 1;
744 	/* Dwmac 3.x core with extend_desc can support adv_ts */
745 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
746 		priv->adv_ts = 1;
747 
748 	if (priv->dma_cap.time_stamp)
749 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
750 
751 	if (priv->adv_ts)
752 		netdev_info(priv->dev,
753 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
754 
755 	priv->hw->ptp = &stmmac_ptp;
756 	priv->hwts_tx_en = 0;
757 	priv->hwts_rx_en = 0;
758 
759 	stmmac_ptp_register(priv);
760 
761 	return 0;
762 }
763 
764 static void stmmac_release_ptp(struct stmmac_priv *priv)
765 {
766 	if (priv->plat->clk_ptp_ref)
767 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
768 	stmmac_ptp_unregister(priv);
769 }
770 
771 /**
772  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
773  *  @priv: driver private structure
774  *  Description: It is used for configuring the flow control in all queues
775  */
776 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
777 {
778 	u32 tx_cnt = priv->plat->tx_queues_to_use;
779 
780 	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
781 				 priv->pause, tx_cnt);
782 }
783 
784 /**
785  * stmmac_adjust_link - adjusts the link parameters
786  * @dev: net device structure
787  * Description: this is the helper called by the physical abstraction layer
788  * drivers to communicate the phy link status. According the speed and duplex
789  * this driver can invoke registered glue-logic as well.
790  * It also invoke the eee initialization because it could happen when switch
791  * on different networks (that are eee capable).
792  */
793 static void stmmac_adjust_link(struct net_device *dev)
794 {
795 	struct stmmac_priv *priv = netdev_priv(dev);
796 	struct phy_device *phydev = dev->phydev;
797 	unsigned long flags;
798 	bool new_state = false;
799 
800 	if (!phydev)
801 		return;
802 
803 	spin_lock_irqsave(&priv->lock, flags);
804 
805 	if (phydev->link) {
806 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
807 
808 		/* Now we make sure that we can be in full duplex mode.
809 		 * If not, we operate in half-duplex mode. */
810 		if (phydev->duplex != priv->oldduplex) {
811 			new_state = true;
812 			if (!phydev->duplex)
813 				ctrl &= ~priv->hw->link.duplex;
814 			else
815 				ctrl |= priv->hw->link.duplex;
816 			priv->oldduplex = phydev->duplex;
817 		}
818 		/* Flow Control operation */
819 		if (phydev->pause)
820 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
821 
822 		if (phydev->speed != priv->speed) {
823 			new_state = true;
824 			ctrl &= ~priv->hw->link.speed_mask;
825 			switch (phydev->speed) {
826 			case SPEED_1000:
827 				ctrl |= priv->hw->link.speed1000;
828 				break;
829 			case SPEED_100:
830 				ctrl |= priv->hw->link.speed100;
831 				break;
832 			case SPEED_10:
833 				ctrl |= priv->hw->link.speed10;
834 				break;
835 			default:
836 				netif_warn(priv, link, priv->dev,
837 					   "broken speed: %d\n", phydev->speed);
838 				phydev->speed = SPEED_UNKNOWN;
839 				break;
840 			}
841 			if (phydev->speed != SPEED_UNKNOWN)
842 				stmmac_hw_fix_mac_speed(priv);
843 			priv->speed = phydev->speed;
844 		}
845 
846 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
847 
848 		if (!priv->oldlink) {
849 			new_state = true;
850 			priv->oldlink = true;
851 		}
852 	} else if (priv->oldlink) {
853 		new_state = true;
854 		priv->oldlink = false;
855 		priv->speed = SPEED_UNKNOWN;
856 		priv->oldduplex = DUPLEX_UNKNOWN;
857 	}
858 
859 	if (new_state && netif_msg_link(priv))
860 		phy_print_status(phydev);
861 
862 	spin_unlock_irqrestore(&priv->lock, flags);
863 
864 	if (phydev->is_pseudo_fixed_link)
865 		/* Stop PHY layer to call the hook to adjust the link in case
866 		 * of a switch is attached to the stmmac driver.
867 		 */
868 		phydev->irq = PHY_IGNORE_INTERRUPT;
869 	else
870 		/* At this stage, init the EEE if supported.
871 		 * Never called in case of fixed_link.
872 		 */
873 		priv->eee_enabled = stmmac_eee_init(priv);
874 }
875 
876 /**
877  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PCS.
880  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
881  * configured for the TBI, RTBI, or SGMII PHY interface.
882  */
883 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
884 {
885 	int interface = priv->plat->interface;
886 
887 	if (priv->dma_cap.pcs) {
888 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
889 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
890 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
891 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
892 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
893 			priv->hw->pcs = STMMAC_PCS_RGMII;
894 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
895 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
896 			priv->hw->pcs = STMMAC_PCS_SGMII;
897 		}
898 	}
899 }
900 
901 /**
902  * stmmac_init_phy - PHY initialization
903  * @dev: net device structure
904  * Description: it initializes the driver's PHY state, and attaches the PHY
905  * to the mac driver.
906  *  Return value:
907  *  0 on success
908  */
909 static int stmmac_init_phy(struct net_device *dev)
910 {
911 	struct stmmac_priv *priv = netdev_priv(dev);
912 	struct phy_device *phydev;
913 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
914 	char bus_id[MII_BUS_ID_SIZE];
915 	int interface = priv->plat->interface;
916 	int max_speed = priv->plat->max_speed;
917 	priv->oldlink = false;
918 	priv->speed = SPEED_UNKNOWN;
919 	priv->oldduplex = DUPLEX_UNKNOWN;
920 
921 	if (priv->plat->phy_node) {
922 		phydev = of_phy_connect(dev, priv->plat->phy_node,
923 					&stmmac_adjust_link, 0, interface);
924 	} else {
925 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
926 			 priv->plat->bus_id);
927 
928 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
929 			 priv->plat->phy_addr);
930 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
931 			   phy_id_fmt);
932 
933 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
934 				     interface);
935 	}
936 
937 	if (IS_ERR_OR_NULL(phydev)) {
938 		netdev_err(priv->dev, "Could not attach to PHY\n");
939 		if (!phydev)
940 			return -ENODEV;
941 
942 		return PTR_ERR(phydev);
943 	}
944 
945 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
946 	if ((interface == PHY_INTERFACE_MODE_MII) ||
947 	    (interface == PHY_INTERFACE_MODE_RMII) ||
948 		(max_speed < 1000 && max_speed > 0))
949 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
950 					 SUPPORTED_1000baseT_Full);
951 
952 	/*
953 	 * Broken HW is sometimes missing the pull-up resistor on the
954 	 * MDIO line, which results in reads to non-existent devices returning
955 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
956 	 * device as well.
957 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
958 	 */
959 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
960 		phy_disconnect(phydev);
961 		return -ENODEV;
962 	}
963 
964 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
965 	 * subsequent PHY polling, make sure we force a link transition if
966 	 * we have a UP/DOWN/UP transition
967 	 */
968 	if (phydev->is_pseudo_fixed_link)
969 		phydev->irq = PHY_POLL;
970 
971 	phy_attached_info(phydev);
972 	return 0;
973 }
974 
975 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
976 {
977 	u32 rx_cnt = priv->plat->rx_queues_to_use;
978 	void *head_rx;
979 	u32 queue;
980 
981 	/* Display RX rings */
982 	for (queue = 0; queue < rx_cnt; queue++) {
983 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
984 
985 		pr_info("\tRX Queue %u rings\n", queue);
986 
987 		if (priv->extend_desc)
988 			head_rx = (void *)rx_q->dma_erx;
989 		else
990 			head_rx = (void *)rx_q->dma_rx;
991 
992 		/* Display RX ring */
993 		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
994 	}
995 }
996 
997 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
998 {
999 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1000 	void *head_tx;
1001 	u32 queue;
1002 
1003 	/* Display TX rings */
1004 	for (queue = 0; queue < tx_cnt; queue++) {
1005 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1006 
1007 		pr_info("\tTX Queue %d rings\n", queue);
1008 
1009 		if (priv->extend_desc)
1010 			head_tx = (void *)tx_q->dma_etx;
1011 		else
1012 			head_tx = (void *)tx_q->dma_tx;
1013 
1014 		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1015 	}
1016 }
1017 
1018 static void stmmac_display_rings(struct stmmac_priv *priv)
1019 {
1020 	/* Display RX ring */
1021 	stmmac_display_rx_rings(priv);
1022 
1023 	/* Display TX ring */
1024 	stmmac_display_tx_rings(priv);
1025 }
1026 
1027 static int stmmac_set_bfsize(int mtu, int bufsize)
1028 {
1029 	int ret = bufsize;
1030 
1031 	if (mtu >= BUF_SIZE_4KiB)
1032 		ret = BUF_SIZE_8KiB;
1033 	else if (mtu >= BUF_SIZE_2KiB)
1034 		ret = BUF_SIZE_4KiB;
1035 	else if (mtu > DEFAULT_BUFSIZE)
1036 		ret = BUF_SIZE_2KiB;
1037 	else
1038 		ret = DEFAULT_BUFSIZE;
1039 
1040 	return ret;
1041 }
1042 
1043 /**
1044  * stmmac_clear_rx_descriptors - clear RX descriptors
1045  * @priv: driver private structure
1046  * @queue: RX queue index
1047  * Description: this function is called to clear the RX descriptors
1048  * in case of both basic and extended descriptors are used.
1049  */
1050 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1051 {
1052 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1053 	int i;
1054 
1055 	/* Clear the RX descriptors */
1056 	for (i = 0; i < DMA_RX_SIZE; i++)
1057 		if (priv->extend_desc)
1058 			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1059 						     priv->use_riwt, priv->mode,
1060 						     (i == DMA_RX_SIZE - 1));
1061 		else
1062 			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1063 						     priv->use_riwt, priv->mode,
1064 						     (i == DMA_RX_SIZE - 1));
1065 }
1066 
1067 /**
1068  * stmmac_clear_tx_descriptors - clear tx descriptors
1069  * @priv: driver private structure
1070  * @queue: TX queue index.
1071  * Description: this function is called to clear the TX descriptors
1072  * in case of both basic and extended descriptors are used.
1073  */
1074 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1075 {
1076 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1077 	int i;
1078 
1079 	/* Clear the TX descriptors */
1080 	for (i = 0; i < DMA_TX_SIZE; i++)
1081 		if (priv->extend_desc)
1082 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1083 						     priv->mode,
1084 						     (i == DMA_TX_SIZE - 1));
1085 		else
1086 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1087 						     priv->mode,
1088 						     (i == DMA_TX_SIZE - 1));
1089 }
1090 
1091 /**
1092  * stmmac_clear_descriptors - clear descriptors
1093  * @priv: driver private structure
1094  * Description: this function is called to clear the TX and RX descriptors
1095  * in case of both basic and extended descriptors are used.
1096  */
1097 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1098 {
1099 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1100 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1101 	u32 queue;
1102 
1103 	/* Clear the RX descriptors */
1104 	for (queue = 0; queue < rx_queue_cnt; queue++)
1105 		stmmac_clear_rx_descriptors(priv, queue);
1106 
1107 	/* Clear the TX descriptors */
1108 	for (queue = 0; queue < tx_queue_cnt; queue++)
1109 		stmmac_clear_tx_descriptors(priv, queue);
1110 }
1111 
1112 /**
1113  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1114  * @priv: driver private structure
1115  * @p: descriptor pointer
1116  * @i: descriptor index
1117  * @flags: gfp flag
1118  * @queue: RX queue index
1119  * Description: this function is called to allocate a receive buffer, perform
1120  * the DMA mapping and init the descriptor.
1121  */
1122 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1123 				  int i, gfp_t flags, u32 queue)
1124 {
1125 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1126 	struct sk_buff *skb;
1127 
1128 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1129 	if (!skb) {
1130 		netdev_err(priv->dev,
1131 			   "%s: Rx init fails; skb is NULL\n", __func__);
1132 		return -ENOMEM;
1133 	}
1134 	rx_q->rx_skbuff[i] = skb;
1135 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1136 						priv->dma_buf_sz,
1137 						DMA_FROM_DEVICE);
1138 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1139 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1140 		dev_kfree_skb_any(skb);
1141 		return -EINVAL;
1142 	}
1143 
1144 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1145 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1146 	else
1147 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1148 
1149 	if ((priv->hw->mode->init_desc3) &&
1150 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
1151 		priv->hw->mode->init_desc3(p);
1152 
1153 	return 0;
1154 }
1155 
1156 /**
1157  * stmmac_free_rx_buffer - free RX dma buffers
1158  * @priv: private structure
1159  * @queue: RX queue index
1160  * @i: buffer index.
1161  */
1162 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1163 {
1164 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1165 
1166 	if (rx_q->rx_skbuff[i]) {
1167 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1168 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1169 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1170 	}
1171 	rx_q->rx_skbuff[i] = NULL;
1172 }
1173 
1174 /**
1175  * stmmac_free_tx_buffer - free RX dma buffers
1176  * @priv: private structure
1177  * @queue: RX queue index
1178  * @i: buffer index.
1179  */
1180 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1181 {
1182 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1183 
1184 	if (tx_q->tx_skbuff_dma[i].buf) {
1185 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1186 			dma_unmap_page(priv->device,
1187 				       tx_q->tx_skbuff_dma[i].buf,
1188 				       tx_q->tx_skbuff_dma[i].len,
1189 				       DMA_TO_DEVICE);
1190 		else
1191 			dma_unmap_single(priv->device,
1192 					 tx_q->tx_skbuff_dma[i].buf,
1193 					 tx_q->tx_skbuff_dma[i].len,
1194 					 DMA_TO_DEVICE);
1195 	}
1196 
1197 	if (tx_q->tx_skbuff[i]) {
1198 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1199 		tx_q->tx_skbuff[i] = NULL;
1200 		tx_q->tx_skbuff_dma[i].buf = 0;
1201 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1202 	}
1203 }
1204 
1205 /**
1206  * init_dma_rx_desc_rings - init the RX descriptor rings
1207  * @dev: net device structure
1208  * @flags: gfp flag.
1209  * Description: this function initializes the DMA RX descriptors
1210  * and allocates the socket buffers. It supports the chained and ring
1211  * modes.
1212  */
1213 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1214 {
1215 	struct stmmac_priv *priv = netdev_priv(dev);
1216 	u32 rx_count = priv->plat->rx_queues_to_use;
1217 	unsigned int bfsize = 0;
1218 	int ret = -ENOMEM;
1219 	int queue;
1220 	int i;
1221 
1222 	if (priv->hw->mode->set_16kib_bfsize)
1223 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1224 
1225 	if (bfsize < BUF_SIZE_16KiB)
1226 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1227 
1228 	priv->dma_buf_sz = bfsize;
1229 
1230 	/* RX INITIALIZATION */
1231 	netif_dbg(priv, probe, priv->dev,
1232 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1233 
1234 	for (queue = 0; queue < rx_count; queue++) {
1235 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1236 
1237 		netif_dbg(priv, probe, priv->dev,
1238 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1239 			  (u32)rx_q->dma_rx_phy);
1240 
1241 		for (i = 0; i < DMA_RX_SIZE; i++) {
1242 			struct dma_desc *p;
1243 
1244 			if (priv->extend_desc)
1245 				p = &((rx_q->dma_erx + i)->basic);
1246 			else
1247 				p = rx_q->dma_rx + i;
1248 
1249 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1250 						     queue);
1251 			if (ret)
1252 				goto err_init_rx_buffers;
1253 
1254 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1255 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1256 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1257 		}
1258 
1259 		rx_q->cur_rx = 0;
1260 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1261 
1262 		stmmac_clear_rx_descriptors(priv, queue);
1263 
1264 		/* Setup the chained descriptor addresses */
1265 		if (priv->mode == STMMAC_CHAIN_MODE) {
1266 			if (priv->extend_desc)
1267 				priv->hw->mode->init(rx_q->dma_erx,
1268 						     rx_q->dma_rx_phy,
1269 						     DMA_RX_SIZE, 1);
1270 			else
1271 				priv->hw->mode->init(rx_q->dma_rx,
1272 						     rx_q->dma_rx_phy,
1273 						     DMA_RX_SIZE, 0);
1274 		}
1275 	}
1276 
1277 	buf_sz = bfsize;
1278 
1279 	return 0;
1280 
1281 err_init_rx_buffers:
1282 	while (queue >= 0) {
1283 		while (--i >= 0)
1284 			stmmac_free_rx_buffer(priv, queue, i);
1285 
1286 		if (queue == 0)
1287 			break;
1288 
1289 		i = DMA_RX_SIZE;
1290 		queue--;
1291 	}
1292 
1293 	return ret;
1294 }
1295 
1296 /**
1297  * init_dma_tx_desc_rings - init the TX descriptor rings
1298  * @dev: net device structure.
1299  * Description: this function initializes the DMA TX descriptors
1300  * and allocates the socket buffers. It supports the chained and ring
1301  * modes.
1302  */
1303 static int init_dma_tx_desc_rings(struct net_device *dev)
1304 {
1305 	struct stmmac_priv *priv = netdev_priv(dev);
1306 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1307 	u32 queue;
1308 	int i;
1309 
1310 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1311 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1312 
1313 		netif_dbg(priv, probe, priv->dev,
1314 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1315 			 (u32)tx_q->dma_tx_phy);
1316 
1317 		/* Setup the chained descriptor addresses */
1318 		if (priv->mode == STMMAC_CHAIN_MODE) {
1319 			if (priv->extend_desc)
1320 				priv->hw->mode->init(tx_q->dma_etx,
1321 						     tx_q->dma_tx_phy,
1322 						     DMA_TX_SIZE, 1);
1323 			else
1324 				priv->hw->mode->init(tx_q->dma_tx,
1325 						     tx_q->dma_tx_phy,
1326 						     DMA_TX_SIZE, 0);
1327 		}
1328 
1329 		for (i = 0; i < DMA_TX_SIZE; i++) {
1330 			struct dma_desc *p;
1331 			if (priv->extend_desc)
1332 				p = &((tx_q->dma_etx + i)->basic);
1333 			else
1334 				p = tx_q->dma_tx + i;
1335 
1336 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1337 				p->des0 = 0;
1338 				p->des1 = 0;
1339 				p->des2 = 0;
1340 				p->des3 = 0;
1341 			} else {
1342 				p->des2 = 0;
1343 			}
1344 
1345 			tx_q->tx_skbuff_dma[i].buf = 0;
1346 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1347 			tx_q->tx_skbuff_dma[i].len = 0;
1348 			tx_q->tx_skbuff_dma[i].last_segment = false;
1349 			tx_q->tx_skbuff[i] = NULL;
1350 		}
1351 
1352 		tx_q->dirty_tx = 0;
1353 		tx_q->cur_tx = 0;
1354 
1355 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1356 	}
1357 
1358 	return 0;
1359 }
1360 
1361 /**
1362  * init_dma_desc_rings - init the RX/TX descriptor rings
1363  * @dev: net device structure
1364  * @flags: gfp flag.
1365  * Description: this function initializes the DMA RX/TX descriptors
1366  * and allocates the socket buffers. It supports the chained and ring
1367  * modes.
1368  */
1369 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1370 {
1371 	struct stmmac_priv *priv = netdev_priv(dev);
1372 	int ret;
1373 
1374 	ret = init_dma_rx_desc_rings(dev, flags);
1375 	if (ret)
1376 		return ret;
1377 
1378 	ret = init_dma_tx_desc_rings(dev);
1379 
1380 	stmmac_clear_descriptors(priv);
1381 
1382 	if (netif_msg_hw(priv))
1383 		stmmac_display_rings(priv);
1384 
1385 	return ret;
1386 }
1387 
1388 /**
1389  * dma_free_rx_skbufs - free RX dma buffers
1390  * @priv: private structure
1391  * @queue: RX queue index
1392  */
1393 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1394 {
1395 	int i;
1396 
1397 	for (i = 0; i < DMA_RX_SIZE; i++)
1398 		stmmac_free_rx_buffer(priv, queue, i);
1399 }
1400 
1401 /**
1402  * dma_free_tx_skbufs - free TX dma buffers
1403  * @priv: private structure
1404  * @queue: TX queue index
1405  */
1406 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1407 {
1408 	int i;
1409 
1410 	for (i = 0; i < DMA_TX_SIZE; i++)
1411 		stmmac_free_tx_buffer(priv, queue, i);
1412 }
1413 
1414 /**
1415  * free_dma_rx_desc_resources - free RX dma desc resources
1416  * @priv: private structure
1417  */
1418 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1419 {
1420 	u32 rx_count = priv->plat->rx_queues_to_use;
1421 	u32 queue;
1422 
1423 	/* Free RX queue resources */
1424 	for (queue = 0; queue < rx_count; queue++) {
1425 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1426 
1427 		/* Release the DMA RX socket buffers */
1428 		dma_free_rx_skbufs(priv, queue);
1429 
1430 		/* Free DMA regions of consistent memory previously allocated */
1431 		if (!priv->extend_desc)
1432 			dma_free_coherent(priv->device,
1433 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1434 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1435 		else
1436 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1437 					  sizeof(struct dma_extended_desc),
1438 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1439 
1440 		kfree(rx_q->rx_skbuff_dma);
1441 		kfree(rx_q->rx_skbuff);
1442 	}
1443 }
1444 
1445 /**
1446  * free_dma_tx_desc_resources - free TX dma desc resources
1447  * @priv: private structure
1448  */
1449 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1450 {
1451 	u32 tx_count = priv->plat->tx_queues_to_use;
1452 	u32 queue;
1453 
1454 	/* Free TX queue resources */
1455 	for (queue = 0; queue < tx_count; queue++) {
1456 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1457 
1458 		/* Release the DMA TX socket buffers */
1459 		dma_free_tx_skbufs(priv, queue);
1460 
1461 		/* Free DMA regions of consistent memory previously allocated */
1462 		if (!priv->extend_desc)
1463 			dma_free_coherent(priv->device,
1464 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1465 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1466 		else
1467 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1468 					  sizeof(struct dma_extended_desc),
1469 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1470 
1471 		kfree(tx_q->tx_skbuff_dma);
1472 		kfree(tx_q->tx_skbuff);
1473 	}
1474 }
1475 
1476 /**
1477  * alloc_dma_rx_desc_resources - alloc RX resources.
1478  * @priv: private structure
1479  * Description: according to which descriptor can be used (extend or basic)
1480  * this function allocates the resources for TX and RX paths. In case of
1481  * reception, for example, it pre-allocated the RX socket buffer in order to
1482  * allow zero-copy mechanism.
1483  */
1484 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1485 {
1486 	u32 rx_count = priv->plat->rx_queues_to_use;
1487 	int ret = -ENOMEM;
1488 	u32 queue;
1489 
1490 	/* RX queues buffers and DMA */
1491 	for (queue = 0; queue < rx_count; queue++) {
1492 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1493 
1494 		rx_q->queue_index = queue;
1495 		rx_q->priv_data = priv;
1496 
1497 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1498 						    sizeof(dma_addr_t),
1499 						    GFP_KERNEL);
1500 		if (!rx_q->rx_skbuff_dma)
1501 			goto err_dma;
1502 
1503 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1504 						sizeof(struct sk_buff *),
1505 						GFP_KERNEL);
1506 		if (!rx_q->rx_skbuff)
1507 			goto err_dma;
1508 
1509 		if (priv->extend_desc) {
1510 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1511 							    DMA_RX_SIZE *
1512 							    sizeof(struct
1513 							    dma_extended_desc),
1514 							    &rx_q->dma_rx_phy,
1515 							    GFP_KERNEL);
1516 			if (!rx_q->dma_erx)
1517 				goto err_dma;
1518 
1519 		} else {
1520 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1521 							   DMA_RX_SIZE *
1522 							   sizeof(struct
1523 							   dma_desc),
1524 							   &rx_q->dma_rx_phy,
1525 							   GFP_KERNEL);
1526 			if (!rx_q->dma_rx)
1527 				goto err_dma;
1528 		}
1529 	}
1530 
1531 	return 0;
1532 
1533 err_dma:
1534 	free_dma_rx_desc_resources(priv);
1535 
1536 	return ret;
1537 }
1538 
1539 /**
1540  * alloc_dma_tx_desc_resources - alloc TX resources.
1541  * @priv: private structure
1542  * Description: according to which descriptor can be used (extend or basic)
1543  * this function allocates the resources for TX and RX paths. In case of
1544  * reception, for example, it pre-allocated the RX socket buffer in order to
1545  * allow zero-copy mechanism.
1546  */
1547 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1548 {
1549 	u32 tx_count = priv->plat->tx_queues_to_use;
1550 	int ret = -ENOMEM;
1551 	u32 queue;
1552 
1553 	/* TX queues buffers and DMA */
1554 	for (queue = 0; queue < tx_count; queue++) {
1555 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1556 
1557 		tx_q->queue_index = queue;
1558 		tx_q->priv_data = priv;
1559 
1560 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1561 						    sizeof(*tx_q->tx_skbuff_dma),
1562 						    GFP_KERNEL);
1563 		if (!tx_q->tx_skbuff_dma)
1564 			goto err_dma;
1565 
1566 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1567 						sizeof(struct sk_buff *),
1568 						GFP_KERNEL);
1569 		if (!tx_q->tx_skbuff)
1570 			goto err_dma;
1571 
1572 		if (priv->extend_desc) {
1573 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1574 							    DMA_TX_SIZE *
1575 							    sizeof(struct
1576 							    dma_extended_desc),
1577 							    &tx_q->dma_tx_phy,
1578 							    GFP_KERNEL);
1579 			if (!tx_q->dma_etx)
1580 				goto err_dma;
1581 		} else {
1582 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1583 							   DMA_TX_SIZE *
1584 							   sizeof(struct
1585 								  dma_desc),
1586 							   &tx_q->dma_tx_phy,
1587 							   GFP_KERNEL);
1588 			if (!tx_q->dma_tx)
1589 				goto err_dma;
1590 		}
1591 	}
1592 
1593 	return 0;
1594 
1595 err_dma:
1596 	free_dma_tx_desc_resources(priv);
1597 
1598 	return ret;
1599 }
1600 
1601 /**
1602  * alloc_dma_desc_resources - alloc TX/RX resources.
1603  * @priv: private structure
1604  * Description: according to which descriptor can be used (extend or basic)
1605  * this function allocates the resources for TX and RX paths. In case of
1606  * reception, for example, it pre-allocated the RX socket buffer in order to
1607  * allow zero-copy mechanism.
1608  */
1609 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1610 {
1611 	/* RX Allocation */
1612 	int ret = alloc_dma_rx_desc_resources(priv);
1613 
1614 	if (ret)
1615 		return ret;
1616 
1617 	ret = alloc_dma_tx_desc_resources(priv);
1618 
1619 	return ret;
1620 }
1621 
1622 /**
1623  * free_dma_desc_resources - free dma desc resources
1624  * @priv: private structure
1625  */
1626 static void free_dma_desc_resources(struct stmmac_priv *priv)
1627 {
1628 	/* Release the DMA RX socket buffers */
1629 	free_dma_rx_desc_resources(priv);
1630 
1631 	/* Release the DMA TX socket buffers */
1632 	free_dma_tx_desc_resources(priv);
1633 }
1634 
1635 /**
1636  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1637  *  @priv: driver private structure
1638  *  Description: It is used for enabling the rx queues in the MAC
1639  */
1640 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1641 {
1642 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1643 	int queue;
1644 	u8 mode;
1645 
1646 	for (queue = 0; queue < rx_queues_count; queue++) {
1647 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1648 		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1649 	}
1650 }
1651 
1652 /**
1653  * stmmac_start_rx_dma - start RX DMA channel
1654  * @priv: driver private structure
1655  * @chan: RX channel index
1656  * Description:
1657  * This starts a RX DMA channel
1658  */
1659 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1660 {
1661 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1662 	priv->hw->dma->start_rx(priv->ioaddr, chan);
1663 }
1664 
1665 /**
1666  * stmmac_start_tx_dma - start TX DMA channel
1667  * @priv: driver private structure
1668  * @chan: TX channel index
1669  * Description:
1670  * This starts a TX DMA channel
1671  */
1672 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1673 {
1674 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1675 	priv->hw->dma->start_tx(priv->ioaddr, chan);
1676 }
1677 
1678 /**
1679  * stmmac_stop_rx_dma - stop RX DMA channel
1680  * @priv: driver private structure
1681  * @chan: RX channel index
1682  * Description:
1683  * This stops a RX DMA channel
1684  */
1685 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1686 {
1687 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1688 	priv->hw->dma->stop_rx(priv->ioaddr, chan);
1689 }
1690 
1691 /**
1692  * stmmac_stop_tx_dma - stop TX DMA channel
1693  * @priv: driver private structure
1694  * @chan: TX channel index
1695  * Description:
1696  * This stops a TX DMA channel
1697  */
1698 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1699 {
1700 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1701 	priv->hw->dma->stop_tx(priv->ioaddr, chan);
1702 }
1703 
1704 /**
1705  * stmmac_start_all_dma - start all RX and TX DMA channels
1706  * @priv: driver private structure
1707  * Description:
1708  * This starts all the RX and TX DMA channels
1709  */
1710 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1711 {
1712 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1713 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1714 	u32 chan = 0;
1715 
1716 	for (chan = 0; chan < rx_channels_count; chan++)
1717 		stmmac_start_rx_dma(priv, chan);
1718 
1719 	for (chan = 0; chan < tx_channels_count; chan++)
1720 		stmmac_start_tx_dma(priv, chan);
1721 }
1722 
1723 /**
1724  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1725  * @priv: driver private structure
1726  * Description:
1727  * This stops the RX and TX DMA channels
1728  */
1729 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1730 {
1731 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1732 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1733 	u32 chan = 0;
1734 
1735 	for (chan = 0; chan < rx_channels_count; chan++)
1736 		stmmac_stop_rx_dma(priv, chan);
1737 
1738 	for (chan = 0; chan < tx_channels_count; chan++)
1739 		stmmac_stop_tx_dma(priv, chan);
1740 }
1741 
1742 /**
1743  *  stmmac_dma_operation_mode - HW DMA operation mode
1744  *  @priv: driver private structure
1745  *  Description: it is used for configuring the DMA operation mode register in
1746  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1747  */
1748 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1749 {
1750 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1751 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1752 	int rxfifosz = priv->plat->rx_fifo_size;
1753 	u32 txmode = 0;
1754 	u32 rxmode = 0;
1755 	u32 chan = 0;
1756 
1757 	if (rxfifosz == 0)
1758 		rxfifosz = priv->dma_cap.rx_fifo_size;
1759 
1760 	if (priv->plat->force_thresh_dma_mode) {
1761 		txmode = tc;
1762 		rxmode = tc;
1763 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1764 		/*
1765 		 * In case of GMAC, SF mode can be enabled
1766 		 * to perform the TX COE in HW. This depends on:
1767 		 * 1) TX COE if actually supported
1768 		 * 2) There is no bugged Jumbo frame support
1769 		 *    that needs to not insert csum in the TDES.
1770 		 */
1771 		txmode = SF_DMA_MODE;
1772 		rxmode = SF_DMA_MODE;
1773 		priv->xstats.threshold = SF_DMA_MODE;
1774 	} else {
1775 		txmode = tc;
1776 		rxmode = SF_DMA_MODE;
1777 	}
1778 
1779 	/* configure all channels */
1780 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1781 		for (chan = 0; chan < rx_channels_count; chan++)
1782 			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1783 						   rxfifosz);
1784 
1785 		for (chan = 0; chan < tx_channels_count; chan++)
1786 			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1787 	} else {
1788 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1789 					rxfifosz);
1790 	}
1791 }
1792 
1793 /**
1794  * stmmac_tx_clean - to manage the transmission completion
1795  * @priv: driver private structure
1796  * @queue: TX queue index
1797  * Description: it reclaims the transmit resources after transmission completes.
1798  */
1799 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1800 {
1801 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1802 	unsigned int bytes_compl = 0, pkts_compl = 0;
1803 	unsigned int entry = tx_q->dirty_tx;
1804 
1805 	netif_tx_lock(priv->dev);
1806 
1807 	priv->xstats.tx_clean++;
1808 
1809 	while (entry != tx_q->cur_tx) {
1810 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1811 		struct dma_desc *p;
1812 		int status;
1813 
1814 		if (priv->extend_desc)
1815 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1816 		else
1817 			p = tx_q->dma_tx + entry;
1818 
1819 		status = priv->hw->desc->tx_status(&priv->dev->stats,
1820 						      &priv->xstats, p,
1821 						      priv->ioaddr);
1822 		/* Check if the descriptor is owned by the DMA */
1823 		if (unlikely(status & tx_dma_own))
1824 			break;
1825 
1826 		/* Just consider the last segment and ...*/
1827 		if (likely(!(status & tx_not_ls))) {
1828 			/* ... verify the status error condition */
1829 			if (unlikely(status & tx_err)) {
1830 				priv->dev->stats.tx_errors++;
1831 			} else {
1832 				priv->dev->stats.tx_packets++;
1833 				priv->xstats.tx_pkt_n++;
1834 			}
1835 			stmmac_get_tx_hwtstamp(priv, p, skb);
1836 		}
1837 
1838 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1839 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1840 				dma_unmap_page(priv->device,
1841 					       tx_q->tx_skbuff_dma[entry].buf,
1842 					       tx_q->tx_skbuff_dma[entry].len,
1843 					       DMA_TO_DEVICE);
1844 			else
1845 				dma_unmap_single(priv->device,
1846 						 tx_q->tx_skbuff_dma[entry].buf,
1847 						 tx_q->tx_skbuff_dma[entry].len,
1848 						 DMA_TO_DEVICE);
1849 			tx_q->tx_skbuff_dma[entry].buf = 0;
1850 			tx_q->tx_skbuff_dma[entry].len = 0;
1851 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1852 		}
1853 
1854 		if (priv->hw->mode->clean_desc3)
1855 			priv->hw->mode->clean_desc3(tx_q, p);
1856 
1857 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1858 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1859 
1860 		if (likely(skb != NULL)) {
1861 			pkts_compl++;
1862 			bytes_compl += skb->len;
1863 			dev_consume_skb_any(skb);
1864 			tx_q->tx_skbuff[entry] = NULL;
1865 		}
1866 
1867 		priv->hw->desc->release_tx_desc(p, priv->mode);
1868 
1869 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1870 	}
1871 	tx_q->dirty_tx = entry;
1872 
1873 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1874 				  pkts_compl, bytes_compl);
1875 
1876 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1877 								queue))) &&
1878 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1879 
1880 		netif_dbg(priv, tx_done, priv->dev,
1881 			  "%s: restart transmit\n", __func__);
1882 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1883 	}
1884 
1885 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1886 		stmmac_enable_eee_mode(priv);
1887 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1888 	}
1889 	netif_tx_unlock(priv->dev);
1890 }
1891 
1892 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1893 {
1894 	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1895 }
1896 
1897 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1898 {
1899 	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1900 }
1901 
1902 /**
1903  * stmmac_tx_err - to manage the tx error
1904  * @priv: driver private structure
1905  * @chan: channel index
1906  * Description: it cleans the descriptors and restarts the transmission
1907  * in case of transmission errors.
1908  */
1909 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1910 {
1911 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1912 	int i;
1913 
1914 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1915 
1916 	stmmac_stop_tx_dma(priv, chan);
1917 	dma_free_tx_skbufs(priv, chan);
1918 	for (i = 0; i < DMA_TX_SIZE; i++)
1919 		if (priv->extend_desc)
1920 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1921 						     priv->mode,
1922 						     (i == DMA_TX_SIZE - 1));
1923 		else
1924 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1925 						     priv->mode,
1926 						     (i == DMA_TX_SIZE - 1));
1927 	tx_q->dirty_tx = 0;
1928 	tx_q->cur_tx = 0;
1929 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1930 	stmmac_start_tx_dma(priv, chan);
1931 
1932 	priv->dev->stats.tx_errors++;
1933 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1934 }
1935 
1936 /**
1937  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1938  *  @priv: driver private structure
1939  *  @txmode: TX operating mode
1940  *  @rxmode: RX operating mode
1941  *  @chan: channel index
1942  *  Description: it is used for configuring of the DMA operation mode in
1943  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1944  *  mode.
1945  */
1946 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1947 					  u32 rxmode, u32 chan)
1948 {
1949 	int rxfifosz = priv->plat->rx_fifo_size;
1950 
1951 	if (rxfifosz == 0)
1952 		rxfifosz = priv->dma_cap.rx_fifo_size;
1953 
1954 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1955 		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1956 					   rxfifosz);
1957 		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1958 	} else {
1959 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1960 					rxfifosz);
1961 	}
1962 }
1963 
1964 /**
1965  * stmmac_dma_interrupt - DMA ISR
1966  * @priv: driver private structure
1967  * Description: this is the DMA ISR. It is called by the main ISR.
1968  * It calls the dwmac dma routine and schedule poll method in case of some
1969  * work can be done.
1970  */
1971 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1972 {
1973 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
1974 	int status;
1975 	u32 chan;
1976 
1977 	for (chan = 0; chan < tx_channel_count; chan++) {
1978 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1979 
1980 		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1981 						      &priv->xstats, chan);
1982 		if (likely((status & handle_rx)) || (status & handle_tx)) {
1983 			if (likely(napi_schedule_prep(&rx_q->napi))) {
1984 				stmmac_disable_dma_irq(priv, chan);
1985 				__napi_schedule(&rx_q->napi);
1986 			}
1987 		}
1988 
1989 		if (unlikely(status & tx_hard_error_bump_tc)) {
1990 			/* Try to bump up the dma threshold on this failure */
1991 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1992 			    (tc <= 256)) {
1993 				tc += 64;
1994 				if (priv->plat->force_thresh_dma_mode)
1995 					stmmac_set_dma_operation_mode(priv,
1996 								      tc,
1997 								      tc,
1998 								      chan);
1999 				else
2000 					stmmac_set_dma_operation_mode(priv,
2001 								    tc,
2002 								    SF_DMA_MODE,
2003 								    chan);
2004 				priv->xstats.threshold = tc;
2005 			}
2006 		} else if (unlikely(status == tx_hard_error)) {
2007 			stmmac_tx_err(priv, chan);
2008 		}
2009 	}
2010 }
2011 
2012 /**
2013  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2014  * @priv: driver private structure
2015  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2016  */
2017 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2018 {
2019 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2020 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2021 
2022 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2023 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2024 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2025 	} else {
2026 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2027 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2028 	}
2029 
2030 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2031 
2032 	if (priv->dma_cap.rmon) {
2033 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2034 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2035 	} else
2036 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2037 }
2038 
2039 /**
2040  * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
2041  * @priv: driver private structure
2042  * Description: select the Enhanced/Alternate or Normal descriptors.
2043  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2044  * supported by the HW capability register.
2045  */
2046 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2047 {
2048 	if (priv->plat->enh_desc) {
2049 		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2050 
2051 		/* GMAC older than 3.50 has no extended descriptors */
2052 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2053 			dev_info(priv->device, "Enabled extended descriptors\n");
2054 			priv->extend_desc = 1;
2055 		} else
2056 			dev_warn(priv->device, "Extended descriptors not supported\n");
2057 
2058 		priv->hw->desc = &enh_desc_ops;
2059 	} else {
2060 		dev_info(priv->device, "Normal descriptors\n");
2061 		priv->hw->desc = &ndesc_ops;
2062 	}
2063 }
2064 
2065 /**
2066  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2067  * @priv: driver private structure
2068  * Description:
2069  *  new GMAC chip generations have a new register to indicate the
2070  *  presence of the optional feature/functions.
2071  *  This can be also used to override the value passed through the
2072  *  platform and necessary for old MAC10/100 and GMAC chips.
2073  */
2074 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2075 {
2076 	u32 ret = 0;
2077 
2078 	if (priv->hw->dma->get_hw_feature) {
2079 		priv->hw->dma->get_hw_feature(priv->ioaddr,
2080 					      &priv->dma_cap);
2081 		ret = 1;
2082 	}
2083 
2084 	return ret;
2085 }
2086 
2087 /**
2088  * stmmac_check_ether_addr - check if the MAC addr is valid
2089  * @priv: driver private structure
2090  * Description:
2091  * it is to verify if the MAC address is valid, in case of failures it
2092  * generates a random MAC address
2093  */
2094 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2095 {
2096 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2097 		priv->hw->mac->get_umac_addr(priv->hw,
2098 					     priv->dev->dev_addr, 0);
2099 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2100 			eth_hw_addr_random(priv->dev);
2101 		netdev_info(priv->dev, "device MAC address %pM\n",
2102 			    priv->dev->dev_addr);
2103 	}
2104 }
2105 
2106 /**
2107  * stmmac_init_dma_engine - DMA init.
2108  * @priv: driver private structure
2109  * Description:
2110  * It inits the DMA invoking the specific MAC/GMAC callback.
2111  * Some DMA parameters can be passed from the platform;
2112  * in case of these are not passed a default is kept for the MAC or GMAC.
2113  */
2114 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2115 {
2116 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2117 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2118 	struct stmmac_rx_queue *rx_q;
2119 	struct stmmac_tx_queue *tx_q;
2120 	u32 dummy_dma_rx_phy = 0;
2121 	u32 dummy_dma_tx_phy = 0;
2122 	u32 chan = 0;
2123 	int atds = 0;
2124 	int ret = 0;
2125 
2126 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2127 		dev_err(priv->device, "Invalid DMA configuration\n");
2128 		return -EINVAL;
2129 	}
2130 
2131 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2132 		atds = 1;
2133 
2134 	ret = priv->hw->dma->reset(priv->ioaddr);
2135 	if (ret) {
2136 		dev_err(priv->device, "Failed to reset the dma\n");
2137 		return ret;
2138 	}
2139 
2140 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2141 		/* DMA Configuration */
2142 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2143 				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2144 
2145 		/* DMA RX Channel Configuration */
2146 		for (chan = 0; chan < rx_channels_count; chan++) {
2147 			rx_q = &priv->rx_queue[chan];
2148 
2149 			priv->hw->dma->init_rx_chan(priv->ioaddr,
2150 						    priv->plat->dma_cfg,
2151 						    rx_q->dma_rx_phy, chan);
2152 
2153 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2154 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2155 			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2156 						       rx_q->rx_tail_addr,
2157 						       chan);
2158 		}
2159 
2160 		/* DMA TX Channel Configuration */
2161 		for (chan = 0; chan < tx_channels_count; chan++) {
2162 			tx_q = &priv->tx_queue[chan];
2163 
2164 			priv->hw->dma->init_chan(priv->ioaddr,
2165 						 priv->plat->dma_cfg,
2166 						 chan);
2167 
2168 			priv->hw->dma->init_tx_chan(priv->ioaddr,
2169 						    priv->plat->dma_cfg,
2170 						    tx_q->dma_tx_phy, chan);
2171 
2172 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2173 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2174 			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2175 						       tx_q->tx_tail_addr,
2176 						       chan);
2177 		}
2178 	} else {
2179 		rx_q = &priv->rx_queue[chan];
2180 		tx_q = &priv->tx_queue[chan];
2181 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2182 				    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2183 	}
2184 
2185 	if (priv->plat->axi && priv->hw->dma->axi)
2186 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2187 
2188 	return ret;
2189 }
2190 
2191 /**
2192  * stmmac_tx_timer - mitigation sw timer for tx.
2193  * @data: data pointer
2194  * Description:
2195  * This is the timer handler to directly invoke the stmmac_tx_clean.
2196  */
2197 static void stmmac_tx_timer(unsigned long data)
2198 {
2199 	struct stmmac_priv *priv = (struct stmmac_priv *)data;
2200 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2201 	u32 queue;
2202 
2203 	/* let's scan all the tx queues */
2204 	for (queue = 0; queue < tx_queues_count; queue++)
2205 		stmmac_tx_clean(priv, queue);
2206 }
2207 
2208 /**
2209  * stmmac_init_tx_coalesce - init tx mitigation options.
2210  * @priv: driver private structure
2211  * Description:
2212  * This inits the transmit coalesce parameters: i.e. timer rate,
2213  * timer handler and default threshold used for enabling the
2214  * interrupt on completion bit.
2215  */
2216 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2217 {
2218 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2219 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2220 	setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv);
2221 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2222 	add_timer(&priv->txtimer);
2223 }
2224 
2225 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2226 {
2227 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2228 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2229 	u32 chan;
2230 
2231 	/* set TX ring length */
2232 	if (priv->hw->dma->set_tx_ring_len) {
2233 		for (chan = 0; chan < tx_channels_count; chan++)
2234 			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2235 						       (DMA_TX_SIZE - 1), chan);
2236 	}
2237 
2238 	/* set RX ring length */
2239 	if (priv->hw->dma->set_rx_ring_len) {
2240 		for (chan = 0; chan < rx_channels_count; chan++)
2241 			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2242 						       (DMA_RX_SIZE - 1), chan);
2243 	}
2244 }
2245 
2246 /**
2247  *  stmmac_set_tx_queue_weight - Set TX queue weight
2248  *  @priv: driver private structure
2249  *  Description: It is used for setting TX queues weight
2250  */
2251 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2252 {
2253 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2254 	u32 weight;
2255 	u32 queue;
2256 
2257 	for (queue = 0; queue < tx_queues_count; queue++) {
2258 		weight = priv->plat->tx_queues_cfg[queue].weight;
2259 		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2260 	}
2261 }
2262 
2263 /**
2264  *  stmmac_configure_cbs - Configure CBS in TX queue
2265  *  @priv: driver private structure
2266  *  Description: It is used for configuring CBS in AVB TX queues
2267  */
2268 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2269 {
2270 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2271 	u32 mode_to_use;
2272 	u32 queue;
2273 
2274 	/* queue 0 is reserved for legacy traffic */
2275 	for (queue = 1; queue < tx_queues_count; queue++) {
2276 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2277 		if (mode_to_use == MTL_QUEUE_DCB)
2278 			continue;
2279 
2280 		priv->hw->mac->config_cbs(priv->hw,
2281 				priv->plat->tx_queues_cfg[queue].send_slope,
2282 				priv->plat->tx_queues_cfg[queue].idle_slope,
2283 				priv->plat->tx_queues_cfg[queue].high_credit,
2284 				priv->plat->tx_queues_cfg[queue].low_credit,
2285 				queue);
2286 	}
2287 }
2288 
2289 /**
2290  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2291  *  @priv: driver private structure
2292  *  Description: It is used for mapping RX queues to RX dma channels
2293  */
2294 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2295 {
2296 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2297 	u32 queue;
2298 	u32 chan;
2299 
2300 	for (queue = 0; queue < rx_queues_count; queue++) {
2301 		chan = priv->plat->rx_queues_cfg[queue].chan;
2302 		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2303 	}
2304 }
2305 
2306 /**
2307  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2308  *  @priv: driver private structure
2309  *  Description: It is used for configuring the RX Queue Priority
2310  */
2311 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2312 {
2313 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2314 	u32 queue;
2315 	u32 prio;
2316 
2317 	for (queue = 0; queue < rx_queues_count; queue++) {
2318 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2319 			continue;
2320 
2321 		prio = priv->plat->rx_queues_cfg[queue].prio;
2322 		priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2323 	}
2324 }
2325 
2326 /**
2327  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2328  *  @priv: driver private structure
2329  *  Description: It is used for configuring the TX Queue Priority
2330  */
2331 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2332 {
2333 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2334 	u32 queue;
2335 	u32 prio;
2336 
2337 	for (queue = 0; queue < tx_queues_count; queue++) {
2338 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2339 			continue;
2340 
2341 		prio = priv->plat->tx_queues_cfg[queue].prio;
2342 		priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2343 	}
2344 }
2345 
2346 /**
2347  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2348  *  @priv: driver private structure
2349  *  Description: It is used for configuring the RX queue routing
2350  */
2351 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2352 {
2353 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2354 	u32 queue;
2355 	u8 packet;
2356 
2357 	for (queue = 0; queue < rx_queues_count; queue++) {
2358 		/* no specific packet type routing specified for the queue */
2359 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2360 			continue;
2361 
2362 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2363 		priv->hw->mac->rx_queue_prio(priv->hw, packet, queue);
2364 	}
2365 }
2366 
2367 /**
2368  *  stmmac_mtl_configuration - Configure MTL
2369  *  @priv: driver private structure
2370  *  Description: It is used for configurring MTL
2371  */
2372 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2373 {
2374 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2375 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2376 
2377 	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2378 		stmmac_set_tx_queue_weight(priv);
2379 
2380 	/* Configure MTL RX algorithms */
2381 	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2382 		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2383 						priv->plat->rx_sched_algorithm);
2384 
2385 	/* Configure MTL TX algorithms */
2386 	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2387 		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2388 						priv->plat->tx_sched_algorithm);
2389 
2390 	/* Configure CBS in AVB TX queues */
2391 	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2392 		stmmac_configure_cbs(priv);
2393 
2394 	/* Map RX MTL to DMA channels */
2395 	if (priv->hw->mac->map_mtl_to_dma)
2396 		stmmac_rx_queue_dma_chan_map(priv);
2397 
2398 	/* Enable MAC RX Queues */
2399 	if (priv->hw->mac->rx_queue_enable)
2400 		stmmac_mac_enable_rx_queues(priv);
2401 
2402 	/* Set RX priorities */
2403 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2404 		stmmac_mac_config_rx_queues_prio(priv);
2405 
2406 	/* Set TX priorities */
2407 	if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2408 		stmmac_mac_config_tx_queues_prio(priv);
2409 
2410 	/* Set RX routing */
2411 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2412 		stmmac_mac_config_rx_queues_routing(priv);
2413 }
2414 
2415 /**
2416  * stmmac_hw_setup - setup mac in a usable state.
2417  *  @dev : pointer to the device structure.
2418  *  Description:
2419  *  this is the main function to setup the HW in a usable state because the
2420  *  dma engine is reset, the core registers are configured (e.g. AXI,
2421  *  Checksum features, timers). The DMA is ready to start receiving and
2422  *  transmitting.
2423  *  Return value:
2424  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2425  *  file on failure.
2426  */
2427 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2428 {
2429 	struct stmmac_priv *priv = netdev_priv(dev);
2430 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2431 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2432 	u32 chan;
2433 	int ret;
2434 
2435 	/* DMA initialization and SW reset */
2436 	ret = stmmac_init_dma_engine(priv);
2437 	if (ret < 0) {
2438 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2439 			   __func__);
2440 		return ret;
2441 	}
2442 
2443 	/* Copy the MAC addr into the HW  */
2444 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2445 
2446 	/* PS and related bits will be programmed according to the speed */
2447 	if (priv->hw->pcs) {
2448 		int speed = priv->plat->mac_port_sel_speed;
2449 
2450 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2451 		    (speed == SPEED_1000)) {
2452 			priv->hw->ps = speed;
2453 		} else {
2454 			dev_warn(priv->device, "invalid port speed\n");
2455 			priv->hw->ps = 0;
2456 		}
2457 	}
2458 
2459 	/* Initialize the MAC Core */
2460 	priv->hw->mac->core_init(priv->hw, dev->mtu);
2461 
2462 	/* Initialize MTL*/
2463 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2464 		stmmac_mtl_configuration(priv);
2465 
2466 	ret = priv->hw->mac->rx_ipc(priv->hw);
2467 	if (!ret) {
2468 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2469 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2470 		priv->hw->rx_csum = 0;
2471 	}
2472 
2473 	/* Enable the MAC Rx/Tx */
2474 	priv->hw->mac->set_mac(priv->ioaddr, true);
2475 
2476 	/* Set the HW DMA mode and the COE */
2477 	stmmac_dma_operation_mode(priv);
2478 
2479 	stmmac_mmc_setup(priv);
2480 
2481 	if (init_ptp) {
2482 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2483 		if (ret < 0)
2484 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2485 
2486 		ret = stmmac_init_ptp(priv);
2487 		if (ret == -EOPNOTSUPP)
2488 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2489 		else if (ret)
2490 			netdev_warn(priv->dev, "PTP init failed\n");
2491 	}
2492 
2493 #ifdef CONFIG_DEBUG_FS
2494 	ret = stmmac_init_fs(dev);
2495 	if (ret < 0)
2496 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2497 			    __func__);
2498 #endif
2499 	/* Start the ball rolling... */
2500 	stmmac_start_all_dma(priv);
2501 
2502 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2503 
2504 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2505 		priv->rx_riwt = MAX_DMA_RIWT;
2506 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2507 	}
2508 
2509 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2510 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2511 
2512 	/* set TX and RX rings length */
2513 	stmmac_set_rings_length(priv);
2514 
2515 	/* Enable TSO */
2516 	if (priv->tso) {
2517 		for (chan = 0; chan < tx_cnt; chan++)
2518 			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2519 	}
2520 
2521 	return 0;
2522 }
2523 
2524 static void stmmac_hw_teardown(struct net_device *dev)
2525 {
2526 	struct stmmac_priv *priv = netdev_priv(dev);
2527 
2528 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2529 }
2530 
2531 /**
2532  *  stmmac_open - open entry point of the driver
2533  *  @dev : pointer to the device structure.
2534  *  Description:
2535  *  This function is the open entry point of the driver.
2536  *  Return value:
2537  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2538  *  file on failure.
2539  */
2540 static int stmmac_open(struct net_device *dev)
2541 {
2542 	struct stmmac_priv *priv = netdev_priv(dev);
2543 	int ret;
2544 
2545 	stmmac_check_ether_addr(priv);
2546 
2547 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2548 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2549 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2550 		ret = stmmac_init_phy(dev);
2551 		if (ret) {
2552 			netdev_err(priv->dev,
2553 				   "%s: Cannot attach to PHY (error: %d)\n",
2554 				   __func__, ret);
2555 			return ret;
2556 		}
2557 	}
2558 
2559 	/* Extra statistics */
2560 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2561 	priv->xstats.threshold = tc;
2562 
2563 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2564 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2565 
2566 	ret = alloc_dma_desc_resources(priv);
2567 	if (ret < 0) {
2568 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2569 			   __func__);
2570 		goto dma_desc_error;
2571 	}
2572 
2573 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2574 	if (ret < 0) {
2575 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2576 			   __func__);
2577 		goto init_error;
2578 	}
2579 
2580 	ret = stmmac_hw_setup(dev, true);
2581 	if (ret < 0) {
2582 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2583 		goto init_error;
2584 	}
2585 
2586 	stmmac_init_tx_coalesce(priv);
2587 
2588 	if (dev->phydev)
2589 		phy_start(dev->phydev);
2590 
2591 	/* Request the IRQ lines */
2592 	ret = request_irq(dev->irq, stmmac_interrupt,
2593 			  IRQF_SHARED, dev->name, dev);
2594 	if (unlikely(ret < 0)) {
2595 		netdev_err(priv->dev,
2596 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2597 			   __func__, dev->irq, ret);
2598 		goto irq_error;
2599 	}
2600 
2601 	/* Request the Wake IRQ in case of another line is used for WoL */
2602 	if (priv->wol_irq != dev->irq) {
2603 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2604 				  IRQF_SHARED, dev->name, dev);
2605 		if (unlikely(ret < 0)) {
2606 			netdev_err(priv->dev,
2607 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2608 				   __func__, priv->wol_irq, ret);
2609 			goto wolirq_error;
2610 		}
2611 	}
2612 
2613 	/* Request the IRQ lines */
2614 	if (priv->lpi_irq > 0) {
2615 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2616 				  dev->name, dev);
2617 		if (unlikely(ret < 0)) {
2618 			netdev_err(priv->dev,
2619 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2620 				   __func__, priv->lpi_irq, ret);
2621 			goto lpiirq_error;
2622 		}
2623 	}
2624 
2625 	stmmac_enable_all_queues(priv);
2626 	stmmac_start_all_queues(priv);
2627 
2628 	return 0;
2629 
2630 lpiirq_error:
2631 	if (priv->wol_irq != dev->irq)
2632 		free_irq(priv->wol_irq, dev);
2633 wolirq_error:
2634 	free_irq(dev->irq, dev);
2635 irq_error:
2636 	if (dev->phydev)
2637 		phy_stop(dev->phydev);
2638 
2639 	del_timer_sync(&priv->txtimer);
2640 	stmmac_hw_teardown(dev);
2641 init_error:
2642 	free_dma_desc_resources(priv);
2643 dma_desc_error:
2644 	if (dev->phydev)
2645 		phy_disconnect(dev->phydev);
2646 
2647 	return ret;
2648 }
2649 
2650 /**
2651  *  stmmac_release - close entry point of the driver
2652  *  @dev : device pointer.
2653  *  Description:
2654  *  This is the stop entry point of the driver.
2655  */
2656 static int stmmac_release(struct net_device *dev)
2657 {
2658 	struct stmmac_priv *priv = netdev_priv(dev);
2659 
2660 	if (priv->eee_enabled)
2661 		del_timer_sync(&priv->eee_ctrl_timer);
2662 
2663 	/* Stop and disconnect the PHY */
2664 	if (dev->phydev) {
2665 		phy_stop(dev->phydev);
2666 		phy_disconnect(dev->phydev);
2667 	}
2668 
2669 	stmmac_stop_all_queues(priv);
2670 
2671 	stmmac_disable_all_queues(priv);
2672 
2673 	del_timer_sync(&priv->txtimer);
2674 
2675 	/* Free the IRQ lines */
2676 	free_irq(dev->irq, dev);
2677 	if (priv->wol_irq != dev->irq)
2678 		free_irq(priv->wol_irq, dev);
2679 	if (priv->lpi_irq > 0)
2680 		free_irq(priv->lpi_irq, dev);
2681 
2682 	/* Stop TX/RX DMA and clear the descriptors */
2683 	stmmac_stop_all_dma(priv);
2684 
2685 	/* Release and free the Rx/Tx resources */
2686 	free_dma_desc_resources(priv);
2687 
2688 	/* Disable the MAC Rx/Tx */
2689 	priv->hw->mac->set_mac(priv->ioaddr, false);
2690 
2691 	netif_carrier_off(dev);
2692 
2693 #ifdef CONFIG_DEBUG_FS
2694 	stmmac_exit_fs(dev);
2695 #endif
2696 
2697 	stmmac_release_ptp(priv);
2698 
2699 	return 0;
2700 }
2701 
2702 /**
2703  *  stmmac_tso_allocator - close entry point of the driver
2704  *  @priv: driver private structure
2705  *  @des: buffer start address
2706  *  @total_len: total length to fill in descriptors
2707  *  @last_segmant: condition for the last descriptor
2708  *  @queue: TX queue index
2709  *  Description:
2710  *  This function fills descriptor and request new descriptors according to
2711  *  buffer length to fill
2712  */
2713 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2714 				 int total_len, bool last_segment, u32 queue)
2715 {
2716 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2717 	struct dma_desc *desc;
2718 	u32 buff_size;
2719 	int tmp_len;
2720 
2721 	tmp_len = total_len;
2722 
2723 	while (tmp_len > 0) {
2724 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2725 		desc = tx_q->dma_tx + tx_q->cur_tx;
2726 
2727 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2728 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2729 			    TSO_MAX_BUFF_SIZE : tmp_len;
2730 
2731 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2732 			0, 1,
2733 			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2734 			0, 0);
2735 
2736 		tmp_len -= TSO_MAX_BUFF_SIZE;
2737 	}
2738 }
2739 
2740 /**
2741  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2742  *  @skb : the socket buffer
2743  *  @dev : device pointer
2744  *  Description: this is the transmit function that is called on TSO frames
2745  *  (support available on GMAC4 and newer chips).
2746  *  Diagram below show the ring programming in case of TSO frames:
2747  *
2748  *  First Descriptor
2749  *   --------
2750  *   | DES0 |---> buffer1 = L2/L3/L4 header
2751  *   | DES1 |---> TCP Payload (can continue on next descr...)
2752  *   | DES2 |---> buffer 1 and 2 len
2753  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2754  *   --------
2755  *	|
2756  *     ...
2757  *	|
2758  *   --------
2759  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2760  *   | DES1 | --|
2761  *   | DES2 | --> buffer 1 and 2 len
2762  *   | DES3 |
2763  *   --------
2764  *
2765  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2766  */
2767 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2768 {
2769 	struct dma_desc *desc, *first, *mss_desc = NULL;
2770 	struct stmmac_priv *priv = netdev_priv(dev);
2771 	int nfrags = skb_shinfo(skb)->nr_frags;
2772 	u32 queue = skb_get_queue_mapping(skb);
2773 	unsigned int first_entry, des;
2774 	struct stmmac_tx_queue *tx_q;
2775 	int tmp_pay_len = 0;
2776 	u32 pay_len, mss;
2777 	u8 proto_hdr_len;
2778 	int i;
2779 
2780 	tx_q = &priv->tx_queue[queue];
2781 
2782 	/* Compute header lengths */
2783 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2784 
2785 	/* Desc availability based on threshold should be enough safe */
2786 	if (unlikely(stmmac_tx_avail(priv, queue) <
2787 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2788 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2789 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2790 								queue));
2791 			/* This is a hard error, log it. */
2792 			netdev_err(priv->dev,
2793 				   "%s: Tx Ring full when queue awake\n",
2794 				   __func__);
2795 		}
2796 		return NETDEV_TX_BUSY;
2797 	}
2798 
2799 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2800 
2801 	mss = skb_shinfo(skb)->gso_size;
2802 
2803 	/* set new MSS value if needed */
2804 	if (mss != priv->mss) {
2805 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2806 		priv->hw->desc->set_mss(mss_desc, mss);
2807 		priv->mss = mss;
2808 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2809 	}
2810 
2811 	if (netif_msg_tx_queued(priv)) {
2812 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2813 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2814 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2815 			skb->data_len);
2816 	}
2817 
2818 	first_entry = tx_q->cur_tx;
2819 
2820 	desc = tx_q->dma_tx + first_entry;
2821 	first = desc;
2822 
2823 	/* first descriptor: fill Headers on Buf1 */
2824 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2825 			     DMA_TO_DEVICE);
2826 	if (dma_mapping_error(priv->device, des))
2827 		goto dma_map_err;
2828 
2829 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2830 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2831 
2832 	first->des0 = cpu_to_le32(des);
2833 
2834 	/* Fill start of payload in buff2 of first descriptor */
2835 	if (pay_len)
2836 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2837 
2838 	/* If needed take extra descriptors to fill the remaining payload */
2839 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2840 
2841 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2842 
2843 	/* Prepare fragments */
2844 	for (i = 0; i < nfrags; i++) {
2845 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2846 
2847 		des = skb_frag_dma_map(priv->device, frag, 0,
2848 				       skb_frag_size(frag),
2849 				       DMA_TO_DEVICE);
2850 		if (dma_mapping_error(priv->device, des))
2851 			goto dma_map_err;
2852 
2853 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2854 				     (i == nfrags - 1), queue);
2855 
2856 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2857 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2858 		tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2859 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2860 	}
2861 
2862 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2863 
2864 	/* Only the last descriptor gets to point to the skb. */
2865 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2866 
2867 	/* We've used all descriptors we need for this skb, however,
2868 	 * advance cur_tx so that it references a fresh descriptor.
2869 	 * ndo_start_xmit will fill this descriptor the next time it's
2870 	 * called and stmmac_tx_clean may clean up to this descriptor.
2871 	 */
2872 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2873 
2874 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2875 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2876 			  __func__);
2877 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2878 	}
2879 
2880 	dev->stats.tx_bytes += skb->len;
2881 	priv->xstats.tx_tso_frames++;
2882 	priv->xstats.tx_tso_nfrags += nfrags;
2883 
2884 	/* Manage tx mitigation */
2885 	priv->tx_count_frames += nfrags + 1;
2886 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2887 		mod_timer(&priv->txtimer,
2888 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2889 	} else {
2890 		priv->tx_count_frames = 0;
2891 		priv->hw->desc->set_tx_ic(desc);
2892 		priv->xstats.tx_set_ic_bit++;
2893 	}
2894 
2895 	skb_tx_timestamp(skb);
2896 
2897 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2898 		     priv->hwts_tx_en)) {
2899 		/* declare that device is doing timestamping */
2900 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2901 		priv->hw->desc->enable_tx_timestamp(first);
2902 	}
2903 
2904 	/* Complete the first descriptor before granting the DMA */
2905 	priv->hw->desc->prepare_tso_tx_desc(first, 1,
2906 			proto_hdr_len,
2907 			pay_len,
2908 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2909 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2910 
2911 	/* If context desc is used to change MSS */
2912 	if (mss_desc)
2913 		priv->hw->desc->set_tx_owner(mss_desc);
2914 
2915 	/* The own bit must be the latest setting done when prepare the
2916 	 * descriptor and then barrier is needed to make sure that
2917 	 * all is coherent before granting the DMA engine.
2918 	 */
2919 	dma_wmb();
2920 
2921 	if (netif_msg_pktdata(priv)) {
2922 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2923 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2924 			tx_q->cur_tx, first, nfrags);
2925 
2926 		priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2927 					     0);
2928 
2929 		pr_info(">>> frame to be transmitted: ");
2930 		print_pkt(skb->data, skb_headlen(skb));
2931 	}
2932 
2933 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2934 
2935 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2936 				       queue);
2937 
2938 	return NETDEV_TX_OK;
2939 
2940 dma_map_err:
2941 	dev_err(priv->device, "Tx dma map failed\n");
2942 	dev_kfree_skb(skb);
2943 	priv->dev->stats.tx_dropped++;
2944 	return NETDEV_TX_OK;
2945 }
2946 
2947 /**
2948  *  stmmac_xmit - Tx entry point of the driver
2949  *  @skb : the socket buffer
2950  *  @dev : device pointer
2951  *  Description : this is the tx entry point of the driver.
2952  *  It programs the chain or the ring and supports oversized frames
2953  *  and SG feature.
2954  */
2955 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2956 {
2957 	struct stmmac_priv *priv = netdev_priv(dev);
2958 	unsigned int nopaged_len = skb_headlen(skb);
2959 	int i, csum_insertion = 0, is_jumbo = 0;
2960 	u32 queue = skb_get_queue_mapping(skb);
2961 	int nfrags = skb_shinfo(skb)->nr_frags;
2962 	int entry;
2963 	unsigned int first_entry;
2964 	struct dma_desc *desc, *first;
2965 	struct stmmac_tx_queue *tx_q;
2966 	unsigned int enh_desc;
2967 	unsigned int des;
2968 
2969 	tx_q = &priv->tx_queue[queue];
2970 
2971 	/* Manage oversized TCP frames for GMAC4 device */
2972 	if (skb_is_gso(skb) && priv->tso) {
2973 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2974 			return stmmac_tso_xmit(skb, dev);
2975 	}
2976 
2977 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2978 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2979 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2980 								queue));
2981 			/* This is a hard error, log it. */
2982 			netdev_err(priv->dev,
2983 				   "%s: Tx Ring full when queue awake\n",
2984 				   __func__);
2985 		}
2986 		return NETDEV_TX_BUSY;
2987 	}
2988 
2989 	if (priv->tx_path_in_lpi_mode)
2990 		stmmac_disable_eee_mode(priv);
2991 
2992 	entry = tx_q->cur_tx;
2993 	first_entry = entry;
2994 
2995 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2996 
2997 	if (likely(priv->extend_desc))
2998 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2999 	else
3000 		desc = tx_q->dma_tx + entry;
3001 
3002 	first = desc;
3003 
3004 	enh_desc = priv->plat->enh_desc;
3005 	/* To program the descriptors according to the size of the frame */
3006 	if (enh_desc)
3007 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3008 
3009 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3010 					 DWMAC_CORE_4_00)) {
3011 		entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3012 		if (unlikely(entry < 0))
3013 			goto dma_map_err;
3014 	}
3015 
3016 	for (i = 0; i < nfrags; i++) {
3017 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3018 		int len = skb_frag_size(frag);
3019 		bool last_segment = (i == (nfrags - 1));
3020 
3021 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3022 
3023 		if (likely(priv->extend_desc))
3024 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3025 		else
3026 			desc = tx_q->dma_tx + entry;
3027 
3028 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3029 				       DMA_TO_DEVICE);
3030 		if (dma_mapping_error(priv->device, des))
3031 			goto dma_map_err; /* should reuse desc w/o issues */
3032 
3033 		tx_q->tx_skbuff[entry] = NULL;
3034 
3035 		tx_q->tx_skbuff_dma[entry].buf = des;
3036 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3037 			desc->des0 = cpu_to_le32(des);
3038 		else
3039 			desc->des2 = cpu_to_le32(des);
3040 
3041 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3042 		tx_q->tx_skbuff_dma[entry].len = len;
3043 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3044 
3045 		/* Prepare the descriptor and set the own bit too */
3046 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3047 						priv->mode, 1, last_segment,
3048 						skb->len);
3049 	}
3050 
3051 	/* Only the last descriptor gets to point to the skb. */
3052 	tx_q->tx_skbuff[entry] = skb;
3053 
3054 	/* We've used all descriptors we need for this skb, however,
3055 	 * advance cur_tx so that it references a fresh descriptor.
3056 	 * ndo_start_xmit will fill this descriptor the next time it's
3057 	 * called and stmmac_tx_clean may clean up to this descriptor.
3058 	 */
3059 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3060 	tx_q->cur_tx = entry;
3061 
3062 	if (netif_msg_pktdata(priv)) {
3063 		void *tx_head;
3064 
3065 		netdev_dbg(priv->dev,
3066 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3067 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3068 			   entry, first, nfrags);
3069 
3070 		if (priv->extend_desc)
3071 			tx_head = (void *)tx_q->dma_etx;
3072 		else
3073 			tx_head = (void *)tx_q->dma_tx;
3074 
3075 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3076 
3077 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3078 		print_pkt(skb->data, skb->len);
3079 	}
3080 
3081 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3082 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3083 			  __func__);
3084 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3085 	}
3086 
3087 	dev->stats.tx_bytes += skb->len;
3088 
3089 	/* According to the coalesce parameter the IC bit for the latest
3090 	 * segment is reset and the timer re-started to clean the tx status.
3091 	 * This approach takes care about the fragments: desc is the first
3092 	 * element in case of no SG.
3093 	 */
3094 	priv->tx_count_frames += nfrags + 1;
3095 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3096 		mod_timer(&priv->txtimer,
3097 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3098 	} else {
3099 		priv->tx_count_frames = 0;
3100 		priv->hw->desc->set_tx_ic(desc);
3101 		priv->xstats.tx_set_ic_bit++;
3102 	}
3103 
3104 	skb_tx_timestamp(skb);
3105 
3106 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3107 	 * problems because all the descriptors are actually ready to be
3108 	 * passed to the DMA engine.
3109 	 */
3110 	if (likely(!is_jumbo)) {
3111 		bool last_segment = (nfrags == 0);
3112 
3113 		des = dma_map_single(priv->device, skb->data,
3114 				     nopaged_len, DMA_TO_DEVICE);
3115 		if (dma_mapping_error(priv->device, des))
3116 			goto dma_map_err;
3117 
3118 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3119 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3120 			first->des0 = cpu_to_le32(des);
3121 		else
3122 			first->des2 = cpu_to_le32(des);
3123 
3124 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3125 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3126 
3127 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3128 			     priv->hwts_tx_en)) {
3129 			/* declare that device is doing timestamping */
3130 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3131 			priv->hw->desc->enable_tx_timestamp(first);
3132 		}
3133 
3134 		/* Prepare the first descriptor setting the OWN bit too */
3135 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3136 						csum_insertion, priv->mode, 1,
3137 						last_segment, skb->len);
3138 
3139 		/* The own bit must be the latest setting done when prepare the
3140 		 * descriptor and then barrier is needed to make sure that
3141 		 * all is coherent before granting the DMA engine.
3142 		 */
3143 		dma_wmb();
3144 	}
3145 
3146 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3147 
3148 	if (priv->synopsys_id < DWMAC_CORE_4_00)
3149 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3150 	else
3151 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3152 					       queue);
3153 
3154 	return NETDEV_TX_OK;
3155 
3156 dma_map_err:
3157 	netdev_err(priv->dev, "Tx DMA map failed\n");
3158 	dev_kfree_skb(skb);
3159 	priv->dev->stats.tx_dropped++;
3160 	return NETDEV_TX_OK;
3161 }
3162 
3163 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3164 {
3165 	struct ethhdr *ehdr;
3166 	u16 vlanid;
3167 
3168 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3169 	    NETIF_F_HW_VLAN_CTAG_RX &&
3170 	    !__vlan_get_tag(skb, &vlanid)) {
3171 		/* pop the vlan tag */
3172 		ehdr = (struct ethhdr *)skb->data;
3173 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3174 		skb_pull(skb, VLAN_HLEN);
3175 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3176 	}
3177 }
3178 
3179 
3180 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3181 {
3182 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3183 		return 0;
3184 
3185 	return 1;
3186 }
3187 
3188 /**
3189  * stmmac_rx_refill - refill used skb preallocated buffers
3190  * @priv: driver private structure
3191  * @queue: RX queue index
3192  * Description : this is to reallocate the skb for the reception process
3193  * that is based on zero-copy.
3194  */
3195 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3196 {
3197 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3198 	int dirty = stmmac_rx_dirty(priv, queue);
3199 	unsigned int entry = rx_q->dirty_rx;
3200 
3201 	int bfsize = priv->dma_buf_sz;
3202 
3203 	while (dirty-- > 0) {
3204 		struct dma_desc *p;
3205 
3206 		if (priv->extend_desc)
3207 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3208 		else
3209 			p = rx_q->dma_rx + entry;
3210 
3211 		if (likely(!rx_q->rx_skbuff[entry])) {
3212 			struct sk_buff *skb;
3213 
3214 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3215 			if (unlikely(!skb)) {
3216 				/* so for a while no zero-copy! */
3217 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3218 				if (unlikely(net_ratelimit()))
3219 					dev_err(priv->device,
3220 						"fail to alloc skb entry %d\n",
3221 						entry);
3222 				break;
3223 			}
3224 
3225 			rx_q->rx_skbuff[entry] = skb;
3226 			rx_q->rx_skbuff_dma[entry] =
3227 			    dma_map_single(priv->device, skb->data, bfsize,
3228 					   DMA_FROM_DEVICE);
3229 			if (dma_mapping_error(priv->device,
3230 					      rx_q->rx_skbuff_dma[entry])) {
3231 				netdev_err(priv->dev, "Rx DMA map failed\n");
3232 				dev_kfree_skb(skb);
3233 				break;
3234 			}
3235 
3236 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3237 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3238 				p->des1 = 0;
3239 			} else {
3240 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3241 			}
3242 			if (priv->hw->mode->refill_desc3)
3243 				priv->hw->mode->refill_desc3(rx_q, p);
3244 
3245 			if (rx_q->rx_zeroc_thresh > 0)
3246 				rx_q->rx_zeroc_thresh--;
3247 
3248 			netif_dbg(priv, rx_status, priv->dev,
3249 				  "refill entry #%d\n", entry);
3250 		}
3251 		dma_wmb();
3252 
3253 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3254 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3255 		else
3256 			priv->hw->desc->set_rx_owner(p);
3257 
3258 		dma_wmb();
3259 
3260 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3261 	}
3262 	rx_q->dirty_rx = entry;
3263 }
3264 
3265 /**
3266  * stmmac_rx - manage the receive process
3267  * @priv: driver private structure
3268  * @limit: napi bugget
3269  * @queue: RX queue index.
3270  * Description :  this the function called by the napi poll method.
3271  * It gets all the frames inside the ring.
3272  */
3273 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3274 {
3275 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3276 	unsigned int entry = rx_q->cur_rx;
3277 	int coe = priv->hw->rx_csum;
3278 	unsigned int next_entry;
3279 	unsigned int count = 0;
3280 
3281 	if (netif_msg_rx_status(priv)) {
3282 		void *rx_head;
3283 
3284 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3285 		if (priv->extend_desc)
3286 			rx_head = (void *)rx_q->dma_erx;
3287 		else
3288 			rx_head = (void *)rx_q->dma_rx;
3289 
3290 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3291 	}
3292 	while (count < limit) {
3293 		int status;
3294 		struct dma_desc *p;
3295 		struct dma_desc *np;
3296 
3297 		if (priv->extend_desc)
3298 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3299 		else
3300 			p = rx_q->dma_rx + entry;
3301 
3302 		/* read the status of the incoming frame */
3303 		status = priv->hw->desc->rx_status(&priv->dev->stats,
3304 						   &priv->xstats, p);
3305 		/* check if managed by the DMA otherwise go ahead */
3306 		if (unlikely(status & dma_own))
3307 			break;
3308 
3309 		count++;
3310 
3311 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3312 		next_entry = rx_q->cur_rx;
3313 
3314 		if (priv->extend_desc)
3315 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3316 		else
3317 			np = rx_q->dma_rx + next_entry;
3318 
3319 		prefetch(np);
3320 
3321 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3322 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
3323 							   &priv->xstats,
3324 							   rx_q->dma_erx +
3325 							   entry);
3326 		if (unlikely(status == discard_frame)) {
3327 			priv->dev->stats.rx_errors++;
3328 			if (priv->hwts_rx_en && !priv->extend_desc) {
3329 				/* DESC2 & DESC3 will be overwritten by device
3330 				 * with timestamp value, hence reinitialize
3331 				 * them in stmmac_rx_refill() function so that
3332 				 * device can reuse it.
3333 				 */
3334 				rx_q->rx_skbuff[entry] = NULL;
3335 				dma_unmap_single(priv->device,
3336 						 rx_q->rx_skbuff_dma[entry],
3337 						 priv->dma_buf_sz,
3338 						 DMA_FROM_DEVICE);
3339 			}
3340 		} else {
3341 			struct sk_buff *skb;
3342 			int frame_len;
3343 			unsigned int des;
3344 
3345 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3346 				des = le32_to_cpu(p->des0);
3347 			else
3348 				des = le32_to_cpu(p->des2);
3349 
3350 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3351 
3352 			/*  If frame length is greater than skb buffer size
3353 			 *  (preallocated during init) then the packet is
3354 			 *  ignored
3355 			 */
3356 			if (frame_len > priv->dma_buf_sz) {
3357 				netdev_err(priv->dev,
3358 					   "len %d larger than size (%d)\n",
3359 					   frame_len, priv->dma_buf_sz);
3360 				priv->dev->stats.rx_length_errors++;
3361 				break;
3362 			}
3363 
3364 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3365 			 * Type frames (LLC/LLC-SNAP)
3366 			 */
3367 			if (unlikely(status != llc_snap))
3368 				frame_len -= ETH_FCS_LEN;
3369 
3370 			if (netif_msg_rx_status(priv)) {
3371 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3372 					   p, entry, des);
3373 				if (frame_len > ETH_FRAME_LEN)
3374 					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3375 						   frame_len, status);
3376 			}
3377 
3378 			/* The zero-copy is always used for all the sizes
3379 			 * in case of GMAC4 because it needs
3380 			 * to refill the used descriptors, always.
3381 			 */
3382 			if (unlikely(!priv->plat->has_gmac4 &&
3383 				     ((frame_len < priv->rx_copybreak) ||
3384 				     stmmac_rx_threshold_count(rx_q)))) {
3385 				skb = netdev_alloc_skb_ip_align(priv->dev,
3386 								frame_len);
3387 				if (unlikely(!skb)) {
3388 					if (net_ratelimit())
3389 						dev_warn(priv->device,
3390 							 "packet dropped\n");
3391 					priv->dev->stats.rx_dropped++;
3392 					break;
3393 				}
3394 
3395 				dma_sync_single_for_cpu(priv->device,
3396 							rx_q->rx_skbuff_dma
3397 							[entry], frame_len,
3398 							DMA_FROM_DEVICE);
3399 				skb_copy_to_linear_data(skb,
3400 							rx_q->
3401 							rx_skbuff[entry]->data,
3402 							frame_len);
3403 
3404 				skb_put(skb, frame_len);
3405 				dma_sync_single_for_device(priv->device,
3406 							   rx_q->rx_skbuff_dma
3407 							   [entry], frame_len,
3408 							   DMA_FROM_DEVICE);
3409 			} else {
3410 				skb = rx_q->rx_skbuff[entry];
3411 				if (unlikely(!skb)) {
3412 					netdev_err(priv->dev,
3413 						   "%s: Inconsistent Rx chain\n",
3414 						   priv->dev->name);
3415 					priv->dev->stats.rx_dropped++;
3416 					break;
3417 				}
3418 				prefetch(skb->data - NET_IP_ALIGN);
3419 				rx_q->rx_skbuff[entry] = NULL;
3420 				rx_q->rx_zeroc_thresh++;
3421 
3422 				skb_put(skb, frame_len);
3423 				dma_unmap_single(priv->device,
3424 						 rx_q->rx_skbuff_dma[entry],
3425 						 priv->dma_buf_sz,
3426 						 DMA_FROM_DEVICE);
3427 			}
3428 
3429 			if (netif_msg_pktdata(priv)) {
3430 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3431 					   frame_len);
3432 				print_pkt(skb->data, frame_len);
3433 			}
3434 
3435 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3436 
3437 			stmmac_rx_vlan(priv->dev, skb);
3438 
3439 			skb->protocol = eth_type_trans(skb, priv->dev);
3440 
3441 			if (unlikely(!coe))
3442 				skb_checksum_none_assert(skb);
3443 			else
3444 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3445 
3446 			napi_gro_receive(&rx_q->napi, skb);
3447 
3448 			priv->dev->stats.rx_packets++;
3449 			priv->dev->stats.rx_bytes += frame_len;
3450 		}
3451 		entry = next_entry;
3452 	}
3453 
3454 	stmmac_rx_refill(priv, queue);
3455 
3456 	priv->xstats.rx_pkt_n += count;
3457 
3458 	return count;
3459 }
3460 
3461 /**
3462  *  stmmac_poll - stmmac poll method (NAPI)
3463  *  @napi : pointer to the napi structure.
3464  *  @budget : maximum number of packets that the current CPU can receive from
3465  *	      all interfaces.
3466  *  Description :
3467  *  To look at the incoming frames and clear the tx resources.
3468  */
3469 static int stmmac_poll(struct napi_struct *napi, int budget)
3470 {
3471 	struct stmmac_rx_queue *rx_q =
3472 		container_of(napi, struct stmmac_rx_queue, napi);
3473 	struct stmmac_priv *priv = rx_q->priv_data;
3474 	u32 tx_count = priv->plat->tx_queues_to_use;
3475 	u32 chan = rx_q->queue_index;
3476 	int work_done = 0;
3477 	u32 queue;
3478 
3479 	priv->xstats.napi_poll++;
3480 
3481 	/* check all the queues */
3482 	for (queue = 0; queue < tx_count; queue++)
3483 		stmmac_tx_clean(priv, queue);
3484 
3485 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3486 	if (work_done < budget) {
3487 		napi_complete_done(napi, work_done);
3488 		stmmac_enable_dma_irq(priv, chan);
3489 	}
3490 	return work_done;
3491 }
3492 
3493 /**
3494  *  stmmac_tx_timeout
3495  *  @dev : Pointer to net device structure
3496  *  Description: this function is called when a packet transmission fails to
3497  *   complete within a reasonable time. The driver will mark the error in the
3498  *   netdev structure and arrange for the device to be reset to a sane state
3499  *   in order to transmit a new packet.
3500  */
3501 static void stmmac_tx_timeout(struct net_device *dev)
3502 {
3503 	struct stmmac_priv *priv = netdev_priv(dev);
3504 	u32 tx_count = priv->plat->tx_queues_to_use;
3505 	u32 chan;
3506 
3507 	/* Clear Tx resources and restart transmitting again */
3508 	for (chan = 0; chan < tx_count; chan++)
3509 		stmmac_tx_err(priv, chan);
3510 }
3511 
3512 /**
3513  *  stmmac_set_rx_mode - entry point for multicast addressing
3514  *  @dev : pointer to the device structure
3515  *  Description:
3516  *  This function is a driver entry point which gets called by the kernel
3517  *  whenever multicast addresses must be enabled/disabled.
3518  *  Return value:
3519  *  void.
3520  */
3521 static void stmmac_set_rx_mode(struct net_device *dev)
3522 {
3523 	struct stmmac_priv *priv = netdev_priv(dev);
3524 
3525 	priv->hw->mac->set_filter(priv->hw, dev);
3526 }
3527 
3528 /**
3529  *  stmmac_change_mtu - entry point to change MTU size for the device.
3530  *  @dev : device pointer.
3531  *  @new_mtu : the new MTU size for the device.
3532  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3533  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3534  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3535  *  Return value:
3536  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3537  *  file on failure.
3538  */
3539 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3540 {
3541 	struct stmmac_priv *priv = netdev_priv(dev);
3542 
3543 	if (netif_running(dev)) {
3544 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3545 		return -EBUSY;
3546 	}
3547 
3548 	dev->mtu = new_mtu;
3549 
3550 	netdev_update_features(dev);
3551 
3552 	return 0;
3553 }
3554 
3555 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3556 					     netdev_features_t features)
3557 {
3558 	struct stmmac_priv *priv = netdev_priv(dev);
3559 
3560 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3561 		features &= ~NETIF_F_RXCSUM;
3562 
3563 	if (!priv->plat->tx_coe)
3564 		features &= ~NETIF_F_CSUM_MASK;
3565 
3566 	/* Some GMAC devices have a bugged Jumbo frame support that
3567 	 * needs to have the Tx COE disabled for oversized frames
3568 	 * (due to limited buffer sizes). In this case we disable
3569 	 * the TX csum insertion in the TDES and not use SF.
3570 	 */
3571 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3572 		features &= ~NETIF_F_CSUM_MASK;
3573 
3574 	/* Disable tso if asked by ethtool */
3575 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3576 		if (features & NETIF_F_TSO)
3577 			priv->tso = true;
3578 		else
3579 			priv->tso = false;
3580 	}
3581 
3582 	return features;
3583 }
3584 
3585 static int stmmac_set_features(struct net_device *netdev,
3586 			       netdev_features_t features)
3587 {
3588 	struct stmmac_priv *priv = netdev_priv(netdev);
3589 
3590 	/* Keep the COE Type in case of csum is supporting */
3591 	if (features & NETIF_F_RXCSUM)
3592 		priv->hw->rx_csum = priv->plat->rx_coe;
3593 	else
3594 		priv->hw->rx_csum = 0;
3595 	/* No check needed because rx_coe has been set before and it will be
3596 	 * fixed in case of issue.
3597 	 */
3598 	priv->hw->mac->rx_ipc(priv->hw);
3599 
3600 	return 0;
3601 }
3602 
3603 /**
3604  *  stmmac_interrupt - main ISR
3605  *  @irq: interrupt number.
3606  *  @dev_id: to pass the net device pointer.
3607  *  Description: this is the main driver interrupt service routine.
3608  *  It can call:
3609  *  o DMA service routine (to manage incoming frame reception and transmission
3610  *    status)
3611  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3612  *    interrupts.
3613  */
3614 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3615 {
3616 	struct net_device *dev = (struct net_device *)dev_id;
3617 	struct stmmac_priv *priv = netdev_priv(dev);
3618 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3619 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3620 	u32 queues_count;
3621 	u32 queue;
3622 
3623 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3624 
3625 	if (priv->irq_wake)
3626 		pm_wakeup_event(priv->device, 0);
3627 
3628 	if (unlikely(!dev)) {
3629 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3630 		return IRQ_NONE;
3631 	}
3632 
3633 	/* To handle GMAC own interrupts */
3634 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3635 		int status = priv->hw->mac->host_irq_status(priv->hw,
3636 							    &priv->xstats);
3637 
3638 		if (unlikely(status)) {
3639 			/* For LPI we need to save the tx status */
3640 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3641 				priv->tx_path_in_lpi_mode = true;
3642 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3643 				priv->tx_path_in_lpi_mode = false;
3644 		}
3645 
3646 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3647 			for (queue = 0; queue < queues_count; queue++) {
3648 				struct stmmac_rx_queue *rx_q =
3649 				&priv->rx_queue[queue];
3650 
3651 				status |=
3652 				priv->hw->mac->host_mtl_irq_status(priv->hw,
3653 								   queue);
3654 
3655 				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3656 				    priv->hw->dma->set_rx_tail_ptr)
3657 					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3658 								rx_q->rx_tail_addr,
3659 								queue);
3660 			}
3661 		}
3662 
3663 		/* PCS link status */
3664 		if (priv->hw->pcs) {
3665 			if (priv->xstats.pcs_link)
3666 				netif_carrier_on(dev);
3667 			else
3668 				netif_carrier_off(dev);
3669 		}
3670 	}
3671 
3672 	/* To handle DMA interrupts */
3673 	stmmac_dma_interrupt(priv);
3674 
3675 	return IRQ_HANDLED;
3676 }
3677 
3678 #ifdef CONFIG_NET_POLL_CONTROLLER
3679 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3680  * to allow network I/O with interrupts disabled.
3681  */
3682 static void stmmac_poll_controller(struct net_device *dev)
3683 {
3684 	disable_irq(dev->irq);
3685 	stmmac_interrupt(dev->irq, dev);
3686 	enable_irq(dev->irq);
3687 }
3688 #endif
3689 
3690 /**
3691  *  stmmac_ioctl - Entry point for the Ioctl
3692  *  @dev: Device pointer.
3693  *  @rq: An IOCTL specefic structure, that can contain a pointer to
3694  *  a proprietary structure used to pass information to the driver.
3695  *  @cmd: IOCTL command
3696  *  Description:
3697  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3698  */
3699 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3700 {
3701 	int ret = -EOPNOTSUPP;
3702 
3703 	if (!netif_running(dev))
3704 		return -EINVAL;
3705 
3706 	switch (cmd) {
3707 	case SIOCGMIIPHY:
3708 	case SIOCGMIIREG:
3709 	case SIOCSMIIREG:
3710 		if (!dev->phydev)
3711 			return -EINVAL;
3712 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3713 		break;
3714 	case SIOCSHWTSTAMP:
3715 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3716 		break;
3717 	default:
3718 		break;
3719 	}
3720 
3721 	return ret;
3722 }
3723 
3724 #ifdef CONFIG_DEBUG_FS
3725 static struct dentry *stmmac_fs_dir;
3726 
3727 static void sysfs_display_ring(void *head, int size, int extend_desc,
3728 			       struct seq_file *seq)
3729 {
3730 	int i;
3731 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3732 	struct dma_desc *p = (struct dma_desc *)head;
3733 
3734 	for (i = 0; i < size; i++) {
3735 		if (extend_desc) {
3736 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3737 				   i, (unsigned int)virt_to_phys(ep),
3738 				   le32_to_cpu(ep->basic.des0),
3739 				   le32_to_cpu(ep->basic.des1),
3740 				   le32_to_cpu(ep->basic.des2),
3741 				   le32_to_cpu(ep->basic.des3));
3742 			ep++;
3743 		} else {
3744 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3745 				   i, (unsigned int)virt_to_phys(p),
3746 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3747 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3748 			p++;
3749 		}
3750 		seq_printf(seq, "\n");
3751 	}
3752 }
3753 
3754 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3755 {
3756 	struct net_device *dev = seq->private;
3757 	struct stmmac_priv *priv = netdev_priv(dev);
3758 	u32 rx_count = priv->plat->rx_queues_to_use;
3759 	u32 tx_count = priv->plat->tx_queues_to_use;
3760 	u32 queue;
3761 
3762 	for (queue = 0; queue < rx_count; queue++) {
3763 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3764 
3765 		seq_printf(seq, "RX Queue %d:\n", queue);
3766 
3767 		if (priv->extend_desc) {
3768 			seq_printf(seq, "Extended descriptor ring:\n");
3769 			sysfs_display_ring((void *)rx_q->dma_erx,
3770 					   DMA_RX_SIZE, 1, seq);
3771 		} else {
3772 			seq_printf(seq, "Descriptor ring:\n");
3773 			sysfs_display_ring((void *)rx_q->dma_rx,
3774 					   DMA_RX_SIZE, 0, seq);
3775 		}
3776 	}
3777 
3778 	for (queue = 0; queue < tx_count; queue++) {
3779 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3780 
3781 		seq_printf(seq, "TX Queue %d:\n", queue);
3782 
3783 		if (priv->extend_desc) {
3784 			seq_printf(seq, "Extended descriptor ring:\n");
3785 			sysfs_display_ring((void *)tx_q->dma_etx,
3786 					   DMA_TX_SIZE, 1, seq);
3787 		} else {
3788 			seq_printf(seq, "Descriptor ring:\n");
3789 			sysfs_display_ring((void *)tx_q->dma_tx,
3790 					   DMA_TX_SIZE, 0, seq);
3791 		}
3792 	}
3793 
3794 	return 0;
3795 }
3796 
3797 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3798 {
3799 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3800 }
3801 
3802 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3803 
3804 static const struct file_operations stmmac_rings_status_fops = {
3805 	.owner = THIS_MODULE,
3806 	.open = stmmac_sysfs_ring_open,
3807 	.read = seq_read,
3808 	.llseek = seq_lseek,
3809 	.release = single_release,
3810 };
3811 
3812 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3813 {
3814 	struct net_device *dev = seq->private;
3815 	struct stmmac_priv *priv = netdev_priv(dev);
3816 
3817 	if (!priv->hw_cap_support) {
3818 		seq_printf(seq, "DMA HW features not supported\n");
3819 		return 0;
3820 	}
3821 
3822 	seq_printf(seq, "==============================\n");
3823 	seq_printf(seq, "\tDMA HW features\n");
3824 	seq_printf(seq, "==============================\n");
3825 
3826 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3827 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3828 	seq_printf(seq, "\t1000 Mbps: %s\n",
3829 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3830 	seq_printf(seq, "\tHalf duplex: %s\n",
3831 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3832 	seq_printf(seq, "\tHash Filter: %s\n",
3833 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3834 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3835 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3836 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3837 		   (priv->dma_cap.pcs) ? "Y" : "N");
3838 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3839 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3840 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3841 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3842 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3843 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3844 	seq_printf(seq, "\tRMON module: %s\n",
3845 		   (priv->dma_cap.rmon) ? "Y" : "N");
3846 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3847 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3848 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3849 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3850 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3851 		   (priv->dma_cap.eee) ? "Y" : "N");
3852 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3853 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3854 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3855 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3856 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3857 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3858 	} else {
3859 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3860 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3861 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3862 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3863 	}
3864 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3865 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3866 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3867 		   priv->dma_cap.number_rx_channel);
3868 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3869 		   priv->dma_cap.number_tx_channel);
3870 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3871 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3872 
3873 	return 0;
3874 }
3875 
3876 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3877 {
3878 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3879 }
3880 
3881 static const struct file_operations stmmac_dma_cap_fops = {
3882 	.owner = THIS_MODULE,
3883 	.open = stmmac_sysfs_dma_cap_open,
3884 	.read = seq_read,
3885 	.llseek = seq_lseek,
3886 	.release = single_release,
3887 };
3888 
3889 static int stmmac_init_fs(struct net_device *dev)
3890 {
3891 	struct stmmac_priv *priv = netdev_priv(dev);
3892 
3893 	/* Create per netdev entries */
3894 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3895 
3896 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3897 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3898 
3899 		return -ENOMEM;
3900 	}
3901 
3902 	/* Entry to report DMA RX/TX rings */
3903 	priv->dbgfs_rings_status =
3904 		debugfs_create_file("descriptors_status", S_IRUGO,
3905 				    priv->dbgfs_dir, dev,
3906 				    &stmmac_rings_status_fops);
3907 
3908 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3909 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3910 		debugfs_remove_recursive(priv->dbgfs_dir);
3911 
3912 		return -ENOMEM;
3913 	}
3914 
3915 	/* Entry to report the DMA HW features */
3916 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3917 					    priv->dbgfs_dir,
3918 					    dev, &stmmac_dma_cap_fops);
3919 
3920 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3921 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3922 		debugfs_remove_recursive(priv->dbgfs_dir);
3923 
3924 		return -ENOMEM;
3925 	}
3926 
3927 	return 0;
3928 }
3929 
3930 static void stmmac_exit_fs(struct net_device *dev)
3931 {
3932 	struct stmmac_priv *priv = netdev_priv(dev);
3933 
3934 	debugfs_remove_recursive(priv->dbgfs_dir);
3935 }
3936 #endif /* CONFIG_DEBUG_FS */
3937 
3938 static const struct net_device_ops stmmac_netdev_ops = {
3939 	.ndo_open = stmmac_open,
3940 	.ndo_start_xmit = stmmac_xmit,
3941 	.ndo_stop = stmmac_release,
3942 	.ndo_change_mtu = stmmac_change_mtu,
3943 	.ndo_fix_features = stmmac_fix_features,
3944 	.ndo_set_features = stmmac_set_features,
3945 	.ndo_set_rx_mode = stmmac_set_rx_mode,
3946 	.ndo_tx_timeout = stmmac_tx_timeout,
3947 	.ndo_do_ioctl = stmmac_ioctl,
3948 #ifdef CONFIG_NET_POLL_CONTROLLER
3949 	.ndo_poll_controller = stmmac_poll_controller,
3950 #endif
3951 	.ndo_set_mac_address = eth_mac_addr,
3952 };
3953 
3954 /**
3955  *  stmmac_hw_init - Init the MAC device
3956  *  @priv: driver private structure
3957  *  Description: this function is to configure the MAC device according to
3958  *  some platform parameters or the HW capability register. It prepares the
3959  *  driver to use either ring or chain modes and to setup either enhanced or
3960  *  normal descriptors.
3961  */
3962 static int stmmac_hw_init(struct stmmac_priv *priv)
3963 {
3964 	struct mac_device_info *mac;
3965 
3966 	/* Identify the MAC HW device */
3967 	if (priv->plat->setup) {
3968 		mac = priv->plat->setup(priv);
3969 	} else if (priv->plat->has_gmac) {
3970 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3971 		mac = dwmac1000_setup(priv->ioaddr,
3972 				      priv->plat->multicast_filter_bins,
3973 				      priv->plat->unicast_filter_entries,
3974 				      &priv->synopsys_id);
3975 	} else if (priv->plat->has_gmac4) {
3976 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3977 		mac = dwmac4_setup(priv->ioaddr,
3978 				   priv->plat->multicast_filter_bins,
3979 				   priv->plat->unicast_filter_entries,
3980 				   &priv->synopsys_id);
3981 	} else {
3982 		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3983 	}
3984 	if (!mac)
3985 		return -ENOMEM;
3986 
3987 	priv->hw = mac;
3988 
3989 	/* dwmac-sun8i only work in chain mode */
3990 	if (priv->plat->has_sun8i)
3991 		chain_mode = 1;
3992 
3993 	/* To use the chained or ring mode */
3994 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3995 		priv->hw->mode = &dwmac4_ring_mode_ops;
3996 	} else {
3997 		if (chain_mode) {
3998 			priv->hw->mode = &chain_mode_ops;
3999 			dev_info(priv->device, "Chain mode enabled\n");
4000 			priv->mode = STMMAC_CHAIN_MODE;
4001 		} else {
4002 			priv->hw->mode = &ring_mode_ops;
4003 			dev_info(priv->device, "Ring mode enabled\n");
4004 			priv->mode = STMMAC_RING_MODE;
4005 		}
4006 	}
4007 
4008 	/* Get the HW capability (new GMAC newer than 3.50a) */
4009 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4010 	if (priv->hw_cap_support) {
4011 		dev_info(priv->device, "DMA HW capability register supported\n");
4012 
4013 		/* We can override some gmac/dma configuration fields: e.g.
4014 		 * enh_desc, tx_coe (e.g. that are passed through the
4015 		 * platform) with the values from the HW capability
4016 		 * register (if supported).
4017 		 */
4018 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4019 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4020 		priv->hw->pmt = priv->plat->pmt;
4021 
4022 		/* TXCOE doesn't work in thresh DMA mode */
4023 		if (priv->plat->force_thresh_dma_mode)
4024 			priv->plat->tx_coe = 0;
4025 		else
4026 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4027 
4028 		/* In case of GMAC4 rx_coe is from HW cap register. */
4029 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4030 
4031 		if (priv->dma_cap.rx_coe_type2)
4032 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4033 		else if (priv->dma_cap.rx_coe_type1)
4034 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4035 
4036 	} else {
4037 		dev_info(priv->device, "No HW DMA feature register supported\n");
4038 	}
4039 
4040 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
4041 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
4042 		priv->hw->desc = &dwmac4_desc_ops;
4043 	else
4044 		stmmac_selec_desc_mode(priv);
4045 
4046 	if (priv->plat->rx_coe) {
4047 		priv->hw->rx_csum = priv->plat->rx_coe;
4048 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4049 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4050 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4051 	}
4052 	if (priv->plat->tx_coe)
4053 		dev_info(priv->device, "TX Checksum insertion supported\n");
4054 
4055 	if (priv->plat->pmt) {
4056 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4057 		device_set_wakeup_capable(priv->device, 1);
4058 	}
4059 
4060 	if (priv->dma_cap.tsoen)
4061 		dev_info(priv->device, "TSO supported\n");
4062 
4063 	return 0;
4064 }
4065 
4066 /**
4067  * stmmac_dvr_probe
4068  * @device: device pointer
4069  * @plat_dat: platform data pointer
4070  * @res: stmmac resource pointer
4071  * Description: this is the main probe function used to
4072  * call the alloc_etherdev, allocate the priv structure.
4073  * Return:
4074  * returns 0 on success, otherwise errno.
4075  */
4076 int stmmac_dvr_probe(struct device *device,
4077 		     struct plat_stmmacenet_data *plat_dat,
4078 		     struct stmmac_resources *res)
4079 {
4080 	struct net_device *ndev = NULL;
4081 	struct stmmac_priv *priv;
4082 	int ret = 0;
4083 	u32 queue;
4084 
4085 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4086 				  MTL_MAX_TX_QUEUES,
4087 				  MTL_MAX_RX_QUEUES);
4088 	if (!ndev)
4089 		return -ENOMEM;
4090 
4091 	SET_NETDEV_DEV(ndev, device);
4092 
4093 	priv = netdev_priv(ndev);
4094 	priv->device = device;
4095 	priv->dev = ndev;
4096 
4097 	stmmac_set_ethtool_ops(ndev);
4098 	priv->pause = pause;
4099 	priv->plat = plat_dat;
4100 	priv->ioaddr = res->addr;
4101 	priv->dev->base_addr = (unsigned long)res->addr;
4102 
4103 	priv->dev->irq = res->irq;
4104 	priv->wol_irq = res->wol_irq;
4105 	priv->lpi_irq = res->lpi_irq;
4106 
4107 	if (res->mac)
4108 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4109 
4110 	dev_set_drvdata(device, priv->dev);
4111 
4112 	/* Verify driver arguments */
4113 	stmmac_verify_args();
4114 
4115 	/* Override with kernel parameters if supplied XXX CRS XXX
4116 	 * this needs to have multiple instances
4117 	 */
4118 	if ((phyaddr >= 0) && (phyaddr <= 31))
4119 		priv->plat->phy_addr = phyaddr;
4120 
4121 	if (priv->plat->stmmac_rst) {
4122 		ret = reset_control_assert(priv->plat->stmmac_rst);
4123 		reset_control_deassert(priv->plat->stmmac_rst);
4124 		/* Some reset controllers have only reset callback instead of
4125 		 * assert + deassert callbacks pair.
4126 		 */
4127 		if (ret == -ENOTSUPP)
4128 			reset_control_reset(priv->plat->stmmac_rst);
4129 	}
4130 
4131 	/* Init MAC and get the capabilities */
4132 	ret = stmmac_hw_init(priv);
4133 	if (ret)
4134 		goto error_hw_init;
4135 
4136 	/* Configure real RX and TX queues */
4137 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4138 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4139 
4140 	ndev->netdev_ops = &stmmac_netdev_ops;
4141 
4142 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4143 			    NETIF_F_RXCSUM;
4144 
4145 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4146 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4147 		priv->tso = true;
4148 		dev_info(priv->device, "TSO feature enabled\n");
4149 	}
4150 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4151 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4152 #ifdef STMMAC_VLAN_TAG_USED
4153 	/* Both mac100 and gmac support receive VLAN tag detection */
4154 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4155 #endif
4156 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4157 
4158 	/* MTU range: 46 - hw-specific max */
4159 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4160 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4161 		ndev->max_mtu = JUMBO_LEN;
4162 	else
4163 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4164 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4165 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4166 	 */
4167 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4168 	    (priv->plat->maxmtu >= ndev->min_mtu))
4169 		ndev->max_mtu = priv->plat->maxmtu;
4170 	else if (priv->plat->maxmtu < ndev->min_mtu)
4171 		dev_warn(priv->device,
4172 			 "%s: warning: maxmtu having invalid value (%d)\n",
4173 			 __func__, priv->plat->maxmtu);
4174 
4175 	if (flow_ctrl)
4176 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4177 
4178 	/* Rx Watchdog is available in the COREs newer than the 3.40.
4179 	 * In some case, for example on bugged HW this feature
4180 	 * has to be disable and this can be done by passing the
4181 	 * riwt_off field from the platform.
4182 	 */
4183 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4184 		priv->use_riwt = 1;
4185 		dev_info(priv->device,
4186 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4187 	}
4188 
4189 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4190 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4191 
4192 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4193 			       (8 * priv->plat->rx_queues_to_use));
4194 	}
4195 
4196 	spin_lock_init(&priv->lock);
4197 
4198 	/* If a specific clk_csr value is passed from the platform
4199 	 * this means that the CSR Clock Range selection cannot be
4200 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
4201 	 * set the MDC clock dynamically according to the csr actual
4202 	 * clock input.
4203 	 */
4204 	if (!priv->plat->clk_csr)
4205 		stmmac_clk_csr_set(priv);
4206 	else
4207 		priv->clk_csr = priv->plat->clk_csr;
4208 
4209 	stmmac_check_pcs_mode(priv);
4210 
4211 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4212 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4213 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4214 		/* MDIO bus Registration */
4215 		ret = stmmac_mdio_register(ndev);
4216 		if (ret < 0) {
4217 			dev_err(priv->device,
4218 				"%s: MDIO bus (id: %d) registration failed",
4219 				__func__, priv->plat->bus_id);
4220 			goto error_mdio_register;
4221 		}
4222 	}
4223 
4224 	ret = register_netdev(ndev);
4225 	if (ret) {
4226 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4227 			__func__, ret);
4228 		goto error_netdev_register;
4229 	}
4230 
4231 	return ret;
4232 
4233 error_netdev_register:
4234 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4235 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4236 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4237 		stmmac_mdio_unregister(ndev);
4238 error_mdio_register:
4239 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4240 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4241 
4242 		netif_napi_del(&rx_q->napi);
4243 	}
4244 error_hw_init:
4245 	free_netdev(ndev);
4246 
4247 	return ret;
4248 }
4249 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4250 
4251 /**
4252  * stmmac_dvr_remove
4253  * @dev: device pointer
4254  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4255  * changes the link status, releases the DMA descriptor rings.
4256  */
4257 int stmmac_dvr_remove(struct device *dev)
4258 {
4259 	struct net_device *ndev = dev_get_drvdata(dev);
4260 	struct stmmac_priv *priv = netdev_priv(ndev);
4261 
4262 	netdev_info(priv->dev, "%s: removing driver", __func__);
4263 
4264 	stmmac_stop_all_dma(priv);
4265 
4266 	priv->hw->mac->set_mac(priv->ioaddr, false);
4267 	netif_carrier_off(ndev);
4268 	unregister_netdev(ndev);
4269 	if (priv->plat->stmmac_rst)
4270 		reset_control_assert(priv->plat->stmmac_rst);
4271 	clk_disable_unprepare(priv->plat->pclk);
4272 	clk_disable_unprepare(priv->plat->stmmac_clk);
4273 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4274 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4275 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4276 		stmmac_mdio_unregister(ndev);
4277 	free_netdev(ndev);
4278 
4279 	return 0;
4280 }
4281 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4282 
4283 /**
4284  * stmmac_suspend - suspend callback
4285  * @dev: device pointer
4286  * Description: this is the function to suspend the device and it is called
4287  * by the platform driver to stop the network queue, release the resources,
4288  * program the PMT register (for WoL), clean and release driver resources.
4289  */
4290 int stmmac_suspend(struct device *dev)
4291 {
4292 	struct net_device *ndev = dev_get_drvdata(dev);
4293 	struct stmmac_priv *priv = netdev_priv(ndev);
4294 	unsigned long flags;
4295 
4296 	if (!ndev || !netif_running(ndev))
4297 		return 0;
4298 
4299 	if (ndev->phydev)
4300 		phy_stop(ndev->phydev);
4301 
4302 	spin_lock_irqsave(&priv->lock, flags);
4303 
4304 	netif_device_detach(ndev);
4305 	stmmac_stop_all_queues(priv);
4306 
4307 	stmmac_disable_all_queues(priv);
4308 
4309 	/* Stop TX/RX DMA */
4310 	stmmac_stop_all_dma(priv);
4311 
4312 	/* Enable Power down mode by programming the PMT regs */
4313 	if (device_may_wakeup(priv->device)) {
4314 		priv->hw->mac->pmt(priv->hw, priv->wolopts);
4315 		priv->irq_wake = 1;
4316 	} else {
4317 		priv->hw->mac->set_mac(priv->ioaddr, false);
4318 		pinctrl_pm_select_sleep_state(priv->device);
4319 		/* Disable clock in case of PWM is off */
4320 		clk_disable(priv->plat->pclk);
4321 		clk_disable(priv->plat->stmmac_clk);
4322 	}
4323 	spin_unlock_irqrestore(&priv->lock, flags);
4324 
4325 	priv->oldlink = false;
4326 	priv->speed = SPEED_UNKNOWN;
4327 	priv->oldduplex = DUPLEX_UNKNOWN;
4328 	return 0;
4329 }
4330 EXPORT_SYMBOL_GPL(stmmac_suspend);
4331 
4332 /**
4333  * stmmac_reset_queues_param - reset queue parameters
4334  * @dev: device pointer
4335  */
4336 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4337 {
4338 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4339 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4340 	u32 queue;
4341 
4342 	for (queue = 0; queue < rx_cnt; queue++) {
4343 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4344 
4345 		rx_q->cur_rx = 0;
4346 		rx_q->dirty_rx = 0;
4347 	}
4348 
4349 	for (queue = 0; queue < tx_cnt; queue++) {
4350 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4351 
4352 		tx_q->cur_tx = 0;
4353 		tx_q->dirty_tx = 0;
4354 	}
4355 }
4356 
4357 /**
4358  * stmmac_resume - resume callback
4359  * @dev: device pointer
4360  * Description: when resume this function is invoked to setup the DMA and CORE
4361  * in a usable state.
4362  */
4363 int stmmac_resume(struct device *dev)
4364 {
4365 	struct net_device *ndev = dev_get_drvdata(dev);
4366 	struct stmmac_priv *priv = netdev_priv(ndev);
4367 	unsigned long flags;
4368 
4369 	if (!netif_running(ndev))
4370 		return 0;
4371 
4372 	/* Power Down bit, into the PM register, is cleared
4373 	 * automatically as soon as a magic packet or a Wake-up frame
4374 	 * is received. Anyway, it's better to manually clear
4375 	 * this bit because it can generate problems while resuming
4376 	 * from another devices (e.g. serial console).
4377 	 */
4378 	if (device_may_wakeup(priv->device)) {
4379 		spin_lock_irqsave(&priv->lock, flags);
4380 		priv->hw->mac->pmt(priv->hw, 0);
4381 		spin_unlock_irqrestore(&priv->lock, flags);
4382 		priv->irq_wake = 0;
4383 	} else {
4384 		pinctrl_pm_select_default_state(priv->device);
4385 		/* enable the clk previously disabled */
4386 		clk_enable(priv->plat->stmmac_clk);
4387 		clk_enable(priv->plat->pclk);
4388 		/* reset the phy so that it's ready */
4389 		if (priv->mii)
4390 			stmmac_mdio_reset(priv->mii);
4391 	}
4392 
4393 	netif_device_attach(ndev);
4394 
4395 	spin_lock_irqsave(&priv->lock, flags);
4396 
4397 	stmmac_reset_queues_param(priv);
4398 
4399 	/* reset private mss value to force mss context settings at
4400 	 * next tso xmit (only used for gmac4).
4401 	 */
4402 	priv->mss = 0;
4403 
4404 	stmmac_clear_descriptors(priv);
4405 
4406 	stmmac_hw_setup(ndev, false);
4407 	stmmac_init_tx_coalesce(priv);
4408 	stmmac_set_rx_mode(ndev);
4409 
4410 	stmmac_enable_all_queues(priv);
4411 
4412 	stmmac_start_all_queues(priv);
4413 
4414 	spin_unlock_irqrestore(&priv->lock, flags);
4415 
4416 	if (ndev->phydev)
4417 		phy_start(ndev->phydev);
4418 
4419 	return 0;
4420 }
4421 EXPORT_SYMBOL_GPL(stmmac_resume);
4422 
4423 #ifndef MODULE
4424 static int __init stmmac_cmdline_opt(char *str)
4425 {
4426 	char *opt;
4427 
4428 	if (!str || !*str)
4429 		return -EINVAL;
4430 	while ((opt = strsep(&str, ",")) != NULL) {
4431 		if (!strncmp(opt, "debug:", 6)) {
4432 			if (kstrtoint(opt + 6, 0, &debug))
4433 				goto err;
4434 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4435 			if (kstrtoint(opt + 8, 0, &phyaddr))
4436 				goto err;
4437 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4438 			if (kstrtoint(opt + 7, 0, &buf_sz))
4439 				goto err;
4440 		} else if (!strncmp(opt, "tc:", 3)) {
4441 			if (kstrtoint(opt + 3, 0, &tc))
4442 				goto err;
4443 		} else if (!strncmp(opt, "watchdog:", 9)) {
4444 			if (kstrtoint(opt + 9, 0, &watchdog))
4445 				goto err;
4446 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4447 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4448 				goto err;
4449 		} else if (!strncmp(opt, "pause:", 6)) {
4450 			if (kstrtoint(opt + 6, 0, &pause))
4451 				goto err;
4452 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4453 			if (kstrtoint(opt + 10, 0, &eee_timer))
4454 				goto err;
4455 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4456 			if (kstrtoint(opt + 11, 0, &chain_mode))
4457 				goto err;
4458 		}
4459 	}
4460 	return 0;
4461 
4462 err:
4463 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4464 	return -EINVAL;
4465 }
4466 
4467 __setup("stmmaceth=", stmmac_cmdline_opt);
4468 #endif /* MODULE */
4469 
4470 static int __init stmmac_init(void)
4471 {
4472 #ifdef CONFIG_DEBUG_FS
4473 	/* Create debugfs main directory if it doesn't exist yet */
4474 	if (!stmmac_fs_dir) {
4475 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4476 
4477 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4478 			pr_err("ERROR %s, debugfs create directory failed\n",
4479 			       STMMAC_RESOURCE_NAME);
4480 
4481 			return -ENOMEM;
4482 		}
4483 	}
4484 #endif
4485 
4486 	return 0;
4487 }
4488 
4489 static void __exit stmmac_exit(void)
4490 {
4491 #ifdef CONFIG_DEBUG_FS
4492 	debugfs_remove_recursive(stmmac_fs_dir);
4493 #endif
4494 }
4495 
4496 module_init(stmmac_init)
4497 module_exit(stmmac_exit)
4498 
4499 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4500 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4501 MODULE_LICENSE("GPL");
4502